-rw-r--r--  context/data/scite/context/lexers/data/scite-context-data-metapost.lua | 2
-rw-r--r--  context/data/scite/context/scite-context-data-metapost.properties | 11
-rw-r--r--  doc/context/scripts/mkiv/mtx-pdf.html | 1
-rw-r--r--  doc/context/scripts/mkiv/mtx-pdf.man | 3
-rw-r--r--  doc/context/scripts/mkiv/mtx-pdf.xml | 1
-rw-r--r--  doc/context/sources/general/manuals/luametatex/luametatex-introduction.tex | 2
-rw-r--r--  doc/context/sources/general/manuals/luametatex/luametatex-nodes.tex | 4
-rw-r--r--  doc/context/sources/general/manuals/luametatex/luametatex-pdf.tex | 10
-rw-r--r--  doc/context/sources/general/manuals/musings/musings-hownotto.tex | 76
-rw-r--r--  doc/context/sources/general/manuals/musings/musings.tex | 1
-rw-r--r--  metapost/context/base/mpxl/mp-base.mpxl | 6
-rw-r--r--  metapost/context/base/mpxl/mp-xbox.mpxl | 5
-rw-r--r--  scripts/context/lua/mtx-context.lua | 22
-rw-r--r--  scripts/context/lua/mtx-fonts.lua | 11
-rw-r--r--  scripts/context/lua/mtx-pdf.lua | 109
-rw-r--r--  scripts/context/lua/mtxrun.lua | 109
-rw-r--r--  scripts/context/stubs/mswin/mtxrun.lua | 109
-rw-r--r--  scripts/context/stubs/unix/mtxrun | 109
-rw-r--r--  scripts/context/stubs/win64/mtxrun.lua | 109
-rw-r--r--  source/luametatex/CMakeLists.txt | 1
-rw-r--r--  source/luametatex/build.sh | 12
-rw-r--r--  source/luametatex/cmake/mimalloc.cmake | 25
-rw-r--r--  source/luametatex/source/libraries/mimalloc/CMakeLists.txt | 120
-rw-r--r--  source/luametatex/source/libraries/mimalloc/cmake/mimalloc-config-version.cmake | 4
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc-track.h | 62
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc.h | 16
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h (renamed from source/luametatex/source/libraries/mimalloc/include/mimalloc-atomic.h) | 11
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h (renamed from source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h) | 257
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h | 311
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h | 147
-rw-r--r--  source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h (renamed from source/luametatex/source/libraries/mimalloc/include/mimalloc-types.h) | 34
-rw-r--r--  source/luametatex/source/libraries/mimalloc/readme.md | 157
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/alloc-aligned.c | 61
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/alloc-override.c | 2
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/alloc-posix.c | 4
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/alloc.c | 176
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/arena.c | 29
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/bitmap.c | 2
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/bitmap.h | 2
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/heap.c | 46
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/init.c | 122
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/options.c | 185
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/os.c | 1077
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/page.c | 23
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/osx/alloc-override-zone.c (renamed from source/luametatex/source/libraries/mimalloc/src/alloc-override-osx.c) | 4
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/osx/prim.c | 9
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/prim.c | 24
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/readme.md | 9
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/unix/prim.c | 838
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/wasi/prim.c | 265
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/windows/etw-mimalloc.wprp | 61
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.h | 905
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.man | bin 0 -> 3926 bytes
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/windows/prim.c | 607
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/prim/windows/readme.md | 17
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/random.c | 162
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/region.c | 27
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/segment-cache.c | 100
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/segment.c | 50
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/static.c | 29
-rw-r--r--  source/luametatex/source/libraries/mimalloc/src/stats.c | 228
-rw-r--r--  source/luametatex/source/lua/lmtinterface.h | 9
-rw-r--r--  source/luametatex/source/lua/lmttexlib.c | 15
-rw-r--r--  source/luametatex/source/luacore/lua54/src/lcode.c | 75
-rw-r--r--  source/luametatex/source/luacore/lua54/src/ldebug.c | 29
-rw-r--r--  source/luametatex/source/luacore/lua54/src/ldump.c | 8
-rw-r--r--  source/luametatex/source/luacore/lua54/src/lundump.c | 2
-rw-r--r--  source/luametatex/source/luametatex.h | 2
-rw-r--r--  source/luametatex/source/luarest/lmtfilelib.c | 10
-rw-r--r--  source/luametatex/source/tex/texcommands.c | 3
-rw-r--r--  source/luametatex/source/tex/texdumpdata.h | 2
-rw-r--r--  source/luametatex/source/tex/texequivalents.h | 4
-rw-r--r--  source/luametatex/source/tex/texfont.c | 14
-rw-r--r--  source/luametatex/source/tex/texfont.h | 1
-rw-r--r--  source/luametatex/source/tex/texmaincontrol.c | 9
-rw-r--r--  source/luametatex/source/tex/texmath.c | 6
-rw-r--r--  source/luametatex/source/tex/texmlist.c | 117
-rw-r--r--  source/luametatex/source/tex/texnodes.c | 2
-rw-r--r--  source/luametatex/source/tex/texscanning.c | 72
-rw-r--r--  source/luametatex/source/tex/textypes.h | 41
-rw-r--r--  tex/context/base/mkii/cont-new.mkii | 2
-rw-r--r--  tex/context/base/mkii/context.mkii | 2
-rw-r--r--  tex/context/base/mkiv/anch-pos.lua | 9
-rw-r--r--  tex/context/base/mkiv/attr-ini.lua | 18
-rw-r--r--  tex/context/base/mkiv/bibl-bib.lua | 8
-rw-r--r--  tex/context/base/mkiv/char-def.lua | 1
-rw-r--r--  tex/context/base/mkiv/char-ini.lua | 44
-rw-r--r--  tex/context/base/mkiv/char-tex.lua | 44
-rw-r--r--  tex/context/base/mkiv/char-utf.lua | 34
-rw-r--r--  tex/context/base/mkiv/chem-ini.lua | 8
-rw-r--r--  tex/context/base/mkiv/cont-new.mkiv | 2
-rw-r--r--  tex/context/base/mkiv/context.mkiv | 2
-rw-r--r--  tex/context/base/mkiv/core-con.lua | 10
-rw-r--r--  tex/context/base/mkiv/core-dat.lua | 18
-rw-r--r--  tex/context/base/mkiv/core-two.lua | 9
-rw-r--r--  tex/context/base/mkiv/core-uti.lua | 28
-rw-r--r--  tex/context/base/mkiv/data-con.lua | 24
-rw-r--r--  tex/context/base/mkiv/data-res.lua | 33
-rw-r--r--  tex/context/base/mkiv/data-tar.lua | 12
-rw-r--r--  tex/context/base/mkiv/data-tmp.lua | 23
-rw-r--r--  tex/context/base/mkiv/data-zip.lua | 18
-rw-r--r--  tex/context/base/mkiv/file-ini.lua | 7
-rw-r--r--  tex/context/base/mkiv/file-mod.lua | 16
-rw-r--r--  tex/context/base/mkiv/font-afk.lua | 8
-rw-r--r--  tex/context/base/mkiv/font-con.lua | 65
-rw-r--r--  tex/context/base/mkiv/font-ctx.lua | 39
-rw-r--r--  tex/context/base/mkiv/font-def.lua | 92
-rw-r--r--  tex/context/base/mkiv/font-enc.lua | 42
-rw-r--r--  tex/context/base/mkiv/font-fbk.lua | 4
-rw-r--r--  tex/context/base/mkiv/font-imp-tex.lua | 47
-rw-r--r--  tex/context/base/mkiv/font-ini.lua | 4
-rw-r--r--  tex/context/base/mkiv/font-log.lua | 9
-rw-r--r--  tex/context/base/mkiv/font-nod.lua | 5
-rw-r--r--  tex/context/base/mkiv/font-one.lua | 90
-rw-r--r--  tex/context/base/mkiv/font-onr.lua | 40
-rw-r--r--  tex/context/base/mkiv/font-ota.lua | 6
-rw-r--r--  tex/context/base/mkiv/font-ots.lua | 225
-rw-r--r--  tex/context/base/mkiv/font-syn.lua | 31
-rw-r--r--  tex/context/base/mkiv/font-tfm.lua | 29
-rw-r--r--  tex/context/base/mkiv/font-trt.lua | 8
-rw-r--r--  tex/context/base/mkiv/font-vir.lua | 11
-rw-r--r--  tex/context/base/mkiv/l-dir.lua | 17
-rw-r--r--  tex/context/base/mkiv/lang-url.lua | 10
-rw-r--r--  tex/context/base/mkiv/luat-cbk.lua | 132
-rw-r--r--  tex/context/base/mkiv/luat-ini.lua | 8
-rw-r--r--  tex/context/base/mkiv/lxml-aux.lua | 18
-rw-r--r--  tex/context/base/mkiv/lxml-ent.lua | 12
-rw-r--r--  tex/context/base/mkiv/lxml-lpt.lua | 75
-rw-r--r--  tex/context/base/mkiv/lxml-mis.lua | 11
-rw-r--r--  tex/context/base/mkiv/lxml-tab.lua | 254
-rw-r--r--  tex/context/base/mkiv/math-map.lua | 32
-rw-r--r--  tex/context/base/mkiv/meta-fun.lua | 22
-rw-r--r--  tex/context/base/mkiv/mlib-fio.lua | 12
-rw-r--r--  tex/context/base/mkiv/mlib-run.lua | 28
-rw-r--r--  tex/context/base/mkiv/mult-mps.lua | 2
-rw-r--r--  tex/context/base/mkiv/node-ini.lua | 64
-rw-r--r--  tex/context/base/mkiv/node-res.lua | 5
-rw-r--r--  tex/context/base/mkiv/node-tra.lua | 6
-rw-r--r--  tex/context/base/mkiv/pack-obj.lua | 6
-rw-r--r--  tex/context/base/mkiv/pack-rul.lua | 4
-rw-r--r--  tex/context/base/mkiv/publ-dat.lua | 6
-rw-r--r--  tex/context/base/mkiv/publ-ini.lua | 3
-rw-r--r--  tex/context/base/mkiv/publ-ini.mkiv | 2
-rw-r--r--  tex/context/base/mkiv/regi-ini.lua | 11
-rw-r--r--  tex/context/base/mkiv/sort-ini.lua | 82
-rw-r--r--  tex/context/base/mkiv/status-files.pdf | bin 24657 -> 24625 bytes
-rw-r--r--  tex/context/base/mkiv/status-lua.pdf | bin 267358 -> 267345 bytes
-rw-r--r--  tex/context/base/mkiv/syst-con.lua | 7
-rw-r--r--  tex/context/base/mkiv/syst-ini.mkiv | 3
-rw-r--r--  tex/context/base/mkiv/tabl-tbl.mkiv | 3
-rw-r--r--  tex/context/base/mkiv/trac-lmx.lua | 3
-rw-r--r--  tex/context/base/mkiv/util-dim.lua | 234
-rw-r--r--  tex/context/base/mkiv/util-fmt.lua | 70
-rw-r--r--  tex/context/base/mkiv/util-seq.lua | 14
-rw-r--r--  tex/context/base/mkxl/attr-ini.lmt | 18
-rw-r--r--  tex/context/base/mkxl/char-tex.lmt | 68
-rw-r--r--  tex/context/base/mkxl/cont-new.mkxl | 2
-rw-r--r--  tex/context/base/mkxl/context.mkxl | 106
-rw-r--r--  tex/context/base/mkxl/core-dat.lmt | 225
-rw-r--r--  tex/context/base/mkxl/core-dat.mkxl | 50
-rw-r--r--  tex/context/base/mkxl/core-pag.lmt | 160
-rw-r--r--  tex/context/base/mkxl/core-pag.mkxl | 68
-rw-r--r--  tex/context/base/mkxl/core-two.lmt | 210
-rw-r--r--  tex/context/base/mkxl/core-two.mkxl | 194
-rw-r--r--  tex/context/base/mkxl/core-uti.lmt | 34
-rw-r--r--  tex/context/base/mkxl/file-mod.lmt | 16
-rw-r--r--  tex/context/base/mkxl/font-con.lmt | 75
-rw-r--r--  tex/context/base/mkxl/font-ctx.lmt | 26
-rw-r--r--  tex/context/base/mkxl/font-def.lmt | 88
-rw-r--r--  tex/context/base/mkxl/font-fbk.lmt | 4
-rw-r--r--  tex/context/base/mkxl/font-fil.mklx | 2
-rw-r--r--  tex/context/base/mkxl/font-ini.lmt | 4
-rw-r--r--  tex/context/base/mkxl/font-ini.mklx | 10
-rw-r--r--  tex/context/base/mkxl/font-mat.mklx | 20
-rw-r--r--  tex/context/base/mkxl/font-one.lmt | 90
-rw-r--r--  tex/context/base/mkxl/font-onr.lmt | 42
-rw-r--r--  tex/context/base/mkxl/font-ota.lmt | 6
-rw-r--r--  tex/context/base/mkxl/font-ots.lmt | 223
-rw-r--r--  tex/context/base/mkxl/font-tfm.lmt | 30
-rw-r--r--  tex/context/base/mkxl/lang-url.lmt | 10
-rw-r--r--  tex/context/base/mkxl/lpdf-ano.lmt | 1
-rw-r--r--  tex/context/base/mkxl/lpdf-pde.lmt | 6
-rw-r--r--  tex/context/base/mkxl/luat-cbk.lmt | 29
-rw-r--r--  tex/context/base/mkxl/luat-cod.mkxl | 2
-rw-r--r--  tex/context/base/mkxl/luat-ini.lmt | 8
-rw-r--r--  tex/context/base/mkxl/math-act.lmt | 267
-rw-r--r--  tex/context/base/mkxl/math-ali.mkxl | 78
-rw-r--r--  tex/context/base/mkxl/math-fnt.lmt | 8
-rw-r--r--  tex/context/base/mkxl/math-frc.mkxl | 8
-rw-r--r--  tex/context/base/mkxl/math-ini.mkxl | 123
-rw-r--r--  tex/context/base/mkxl/math-map.lmt | 32
-rw-r--r--  tex/context/base/mkxl/math-noa.lmt | 64
-rw-r--r--  tex/context/base/mkxl/math-rad.mklx | 6
-rw-r--r--  tex/context/base/mkxl/math-spa.lmt | 28
-rw-r--r--  tex/context/base/mkxl/math-stc.mklx | 22
-rw-r--r--  tex/context/base/mkxl/math-twk.mkxl | 7
-rw-r--r--  tex/context/base/mkxl/math-vfu.lmt | 120
-rw-r--r--  tex/context/base/mkxl/meta-imp-newmath.mkxl | 76
-rw-r--r--  tex/context/base/mkxl/mlib-run.lmt | 32
-rw-r--r--  tex/context/base/mkxl/node-ini.lmt | 10
-rw-r--r--  tex/context/base/mkxl/node-res.lmt | 5
-rw-r--r--  tex/context/base/mkxl/node-tra.lmt | 6
-rw-r--r--  tex/context/base/mkxl/pack-obj.lmt | 6
-rw-r--r--  tex/context/base/mkxl/pack-rul.lmt | 4
-rw-r--r--  tex/context/base/mkxl/publ-ini.mkxl | 2
-rw-r--r--  tex/context/base/mkxl/regi-ini.lmt | 7
-rw-r--r--  tex/context/base/mkxl/scrn-wid.lmt | 38
-rw-r--r--  tex/context/base/mkxl/spac-pag.mkxl | 1
-rw-r--r--  tex/context/base/mkxl/strc-itm.lmt | 59
-rw-r--r--  tex/context/base/mkxl/strc-lst.lmt | 2
-rw-r--r--  tex/context/base/mkxl/strc-ref.lmt | 2
-rw-r--r--  tex/context/base/mkxl/strc-reg.lmt | 59
-rw-r--r--  tex/context/base/mkxl/strc-reg.mkxl | 31
-rw-r--r--  tex/context/base/mkxl/tabl-ntb.mkxl | 17
-rw-r--r--  tex/context/base/mkxl/tabl-tbl.mkxl | 51
-rw-r--r--  tex/context/base/mkxl/trac-vis.lmt | 2
-rw-r--r--  tex/context/base/mkxl/typo-cln.lmt | 109
-rw-r--r--  tex/context/base/mkxl/typo-cln.mkxl | 2
-rw-r--r--  tex/context/base/mkxl/typo-dha.lmt | 481
-rw-r--r--  tex/context/base/mkxl/typo-dir.mkxl | 4
-rw-r--r--  tex/context/base/mkxl/typo-prc.mklx | 2
-rw-r--r--  tex/context/fonts/mkiv/bonum-math.lfg | 7
-rw-r--r--  tex/context/fonts/mkiv/cambria-math.lfg | 20
-rw-r--r--  tex/context/fonts/mkiv/common-math-jmn.lfg | 15
-rw-r--r--  tex/context/fonts/mkiv/concrete-math.lfg | 7
-rw-r--r--  tex/context/fonts/mkiv/dejavu-math.lfg | 7
-rw-r--r--  tex/context/fonts/mkiv/ebgaramond-math.lfg | 25
-rw-r--r--  tex/context/fonts/mkiv/erewhon-math.lfg | 22
-rw-r--r--  tex/context/fonts/mkiv/kpfonts-math.lfg | 22
-rw-r--r--  tex/context/fonts/mkiv/libertinus-math.lfg | 23
-rw-r--r--  tex/context/fonts/mkiv/lucida-math.lfg | 37
-rw-r--r--  tex/context/fonts/mkiv/modern-math.lfg | 9
-rw-r--r--  tex/context/fonts/mkiv/newcomputermodern-math.lfg | 9
-rw-r--r--  tex/context/fonts/mkiv/pagella-math.lfg | 9
-rw-r--r--  tex/context/fonts/mkiv/schola-math.lfg | 7
-rw-r--r--  tex/context/fonts/mkiv/stixtwo-math.lfg | 23
-rw-r--r--  tex/context/fonts/mkiv/termes-math.lfg | 7
-rw-r--r--  tex/context/fonts/mkiv/type-imp-antykwa.mkiv | 9
-rw-r--r--  tex/context/fonts/mkiv/type-imp-concrete.mkiv | 2
-rw-r--r--  tex/context/fonts/mkiv/type-imp-ebgaramond.mkiv | 10
-rw-r--r--  tex/context/fonts/mkiv/type-imp-iwona.mkiv | 8
-rw-r--r--  tex/context/fonts/mkiv/type-imp-kurier.mkiv | 8
-rw-r--r--  tex/context/fonts/mkiv/xcharter-math.lfg | 22
-rw-r--r--  tex/context/modules/mkiv/m-tikz.mkiv | 1
-rw-r--r--  tex/context/modules/mkiv/s-abbreviations-logos.tex | 15
-rw-r--r--  tex/context/modules/mkiv/x-asciimath.lua | 17
-rw-r--r--  tex/context/modules/mkxl/m-tikz.mkxl | 1
-rw-r--r--  tex/generic/context/luatex/luatex-fonts-merged.lua | 30
-rw-r--r--  tex/generic/context/luatex/luatex-mplib.lua | 70
-rw-r--r--  tex/generic/context/luatex/luatex-preprocessor.lua | 12
-rw-r--r--  tex/latex/context/ppchtex/m-ch-de.sty | 19
-rw-r--r--  tex/latex/context/ppchtex/m-ch-en.sty | 19
-rw-r--r--  tex/latex/context/ppchtex/m-ch-nl.sty | 19
-rw-r--r--  tex/latex/context/ppchtex/m-pictex.sty | 5
254 files changed, 8722 insertions, 5017 deletions
diff --git a/context/data/scite/context/lexers/data/scite-context-data-metapost.lua b/context/data/scite/context/lexers/data/scite-context-data-metapost.lua
index 53271d674..f06e35969 100644
--- a/context/data/scite/context/lexers/data/scite-context-data-metapost.lua
+++ b/context/data/scite/context/lexers/data/scite-context-data-metapost.lua
@@ -1,5 +1,5 @@
return {
- ["commands"]={ "on", "off", "interpath", "upto", "downto", "beginfig", "endfig", "beginglyph", "endglyph", "beginfont", "endfont", "rotatedaround", "reflectedabout", "arrowhead", "currentpen", "currentpicture", "cuttings", "defaultfont", "extra_beginfig", "extra_endfig", "down", "evenly", "fullcircle", "halfcircle", "identity", "in", "left", "pensquare", "penrazor", "penspec", "origin", "quartercircle", "right", "unitsquare", "up", "withdots", "abs", "bbox", "ceiling", "cutafter", "cutbefore", "dir", "directionpoint", "div", "dotprod", "intersectionpoint", "inverse", "mod", "round", "unitvector", "whatever", "cutdraw", "draw", "drawarrow", "drawdblarrow", "fill", "filldraw", "drawdot", "loggingall", "interact", "tracingall", "tracingnone", "pickup", "undraw", "unfill", "unfilldraw", "buildcycle", "dashpattern", "decr", "dotlabel", "dotlabels", "drawoptions", "incr", "label", "labels", "max", "min", "thelabel", "z", "beginchar", "blacker", "capsule_end", "change_width", "define_blacker_pixels", "define_corrected_pixels", "define_good_x_pixels", "define_good_y_pixels", "define_horizontal_corrected_pixels", "define_pixels", "define_whole_blacker_pixels", "define_whole_pixels", "define_whole_vertical_blacker_pixels", "define_whole_vertical_pixels", "endchar", "extra_beginchar", "extra_endchar", "extra_setup", "font_coding_scheme", "clearxy", "clearit", "clearpen", "shipit", "font_extra_space", "exitunless", "relax", "hide", "gobble", "gobbled", "stop", "blankpicture", "counterclockwise", "tensepath", "takepower", "direction", "softjoin", "makelabel", "rotatedabout", "flex", "superellipse", "image", "nullpen", "savepen", "clearpen", "penpos", "penlabels", "range", "thru", "z", "laboff", "bye", "red", "green", "blue", "cyan", "magenta", "yellow", "black", "white", "background", "mm", "pt", "dd", "bp", "cm", "pc", "cc", "in", "dk", "triplet", "quadruplet", "totransform", "bymatrix", "closedcurve", "closedlines", "primitive", "permanent", "immutable", "mutable", "frozen", "showproperty", "showhashentry", "top", "bot", "lft", "rt", "ulft", "urt", "llft", "lrt" },
+ ["commands"]={ "on", "off", "interpath", "upto", "downto", "beginfig", "endfig", "beginglyph", "endglyph", "beginfont", "endfont", "rotatedaround", "reflectedabout", "arrowhead", "currentpen", "currentpicture", "cuttings", "defaultfont", "extra_beginfig", "extra_endfig", "down", "evenly", "fullcircle", "halfcircle", "identity", "in", "left", "pensquare", "penrazor", "penspec", "origin", "quartercircle", "right", "unitsquare", "up", "withdots", "abs", "bbox", "ceiling", "cutafter", "cutbefore", "dir", "directionpoint", "div", "dotprod", "intersectionpoint", "inverse", "mod", "round", "unitvector", "whatever", "cutdraw", "draw", "drawarrow", "drawdblarrow", "fill", "filldraw", "drawdot", "loggingall", "interact", "tracingall", "tracingnone", "pickup", "undraw", "unfill", "unfilldraw", "buildcycle", "dashpattern", "decr", "dotlabel", "dotlabels", "drawoptions", "incr", "label", "labels", "max", "min", "thelabel", "z", "beginchar", "blacker", "capsule_end", "change_width", "define_blacker_pixels", "define_corrected_pixels", "define_good_x_pixels", "define_good_y_pixels", "define_horizontal_corrected_pixels", "define_pixels", "define_whole_blacker_pixels", "define_whole_pixels", "define_whole_vertical_blacker_pixels", "define_whole_vertical_pixels", "endchar", "extra_beginchar", "extra_endchar", "extra_setup", "font_coding_scheme", "clearxy", "clearit", "clearpen", "shipit", "font_extra_space", "exitunless", "relax", "hide", "gobble", "gobbled", "stop", "blankpicture", "counterclockwise", "tensepath", "takepower", "direction", "softjoin", "makelabel", "rotatedabout", "flex", "superellipse", "image", "nullpen", "savepen", "clearpen", "penpos", "penlabels", "range", "thru", "z", "laboff", "bye", "red", "green", "blue", "cyan", "magenta", "yellow", "black", "white", "background", "mm", "pt", "dd", "bp", "cm", "pc", "cc", "in", "dk", "es", "ts", "triplet", "quadruplet", "totransform", "bymatrix", "closedcurve", "closedlines", "primitive", "permanent", "immutable", "mutable", "frozen", "showproperty", "showhashentry", "top", "bot", "lft", "rt", "ulft", "urt", "llft", "lrt" },
["disabled"]={ "verbatimtex", "troffmode" },
["internals"]={ "mitered", "rounded", "beveled", "butt", "squared", "eps", "epsilon", "infinity", "bboxmargin", "ahlength", "ahangle", "labeloffset", "dotlabeldiam", "defaultpen", "defaultscale", "join_radius", "charscale", "inicatcoderegime", "texcatcoderegime", "luacatcoderegime", "notcatcoderegime", "vrbcatcoderegime", "prtcatcoderegime", "ctxcatcoderegime", "txtcatcoderegime", "catcoderegime", "ditto", "EOF", "pen_lft", "pen_rt", "pen_top", "pen_bot" },
["metafont"]={ "autorounding", "beginchar", "blacker", "boundarychar", "capsule_def", "capsule_end", "change_width", "chardp", "chardx", "chardy", "charexists", "charext", "charht", "charic", "charlist", "charwd", "cull", "cullit", "currenttransform", "currentwindow", "define_blacker_pixels", "define_corrected_pixels", "define_good_x_pixels", "define_good_y_pixels", "define_horizontal_corrected_pixels", "define_pixels", "define_whole_blacker_pixels", "define_whole_pixels", "define_whole_vertical_blacker_pixels", "define_whole_vertical_pixels", "designsize", "display", "displaying", "endchar", "extensible", "extra_beginchar", "extra_endchar", "extra_setup", "fillin", "font_coding_scheme", "font_extra_space", "font_identifier", "font_normal_shrink", "font_normal_space", "font_normal_stretch", "font_quad", "font_size", "font_slant", "font_x_height", "fontdimen", "fontmaking", "gfcorners", "granularity", "grayfont", "headerbyte", "hppp", "hround", "imagerules", "italcorr", "kern", "labelfont", "ligtable", "lowres_fix", "makebox", "makegrid", "maketicks", "mode_def", "mode_setup", "nodisplays", "notransforms", "numspecial", "o_correction", "openit", "openwindow", "pixels_per_inch", "proofing", "proofoffset", "proofrule", "proofrulethickness", "rulepen", "screenchars", "screenrule", "screenstrokes", "screen_cols", "screen_rows", "showit", "slantfont", "smode", "smoothing", "titlefont", "totalweight", "tracingedges", "tracingpens", "turningcheck", "unitpixel", "vppp", "vround", "xoffset", "yoffset" },
diff --git a/context/data/scite/context/scite-context-data-metapost.properties b/context/data/scite/context/scite-context-data-metapost.properties
index 0dff1ccd5..badf73828 100644
--- a/context/data/scite/context/scite-context-data-metapost.properties
+++ b/context/data/scite/context/scite-context-data-metapost.properties
@@ -28,11 +28,12 @@ penlabels range thru z laboff \
bye red green blue cyan \
magenta yellow black white background \
mm pt dd bp cm \
-pc cc in dk triplet \
-quadruplet totransform bymatrix closedcurve closedlines \
-primitive permanent immutable mutable frozen \
-showproperty showhashentry top bot lft \
-rt ulft urt llft lrt
+pc cc in dk es \
+ts triplet quadruplet totransform bymatrix \
+closedcurve closedlines primitive permanent immutable \
+mutable frozen showproperty showhashentry top \
+bot lft rt ulft urt \
+llft lrt
keywordclass.metapost.disabled=\
verbatimtex troffmode
diff --git a/doc/context/scripts/mkiv/mtx-pdf.html b/doc/context/scripts/mkiv/mtx-pdf.html
index 2932b942c..52c6b5608 100644
--- a/doc/context/scripts/mkiv/mtx-pdf.html
+++ b/doc/context/scripts/mkiv/mtx-pdf.html
@@ -44,6 +44,7 @@
<tr><th>--pretty</th><td></td><td>replace newlines in metadata</td></tr>
<tr><th>--fonts</th><td></td><td>show used fonts (--detail)</td></tr>
<tr><th>--object</th><td></td><td>show object"/></td></tr>
+ <tr><th>--linkjs</th><td></td><td>show links"/></td></tr>
<tr><th/><td/><td/></tr>
</table>
<br/>
diff --git a/doc/context/scripts/mkiv/mtx-pdf.man b/doc/context/scripts/mkiv/mtx-pdf.man
index 54fdb6d14..a8fbade52 100644
--- a/doc/context/scripts/mkiv/mtx-pdf.man
+++ b/doc/context/scripts/mkiv/mtx-pdf.man
@@ -25,6 +25,9 @@ show used fonts (--detail)
.TP
.B --object
show object"/>
+.TP
+.B --linkjs
+show links"/>
.SH AUTHOR
More information about ConTeXt and the tools that come with it can be found at:
diff --git a/doc/context/scripts/mkiv/mtx-pdf.xml b/doc/context/scripts/mkiv/mtx-pdf.xml
index 08b04d85e..c4779d876 100644
--- a/doc/context/scripts/mkiv/mtx-pdf.xml
+++ b/doc/context/scripts/mkiv/mtx-pdf.xml
@@ -13,6 +13,7 @@
<flag name="pretty"><short>replace newlines in metadata</short></flag>
<flag name="fonts"><short>show used fonts (<ref name="detail)"/></short></flag>
<flag name="object"><short>show object"/></short></flag>
+ <flag name="linkjs"><short>show links"/></short></flag>
</subcategory>
<subcategory>
<example><command>mtxrun --script pdf --info foo.pdf</command></example>
diff --git a/doc/context/sources/general/manuals/luametatex/luametatex-introduction.tex b/doc/context/sources/general/manuals/luametatex/luametatex-introduction.tex
index 08b7d94bc..142f18c32 100644
--- a/doc/context/sources/general/manuals/luametatex/luametatex-introduction.tex
+++ b/doc/context/sources/general/manuals/luametatex/luametatex-introduction.tex
@@ -130,7 +130,7 @@ Hans Hagen
\NC \LUAMETATEX\ Version \EQ \currentdate \NC \NR
\NC \CONTEXT\ Version \EQ LMTX \contextversion \NC \NR
\NC \LUATEX\ Team \EQ Hans Hagen, Hartmut Henkel, Taco Hoekwater, Luigi Scarso \NC \NR
-\NC \LUAMETATEX\ Team \EQ Hans Hagen, Alan Braslau, Mojca Miklavec and Wolfgang Schuster, Mikael Sundqvist \NC \NR
+\NC \LUAMETATEX\ Team \EQ Hans Hagen, Alan Braslau, Mojca Miklavec, Wolfgang Schuster and Mikael Sundqvist \NC \NR
\NC resources and info at \EQ www.contextgarden.net\space\vl\space
www.pragma-ade.nl\space\vl\space
www.luametatex.org\space\vl\space
diff --git a/doc/context/sources/general/manuals/luametatex/luametatex-nodes.tex b/doc/context/sources/general/manuals/luametatex/luametatex-nodes.tex
index c3ac0a294..bccb899c0 100644
--- a/doc/context/sources/general/manuals/luametatex/luametatex-nodes.tex
+++ b/doc/context/sources/general/manuals/luametatex/luametatex-nodes.tex
@@ -1381,14 +1381,14 @@ The \type {traversechar} iterator loops over the \nod {glyph} nodes in a list.
Only nodes with a subtype less than 256 are seen.
\startfunctioncall
-<direct> n, font, char = node.direct.traversechar(<direct> n)
+<direct> n, char, font = node.direct.traversechar(<direct> n)
\stopfunctioncall
The \type{traverseglyph} iterator loops over a list and returns the list and
filters all glyphs:
\startfunctioncall
-<direct> n, font, char = node.traverseglyph(<direct> n)
+<direct> n, char, font = node.traverseglyph(<direct> n)
\stopfunctioncall
These functions are only available for direct nodes.
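The change above swaps the last two return values, which matters mostly when the iterator drives a generic for loop. A minimal Lua sketch of the updated convention, assuming it runs inside LuaMetaTeX where node.direct is available; the reportglyphs helper and the list head it receives are illustrative only:

    -- sketch: walk the glyph nodes of a list and report char/font,
    -- using the (node, char, font) order documented above
    local todirect     = node.direct.todirect
    local traversechar = node.direct.traversechar

    local function reportglyphs(head) -- head: a regular node list
        for n, char, font in traversechar(todirect(head)) do
            -- char now comes second, font third
            print(string.format("glyph U+%04X in font %i", char, font))
        end
    end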
diff --git a/doc/context/sources/general/manuals/luametatex/luametatex-pdf.tex b/doc/context/sources/general/manuals/luametatex/luametatex-pdf.tex
index 1394df9f5..b08433b29 100644
--- a/doc/context/sources/general/manuals/luametatex/luametatex-pdf.tex
+++ b/doc/context/sources/general/manuals/luametatex/luametatex-pdf.tex
@@ -83,12 +83,12 @@ pdfe.getstatus(<pdfe document>)
The returned codes are:
\starttabulate[|c|l|]
-\DB value \BC explanation \NC \NR
+\DB value \BC explanation \NC \NR
\TB
-\NC \type {-2} \NC the document failed to open \NC \NR
-\NC \type {-1} \NC the document is (still) protected \NC \NR
-\NC \type {0} \NC the document is not encrypted \NC \NR
-\NC \type {2} \NC the document has been unencrypted \NC \NR
+\NC \type {-2} \NC the document is (still) protected \NC \NR
+\NC \type {-1} \NC the document failed to open \NC \NR
+\NC \type {0} \NC the document is not encrypted \NC \NR
+\NC \type {1} \NC the document has been unencrypted \NC \NR
\LL
\stoptabulate
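A minimal Lua sketch of how the corrected codes might be checked, assuming the pdfe library behaves as described above; the file name and passwords are placeholders:

    -- sketch: open a document and branch on the status codes listed above
    local document = pdfe.open("somefile.pdf")   -- placeholder file name
    local status   = document and pdfe.getstatus(document)

    if not document or status == -1 then
        print("the document failed to open")
    elseif status == -2 then
        -- still protected: try to unencrypt with placeholder credentials
        pdfe.unencrypt(document, "userpassword", "ownerpassword")
        status = pdfe.getstatus(document)
    end

    if status and status >= 0 then
        -- 0: not encrypted, 1: has been unencrypted
        print("number of pages: " .. pdfe.getnofpages(document))
    end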
diff --git a/doc/context/sources/general/manuals/musings/musings-hownotto.tex b/doc/context/sources/general/manuals/musings/musings-hownotto.tex
new file mode 100644
index 000000000..48e9adcf3
--- /dev/null
+++ b/doc/context/sources/general/manuals/musings/musings-hownotto.tex
@@ -0,0 +1,76 @@
+% language=us runpath=texruns:manuals/musings
+
+\startcomponent musings-hownotto
+
+\environment musings-style
+
+\startchapter[title={How not to install \CONTEXT}]
+
+\startalign[flushleft]
+
+Installing LuaMetaTeX can be a complex process that requires some technical
+expertise, but the following steps should give you a general idea of what is
+involved:
+
+\startitemize[n]
+
+\startitem
+ First, you need to ensure that you have a recent version of the Lua
+ programming language installed on your system. You can download the latest
+ version of Lua from the official website at http://www.lua.org/download.html.
+ \footnote {The \LUA\ code needed is part of the source tree that can be
+ downloaded from GitHub or websites.}
+\stopitem
+
+\startitem
+ Next, you need to download the latest version of the MetaTeX distribution,
+ which includes the LuaMetaTeX engine, from the official ConTeXt Garden
+ website at https://wiki.contextgarden.net/ConTeXt_Standalone. \footnote
+ {There is no \METATEX, although we sometimes joke about it.}
+\stopitem
+
+\startitem
+ Once you have downloaded the MetaTeX distribution, extract the files to a
+ directory on your system. \footnote {So here one is stuck.}
+\stopitem
+
+\startitem
+ You can then run the LuaMetaTeX engine by opening a command prompt or
+ terminal window and navigating to the directory where you extracted the
+ MetaTeX files. From there, you can run the command "luametatex" followed by
+ the name of the TeX file you want to process. \footnote {Shouldn't it be
+ compiled first? And even then it needs some format, so one needs \type
+ {context} and \type {mtxrun}.}
+\stopitem
+
+\startitem
+ To make it easier to use LuaMetaTeX with your favorite text editor, you may
+ also want to install a TeX distribution such as TeX Live or MiKTeX, which
+ includes support for LuaMetaTeX. These distributions typically include a
+ graphical user interface that makes it easier to manage your TeX installation
+ and configure your system for use with LuaMetaTeX. \footnote {Indeed
+ installing \TEXLIVE\ is easier, as is installing the smaller reference
+ installation which uses \LUAMETATEX\ as its own installer. As far as we know,
+ \MIKTEX\ doesn't include \LMTX. And yes, consulting the documentation might
+ be best.}
+\stopitem
+
+\stopitemize
+
+It's worth noting that the exact steps for installing LuaMetaTeX may vary
+depending on your operating system and the specific TeX distribution you are
+using. For more detailed instructions, you may want to consult the official
+documentation for LuaMetaTeX and the TeX distribution you are using. \footnote
+{So here is the way out of the proposed mess.}
+
+\start \blank
+\bf by ChatGPT
+\footnote {Queried by Mikael Sundqvist.}
+\footnote {Which (at least here) is more about well formed sentences than about
+verified content. We can only hope that new \TEX\ users are able to recognize
+a fake.}
+\stop
+
+\stopalign
+
+\stoptext
diff --git a/doc/context/sources/general/manuals/musings/musings.tex b/doc/context/sources/general/manuals/musings/musings.tex
index bccab890a..2d0e7c094 100644
--- a/doc/context/sources/general/manuals/musings/musings.tex
+++ b/doc/context/sources/general/manuals/musings/musings.tex
@@ -29,6 +29,7 @@
\component musings-dontusetex
\component musings-speed
\component musings-texlive
+ \component musings-hownotto
\stopbodymatter
\stopproduct
diff --git a/metapost/context/base/mpxl/mp-base.mpxl b/metapost/context/base/mpxl/mp-base.mpxl
index 9bba87d23..ba2012b7e 100644
--- a/metapost/context/base/mpxl/mp-base.mpxl
+++ b/metapost/context/base/mpxl/mp-base.mpxl
@@ -639,7 +639,9 @@ permanent interpath, solve, buildcycle, tolerance ;
%% units of measure
-mm := 2.83464 ;
+newinternal mm, pt, dd, bp, cm, pc, cc, in, dk, es, ts ;
+
+mm := 2.83464 ; % ibm odd/even rounding
pt := 0.99626 ;
dd := 1.06601 ; % 1.0660068107174
bp := 1 ;
@@ -648,6 +650,8 @@ pc := 11.95517 ;
cc := 12.79213 ;
in := 72 ;
dk := 6.41577 ; % 6.4157650704225 ;
+es := 71.13174 ; % ibm odd/even rounding
+ts := 7.11317 ;
immutable mm, pt, bp, cm, in ; % we don't protect (yet): dd, pc cc (used as locals)
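The factors above express each unit in PostScript big points (bp). For cross-checking on the Lua end, here is a small illustrative table with the values visible in this hunk (it is not part of the distribution; units whose factors are not shown here, such as cm, are left out):

    -- sketch: the unit factors from mp-base.mpxl, in bp per unit
    local factors = {
        mm = 2.83464, pt = 0.99626, dd = 1.06601, bp = 1,
        pc = 11.95517, cc = 12.79213, ["in"] = 72,
        dk = 6.41577, es = 71.13174, ts = 7.11317,
    }

    -- convert a quantity from one unit to another by going through bp
    local function convert(value, from, to)
        return value * factors[from] / factors[to]
    end

    print(convert(1, "in", "mm")) -- roughly 25.4
    print(convert(1, "es", "ts")) -- about 10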
diff --git a/metapost/context/base/mpxl/mp-xbox.mpxl b/metapost/context/base/mpxl/mp-xbox.mpxl
index 365bec515..bfc844d9e 100644
--- a/metapost/context/base/mpxl/mp-xbox.mpxl
+++ b/metapost/context/base/mpxl/mp-xbox.mpxl
@@ -10,10 +10,7 @@
% copyright : Public domain
% patched : Hans Hagen
%
-% The code is the same but I've added a boxes_ namespace for some so that we don't
-% clash with metafun.
-
-% The code is the same but I've added s boxes_ namespace for soem so that we don't
+% The code is the same but I've added s boxes_ namespace for somd so that we don't
% clash with metafun. Loading and initialization is now under metafun control.
if known metafun_loaded_xbox : endinput ; fi ;
diff --git a/scripts/context/lua/mtx-context.lua b/scripts/context/lua/mtx-context.lua
index 014a7d4ef..a5dfd5a7d 100644
--- a/scripts/context/lua/mtx-context.lua
+++ b/scripts/context/lua/mtx-context.lua
@@ -704,6 +704,8 @@ function scripts.context.run(ctxdata,filename)
--
a_batchmode = (a_batchmode and "batchmode") or (a_nonstopmode and "nonstopmode") or (a_scrollmode and "scrollmode") or nil
--
+ local changed = { }
+ --
for i=1,#filelist do
--
local filename = filelist[i]
@@ -731,6 +733,10 @@ function scripts.context.run(ctxdata,filename)
-- local jobname = removesuffix(filename)
local ctxname = ctxdata and ctxdata.ctxname
--
+ if changed[jobname] == nil then
+ changed[jobname] = false
+ end
+ --
local analysis = preamble_analyze(filename)
--
if a_mkii or analysis.engine == 'pdftex' or analysis.engine == 'xetex' then
@@ -962,6 +968,7 @@ function scripts.context.run(ctxdata,filename)
if not multipass_forcedruns then
newhash = multipass_hashfiles(jobname)
if multipass_changed(oldhash,newhash) then
+ changed[jobname] = true
oldhash = newhash
else
break
@@ -1096,6 +1103,21 @@ function scripts.context.run(ctxdata,filename)
end
end
--
+ if #filelist > 1 then
+ local done = false
+ for k, v in sortedhash(changed) do
+ if v then
+ if not done then
+ report()
+ done = true
+ end
+ report("file %a was changed",k)
+ end
+ end
+ if done then
+ report()
+ end
+ end
end
function scripts.context.pipe() -- still used?
diff --git a/scripts/context/lua/mtx-fonts.lua b/scripts/context/lua/mtx-fonts.lua
index 37107ea1e..746a010c8 100644
--- a/scripts/context/lua/mtx-fonts.lua
+++ b/scripts/context/lua/mtx-fonts.lua
@@ -358,9 +358,14 @@ local function list_specifications(t,info)
fontweight(entry.fontweight),
}
end
- table.insert(s,1,{"familyname","weight","style","width","variant","fontname","filename","subfont","fontweight"})
- table.insert(s,2,{"","","","","","","","",""})
- utilities.formatters.formatcolumns(s)
+ local h = {
+ {"familyname","weight","style","width","variant","fontname","filename","subfont","fontweight"},
+ {"","","","","","","","",""}
+ }
+ utilities.formatters.formatcolumns(s,false,h)
+ for k=1,#h do
+ write_nl(h[k])
+ end
for k=1,#s do
write_nl(s[k])
end
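The call above illustrates the new third argument of formatcolumns: header rows are measured together with the data but formatted and returned separately. A minimal sketch of the calling pattern, assuming an environment (such as a script run via mtxrun) where utilities.formatters is loaded; the sample rows are made up:

    -- sketch: format a small table with a separate header block
    local rows = {
        { "dejavuserif", "normal", "regular" },
        { "dejavusans",  "bold",   "italic"  },
    }
    local header = {
        { "familyname", "weight", "style" },
        { "",           "",       ""      },
    }

    -- rows and header end up aligned against the same column widths
    utilities.formatters.formatcolumns(rows, false, header)

    for i = 1, #header do print(header[i]) end
    for i = 1, #rows   do print(rows[i])   end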
diff --git a/scripts/context/lua/mtx-pdf.lua b/scripts/context/lua/mtx-pdf.lua
index 0ce17ec5b..a1803a801 100644
--- a/scripts/context/lua/mtx-pdf.lua
+++ b/scripts/context/lua/mtx-pdf.lua
@@ -9,7 +9,7 @@ if not modules then modules = { } end modules ['mtx-pdf'] = {
local tonumber = tonumber
local format, gmatch, gsub, match, find = string.format, string.gmatch, string.gsub, string.match, string.find
local utfchar = utf.char
-local concat = table.concat
+local concat, insert, swapped = table.concat, table.insert, table.swapped
local setmetatableindex, sortedhash, sortedkeys = table.setmetatableindex, table.sortedhash, table.sortedkeys
local helpinfo = [[
@@ -28,6 +28,7 @@ local helpinfo = [[
<flag name="pretty"><short>replace newlines in metadata</short></flag>
<flag name="fonts"><short>show used fonts (<ref name="detail)"/></short></flag>
<flag name="object"><short>show object"/></short></flag>
+ <flag name="linkjs"><short>show links"/></short></flag>
</subcategory>
<subcategory>
<example><command>mtxrun --script pdf --info foo.pdf</command></example>
@@ -365,6 +366,110 @@ function scripts.pdf.object(filename,n)
end
end
+function scripts.pdf.links(filename,asked)
+ local pdffile = loadpdffile(filename)
+ if pdffile then
+
+ local pages = pdffile.pages
+ local nofpages = pdffile.nofpages
+
+ if asked and (asked < 1 or asked > nofpages) then
+ report("")
+ report("no page %i, last page %i",asked,nofpages)
+ report("")
+ return
+ end
+
+ local reverse = swapped(pages)
+
+ local function show(pagenumber)
+ local page = pages[pagenumber]
+ local annots = page.Annots
+ if annots then
+ report("")
+ report("annotations @ page %i",pagenumber)
+ report("")
+ for i=1,#annots do
+ local annot = annots[i]
+ if annot.Subtype == "Link" then
+ local A = annot.A
+ if A then
+ local S = A.S
+ local D = A.D
+ if S == "GoTo" then
+ if D then
+ local D1 = D[1]
+ local R1 = reverse[D1]
+ if tonumber(R1) then
+ report("intern, page % 4i",R1 or 0)
+ else
+ report("intern, name %s",tostring(D1))
+ end
+ end
+ elseif S == "GoToR" then
+ if D then
+ local F = A.F
+ if F then
+ local D1 = D[1]
+ if tonumber(D1) then
+ report("extern, page % 4i, file %s",D1 + 1,F)
+ else
+ report("extern, page % 4i, file %s, name %s",0,F,D[1])
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+ end
+
+ if asked then
+ show(asked)
+ else
+ for pagenumber=1,nofpages do
+ show(pagenumber)
+ end
+ end
+
+ local destinations = pdffile.destinations
+ if destinations then
+ if asked then
+ report("")
+ report("destinations to page %i",asked)
+ report("")
+ for k, v in sortedhash(destinations) do
+ local D = v.D
+ if D then
+ local p = reverse[D[1]] or 0
+ if p == asked then
+ report(k)
+ end
+ end
+ end
+ else
+ report("")
+ report("destinations")
+ report("")
+ local list = setmetatableindex("table")
+ for k, v in sortedhash(destinations) do
+ local D = v.D
+ if D then
+ local p = reverse[D[1]]
+ report("tag %s, page % 4i",k,p)
+ insert(list[p],k)
+ end
+ end
+ for k, v in sortedhash(list) do
+ report("")
+ report("page %i, names % t",k,v)
+ end
+ end
+ end
+ end
+end
+
-- scripts.pdf.info("e:/tmp/oeps.pdf")
-- scripts.pdf.metadata("e:/tmp/oeps.pdf")
-- scripts.pdf.fonts("e:/tmp/oeps.pdf")
@@ -382,6 +487,8 @@ elseif environment.argument("fonts") then
scripts.pdf.fonts(filename)
elseif environment.argument("object") then
scripts.pdf.object(filename,tonumber(environment.argument("object")))
+elseif environment.argument("links") then
+ scripts.pdf.links(filename,tonumber(environment.argument("page")))
elseif environment.argument("exporthelp") then
application.export(environment.argument("exporthelp"),filename)
else
diff --git a/scripts/context/lua/mtxrun.lua b/scripts/context/lua/mtxrun.lua
index 49ae4a1bb..0cb821561 100644
--- a/scripts/context/lua/mtxrun.lua
+++ b/scripts/context/lua/mtxrun.lua
@@ -5136,7 +5136,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["l-dir"] = package.loaded["l-dir"] or true
--- original size: 18893, stripped down to: 11170
+-- original size: 19139, stripped down to: 11345
if not modules then modules={} end modules ['l-dir']={
version=1.001,
@@ -5154,7 +5154,7 @@ dir=dir or {}
local dir=dir
local lfs=lfs
local attributes=lfs.attributes
-local walkdir=lfs.dir
+local scandir=lfs.dir
local isdir=lfs.isdir
local isfile=lfs.isfile
local currentdir=lfs.currentdir
@@ -5185,6 +5185,15 @@ else
lfs.isdir=isdir
lfs.isfile=isfile
end
+local isreadable=file.isreadable
+local walkdir=function(p,...)
+ if isreadable(p.."/.") then
+ return scandir(p,...)
+ else
+ return function() end
+ end
+end
+lfs.walkdir=walkdir
function dir.current()
return (gsub(currentdir(),"\\","/"))
end
@@ -9942,7 +9951,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["util-fmt"] = package.loaded["util-fmt"] or true
--- original size: 2541, stripped down to: 1624
+-- original size: 3379, stripped down to: 2273
if not modules then modules={} end modules ['util-fmt']={
version=1.001,
@@ -9955,19 +9964,21 @@ utilities=utilities or {}
utilities.formatters=utilities.formatters or {}
local formatters=utilities.formatters
local concat,format=table.concat,string.format
-local tostring,type=tostring,type
+local tostring,type,unpack=tostring,type,unpack
local strip=string.strip
local lpegmatch=lpeg.match
local stripper=lpeg.patterns.stripzeros
function formatters.stripzeros(str)
return lpegmatch(stripper,str)
end
-function formatters.formatcolumns(result,between)
+function formatters.formatcolumns(result,between,header)
if result and #result>0 then
- between=between or " "
- local widths,numbers={},{}
+ local widths={}
+ local numbers={}
+ local templates={}
local first=result[1]
local n=#first
+ between=between or " "
for i=1,n do
widths[i]=0
end
@@ -9989,31 +10000,61 @@ function formatters.formatcolumns(result,between)
end
end
end
+ if header then
+ for i=1,#header do
+ local h=header[i]
+ for j=1,n do
+ local hj=tostring(h[j])
+ h[j]=hj
+ local w=#hj
+ if w>widths[j] then
+ widths[j]=w
+ end
+ end
+ end
+ end
for i=1,n do
local w=widths[i]
if numbers[i] then
if w>80 then
- widths[i]="%s"..between
- else
- widths[i]="%0"..w.."i"..between
+ templates[i]="%s"..between
+ else
+ templates[i]="% "..w.."i"..between
end
else
if w>80 then
- widths[i]="%s"..between
- elseif w>0 then
- widths[i]="%-"..w.."s"..between
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
else
- widths[i]="%s"
+ templates[i]="%s"
end
end
end
- local template=strip(concat(widths))
+ local template=strip(concat(templates))
for i=1,#result do
local str=format(template,unpack(result[i]))
result[i]=strip(str)
end
+ if header then
+ for i=1,n do
+ local w=widths[i]
+ if w>80 then
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
+ else
+ templates[i]="%s"
+ end
+ end
+ local template=strip(concat(templates))
+ for i=1,#header do
+ local str=format(template,unpack(header[i]))
+ header[i]=strip(str)
+ end
+ end
end
- return result
+ return result,header
end
@@ -16397,7 +16438,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-tab"] = package.loaded["lxml-tab"] or true
--- original size: 62810, stripped down to: 36225
+-- original size: 62221, stripped down to: 36225
if not modules then modules={} end modules ['lxml-tab']={
version=1.001,
@@ -17878,7 +17919,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-lpt"] = package.loaded["lxml-lpt"] or true
--- original size: 54733, stripped down to: 31258
+-- original size: 54589, stripped down to: 31258
if not modules then modules={} end modules ['lxml-lpt']={
version=1.001,
@@ -19129,7 +19170,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-mis"] = package.loaded["lxml-mis"] or true
--- original size: 3574, stripped down to: 1808
+-- original size: 3542, stripped down to: 1808
if not modules then modules={} end modules ['lxml-mis']={
version=1.001,
@@ -19198,7 +19239,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-aux"] = package.loaded["lxml-aux"] or true
--- original size: 34661, stripped down to: 21511
+-- original size: 34522, stripped down to: 21511
if not modules then modules={} end modules ['lxml-aux']={
version=1.001,
@@ -21705,7 +21746,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-tmp"] = package.loaded["data-tmp"] or true
--- original size: 16456, stripped down to: 11636
+-- original size: 16433, stripped down to: 11636
if not modules then modules={} end modules ['data-tmp']={
version=1.100,
@@ -22240,7 +22281,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-res"] = package.loaded["data-res"] or true
--- original size: 69576, stripped down to: 44470
+-- original size: 70711, stripped down to: 44839
if not modules then modules={} end modules ['data-res']={
version=1.001,
@@ -22308,13 +22349,15 @@ local criticalvars={
if environment.default_texmfcnf then
resolvers.luacnfspec="home:texmf/web2c;"..environment.default_texmfcnf
else
- resolvers.luacnfspec=concat ({
- "home:texmf/web2c",
- "selfautoparent:/texmf-local/web2c",
- "selfautoparent:/texmf-context/web2c",
- "selfautoparent:/texmf-dist/web2c",
- "selfautoparent:/texmf/web2c",
- },";")
+ local texroot=environment.texroot
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-context/web2c;selfautoparent:/texmf/web2c"
+ if texroot and isdir(texroot.."/texmf-context") then
+ elseif texroot and isdir(texroot.."/texmf-dist") then
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-dist/web2c;selfautoparent:/texmf/web2c"
+ elseif ostype~="windows" and isdir("/etc/texmf/web2c") then
+ resolvers.luacnfspec="home:texmf/web2c;/etc/texmf/web2c;selfautodir:/share/texmf/web2c"
+ else
+ end
end
local unset_variable="unset"
local formats=resolvers.formats
@@ -24249,7 +24292,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-con"] = package.loaded["data-con"] or true
--- original size: 5487, stripped down to: 3757
+-- original size: 5477, stripped down to: 3757
if not modules then modules={} end modules ['data-con']={
version=1.100,
@@ -24467,7 +24510,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-zip"] = package.loaded["data-zip"] or true
--- original size: 10805, stripped down to: 7951
+-- original size: 10789, stripped down to: 7951
if not modules then modules={} end modules ['data-zip']={
version=1.001,
@@ -26095,8 +26138,8 @@ end -- of closure
-- used libraries : l-bit32.lua l-lua.lua l-macro.lua l-sandbox.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-sha.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-fil.lua util-sac.lua util-sto.lua util-prs.lua util-fmt.lua util-soc-imp-reset.lua util-soc-imp-socket.lua util-soc-imp-copas.lua util-soc-imp-ltn12.lua util-soc-imp-mime.lua util-soc-imp-url.lua util-soc-imp-headers.lua util-soc-imp-tp.lua util-soc-imp-http.lua util-soc-imp-ftp.lua util-soc-imp-smtp.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-tpl.lua util-sbx.lua util-mrg.lua util-env.lua luat-env.lua util-zip.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua libs-ini.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 1035917
--- stripped bytes : 408296
+-- original bytes : 1037183
+-- stripped bytes : 408369
-- end library merge
diff --git a/scripts/context/stubs/mswin/mtxrun.lua b/scripts/context/stubs/mswin/mtxrun.lua
index 49ae4a1bb..0cb821561 100644
--- a/scripts/context/stubs/mswin/mtxrun.lua
+++ b/scripts/context/stubs/mswin/mtxrun.lua
@@ -5136,7 +5136,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["l-dir"] = package.loaded["l-dir"] or true
--- original size: 18893, stripped down to: 11170
+-- original size: 19139, stripped down to: 11345
if not modules then modules={} end modules ['l-dir']={
version=1.001,
@@ -5154,7 +5154,7 @@ dir=dir or {}
local dir=dir
local lfs=lfs
local attributes=lfs.attributes
-local walkdir=lfs.dir
+local scandir=lfs.dir
local isdir=lfs.isdir
local isfile=lfs.isfile
local currentdir=lfs.currentdir
@@ -5185,6 +5185,15 @@ else
lfs.isdir=isdir
lfs.isfile=isfile
end
+local isreadable=file.isreadable
+local walkdir=function(p,...)
+ if isreadable(p.."/.") then
+ return scandir(p,...)
+ else
+ return function() end
+ end
+end
+lfs.walkdir=walkdir
function dir.current()
return (gsub(currentdir(),"\\","/"))
end
@@ -9942,7 +9951,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["util-fmt"] = package.loaded["util-fmt"] or true
--- original size: 2541, stripped down to: 1624
+-- original size: 3379, stripped down to: 2273
if not modules then modules={} end modules ['util-fmt']={
version=1.001,
@@ -9955,19 +9964,21 @@ utilities=utilities or {}
utilities.formatters=utilities.formatters or {}
local formatters=utilities.formatters
local concat,format=table.concat,string.format
-local tostring,type=tostring,type
+local tostring,type,unpack=tostring,type,unpack
local strip=string.strip
local lpegmatch=lpeg.match
local stripper=lpeg.patterns.stripzeros
function formatters.stripzeros(str)
return lpegmatch(stripper,str)
end
-function formatters.formatcolumns(result,between)
+function formatters.formatcolumns(result,between,header)
if result and #result>0 then
- between=between or " "
- local widths,numbers={},{}
+ local widths={}
+ local numbers={}
+ local templates={}
local first=result[1]
local n=#first
+ between=between or " "
for i=1,n do
widths[i]=0
end
@@ -9989,31 +10000,61 @@ function formatters.formatcolumns(result,between)
end
end
end
+ if header then
+ for i=1,#header do
+ local h=header[i]
+ for j=1,n do
+ local hj=tostring(h[j])
+ h[j]=hj
+ local w=#hj
+ if w>widths[j] then
+ widths[j]=w
+ end
+ end
+ end
+ end
for i=1,n do
local w=widths[i]
if numbers[i] then
if w>80 then
- widths[i]="%s"..between
- else
- widths[i]="%0"..w.."i"..between
+ templates[i]="%s"..between
+ else
+ templates[i]="% "..w.."i"..between
end
else
if w>80 then
- widths[i]="%s"..between
- elseif w>0 then
- widths[i]="%-"..w.."s"..between
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
else
- widths[i]="%s"
+ templates[i]="%s"
end
end
end
- local template=strip(concat(widths))
+ local template=strip(concat(templates))
for i=1,#result do
local str=format(template,unpack(result[i]))
result[i]=strip(str)
end
+ if header then
+ for i=1,n do
+ local w=widths[i]
+ if w>80 then
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
+ else
+ templates[i]="%s"
+ end
+ end
+ local template=strip(concat(templates))
+ for i=1,#header do
+ local str=format(template,unpack(header[i]))
+ header[i]=strip(str)
+ end
+ end
end
- return result
+ return result,header
end
@@ -16397,7 +16438,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-tab"] = package.loaded["lxml-tab"] or true
--- original size: 62810, stripped down to: 36225
+-- original size: 62221, stripped down to: 36225
if not modules then modules={} end modules ['lxml-tab']={
version=1.001,
@@ -17878,7 +17919,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-lpt"] = package.loaded["lxml-lpt"] or true
--- original size: 54733, stripped down to: 31258
+-- original size: 54589, stripped down to: 31258
if not modules then modules={} end modules ['lxml-lpt']={
version=1.001,
@@ -19129,7 +19170,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-mis"] = package.loaded["lxml-mis"] or true
--- original size: 3574, stripped down to: 1808
+-- original size: 3542, stripped down to: 1808
if not modules then modules={} end modules ['lxml-mis']={
version=1.001,
@@ -19198,7 +19239,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-aux"] = package.loaded["lxml-aux"] or true
--- original size: 34661, stripped down to: 21511
+-- original size: 34522, stripped down to: 21511
if not modules then modules={} end modules ['lxml-aux']={
version=1.001,
@@ -21705,7 +21746,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-tmp"] = package.loaded["data-tmp"] or true
--- original size: 16456, stripped down to: 11636
+-- original size: 16433, stripped down to: 11636
if not modules then modules={} end modules ['data-tmp']={
version=1.100,
@@ -22240,7 +22281,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-res"] = package.loaded["data-res"] or true
--- original size: 69576, stripped down to: 44470
+-- original size: 70711, stripped down to: 44839
if not modules then modules={} end modules ['data-res']={
version=1.001,
@@ -22308,13 +22349,15 @@ local criticalvars={
if environment.default_texmfcnf then
resolvers.luacnfspec="home:texmf/web2c;"..environment.default_texmfcnf
else
- resolvers.luacnfspec=concat ({
- "home:texmf/web2c",
- "selfautoparent:/texmf-local/web2c",
- "selfautoparent:/texmf-context/web2c",
- "selfautoparent:/texmf-dist/web2c",
- "selfautoparent:/texmf/web2c",
- },";")
+ local texroot=environment.texroot
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-context/web2c;selfautoparent:/texmf/web2c"
+ if texroot and isdir(texroot.."/texmf-context") then
+ elseif texroot and isdir(texroot.."/texmf-dist") then
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-dist/web2c;selfautoparent:/texmf/web2c"
+ elseif ostype~="windows" and isdir("/etc/texmf/web2c") then
+ resolvers.luacnfspec="home:texmf/web2c;/etc/texmf/web2c;selfautodir:/share/texmf/web2c"
+ else
+ end
end
local unset_variable="unset"
local formats=resolvers.formats
@@ -24249,7 +24292,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-con"] = package.loaded["data-con"] or true
--- original size: 5487, stripped down to: 3757
+-- original size: 5477, stripped down to: 3757
if not modules then modules={} end modules ['data-con']={
version=1.100,
@@ -24467,7 +24510,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-zip"] = package.loaded["data-zip"] or true
--- original size: 10805, stripped down to: 7951
+-- original size: 10789, stripped down to: 7951
if not modules then modules={} end modules ['data-zip']={
version=1.001,
@@ -26095,8 +26138,8 @@ end -- of closure
-- used libraries : l-bit32.lua l-lua.lua l-macro.lua l-sandbox.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-sha.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-fil.lua util-sac.lua util-sto.lua util-prs.lua util-fmt.lua util-soc-imp-reset.lua util-soc-imp-socket.lua util-soc-imp-copas.lua util-soc-imp-ltn12.lua util-soc-imp-mime.lua util-soc-imp-url.lua util-soc-imp-headers.lua util-soc-imp-tp.lua util-soc-imp-http.lua util-soc-imp-ftp.lua util-soc-imp-smtp.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-tpl.lua util-sbx.lua util-mrg.lua util-env.lua luat-env.lua util-zip.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua libs-ini.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 1035917
--- stripped bytes : 408296
+-- original bytes : 1037183
+-- stripped bytes : 408369
-- end library merge
diff --git a/scripts/context/stubs/unix/mtxrun b/scripts/context/stubs/unix/mtxrun
index 49ae4a1bb..0cb821561 100644
--- a/scripts/context/stubs/unix/mtxrun
+++ b/scripts/context/stubs/unix/mtxrun
@@ -5136,7 +5136,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["l-dir"] = package.loaded["l-dir"] or true
--- original size: 18893, stripped down to: 11170
+-- original size: 19139, stripped down to: 11345
if not modules then modules={} end modules ['l-dir']={
version=1.001,
@@ -5154,7 +5154,7 @@ dir=dir or {}
local dir=dir
local lfs=lfs
local attributes=lfs.attributes
-local walkdir=lfs.dir
+local scandir=lfs.dir
local isdir=lfs.isdir
local isfile=lfs.isfile
local currentdir=lfs.currentdir
@@ -5185,6 +5185,15 @@ else
lfs.isdir=isdir
lfs.isfile=isfile
end
+local isreadable=file.isreadable
+local walkdir=function(p,...)
+ if isreadable(p.."/.") then
+ return scandir(p,...)
+ else
+ return function() end
+ end
+end
+lfs.walkdir=walkdir
function dir.current()
return (gsub(currentdir(),"\\","/"))
end
@@ -9942,7 +9951,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["util-fmt"] = package.loaded["util-fmt"] or true
--- original size: 2541, stripped down to: 1624
+-- original size: 3379, stripped down to: 2273
if not modules then modules={} end modules ['util-fmt']={
version=1.001,
@@ -9955,19 +9964,21 @@ utilities=utilities or {}
utilities.formatters=utilities.formatters or {}
local formatters=utilities.formatters
local concat,format=table.concat,string.format
-local tostring,type=tostring,type
+local tostring,type,unpack=tostring,type,unpack
local strip=string.strip
local lpegmatch=lpeg.match
local stripper=lpeg.patterns.stripzeros
function formatters.stripzeros(str)
return lpegmatch(stripper,str)
end
-function formatters.formatcolumns(result,between)
+function formatters.formatcolumns(result,between,header)
if result and #result>0 then
- between=between or " "
- local widths,numbers={},{}
+ local widths={}
+ local numbers={}
+ local templates={}
local first=result[1]
local n=#first
+ between=between or " "
for i=1,n do
widths[i]=0
end
@@ -9989,31 +10000,61 @@ function formatters.formatcolumns(result,between)
end
end
end
+ if header then
+ for i=1,#header do
+ local h=header[i]
+ for j=1,n do
+ local hj=tostring(h[j])
+ h[j]=hj
+ local w=#hj
+ if w>widths[j] then
+ widths[j]=w
+ end
+ end
+ end
+ end
for i=1,n do
local w=widths[i]
if numbers[i] then
if w>80 then
- widths[i]="%s"..between
- else
- widths[i]="%0"..w.."i"..between
+ templates[i]="%s"..between
+ else
+ templates[i]="% "..w.."i"..between
end
else
if w>80 then
- widths[i]="%s"..between
- elseif w>0 then
- widths[i]="%-"..w.."s"..between
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
else
- widths[i]="%s"
+ templates[i]="%s"
end
end
end
- local template=strip(concat(widths))
+ local template=strip(concat(templates))
for i=1,#result do
local str=format(template,unpack(result[i]))
result[i]=strip(str)
end
+ if header then
+ for i=1,n do
+ local w=widths[i]
+ if w>80 then
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
+ else
+ templates[i]="%s"
+ end
+ end
+ local template=strip(concat(templates))
+ for i=1,#header do
+ local str=format(template,unpack(header[i]))
+ header[i]=strip(str)
+ end
+ end
end
- return result
+ return result,header
end
@@ -16397,7 +16438,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-tab"] = package.loaded["lxml-tab"] or true
--- original size: 62810, stripped down to: 36225
+-- original size: 62221, stripped down to: 36225
if not modules then modules={} end modules ['lxml-tab']={
version=1.001,
@@ -17878,7 +17919,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-lpt"] = package.loaded["lxml-lpt"] or true
--- original size: 54733, stripped down to: 31258
+-- original size: 54589, stripped down to: 31258
if not modules then modules={} end modules ['lxml-lpt']={
version=1.001,
@@ -19129,7 +19170,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-mis"] = package.loaded["lxml-mis"] or true
--- original size: 3574, stripped down to: 1808
+-- original size: 3542, stripped down to: 1808
if not modules then modules={} end modules ['lxml-mis']={
version=1.001,
@@ -19198,7 +19239,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-aux"] = package.loaded["lxml-aux"] or true
--- original size: 34661, stripped down to: 21511
+-- original size: 34522, stripped down to: 21511
if not modules then modules={} end modules ['lxml-aux']={
version=1.001,
@@ -21705,7 +21746,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-tmp"] = package.loaded["data-tmp"] or true
--- original size: 16456, stripped down to: 11636
+-- original size: 16433, stripped down to: 11636
if not modules then modules={} end modules ['data-tmp']={
version=1.100,
@@ -22240,7 +22281,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-res"] = package.loaded["data-res"] or true
--- original size: 69576, stripped down to: 44470
+-- original size: 70711, stripped down to: 44839
if not modules then modules={} end modules ['data-res']={
version=1.001,
@@ -22308,13 +22349,15 @@ local criticalvars={
if environment.default_texmfcnf then
resolvers.luacnfspec="home:texmf/web2c;"..environment.default_texmfcnf
else
- resolvers.luacnfspec=concat ({
- "home:texmf/web2c",
- "selfautoparent:/texmf-local/web2c",
- "selfautoparent:/texmf-context/web2c",
- "selfautoparent:/texmf-dist/web2c",
- "selfautoparent:/texmf/web2c",
- },";")
+ local texroot=environment.texroot
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-context/web2c;selfautoparent:/texmf/web2c"
+ if texroot and isdir(texroot.."/texmf-context") then
+ elseif texroot and isdir(texroot.."/texmf-dist") then
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-dist/web2c;selfautoparent:/texmf/web2c"
+ elseif ostype~="windows" and isdir("/etc/texmf/web2c") then
+ resolvers.luacnfspec="home:texmf/web2c;/etc/texmf/web2c;selfautodir:/share/texmf/web2c"
+ else
+ end
end
local unset_variable="unset"
local formats=resolvers.formats
@@ -24249,7 +24292,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-con"] = package.loaded["data-con"] or true
--- original size: 5487, stripped down to: 3757
+-- original size: 5477, stripped down to: 3757
if not modules then modules={} end modules ['data-con']={
version=1.100,
@@ -24467,7 +24510,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-zip"] = package.loaded["data-zip"] or true
--- original size: 10805, stripped down to: 7951
+-- original size: 10789, stripped down to: 7951
if not modules then modules={} end modules ['data-zip']={
version=1.001,
@@ -26095,8 +26138,8 @@ end -- of closure
-- used libraries : l-bit32.lua l-lua.lua l-macro.lua l-sandbox.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-sha.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-fil.lua util-sac.lua util-sto.lua util-prs.lua util-fmt.lua util-soc-imp-reset.lua util-soc-imp-socket.lua util-soc-imp-copas.lua util-soc-imp-ltn12.lua util-soc-imp-mime.lua util-soc-imp-url.lua util-soc-imp-headers.lua util-soc-imp-tp.lua util-soc-imp-http.lua util-soc-imp-ftp.lua util-soc-imp-smtp.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-tpl.lua util-sbx.lua util-mrg.lua util-env.lua luat-env.lua util-zip.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua libs-ini.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 1035917
--- stripped bytes : 408296
+-- original bytes : 1037183
+-- stripped bytes : 408369
-- end library merge
diff --git a/scripts/context/stubs/win64/mtxrun.lua b/scripts/context/stubs/win64/mtxrun.lua
index 49ae4a1bb..0cb821561 100644
--- a/scripts/context/stubs/win64/mtxrun.lua
+++ b/scripts/context/stubs/win64/mtxrun.lua
@@ -5136,7 +5136,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["l-dir"] = package.loaded["l-dir"] or true
--- original size: 18893, stripped down to: 11170
+-- original size: 19139, stripped down to: 11345
if not modules then modules={} end modules ['l-dir']={
version=1.001,
@@ -5154,7 +5154,7 @@ dir=dir or {}
local dir=dir
local lfs=lfs
local attributes=lfs.attributes
-local walkdir=lfs.dir
+local scandir=lfs.dir
local isdir=lfs.isdir
local isfile=lfs.isfile
local currentdir=lfs.currentdir
@@ -5185,6 +5185,15 @@ else
lfs.isdir=isdir
lfs.isfile=isfile
end
+local isreadable=file.isreadable
+local walkdir=function(p,...)
+ if isreadable(p.."/.") then
+ return scandir(p,...)
+ else
+ return function() end
+ end
+end
+lfs.walkdir=walkdir
function dir.current()
return (gsub(currentdir(),"\\","/"))
end
@@ -9942,7 +9951,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["util-fmt"] = package.loaded["util-fmt"] or true
--- original size: 2541, stripped down to: 1624
+-- original size: 3379, stripped down to: 2273
if not modules then modules={} end modules ['util-fmt']={
version=1.001,
@@ -9955,19 +9964,21 @@ utilities=utilities or {}
utilities.formatters=utilities.formatters or {}
local formatters=utilities.formatters
local concat,format=table.concat,string.format
-local tostring,type=tostring,type
+local tostring,type,unpack=tostring,type,unpack
local strip=string.strip
local lpegmatch=lpeg.match
local stripper=lpeg.patterns.stripzeros
function formatters.stripzeros(str)
return lpegmatch(stripper,str)
end
-function formatters.formatcolumns(result,between)
+function formatters.formatcolumns(result,between,header)
if result and #result>0 then
- between=between or " "
- local widths,numbers={},{}
+ local widths={}
+ local numbers={}
+ local templates={}
local first=result[1]
local n=#first
+ between=between or " "
for i=1,n do
widths[i]=0
end
@@ -9989,31 +10000,61 @@ function formatters.formatcolumns(result,between)
end
end
end
+ if header then
+ for i=1,#header do
+ local h=header[i]
+ for j=1,n do
+ local hj=tostring(h[j])
+ h[j]=hj
+ local w=#hj
+ if w>widths[j] then
+ widths[j]=w
+ end
+ end
+ end
+ end
for i=1,n do
local w=widths[i]
if numbers[i] then
if w>80 then
- widths[i]="%s"..between
- else
- widths[i]="%0"..w.."i"..between
+ templates[i]="%s"..between
+ else
+ templates[i]="% "..w.."i"..between
end
else
if w>80 then
- widths[i]="%s"..between
- elseif w>0 then
- widths[i]="%-"..w.."s"..between
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
else
- widths[i]="%s"
+ templates[i]="%s"
end
end
end
- local template=strip(concat(widths))
+ local template=strip(concat(templates))
for i=1,#result do
local str=format(template,unpack(result[i]))
result[i]=strip(str)
end
+ if header then
+ for i=1,n do
+ local w=widths[i]
+ if w>80 then
+ templates[i]="%s"..between
+ elseif w>0 then
+ templates[i]="%-"..w.."s"..between
+ else
+ templates[i]="%s"
+ end
+ end
+ local template=strip(concat(templates))
+ for i=1,#header do
+ local str=format(template,unpack(header[i]))
+ header[i]=strip(str)
+ end
+ end
end
- return result
+ return result,header
end
@@ -16397,7 +16438,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-tab"] = package.loaded["lxml-tab"] or true
--- original size: 62810, stripped down to: 36225
+-- original size: 62221, stripped down to: 36225
if not modules then modules={} end modules ['lxml-tab']={
version=1.001,
@@ -17878,7 +17919,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-lpt"] = package.loaded["lxml-lpt"] or true
--- original size: 54733, stripped down to: 31258
+-- original size: 54589, stripped down to: 31258
if not modules then modules={} end modules ['lxml-lpt']={
version=1.001,
@@ -19129,7 +19170,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-mis"] = package.loaded["lxml-mis"] or true
--- original size: 3574, stripped down to: 1808
+-- original size: 3542, stripped down to: 1808
if not modules then modules={} end modules ['lxml-mis']={
version=1.001,
@@ -19198,7 +19239,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["lxml-aux"] = package.loaded["lxml-aux"] or true
--- original size: 34661, stripped down to: 21511
+-- original size: 34522, stripped down to: 21511
if not modules then modules={} end modules ['lxml-aux']={
version=1.001,
@@ -21705,7 +21746,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-tmp"] = package.loaded["data-tmp"] or true
--- original size: 16456, stripped down to: 11636
+-- original size: 16433, stripped down to: 11636
if not modules then modules={} end modules ['data-tmp']={
version=1.100,
@@ -22240,7 +22281,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-res"] = package.loaded["data-res"] or true
--- original size: 69576, stripped down to: 44470
+-- original size: 70711, stripped down to: 44839
if not modules then modules={} end modules ['data-res']={
version=1.001,
@@ -22308,13 +22349,15 @@ local criticalvars={
if environment.default_texmfcnf then
resolvers.luacnfspec="home:texmf/web2c;"..environment.default_texmfcnf
else
- resolvers.luacnfspec=concat ({
- "home:texmf/web2c",
- "selfautoparent:/texmf-local/web2c",
- "selfautoparent:/texmf-context/web2c",
- "selfautoparent:/texmf-dist/web2c",
- "selfautoparent:/texmf/web2c",
- },";")
+ local texroot=environment.texroot
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-context/web2c;selfautoparent:/texmf/web2c"
+ if texroot and isdir(texroot.."/texmf-context") then
+ elseif texroot and isdir(texroot.."/texmf-dist") then
+ resolvers.luacnfspec="home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-dist/web2c;selfautoparent:/texmf/web2c"
+ elseif ostype~="windows" and isdir("/etc/texmf/web2c") then
+ resolvers.luacnfspec="home:texmf/web2c;/etc/texmf/web2c;selfautodir:/share/texmf/web2c"
+ else
+ end
end
local unset_variable="unset"
local formats=resolvers.formats
@@ -24249,7 +24292,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-con"] = package.loaded["data-con"] or true
--- original size: 5487, stripped down to: 3757
+-- original size: 5477, stripped down to: 3757
if not modules then modules={} end modules ['data-con']={
version=1.100,
@@ -24467,7 +24510,7 @@ do -- create closure to overcome 200 locals limit
package.loaded["data-zip"] = package.loaded["data-zip"] or true
--- original size: 10805, stripped down to: 7951
+-- original size: 10789, stripped down to: 7951
if not modules then modules={} end modules ['data-zip']={
version=1.001,
@@ -26095,8 +26138,8 @@ end -- of closure
-- used libraries : l-bit32.lua l-lua.lua l-macro.lua l-sandbox.lua l-package.lua l-lpeg.lua l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua l-file.lua l-gzip.lua l-md5.lua l-sha.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua l-math.lua util-str.lua util-tab.lua util-fil.lua util-sac.lua util-sto.lua util-prs.lua util-fmt.lua util-soc-imp-reset.lua util-soc-imp-socket.lua util-soc-imp-copas.lua util-soc-imp-ltn12.lua util-soc-imp-mime.lua util-soc-imp-url.lua util-soc-imp-headers.lua util-soc-imp-tp.lua util-soc-imp-http.lua util-soc-imp-ftp.lua util-soc-imp-smtp.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua util-lua.lua util-deb.lua util-tpl.lua util-sbx.lua util-mrg.lua util-env.lua luat-env.lua util-zip.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua data-tmf.lua data-lst.lua libs-ini.lua luat-sta.lua luat-fmt.lua
-- skipped libraries : -
--- original bytes : 1035917
--- stripped bytes : 408296
+-- original bytes : 1037183
+-- stripped bytes : 408369
-- end library merge
diff --git a/source/luametatex/CMakeLists.txt b/source/luametatex/CMakeLists.txt
index 29553c701..639c9b140 100644
--- a/source/luametatex/CMakeLists.txt
+++ b/source/luametatex/CMakeLists.txt
@@ -145,6 +145,7 @@ if (MSVC)
-Wcast-qual
-Wno-unknown-pragmas
+ -Wno-unused-result
-fno-strict-aliasing
)
diff --git a/source/luametatex/build.sh b/source/luametatex/build.sh
index 2f5514316..a8a834a7c 100644
--- a/source/luametatex/build.sh
+++ b/source/luametatex/build.sh
@@ -15,6 +15,9 @@
# mtxrun.lua (latest version)
# context.lua (latest version)
+# The ninja test below is not yet working reliably and I have no time (or motivation) to look into it now,
+# so for now we don't use ninja (not that critical).
+
#NINJA=$(which ninja);
#if (NINJA) then
# NINJA="-G Ninja"
@@ -49,6 +52,15 @@ then
cd build/mingw-64-ucrt
cmake $NINJA -DCMAKE_TOOLCHAIN_FILE=./cmake/mingw-64-ucrt.cmake ../..
+
+elif [ "$1" = "cygwin" ] || [ "$1" = "--cygwin" ]
+then
+ PLATFORM="cygwin"
+ SUFFIX=".exe"
+ mkdir -p build/cygwin
+ cd build/cygwin
+ cmake $NINJA ../..
+
else
PLATFORM="native"
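With this branch in place, a build for that platform would be started along the lines of `sh build.sh cygwin` from the luametatex source root under a Cygwin shell, which configures into build/cygwin and yields binaries with the .exe suffix; treat the exact invocation as a sketch rather than documented usage.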
diff --git a/source/luametatex/cmake/mimalloc.cmake b/source/luametatex/cmake/mimalloc.cmake
index 78d3944e2..02992344e 100644
--- a/source/luametatex/cmake/mimalloc.cmake
+++ b/source/luametatex/cmake/mimalloc.cmake
@@ -1,24 +1,26 @@
include("source/libraries/mimalloc/cmake/mimalloc-config-version.cmake")
set(mimalloc_sources
-
- source/libraries/mimalloc/src/stats.c
- source/libraries/mimalloc/src/random.c
- source/libraries/mimalloc/src/os.c
- source/libraries/mimalloc/src/bitmap.c
- source/libraries/mimalloc/src/arena.c
- # source/libraries/mimalloc/src/region.c
- source/libraries/mimalloc/src/segment-cache.c
- source/libraries/mimalloc/src/segment.c
- source/libraries/mimalloc/src/page.c
source/libraries/mimalloc/src/alloc.c
source/libraries/mimalloc/src/alloc-aligned.c
source/libraries/mimalloc/src/alloc-posix.c
+ source/libraries/mimalloc/src/arena.c
+ source/libraries/mimalloc/src/bitmap.c
source/libraries/mimalloc/src/heap.c
- source/libraries/mimalloc/src/options.c
source/libraries/mimalloc/src/init.c
+ source/libraries/mimalloc/src/options.c
+ source/libraries/mimalloc/src/os.c
+ source/libraries/mimalloc/src/page.c
+ source/libraries/mimalloc/src/random.c
+ source/libraries/mimalloc/src/segment.c
+ source/libraries/mimalloc/src/segment-cache.c
+ source/libraries/mimalloc/src/stats.c
+ source/libraries/mimalloc/src/prim/prim.c
)
+set(mi_cflags "")
+set(mi_libraries "")
+
add_library(mimalloc STATIC ${mimalloc_sources})
# set(CMAKE_C_STANDARD 11)
@@ -27,6 +29,7 @@ add_library(mimalloc STATIC ${mimalloc_sources})
target_include_directories(mimalloc PRIVATE
source/libraries/mimalloc
source/libraries/mimalloc/src
+ source/libraries/mimalloc/prim
source/libraries/mimalloc/include
)
diff --git a/source/luametatex/source/libraries/mimalloc/CMakeLists.txt b/source/luametatex/source/libraries/mimalloc/CMakeLists.txt
index 74c1f2916..35d5d6509 100644
--- a/source/luametatex/source/libraries/mimalloc/CMakeLists.txt
+++ b/source/luametatex/source/libraries/mimalloc/CMakeLists.txt
@@ -6,12 +6,13 @@ set(CMAKE_CXX_STANDARD 17)
option(MI_SECURE "Use full security mitigations (like guard pages, allocation randomization, double-free mitigation, and free-list corruption detection)" OFF)
option(MI_DEBUG_FULL "Use full internal heap invariant checking in DEBUG mode (expensive)" OFF)
-option(MI_PADDING "Enable padding to detect heap block overflow (used only in DEBUG mode or with Valgrind)" ON)
+option(MI_PADDING "Enable padding to detect heap block overflow (always on in DEBUG or SECURE mode, or with Valgrind/ASAN)" OFF)
option(MI_OVERRIDE "Override the standard malloc interface (e.g. define entry points for malloc() etc)" ON)
option(MI_XMALLOC "Enable abort() call on memory allocation failure by default" OFF)
option(MI_SHOW_ERRORS "Show error and warning messages by default (only enabled by default in DEBUG mode)" OFF)
-option(MI_VALGRIND "Compile with Valgrind support (adds a small overhead)" OFF)
-option(MI_ASAN "Compile with address sanitizer support (adds a small overhead)" OFF)
+option(MI_TRACK_VALGRIND "Compile with Valgrind support (adds a small overhead)" OFF)
+option(MI_TRACK_ASAN "Compile with address sanitizer support (adds a small overhead)" OFF)
+option(MI_TRACK_ETW "Compile with Windows event tracing (ETW) support (adds a small overhead)" OFF)
option(MI_USE_CXX "Use the C++ compiler to compile the library (instead of the C compiler)" OFF)
option(MI_SEE_ASM "Generate assembly files" OFF)
option(MI_OSX_INTERPOSE "Use interpose to override standard malloc on macOS" ON)
@@ -24,7 +25,8 @@ option(MI_BUILD_OBJECT "Build object library" ON)
option(MI_BUILD_TESTS "Build test executables" ON)
option(MI_DEBUG_TSAN "Build with thread sanitizer (needs clang)" OFF)
option(MI_DEBUG_UBSAN "Build with undefined-behavior sanitizer (needs clang++)" OFF)
-option(MI_SKIP_COLLECT_ON_EXIT, "Skip collecting memory on program exit" OFF)
+option(MI_SKIP_COLLECT_ON_EXIT "Skip collecting memory on program exit" OFF)
+option(MI_NO_PADDING "Force no use of padding even in DEBUG mode etc." OFF)
# deprecated options
option(MI_CHECK_FULL "Use full internal invariant checking in DEBUG mode (deprecated, use MI_DEBUG_FULL instead)" OFF)
@@ -36,21 +38,24 @@ include(GNUInstallDirs)
include("cmake/mimalloc-config-version.cmake")
set(mi_sources
- src/stats.c
- src/random.c
- src/os.c
- src/bitmap.c
- src/arena.c
- src/segment-cache.c
- src/segment.c
- src/page.c
src/alloc.c
src/alloc-aligned.c
src/alloc-posix.c
+ src/arena.c
+ src/bitmap.c
src/heap.c
+ src/init.c
src/options.c
- src/init.c)
+ src/os.c
+ src/page.c
+ src/random.c
+ src/segment.c
+ src/segment-cache.c
+ src/stats.c
+ src/prim/prim.c)
+set(mi_cflags "")
+set(mi_libraries "")
# -----------------------------------------------------------------------------
# Convenience: set default build type depending on the build directory
@@ -87,7 +92,7 @@ if(MI_OVERRIDE)
if(MI_OSX_ZONE)
# use zone's on macOS
message(STATUS " Use malloc zone to override malloc (MI_OSX_ZONE=ON)")
- list(APPEND mi_sources src/alloc-override-osx.c)
+ list(APPEND mi_sources src/prim/osx/alloc-override-zone.c)
list(APPEND mi_defines MI_OSX_ZONE=1)
if (NOT MI_OSX_INTERPOSE)
message(STATUS " WARNING: zone overriding usually also needs interpose (use -DMI_OSX_INTERPOSE=ON)")
@@ -122,43 +127,60 @@ endif()
if(MI_SECURE)
message(STATUS "Set full secure build (MI_SECURE=ON)")
- list(APPEND mi_defines MI_SECURE=4)
- #if (MI_VALGRIND)
- # message(WARNING "Secure mode is a bit weakened when compiling with Valgrind support as buffer overflow detection is no longer byte-precise (if running without valgrind)")
- #endif()
+ list(APPEND mi_defines MI_SECURE=4)
endif()
-if(MI_VALGRIND)
+if(MI_TRACK_VALGRIND)
CHECK_INCLUDE_FILES("valgrind/valgrind.h;valgrind/memcheck.h" MI_HAS_VALGRINDH)
if (NOT MI_HAS_VALGRINDH)
- set(MI_VALGRIND OFF)
+ set(MI_TRACK_VALGRIND OFF)
message(WARNING "Cannot find the 'valgrind/valgrind.h' and 'valgrind/memcheck.h' -- install valgrind first")
- message(STATUS "Compile **without** Valgrind support (MI_VALGRIND=OFF)")
+ message(STATUS "Compile **without** Valgrind support (MI_TRACK_VALGRIND=OFF)")
else()
- message(STATUS "Compile with Valgrind support (MI_VALGRIND=ON)")
- list(APPEND mi_defines MI_VALGRIND=1)
+ message(STATUS "Compile with Valgrind support (MI_TRACK_VALGRIND=ON)")
+ list(APPEND mi_defines MI_TRACK_VALGRIND=1)
endif()
endif()
-if(MI_ASAN)
- if (MI_VALGRIND)
- set(MI_ASAN OFF)
- message(WARNING "Cannot enable address sanitizer support with also Valgrind support enabled (MI_ASAN=OFF)")
- else()
+if(MI_TRACK_ASAN)
+ if (APPLE AND MI_OVERRIDE)
+ set(MI_TRACK_ASAN OFF)
+ message(WARNING "Cannot enable address sanitizer support on macOS if MI_OVERRIDE is ON (MI_TRACK_ASAN=OFF)")
+ endif()
+ if (MI_TRACK_VALGRIND)
+ set(MI_TRACK_ASAN OFF)
+ message(WARNING "Cannot enable address sanitizer support with also Valgrind support enabled (MI_TRACK_ASAN=OFF)")
+ endif()
+ if(MI_TRACK_ASAN)
CHECK_INCLUDE_FILES("sanitizer/asan_interface.h" MI_HAS_ASANH)
if (NOT MI_HAS_ASANH)
- set(MI_ASAN OFF)
+ set(MI_TRACK_ASAN OFF)
message(WARNING "Cannot find the 'sanitizer/asan_interface.h' -- install address sanitizer support first")
- message(STATUS "Compile **without** address sanitizer support (MI_ASAN=OFF)")
+ message(STATUS "Compile **without** address sanitizer support (MI_TRACK_ASAN=OFF)")
else()
- message(STATUS "Compile with address sanitizer support (MI_ASAN=ON)")
- list(APPEND mi_defines MI_ASAN=1)
+ message(STATUS "Compile with address sanitizer support (MI_TRACK_ASAN=ON)")
+ list(APPEND mi_defines MI_TRACK_ASAN=1)
list(APPEND mi_cflags -fsanitize=address)
- list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=address)
+ list(APPEND mi_libraries -fsanitize=address)
endif()
endif()
endif()
+if(MI_TRACK_ETW)
+ if(NOT WIN32)
+ set(MI_TRACK_ETW OFF)
+ message(WARNING "Can only enable ETW support on Windows (MI_TRACK_ETW=OFF)")
+ endif()
+ if (MI_TRACK_VALGRIND OR MI_TRACK_ASAN)
+ set(MI_TRACK_ETW OFF)
+ message(WARNING "Cannot enable ETW support with also Valgrind or ASAN support enabled (MI_TRACK_ETW=OFF)")
+ endif()
+ if(MI_TRACK_ETW)
+ message(STATUS "Compile with Windows event tracing support (MI_TRACK_ETW=ON)")
+ list(APPEND mi_defines MI_TRACK_ETW=1)
+ endif()
+endif()
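The renamed tracking switches remain ordinary CMake cache options, so a Valgrind-instrumented library would be configured along the lines of `cmake -DMI_TRACK_VALGRIND=ON ..`; this is a hedged example, the exact command line is not part of the patch.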
+
if(MI_SEE_ASM)
message(STATUS "Generate assembly listings (MI_SEE_ASM=ON)")
list(APPEND mi_cflags -save-temps)
@@ -179,9 +201,14 @@ if(MI_DEBUG_FULL)
list(APPEND mi_defines MI_DEBUG=3) # full invariant checking
endif()
-if(NOT MI_PADDING)
- message(STATUS "Disable padding of heap blocks in debug mode (MI_PADDING=OFF)")
+if(MI_NO_PADDING)
+ message(STATUS "Suppress any padding of heap blocks (MI_NO_PADDING=ON)")
list(APPEND mi_defines MI_PADDING=0)
+else()
+ if(MI_PADDING)
+ message(STATUS "Enable explicit padding of heap blocks (MI_PADDING=ON)")
+ list(APPEND mi_defines MI_PADDING=1)
+ endif()
endif()
if(MI_XMALLOC)
@@ -199,7 +226,7 @@ if(MI_DEBUG_TSAN)
message(STATUS "Build with thread sanitizer (MI_DEBUG_TSAN=ON)")
list(APPEND mi_defines MI_TSAN=1)
list(APPEND mi_cflags -fsanitize=thread -g -O1)
- list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=thread)
+ list(APPEND mi_libraries -fsanitize=thread)
else()
message(WARNING "Can only use thread sanitizer with clang (MI_DEBUG_TSAN=ON but ignored)")
endif()
@@ -210,7 +237,7 @@ if(MI_DEBUG_UBSAN)
if(CMAKE_CXX_COMPILER_ID MATCHES "Clang")
message(STATUS "Build with undefined-behavior sanitizer (MI_DEBUG_UBSAN=ON)")
list(APPEND mi_cflags -fsanitize=undefined -g -fno-sanitize-recover=undefined)
- list(APPEND CMAKE_EXE_LINKER_FLAGS -fsanitize=undefined)
+ list(APPEND mi_libraries -fsanitize=undefined)
if (NOT MI_USE_CXX)
message(STATUS "(switch to use C++ due to MI_DEBUG_UBSAN)")
set(MI_USE_CXX "ON")
@@ -235,6 +262,11 @@ if(MI_USE_CXX)
endif()
endif()
+if(CMAKE_SYSTEM_NAME MATCHES "Haiku")
+ SET(CMAKE_INSTALL_LIBDIR ~/config/non-packaged/lib)
+ SET(CMAKE_INSTALL_INCLUDEDIR ~/config/non-packaged/headers)
+endif()
+
# Compiler flags
if(CMAKE_C_COMPILER_ID MATCHES "AppleClang|Clang|GNU")
list(APPEND mi_cflags -Wall -Wextra -Wno-unknown-pragmas -fvisibility=hidden)
@@ -314,10 +346,10 @@ set(mi_basename "mimalloc")
if(MI_SECURE)
set(mi_basename "${mi_basename}-secure")
endif()
-if(MI_VALGRIND)
+if(MI_TRACK_VALGRIND)
set(mi_basename "${mi_basename}-valgrind")
endif()
-if(MI_ASAN)
+if(MI_TRACK_ASAN)
set(mi_basename "${mi_basename}-asan")
endif()
string(TOLOWER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE_LC)
@@ -430,12 +462,20 @@ if (MI_BUILD_OBJECT)
$<INSTALL_INTERFACE:${mi_install_incdir}>
)
+ # Copy the generated object file (`static.o`) to the output directory (as `mimalloc.o`)
+ if(NOT WIN32)
+ set(mimalloc-obj-static "${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/mimalloc-obj.dir/src/static.c${CMAKE_C_OUTPUT_EXTENSION}")
+ set(mimalloc-obj-out "${CMAKE_CURRENT_BINARY_DIR}/${mi_basename}${CMAKE_C_OUTPUT_EXTENSION}")
+ add_custom_command(OUTPUT ${mimalloc-obj-out} DEPENDS mimalloc-obj COMMAND "${CMAKE_COMMAND}" -E copy "${mimalloc-obj-static}" "${mimalloc-obj-out}")
+ add_custom_target(mimalloc-obj-target ALL DEPENDS ${mimalloc-obj-out})
+ endif()
+
# the following seems to lead to cmake warnings/errors on some systems, disable for now :-(
# install(TARGETS mimalloc-obj EXPORT mimalloc DESTINATION ${mi_install_objdir})
# the FILES expression can also be: $<TARGET_OBJECTS:mimalloc-obj>
# but that fails cmake versions less than 3.10 so we leave it as is for now
- install(FILES ${CMAKE_CURRENT_BINARY_DIR}/CMakeFiles/mimalloc-obj.dir/src/static.c${CMAKE_C_OUTPUT_EXTENSION}
+ install(FILES ${mimalloc-obj-static}
DESTINATION ${mi_install_objdir}
RENAME ${mi_basename}${CMAKE_C_OUTPUT_EXTENSION} )
endif()
diff --git a/source/luametatex/source/libraries/mimalloc/cmake/mimalloc-config-version.cmake b/source/luametatex/source/libraries/mimalloc/cmake/mimalloc-config-version.cmake
index 64d710533..842c733ee 100644
--- a/source/luametatex/source/libraries/mimalloc/cmake/mimalloc-config-version.cmake
+++ b/source/luametatex/source/libraries/mimalloc/cmake/mimalloc-config-version.cmake
@@ -1,6 +1,6 @@
set(mi_version_major 2)
-set(mi_version_minor 0)
-set(mi_version_patch 9)
+set(mi_version_minor 1)
+set(mi_version_patch 0)
set(mi_version ${mi_version_major}.${mi_version_minor})
set(PACKAGE_VERSION ${mi_version})
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc-track.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc-track.h
deleted file mode 100644
index f60d7acd0..000000000
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc-track.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
-This is free software; you can redistribute it and/or modify it under the
-terms of the MIT license. A copy of the license can be found in the file
-"LICENSE" at the root of this distribution.
------------------------------------------------------------------------------*/
-#pragma once
-#ifndef MIMALLOC_TRACK_H
-#define MIMALLOC_TRACK_H
-
-// ------------------------------------------------------
-// Track memory ranges with macros for tools like Valgrind
-// address sanitizer, or other memory checkers.
-// ------------------------------------------------------
-
-#if MI_VALGRIND
-
-#define MI_TRACK_ENABLED 1
-#define MI_TRACK_TOOL "valgrind"
-
-#include <valgrind/valgrind.h>
-#include <valgrind/memcheck.h>
-
-#define mi_track_malloc(p,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
-#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
-#define mi_track_free(p) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
-#define mi_track_free_size(p,_size) mi_track_free(p)
-#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size)
-#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size)
-#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size)
-
-#elif MI_ASAN
-
-#define MI_TRACK_ENABLED 1
-#define MI_TRACK_TOOL "asan"
-
-#include <sanitizer/asan_interface.h>
-
-#define mi_track_malloc(p,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
-#define mi_track_resize(p,oldsize,newsize) ASAN_POISON_MEMORY_REGION(p,oldsize); ASAN_UNPOISON_MEMORY_REGION(p,newsize)
-#define mi_track_free(p) ASAN_POISON_MEMORY_REGION(p,mi_usable_size(p))
-#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size)
-#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
-#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
-#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size)
-
-#else
-
-#define MI_TRACK_ENABLED 0
-#define MI_TRACK_TOOL "none"
-
-#define mi_track_malloc(p,size,zero)
-#define mi_track_resize(p,oldsize,newsize)
-#define mi_track_free(p)
-#define mi_track_free_size(p,_size)
-#define mi_track_mem_defined(p,size)
-#define mi_track_mem_undefined(p,size)
-#define mi_track_mem_noaccess(p,size)
-
-#endif
-
-#endif
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc.h
index 9b72fbfda..1372cb3f2 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -8,7 +8,7 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_H
#define MIMALLOC_H
-#define MI_MALLOC_VERSION 209 // major + 2 digits minor
+#define MI_MALLOC_VERSION 210 // major + 2 digits minor
// ------------------------------------------------------
// Compiler specific attributes
@@ -477,11 +477,13 @@ template<class T1,class T2> bool operator==(const mi_stl_allocator<T1>& , const
template<class T1,class T2> bool operator!=(const mi_stl_allocator<T1>& , const mi_stl_allocator<T2>& ) mi_attr_noexcept { return false; }
-#if (__cplusplus >= 201103L) || (_MSC_VER > 1900) // C++11
+#if (__cplusplus >= 201103L) || (_MSC_VER >= 1900) // C++11
+#define MI_HAS_HEAP_STL_ALLOCATOR 1
+
#include <memory> // std::shared_ptr
// Common base class for STL allocators in a specific heap
-template<class T, bool destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
+template<class T, bool _mi_destroy> struct _mi_heap_stl_allocator_common : public _mi_stl_allocator_common<T> {
using typename _mi_stl_allocator_common<T>::size_type;
using typename _mi_stl_allocator_common<T>::value_type;
using typename _mi_stl_allocator_common<T>::pointer;
@@ -500,7 +502,7 @@ template<class T, bool destroy> struct _mi_heap_stl_allocator_common : public _m
#endif
void collect(bool force) { mi_heap_collect(this->heap.get(), force); }
- template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, destroy>& x) const { return (this->heap == x.heap); }
+ template<class U> bool is_equal(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) const { return (this->heap == x.heap); }
protected:
std::shared_ptr<mi_heap_t> heap;
@@ -508,10 +510,10 @@ protected:
_mi_heap_stl_allocator_common() {
mi_heap_t* hp = mi_heap_new();
- this->heap.reset(hp, (destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
+ this->heap.reset(hp, (_mi_destroy ? &heap_destroy : &heap_delete)); /* calls heap_delete/destroy when the refcount drops to zero */
}
_mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common& x) mi_attr_noexcept : heap(x.heap) { }
- template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, destroy>& x) mi_attr_noexcept : heap(x.heap) { }
+ template<class U> _mi_heap_stl_allocator_common(const _mi_heap_stl_allocator_common<U, _mi_destroy>& x) mi_attr_noexcept : heap(x.heap) { }
private:
static void heap_delete(mi_heap_t* hp) { if (hp != NULL) { mi_heap_delete(hp); } }
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc-atomic.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h
index c66f80493..fe79fbcaf 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc-atomic.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/atomic.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2021 Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -275,6 +275,15 @@ static inline intptr_t mi_atomic_subi(_Atomic(intptr_t)*p, intptr_t sub) {
return (intptr_t)mi_atomic_addi(p, -sub);
}
+typedef _Atomic(uintptr_t) mi_atomic_once_t;
+
+// Returns true only on the first invocation
+static inline bool mi_atomic_once( mi_atomic_once_t* once ) {
+ if (mi_atomic_load_relaxed(once) != 0) return false; // quick test
+ uintptr_t expected = 0;
+ return mi_atomic_cas_strong_acq_rel(once, &expected, 1); // try to set to 1
+}
+
// Yield
#if defined(__cplusplus)
#include <thread>
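The added mi_atomic_once helper turns a relaxed load plus a strong acquire-release CAS into a one-shot guard. A minimal usage sketch, assuming the reorganized header path and with init_subsystem as a hypothetical one-time initializer that is not part of mimalloc:

#include "mimalloc/atomic.h"               // assumed include path after the header move

extern void init_subsystem(void);          // hypothetical one-time initializer

static mi_atomic_once_t subsystem_once;    // zero-initialized, so the first CAS can win

static void ensure_subsystem(void) {
  if (mi_atomic_once(&subsystem_once)) {   // returns true exactly once, for the winning thread
    init_subsystem();
  }
}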
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h
index a68e69662..a4495c161 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc-internal.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/internal.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2022, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -8,8 +8,14 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_INTERNAL_H
#define MIMALLOC_INTERNAL_H
-#include "mimalloc-types.h"
-#include "mimalloc-track.h"
+
+// --------------------------------------------------------------------------
+// This file contains the internal APIs of mimalloc and various utility
+// functions and macros.
+// --------------------------------------------------------------------------
+
+#include "mimalloc/types.h"
+#include "mimalloc/track.h"
#if (MI_DEBUG>0)
#define mi_trace_message(...) _mi_trace_message(__VA_ARGS__)
@@ -44,6 +50,7 @@ terms of the MIT license. A copy of the license can be found in the file
#define mi_decl_externc
#endif
+// pthreads
#if !defined(_WIN32) && !defined(__wasi__)
#define MI_USE_PTHREADS
#include <pthread.h>
@@ -73,36 +80,46 @@ extern mi_decl_cache_align mi_stats_t _mi_stats_main;
extern mi_decl_cache_align const mi_page_t _mi_page_empty;
bool _mi_is_main_thread(void);
size_t _mi_current_thread_count(void);
-bool _mi_preloading(void); // true while the C runtime is not ready
+bool _mi_preloading(void); // true while the C runtime is not ready
+mi_threadid_t _mi_thread_id(void) mi_attr_noexcept;
+mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
+void _mi_thread_done(mi_heap_t* heap);
// os.c
-size_t _mi_os_page_size(void);
void _mi_os_init(void); // called from process init
void* _mi_os_alloc(size_t size, mi_stats_t* stats); // to allocate thread local data
void _mi_os_free(void* p, size_t size, mi_stats_t* stats); // to free thread local data
-
-bool _mi_os_protect(void* addr, size_t size);
-bool _mi_os_unprotect(void* addr, size_t size);
-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats);
-bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
-bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
-// bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
+size_t _mi_os_page_size(void);
size_t _mi_os_good_alloc_size(size_t size);
bool _mi_os_has_overcommit(void);
+
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* tld_stats);
+bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
+bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool _mi_os_protect(void* addr, size_t size);
+bool _mi_os_unprotect(void* addr, size_t size);
+void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* stats);
void* _mi_os_alloc_aligned_offset(size_t size, size_t alignment, size_t align_offset, bool commit, bool* large, mi_stats_t* tld_stats);
void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_offset, bool was_committed, mi_stats_t* tld_stats);
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size);
+bool _mi_os_use_large_page(size_t size, size_t alignment);
+size_t _mi_os_large_page_size(void);
+
+void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
+void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
+void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
// arena.c
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats);
mi_arena_id_t _mi_arena_id_none(void);
-bool _mi_arena_memid_is_suitable(size_t memid, mi_arena_id_t req_arena_id);
+void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats);
+void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena_id);
+bool _mi_arena_is_os_allocated(size_t arena_memid);
// "segment-cache.c"
-void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
+void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld);
void _mi_segment_cache_collect(bool force, mi_os_tld_t* tld);
void _mi_segment_cache_free_all(mi_os_tld_t* tld);
@@ -127,8 +144,6 @@ void _mi_abandoned_reclaim_all(mi_heap_t* heap, mi_segments_tld_t* tld);
void _mi_abandoned_await_readers(void);
void _mi_abandoned_collect(mi_heap_t* heap, bool force, mi_segments_tld_t* tld);
-
-
// "page.c"
void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_alignment) mi_attr_noexcept mi_attr_malloc;
@@ -160,7 +175,6 @@ void _mi_heap_destroy_all(void);
// "stats.c"
void _mi_stats_done(mi_stats_t* stats);
-
mi_msecs_t _mi_clock_now(void);
mi_msecs_t _mi_clock_end(mi_msecs_t start);
mi_msecs_t _mi_clock_start(void);
@@ -173,6 +187,16 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool
mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* page, const void* p);
bool _mi_free_delayed_block(mi_block_t* block);
void _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept; // for runtime integration
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size);
+
+// options.c, C primitives
+char _mi_toupper(char c);
+int _mi_strnicmp(const char* s, const char* t, size_t n);
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size);
+void _mi_strlcat(char* dest, const char* src, size_t dest_size);
+size_t _mi_strlen(const char* s);
+size_t _mi_strnlen(const char* s, size_t max_len);
+
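Assuming these new bounded helpers follow the usual strlcpy/strlcat/strnlen semantics (bounded copy and append with guaranteed NUL termination, length capped at a maximum), a small hedged usage sketch:

static void build_option_message(void) {
  char buf[64];
  _mi_strlcpy(buf, "mimalloc option: ", sizeof(buf));   // bounded copy, NUL-terminated
  _mi_strlcat(buf, "verbose", sizeof(buf));             // bounded append within the buffer
  size_t len = _mi_strnlen(buf, sizeof(buf));           // length, capped at the buffer size
  (void)len;
}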
#if MI_DEBUG>1
bool _mi_page_is_valid(mi_page_t* page);
@@ -340,93 +364,11 @@ static inline bool mi_count_size_overflow(size_t count, size_t size, size_t* tot
}
-/* ----------------------------------------------------------------------------------------
-The thread local default heap: `_mi_get_default_heap` returns the thread local heap.
-On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
-__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
-that the storage will always be available (allocated on the thread stacks).
-On some platforms though we cannot use that when overriding `malloc` since the underlying
-TLS implementation (or the loader) will call itself `malloc` on a first access and recurse.
-We try to circumvent this in an efficient way:
-- macOSX : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On OSX, the
- loader itself calls `malloc` even before the modules are initialized.
-- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
-- DragonFly: defaults are working but seem slow compared to freeBSD (see PR #323)
+/*----------------------------------------------------------------------------------------
+ Heap functions
------------------------------------------------------------------------------------------- */
extern const mi_heap_t _mi_heap_empty; // read-only empty heap, initial value of the thread local default heap
-extern bool _mi_process_is_initialized;
-mi_heap_t* _mi_heap_main_get(void); // statically allocated main backing heap
-
-#if defined(MI_MALLOC_OVERRIDE)
-#if defined(__APPLE__) // macOS
-#define MI_TLS_SLOT 89 // seems unused?
-// #define MI_TLS_RECURSE_GUARD 1
-// other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
-// see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
-#elif defined(__OpenBSD__)
-// use end bytes of a name; goes wrong if anyone uses names > 23 characters (ptrhread specifies 16)
-// see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
-#define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
-// #elif defined(__DragonFly__)
-// #warning "mimalloc is not working correctly on DragonFly yet."
-// #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
-#elif defined(__ANDROID__)
-// See issue #381
-#define MI_TLS_PTHREAD
-#endif
-#endif
-
-#if defined(MI_TLS_SLOT)
-static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept; // forward declaration
-#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
-static inline mi_heap_t** mi_tls_pthread_heap_slot(void) {
- pthread_t self = pthread_self();
- #if defined(__DragonFly__)
- if (self==NULL) {
- mi_heap_t* pheap_main = _mi_heap_main_get();
- return &pheap_main;
- }
- #endif
- return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
-}
-#elif defined(MI_TLS_PTHREAD)
-extern pthread_key_t _mi_heap_default_key;
-#endif
-
-// Default heap to allocate from (if not using TLS- or pthread slots).
-// Do not use this directly but use through `mi_heap_get_default()` (or the unchecked `mi_get_default_heap`).
-// This thread local variable is only used when neither MI_TLS_SLOT, MI_TLS_PTHREAD, or MI_TLS_PTHREAD_SLOT_OFS are defined.
-// However, on the Apple M1 we do use the address of this variable as the unique thread-id (issue #356).
-extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
-
-static inline mi_heap_t* mi_get_default_heap(void) {
-#if defined(MI_TLS_SLOT)
- mi_heap_t* heap = (mi_heap_t*)mi_tls_slot(MI_TLS_SLOT);
- if mi_unlikely(heap == NULL) {
- #ifdef __GNUC__
- __asm(""); // prevent conditional load of the address of _mi_heap_empty
- #endif
- heap = (mi_heap_t*)&_mi_heap_empty;
- }
- return heap;
-#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
- mi_heap_t* heap = *mi_tls_pthread_heap_slot();
- return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
-#elif defined(MI_TLS_PTHREAD)
- mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
- return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
-#else
- #if defined(MI_TLS_RECURSE_GUARD)
- if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
- #endif
- return _mi_heap_default;
-#endif
-}
-
-static inline bool mi_heap_is_default(const mi_heap_t* heap) {
- return (heap == mi_get_default_heap());
-}
static inline bool mi_heap_is_backing(const mi_heap_t* heap) {
return (heap->tld->heap_backing == heap);
@@ -454,11 +396,6 @@ static inline mi_page_t* _mi_heap_get_free_small_page(mi_heap_t* heap, size_t si
return heap->pages_free_direct[idx];
}
-// Get the page belonging to a certain size class
-static inline mi_page_t* _mi_get_free_small_page(size_t size) {
- return _mi_heap_get_free_small_page(mi_get_default_heap(), size);
-}
-
// Segment that contains the pointer
// Large aligned blocks may be aligned at N*MI_SEGMENT_SIZE (inside a huge segment > MI_SEGMENT_SIZE),
// and we need align "down" to the segment info which is `MI_SEGMENT_SIZE` bytes before it;
@@ -834,107 +771,6 @@ static inline size_t _mi_os_numa_node_count(void) {
}
-// -------------------------------------------------------------------
-// Getting the thread id should be performant as it is called in the
-// fast path of `_mi_free` and we specialize for various platforms.
-// We only require _mi_threadid() to return a unique id for each thread.
-// -------------------------------------------------------------------
-#if defined(_WIN32)
-
-#define WIN32_LEAN_AND_MEAN
-#include <windows.h>
-static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
- // Windows: works on Intel and ARM in both 32- and 64-bit
- return (uintptr_t)NtCurrentTeb();
-}
-
-// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
-// both the OS and libc implementation so we use specific tests for each main platform.
-// If you test on another platform and it works please send a PR :-)
-// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
-#elif defined(__GNUC__) && ( \
- (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
- || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
- || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
- || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
- || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
- )
-
-static inline void* mi_tls_slot(size_t slot) mi_attr_noexcept {
- void* res;
- const size_t ofs = (slot*sizeof(void*));
- #if defined(__i386__)
- __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS
- #elif defined(__APPLE__) && defined(__x86_64__)
- __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
- #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
- __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
- #elif defined(__x86_64__)
- __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
- #elif defined(__arm__)
- void** tcb; MI_UNUSED(ofs);
- __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
- res = tcb[slot];
- #elif defined(__aarch64__)
- void** tcb; MI_UNUSED(ofs);
- #if defined(__APPLE__) // M1, issue #343
- __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
- #else
- __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
- #endif
- res = tcb[slot];
- #endif
- return res;
-}
-
-// setting a tls slot is only used on macOS for now
-static inline void mi_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
- const size_t ofs = (slot*sizeof(void*));
- #if defined(__i386__)
- __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
- #elif defined(__APPLE__) && defined(__x86_64__)
- __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS
- #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
- __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
- #elif defined(__x86_64__)
- __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
- #elif defined(__arm__)
- void** tcb; MI_UNUSED(ofs);
- __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
- tcb[slot] = value;
- #elif defined(__aarch64__)
- void** tcb; MI_UNUSED(ofs);
- #if defined(__APPLE__) // M1, issue #343
- __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
- #else
- __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
- #endif
- tcb[slot] = value;
- #endif
-}
-
-static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
- #if defined(__BIONIC__)
- // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
- // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
- return (uintptr_t)mi_tls_slot(1);
- #else
- // in all our other targets, slot 0 is the thread id
- // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
- // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
- return (uintptr_t)mi_tls_slot(0);
- #endif
-}
-
-#else
-
-// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
-static inline mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
- return (uintptr_t)&_mi_heap_default;
-}
-
-#endif
-
// -----------------------------------------------------------------------
// Count bits: trailing or leading zeros (with MI_INTPTR_BITS on all zero)
@@ -964,6 +800,7 @@ static inline size_t mi_ctz(uintptr_t x) {
#elif defined(_MSC_VER)
#include <limits.h> // LONG_MAX
+#include <intrin.h> // BitScanReverse64
#define MI_HAVE_FAST_BITSCAN
static inline size_t mi_clz(uintptr_t x) {
if (x==0) return MI_INTPTR_BITS;
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h
new file mode 100644
index 000000000..68f0871e8
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/prim.h
@@ -0,0 +1,311 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_PRIM_H
+#define MIMALLOC_PRIM_H
+
+
+// --------------------------------------------------------------------------
+// This file specifies the primitive portability API.
+// Each OS/host needs to implement these primitives, see `src/prim`
+// for implementations on Windows, macOS, WASI, and Linux/Unix.
+//
+// note: on all primitive functions, we always get:
+// addr != NULL and page aligned
+// size > 0 and page aligned
+// the return value is an error code: an int where 0 means success.
+// --------------------------------------------------------------------------
+
+// OS memory configuration
+typedef struct mi_os_mem_config_s {
+ size_t page_size; // 4KiB
+ size_t large_page_size; // 2MiB
+ size_t alloc_granularity; // smallest allocation size (on Windows 64KiB)
+ bool has_overcommit; // can we reserve more memory than can actually be committed?
+ bool must_free_whole; // must allocated blocks be freed as a whole (false for mmap, true for VirtualAlloc)
+} mi_os_mem_config_t;
+
+// Initialize
+void _mi_prim_mem_init( mi_os_mem_config_t* config );
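To make the contract concrete, here is a hedged POSIX-flavoured sketch of how a host might fill in the configuration record; it assumes the declarations above are in scope and is illustrative only, not the code that lives in src/prim:

#include <unistd.h>                                     // sysconf

void _mi_prim_mem_init(mi_os_mem_config_t* config) {
  long ps = sysconf(_SC_PAGESIZE);                      // query the OS page size
  config->page_size         = (ps > 0 ? (size_t)ps : 4096);
  config->large_page_size   = 2 * 1024 * 1024;          // assume 2 MiB large pages
  config->alloc_granularity = config->page_size;        // mmap can hand out single pages
  config->has_overcommit    = true;                     // typical for Linux-style mmap
  config->must_free_whole   = false;                    // munmap can release partial ranges
}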
+
+// Free OS memory
+int _mi_prim_free(void* addr, size_t size );
+
+// Allocate OS memory. Return NULL on error.
+// The `try_alignment` is just a hint and the returned pointer does not have to be aligned.
+// pre: !commit => !allow_large
+// try_alignment >= _mi_os_page_size() and a power of 2
+int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, void** addr);
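A hedged mmap-based sketch of this primitive, only to illustrate the error-code convention (0 on success, an errno value otherwise); the real implementations live in src/prim:

#include <sys/mman.h>
#include <errno.h>

int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit,
                   bool allow_large, bool* is_large, void** addr) {
  (void)try_alignment; (void)allow_large;               // treated as pure hints in this sketch
  int prot = commit ? (PROT_READ | PROT_WRITE) : PROT_NONE;
  void* p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) return errno;                    // non-zero error code on failure
  *is_large = false;                                    // this sketch never uses large pages
  *addr = p;
  return 0;                                             // success
}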
+
+// Commit memory. Returns error code or 0 on success.
+int _mi_prim_commit(void* addr, size_t size, bool commit);
+
+// Reset memory. The range remains accessible but its content might be reset.
+// Returns error code or 0 on success.
+int _mi_prim_reset(void* addr, size_t size);
+
+// Protect memory. Returns error code or 0 on success.
+int _mi_prim_protect(void* addr, size_t size, bool protect);
+
+// Allocate huge (1GiB) pages possibly associated with a NUMA node.
+// pre: size > 0 and a multiple of 1GiB.
+// addr is either NULL or an address hint.
+// numa_node is either negative (don't care), or a numa node number.
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, void** addr);
+
+// Return the current NUMA node
+size_t _mi_prim_numa_node(void);
+
+// Return the number of logical NUMA nodes
+size_t _mi_prim_numa_node_count(void);
+
+// Clock ticks
+mi_msecs_t _mi_prim_clock_now(void);
+
+// Return process information (only for statistics)
+typedef struct mi_process_info_s {
+ mi_msecs_t elapsed;
+ mi_msecs_t utime;
+ mi_msecs_t stime;
+ size_t current_rss;
+ size_t peak_rss;
+ size_t current_commit;
+ size_t peak_commit;
+ size_t page_faults;
+} mi_process_info_t;
+
+void _mi_prim_process_info(mi_process_info_t* pinfo);
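A hedged caller-side sketch of how statistics code could consume this record; the printf is purely illustrative, mimalloc itself routes its output elsewhere:

#include <stdio.h>

static void report_process_info(void) {
  mi_process_info_t info = {0};
  _mi_prim_process_info(&info);                         // filled in by the host primitive
  printf("elapsed: %lld ms, peak rss: %zu bytes, page faults: %zu\n",
         (long long)info.elapsed, info.peak_rss, info.page_faults);
}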
+
+// Default stderr output. (only for warnings etc. with verbose enabled)
+// msg != NULL && _mi_strlen(msg) > 0
+void _mi_prim_out_stderr( const char* msg );
+
+// Get an environment variable. (only for options)
+// name != NULL, result != NULL, result_size >= 64
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size);
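A hedged libc-based sketch of this primitive; real hosts may avoid getenv during early startup, so treat this purely as an illustration of the contract (result_size is at least 64 per the precondition above):

#include <stdlib.h>
#include <string.h>

bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
  const char* value = getenv(name);
  if (value == NULL) return false;                      // variable not set
  strncpy(result, value, result_size - 1);              // bounded copy...
  result[result_size - 1] = '\0';                       // ...always NUL-terminated
  return true;
}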
+
+
+// Fill a buffer with strong randomness; return `false` on error or if
+// there is no strong randomization available.
+bool _mi_prim_random_buf(void* buf, size_t buf_len);
+
+// Called on the first thread start, and should ensure `_mi_thread_done` is called on thread termination.
+void _mi_prim_thread_init_auto_done(void);
+
+// Called on process exit; may clean up resources associated with the thread auto-done mechanism.
+void _mi_prim_thread_done_auto_done(void);
+
+// Called when the default heap for a thread changes
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap);
+
+
+//-------------------------------------------------------------------
+// Thread id: `_mi_prim_thread_id()`
+//
+// Getting the thread id should be performant as it is called in the
+// fast path of `_mi_free` and we specialize for various platforms as
+// inlined definitions. Regular code should call `init.c:_mi_thread_id()`.
+// We only require _mi_prim_thread_id() to return a unique id
+// for each thread (unequal to zero).
+//-------------------------------------------------------------------
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept;
+
+#if defined(_WIN32)
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ // Windows: works on Intel and ARM in both 32- and 64-bit
+ return (uintptr_t)NtCurrentTeb();
+}
+
+// We use assembly for a fast thread id on the main platforms. The TLS layout depends on
+// both the OS and libc implementation so we use specific tests for each main platform.
+// If you test on another platform and it works please send a PR :-)
+// see also https://akkadia.org/drepper/tls.pdf for more info on the TLS register.
+#elif defined(__GNUC__) && ( \
+ (defined(__GLIBC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
+ || (defined(__APPLE__) && (defined(__x86_64__) || defined(__aarch64__))) \
+ || (defined(__BIONIC__) && (defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__))) \
+ || (defined(__FreeBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
+ || (defined(__OpenBSD__) && (defined(__x86_64__) || defined(__i386__) || defined(__aarch64__))) \
+ )
+
+static inline void* mi_prim_tls_slot(size_t slot) mi_attr_noexcept {
+ void* res;
+ const size_t ofs = (slot*sizeof(void*));
+ #if defined(__i386__)
+ __asm__("movl %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86 32-bit always uses GS
+ #elif defined(__APPLE__) && defined(__x86_64__)
+ __asm__("movq %%gs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 macOSX uses GS
+ #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
+ __asm__("movl %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x32 ABI
+ #elif defined(__x86_64__)
+ __asm__("movq %%fs:%1, %0" : "=r" (res) : "m" (*((void**)ofs)) : ); // x86_64 Linux, BSD uses FS
+ #elif defined(__arm__)
+ void** tcb; MI_UNUSED(ofs);
+ __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
+ res = tcb[slot];
+ #elif defined(__aarch64__)
+ void** tcb; MI_UNUSED(ofs);
+ #if defined(__APPLE__) // M1, issue #343
+ __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
+ #else
+ __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
+ #endif
+ res = tcb[slot];
+ #endif
+ return res;
+}
+
+// setting a tls slot is only used on macOS for now
+static inline void mi_prim_tls_slot_set(size_t slot, void* value) mi_attr_noexcept {
+ const size_t ofs = (slot*sizeof(void*));
+ #if defined(__i386__)
+ __asm__("movl %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // 32-bit always uses GS
+ #elif defined(__APPLE__) && defined(__x86_64__)
+ __asm__("movq %1,%%gs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 macOS uses GS
+ #elif defined(__x86_64__) && (MI_INTPTR_SIZE==4)
+ __asm__("movl %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x32 ABI
+ #elif defined(__x86_64__)
+ __asm__("movq %1,%%fs:%0" : "=m" (*((void**)ofs)) : "rn" (value) : ); // x86_64 Linux, BSD uses FS
+ #elif defined(__arm__)
+ void** tcb; MI_UNUSED(ofs);
+ __asm__ volatile ("mrc p15, 0, %0, c13, c0, 3\nbic %0, %0, #3" : "=r" (tcb));
+ tcb[slot] = value;
+ #elif defined(__aarch64__)
+ void** tcb; MI_UNUSED(ofs);
+ #if defined(__APPLE__) // M1, issue #343
+ __asm__ volatile ("mrs %0, tpidrro_el0\nbic %0, %0, #7" : "=r" (tcb));
+ #else
+ __asm__ volatile ("mrs %0, tpidr_el0" : "=r" (tcb));
+ #endif
+ tcb[slot] = value;
+ #endif
+}
+
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ #if defined(__BIONIC__)
+ // issue #384, #495: on the Bionic libc (Android), slot 1 is the thread id
+ // see: https://github.com/aosp-mirror/platform_bionic/blob/c44b1d0676ded732df4b3b21c5f798eacae93228/libc/platform/bionic/tls_defines.h#L86
+ return (uintptr_t)mi_prim_tls_slot(1);
+ #else
+ // in all our other targets, slot 0 is the thread id
+ // glibc: https://sourceware.org/git/?p=glibc.git;a=blob_plain;f=sysdeps/x86_64/nptl/tls.h
+ // apple: https://github.com/apple/darwin-xnu/blob/main/libsyscall/os/tsd.h#L36
+ return (uintptr_t)mi_prim_tls_slot(0);
+ #endif
+}
+
+#else
+
+// otherwise use portable C, taking the address of a thread local variable (this is still very fast on most platforms).
+static inline mi_threadid_t _mi_prim_thread_id(void) mi_attr_noexcept {
+ return (uintptr_t)&_mi_heap_default;
+}
+
+#endif
+
+
+
+/* ----------------------------------------------------------------------------------------
+The thread local default heap: `_mi_prim_get_default_heap()`
+This is inlined here as it is on the fast path for allocation functions.
+
+On most platforms (Windows, Linux, FreeBSD, NetBSD, etc), this just returns a
+__thread local variable (`_mi_heap_default`). With the initial-exec TLS model this ensures
+that the storage will always be available (allocated on the thread stacks).
+
+On some platforms though we cannot use that when overriding `malloc` since the underlying
+TLS implementation (or the loader) will itself call `malloc` on first access and recurse.
+We try to circumvent this in an efficient way:
+- macOS  : we use an unused TLS slot from the OS allocated slots (MI_TLS_SLOT). On macOS, the
+  loader itself calls `malloc` even before the modules are initialized.
+- OpenBSD: we use an unused slot from the pthread block (MI_TLS_PTHREAD_SLOT_OFS).
+- DragonFly: the defaults work but seem slow compared to FreeBSD (see PR #323)
+------------------------------------------------------------------------------------------- */
+
+// defined in `init.c`; do not use these directly
+extern mi_decl_thread mi_heap_t* _mi_heap_default; // default heap to allocate from
+extern bool _mi_process_is_initialized; // has mi_process_init been called?
+
+static inline mi_heap_t* mi_prim_get_default_heap(void);
+
+#if defined(MI_MALLOC_OVERRIDE)
+#if defined(__APPLE__) // macOS
+ #define MI_TLS_SLOT 89 // seems unused?
+ // #define MI_TLS_RECURSE_GUARD 1
+ // other possible unused ones are 9, 29, __PTK_FRAMEWORK_JAVASCRIPTCORE_KEY4 (94), __PTK_FRAMEWORK_GC_KEY9 (112) and __PTK_FRAMEWORK_OLDGC_KEY9 (89)
+ // see <https://github.com/rweichler/substrate/blob/master/include/pthread_machdep.h>
+#elif defined(__OpenBSD__)
+  // use end bytes of a name; goes wrong if anyone uses names > 23 characters (pthread specifies 16)
+ // see <https://github.com/openbsd/src/blob/master/lib/libc/include/thread_private.h#L371>
+ #define MI_TLS_PTHREAD_SLOT_OFS (6*sizeof(int) + 4*sizeof(void*) + 24)
+ // #elif defined(__DragonFly__)
+ // #warning "mimalloc is not working correctly on DragonFly yet."
+ // #define MI_TLS_PTHREAD_SLOT_OFS (4 + 1*sizeof(void*)) // offset `uniqueid` (also used by gdb?) <https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/lib/libthread_xu/thread/thr_private.h#L458>
+#elif defined(__ANDROID__)
+ // See issue #381
+ #define MI_TLS_PTHREAD
+#endif
+#endif
+
+
+#if defined(MI_TLS_SLOT)
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t* heap = (mi_heap_t*)mi_prim_tls_slot(MI_TLS_SLOT);
+ if mi_unlikely(heap == NULL) {
+ #ifdef __GNUC__
+ __asm(""); // prevent conditional load of the address of _mi_heap_empty
+ #endif
+ heap = (mi_heap_t*)&_mi_heap_empty;
+ }
+ return heap;
+}
+
+#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
+
+static inline mi_heap_t** mi_prim_tls_pthread_heap_slot(void) {
+ pthread_t self = pthread_self();
+ #if defined(__DragonFly__)
+ if (self==NULL) return NULL;
+ #endif
+ return (mi_heap_t**)((uint8_t*)self + MI_TLS_PTHREAD_SLOT_OFS);
+}
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t** pheap = mi_prim_tls_pthread_heap_slot();
+ if mi_unlikely(pheap == NULL) return _mi_heap_main_get();
+ mi_heap_t* heap = *pheap;
+ if mi_unlikely(heap == NULL) return (mi_heap_t*)&_mi_heap_empty;
+ return heap;
+}
+
+#elif defined(MI_TLS_PTHREAD)
+
+extern pthread_key_t _mi_heap_default_key;
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ mi_heap_t* heap = (mi_unlikely(_mi_heap_default_key == (pthread_key_t)(-1)) ? _mi_heap_main_get() : (mi_heap_t*)pthread_getspecific(_mi_heap_default_key));
+ return (mi_unlikely(heap == NULL) ? (mi_heap_t*)&_mi_heap_empty : heap);
+}
+
+#else // default using a thread local variable; used on most platforms.
+
+static inline mi_heap_t* mi_prim_get_default_heap(void) {
+ #if defined(MI_TLS_RECURSE_GUARD)
+ if (mi_unlikely(!_mi_process_is_initialized)) return _mi_heap_main_get();
+ #endif
+ return _mi_heap_default;
+}
+
+#endif // mi_prim_get_default_heap()
+
+
+
+#endif // MIMALLOC_PRIM_H
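To make the porting contract above concrete, here is a minimal POSIX-flavored sketch of two of the `_mi_prim_*` primitives declared in this header. It is not the shipped `src/prim/unix/prim.c`; the mmap-based strategy, the ignored hints, and the simplified error handling are assumptions made only for illustration.

```
// Illustrative sketch only (not mimalloc source): a bare-bones POSIX take on
// two primitives from mimalloc/prim.h.
#include <stddef.h>
#include <stdbool.h>
#include <errno.h>
#include <sys/mman.h>

// Allocate OS memory; the alignment hint is ignored here and large (huge)
// pages are never used, so *is_large is always false.
int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit,
                   bool allow_large, bool* is_large, void** addr) {
  (void)try_alignment; (void)allow_large;               // hints only in this sketch
  *is_large = false;
  const int prot = (commit ? PROT_READ | PROT_WRITE : PROT_NONE);
  void* p = mmap(NULL, size, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { *addr = NULL; return errno; }  // NULL result plus error code
  *addr = p;
  return 0;                                             // 0 on success
}

// Commit or decommit a range by toggling its protection.
int _mi_prim_commit(void* start, size_t size, bool commit) {
  const int prot = (commit ? PROT_READ | PROT_WRITE : PROT_NONE);
  return (mprotect(start, size, prot) == 0 ? 0 : errno);
}
```

The real platform layers in `src/prim/` additionally handle alignment retries, huge pages, NUMA, and the Windows, WASI, and macOS specifics.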
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h
new file mode 100644
index 000000000..f78e8daa7
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/track.h
@@ -0,0 +1,147 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+#pragma once
+#ifndef MIMALLOC_TRACK_H
+#define MIMALLOC_TRACK_H
+
+/* ------------------------------------------------------------------------------------------------------
+Track memory ranges with macros for tools like Valgrind, address sanitizer, or other memory checkers.
+These can be defined for tracking allocation:
+
+ #define mi_track_malloc_size(p,reqsize,size,zero)
+ #define mi_track_free_size(p,_size)
+
+The macros are set up such that the size passed to `mi_track_free_size`
+always matches the size of `mi_track_malloc_size`. (currently, `size == mi_usable_size(p)`).
+The `reqsize` is what the user requested, and `size >= reqsize`.
+The `size` is either byte precise (and `size==reqsize`) if `MI_PADDING` is enabled,
+or otherwise it is the usable block size which may be larger than the original request.
+Use `_mi_block_size_of(void* p)` to get the full block size that was allocated (including padding etc).
+The `zero` parameter is `true` if the allocated block is zero initialized.
+
+Optional:
+
+ #define mi_track_align(p,alignedp,offset,size)
+ #define mi_track_resize(p,oldsize,newsize)
+ #define mi_track_init()
+
+The `mi_track_align` is called right after a `mi_track_malloc` for aligned pointers in a block.
+The corresponding `mi_track_free` still uses the block start pointer and original size (corresponding to the `mi_track_malloc`).
+The `mi_track_resize` is currently unused but could be called on reallocations within a block.
+`mi_track_init` is called at program start.
+
+The following macros are for tools like asan and valgrind to track whether memory is
+defined, undefined, or not accessible at all:
+
+ #define mi_track_mem_defined(p,size)
+ #define mi_track_mem_undefined(p,size)
+ #define mi_track_mem_noaccess(p,size)
+
+-------------------------------------------------------------------------------------------------------*/
+
+#if MI_TRACK_VALGRIND
+// valgrind tool
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 1 // track free of individual blocks on heap_destroy
+#define MI_TRACK_TOOL "valgrind"
+
+#include <valgrind/valgrind.h>
+#include <valgrind/memcheck.h>
+
+#define mi_track_malloc_size(p,reqsize,size,zero) VALGRIND_MALLOCLIKE_BLOCK(p,size,MI_PADDING_SIZE /*red zone*/,zero)
+#define mi_track_free_size(p,_size) VALGRIND_FREELIKE_BLOCK(p,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_resize(p,oldsize,newsize) VALGRIND_RESIZEINPLACE_BLOCK(p,oldsize,newsize,MI_PADDING_SIZE /*red zone*/)
+#define mi_track_mem_defined(p,size) VALGRIND_MAKE_MEM_DEFINED(p,size)
+#define mi_track_mem_undefined(p,size) VALGRIND_MAKE_MEM_UNDEFINED(p,size)
+#define mi_track_mem_noaccess(p,size) VALGRIND_MAKE_MEM_NOACCESS(p,size)
+
+#elif MI_TRACK_ASAN
+// address sanitizer
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_TOOL "asan"
+
+#include <sanitizer/asan_interface.h>
+
+#define mi_track_malloc_size(p,reqsize,size,zero) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_free_size(p,size) ASAN_POISON_MEMORY_REGION(p,size)
+#define mi_track_mem_defined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_mem_undefined(p,size) ASAN_UNPOISON_MEMORY_REGION(p,size)
+#define mi_track_mem_noaccess(p,size) ASAN_POISON_MEMORY_REGION(p,size)
+
+#elif MI_TRACK_ETW
+// windows event tracing
+
+#define MI_TRACK_ENABLED 1
+#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_TOOL "ETW"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include "../src/prim/windows/etw.h"
+
+#define mi_track_init() EventRegistermicrosoft_windows_mimalloc();
+#define mi_track_malloc_size(p,reqsize,size,zero) EventWriteETW_MI_ALLOC((UINT64)(p), size)
+#define mi_track_free_size(p,size) EventWriteETW_MI_FREE((UINT64)(p), size)
+
+#else
+// no tracking
+
+#define MI_TRACK_ENABLED 0
+#define MI_TRACK_HEAP_DESTROY 0
+#define MI_TRACK_TOOL "none"
+
+#define mi_track_malloc_size(p,reqsize,size,zero)
+#define mi_track_free_size(p,_size)
+
+#endif
+
+// -------------------
+// Utility definitions
+
+#ifndef mi_track_resize
+#define mi_track_resize(p,oldsize,newsize) mi_track_free_size(p,oldsize); mi_track_malloc(p,newsize,false)
+#endif
+
+#ifndef mi_track_align
+#define mi_track_align(p,alignedp,offset,size) mi_track_mem_noaccess(p,offset)
+#endif
+
+#ifndef mi_track_init
+#define mi_track_init()
+#endif
+
+#ifndef mi_track_mem_defined
+#define mi_track_mem_defined(p,size)
+#endif
+
+#ifndef mi_track_mem_undefined
+#define mi_track_mem_undefined(p,size)
+#endif
+
+#ifndef mi_track_mem_noaccess
+#define mi_track_mem_noaccess(p,size)
+#endif
+
+
+#if MI_PADDING
+#define mi_track_malloc(p,reqsize,zero) \
+ if ((p)!=NULL) { \
+ mi_assert_internal(mi_usable_size(p)==(reqsize)); \
+ mi_track_malloc_size(p,reqsize,reqsize,zero); \
+ }
+#else
+#define mi_track_malloc(p,reqsize,zero) \
+ if ((p)!=NULL) { \
+ mi_assert_internal(mi_usable_size(p)>=(reqsize)); \
+ mi_track_malloc_size(p,reqsize,mi_usable_size(p),zero); \
+ }
+#endif
+
+#endif
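As a usage illustration of the contract described at the top of this header (the size passed to `mi_track_free_size` must match the size that was reported via `mi_track_malloc_size`), here is a small self-contained sketch. The `printf`-based stand-in hooks and the `traced_*` wrappers are hypothetical and only mimic the shape of the real macros; an actual build gets them from `mimalloc/track.h`, mapping to Valgrind, asan, or ETW instead.

```
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

// Hypothetical stand-in hooks with the same shape as the track.h macros.
#define mi_track_malloc_size(p,reqsize,size,zero) \
  printf("alloc %p req=%zu usable=%zu zero=%d\n", (p), (size_t)(reqsize), (size_t)(size), (int)(zero))
#define mi_track_free_size(p,size) \
  printf("free  %p usable=%zu\n", (p), (size_t)(size))

// Toy wrappers showing the pairing: the size given at free time equals the
// usable size reported at allocation time.
static void* traced_alloc(size_t reqsize, size_t* usable) {
  *usable = reqsize;                    // toy assumption: usable size == requested size
  void* p = malloc(*usable);
  if (p != NULL) { mi_track_malloc_size(p, reqsize, *usable, false); }
  return p;
}

static void traced_free(void* p, size_t usable) {
  if (p == NULL) return;
  mi_track_free_size(p, usable);        // same size as reported at allocation
  free(p);
}

int main(void) {
  size_t usable;
  void* p = traced_alloc(100, &usable);
  traced_free(p, usable);
  return 0;
}
```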
diff --git a/source/luametatex/source/libraries/mimalloc/include/mimalloc-types.h b/source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h
index f3af528e5..c7ddaaaef 100644
--- a/source/luametatex/source/libraries/mimalloc/include/mimalloc-types.h
+++ b/source/luametatex/source/libraries/mimalloc/include/mimalloc/types.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
@@ -8,9 +8,20 @@ terms of the MIT license. A copy of the license can be found in the file
#ifndef MIMALLOC_TYPES_H
#define MIMALLOC_TYPES_H
+// --------------------------------------------------------------------------
+// This file contains the main type definitions for mimalloc:
+// mi_heap_t    : all data for a thread-local heap, contains
+//                lists of all managed heap pages.
+// mi_segment_t : a larger chunk of memory (32MiB) from which pages
+//                are allocated.
+// mi_page_t    : a mimalloc page (usually 64KiB or 512KiB) from
+//                which objects are allocated.
+// --------------------------------------------------------------------------
+
+
#include <stddef.h> // ptrdiff_t
#include <stdint.h> // uintptr_t, uint16_t, etc
-#include "mimalloc-atomic.h" // _Atomic
+#include "mimalloc/atomic.h" // _Atomic
#ifdef _MSC_VER
#pragma warning(disable:4214) // bitfield is not int
@@ -29,8 +40,10 @@ terms of the MIT license. A copy of the license can be found in the file
// Define NDEBUG in the release version to disable assertions.
// #define NDEBUG
-// Define MI_VALGRIND to enable valgrind support
-// #define MI_VALGRIND 1
+// Define MI_TRACK_<tool> to enable tracking support
+// #define MI_TRACK_VALGRIND 1
+// #define MI_TRACK_ASAN 1
+// #define MI_TRACK_ETW 1
// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1
@@ -58,11 +71,16 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
-// The padding can detect byte-precise buffer overflow on free.
-#if !defined(MI_PADDING) && (MI_DEBUG>=1 || MI_VALGRIND)
+// The padding can detect buffer overflow on free.
+#if !defined(MI_PADDING) && (MI_SECURE>=3 || MI_DEBUG>=1 || (MI_TRACK_VALGRIND || MI_TRACK_ASAN || MI_TRACK_ETW))
#define MI_PADDING 1
#endif
+// Check padding bytes; allows byte-precise buffer overflow detection
+#if !defined(MI_PADDING_CHECK) && MI_PADDING && (MI_SECURE>=3 || MI_DEBUG>=1)
+#define MI_PADDING_CHECK 1
+#endif
+
// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
@@ -290,8 +308,8 @@ typedef struct mi_page_s {
uint32_t xblock_size; // size available in each block (always `>0`)
mi_block_t* local_free; // list of deferred free blocks by this thread (migrates to `free`)
- #ifdef MI_ENCODE_FREELIST
- uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`)
+ #if (MI_ENCODE_FREELIST || MI_PADDING)
+ uintptr_t keys[2]; // two random keys to encode the free lists (see `_mi_block_next`) or padding canary
#endif
_Atomic(mi_thread_free_t) xthread_free; // list of deferred free blocks freed by other threads
diff --git a/source/luametatex/source/libraries/mimalloc/readme.md b/source/luametatex/source/libraries/mimalloc/readme.md
index 10195b026..98b9af675 100644
--- a/source/luametatex/source/libraries/mimalloc/readme.md
+++ b/source/luametatex/source/libraries/mimalloc/readme.md
@@ -9,18 +9,18 @@
mimalloc (pronounced "me-malloc")
is a general purpose allocator with excellent [performance](#performance) characteristics.
-Initially developed by Daan Leijen for the run-time systems of the
+Initially developed by Daan Leijen for the runtime systems of the
[Koka](https://koka-lang.github.io) and [Lean](https://github.com/leanprover/lean) languages.
-Latest release tag: `v2.0.9` (2022-12-23).
-Latest stable tag: `v1.7.9` (2022-12-23).
+Latest release tag: `v2.1.0` (2023-03-29).
+Latest stable tag: `v1.8.0` (2023-03-29).
mimalloc is a drop-in replacement for `malloc` and can be used in other programs
without code changes, for example, on dynamically linked ELF-based systems (Linux, BSD, etc.) you can use it as:
```
> LD_PRELOAD=/usr/lib/libmimalloc.so myprogram
```
-It also has an easy way to override the default allocator in [Windows](#override_on_windows). Notable aspects of the design include:
+It also includes a robust way to override the default allocator in [Windows](#override_on_windows). Notable aspects of the design include:
- __small and consistent__: the library is about 8k LOC using simple and
consistent data structures. This makes it very suitable
@@ -78,13 +78,18 @@ Note: the `v2.x` version has a new algorithm for managing internal mimalloc page
and fragmentation compared to mimalloc `v1.x` (especially for large workloads). Should otherwise have similar performance
(see [below](#performance)); please report if you observe any significant performance regression.
-* 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with asan and improved [Valgrind](#valgrind) support.
+* 2023-03-29, `v1.8.0`, `v2.1.0`: Improved support for dynamic overriding on Windows 11. Improved tracing precision
+ with [asan](#asan) and [Valgrind](#valgrind), and added Windows event tracing [ETW](#ETW) (contributed by Xinglong He). Created an OS
+ abstraction layer to make it easier to port and separate platform dependent code (in `src/prim`). Fixed C++ STL compilation on older Microsoft C++ compilers, and various small bug fixes.
+
+* 2022-12-23, `v1.7.9`, `v2.0.9`: Supports building with [asan](#asan) and improved [Valgrind](#valgrind) support.
 Support arbitrarily large alignments (in particular for `std::pmr` pools).
 Added C++ STL allocators attached to a specific heap (thanks @vmarkovtsev).
 Heap walks now visit all objects (including huge objects). Support Windows nano server containers (by Johannes Schindelin, @dscho). Various small bug fixes.
-* 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow detection. Initial
- support for attaching heaps to a specific memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`, .
+* 2022-11-03, `v1.7.7`, `v2.0.7`: Initial support for [Valgrind](#valgrind) for leak testing and heap block overflow
+ detection. Initial
+ support for attaching heaps to a specific memory area (only in v2). Fix `realloc` behavior for zero size blocks, remove restriction to integral multiple of the alignment in `alloc_align`, improved aligned allocation performance, reduced contention with many threads on few processors (thank you @dposluns!), vs2022 support, support `pkg-config`.
* 2022-04-14, `v1.7.6`, `v2.0.6`: fix fallback path for aligned OS allocation on Windows, improve Windows aligned allocation
even when compiling with older SDK's, fix dynamic overriding on macOS Monterey, fix MSVC C++ dynamic overriding, fix
@@ -346,44 +351,6 @@ When _mimalloc_ is built using debug mode, various checks are done at runtime to
- Double free's, and freeing invalid heap pointers are detected.
- Corrupted free-lists and some forms of use-after-free are detected.
-## Valgrind
-
-Generally, we recommend using the standard allocator with the amazing [Valgrind] tool (and
-also for other address sanitizers).
-However, it is possible to build mimalloc with Valgrind support. This has a small performance
-overhead but does allow detecting memory leaks and byte-precise buffer overflows directly on final
-executables. To build with valgrind support, use the `MI_VALGRIND=ON` cmake option:
-
-```
-> cmake ../.. -DMI_VALGRIND=ON
-```
-
-This can also be combined with secure mode or debug mode.
-You can then run your programs directly under valgrind:
-
-```
-> valgrind <myprogram>
-```
-
-If you rely on overriding `malloc`/`free` by mimalloc (instead of using the `mi_malloc`/`mi_free` API directly),
-you also need to tell `valgrind` to not intercept those calls itself, and use:
-
-```
-> MIMALLOC_SHOW_STATS=1 valgrind --soname-synonyms=somalloc=*mimalloc* -- <myprogram>
-```
-
-By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed
-used and not the standard allocator. Even though the [Valgrind option][valgrind-soname]
-is called `--soname-synonyms`, this also
-works when overriding with a static library or object file. Unfortunately, it is not possible to
-dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`.
-See also the `test/test-wrong.c` file to test with `valgrind`.
-
-Valgrind support is in its initial development -- please report any issues.
-
-[Valgrind]: https://valgrind.org/
-[valgrind-soname]: https://valgrind.org/docs/manual/manual-core.html#opt.soname-synonyms
-
# Overriding Standard Malloc
@@ -393,7 +360,7 @@ Overriding the standard `malloc` (and `new`) can be done either _dynamically_ or
This is the recommended way to override the standard malloc interface.
-### Override on Linux, BSD
+### Dynamic Override on Linux, BSD
On these ELF-based systems we preload the mimalloc shared
library so all calls to the standard `malloc` interface are
@@ -412,7 +379,7 @@ or run with the debug version to get detailed statistics:
> env MIMALLOC_SHOW_STATS=1 LD_PRELOAD=/usr/lib/libmimalloc-debug.so myprogram
```
-### Override on MacOS
+### Dynamic Override on MacOS
On macOS we can also preload the mimalloc shared
library so all calls to the standard `malloc` interface are
@@ -425,7 +392,7 @@ Note that certain security restrictions may apply when doing this from
the [shell](https://stackoverflow.com/questions/43941322/dyld-insert-libraries-ignored-when-calling-application-through-bash).
-### Override on Windows
+### Dynamic Override on Windows
<span id="override_on_windows">Overriding on Windows</span> is robust and has the
particular advantage to be able to redirect all malloc/free calls that go through
@@ -458,13 +425,13 @@ Such patching can be done for example with [CFF Explorer](https://ntcore.com/?pa
On Unix-like systems, you can also statically link with _mimalloc_ to override the standard
malloc interface. The recommended way is to link the final program with the
-_mimalloc_ single object file (`mimalloc-override.o`). We use
+_mimalloc_ single object file (`mimalloc.o`). We use
an object file instead of a library file as linkers give preference to
that over archives to resolve symbols. To ensure that the standard
malloc interface resolves to the _mimalloc_ library, link it as the first
object file. For example:
```
-> gcc -o myprogram mimalloc-override.o myfile1.c ...
+> gcc -o myprogram mimalloc.o myfile1.c ...
```
Another way to override statically that works on all platforms, is to
@@ -474,6 +441,96 @@ This is provided by [`mimalloc-override.h`](https://github.com/microsoft/mimallo
under your control or otherwise mixing of pointers from different heaps may occur!
+## Tools
+
+Generally, we recommend using the standard allocator with memory tracking tools, but mimalloc
+can also be built to support the [address sanitizer][asan] or the excellent [Valgrind] tool.
+Moreover, it can be built to support Windows event tracing ([ETW]).
+This has a small performance overhead but does allow detecting memory leaks and byte-precise
+buffer overflows directly on final executables. See also the `test/test-wrong.c` file to test with various tools.
+
+### Valgrind
+
+To build with [valgrind] support, use the `MI_TRACK_VALGRIND=ON` cmake option:
+
+```
+> cmake ../.. -DMI_TRACK_VALGRIND=ON
+```
+
+This can also be combined with secure mode or debug mode.
+You can then run your programs directly under valgrind:
+
+```
+> valgrind <myprogram>
+```
+
+If you rely on overriding `malloc`/`free` by mimalloc (instead of using the `mi_malloc`/`mi_free` API directly),
+you also need to tell `valgrind` to not intercept those calls itself, and use:
+
+```
+> MIMALLOC_SHOW_STATS=1 valgrind --soname-synonyms=somalloc=*mimalloc* -- <myprogram>
+```
+
+By setting the `MIMALLOC_SHOW_STATS` environment variable you can check that mimalloc is indeed
+used and not the standard allocator. Even though the [Valgrind option][valgrind-soname]
+is called `--soname-synonyms`, this also
+works when overriding with a static library or object file. Unfortunately, it is not possible to
+dynamically override mimalloc using `LD_PRELOAD` together with `valgrind`.
+See also the `test/test-wrong.c` file to test with `valgrind`.
+
+Valgrind support is in its initial development -- please report any issues.
+
+[Valgrind]: https://valgrind.org/
+[valgrind-soname]: https://valgrind.org/docs/manual/manual-core.html#opt.soname-synonyms
+
+### ASAN
+
+To build with the address sanitizer, use the `-DMI_TRACK_ASAN=ON` cmake option:
+
+```
+> cmake ../.. -DMI_TRACK_ASAN=ON
+```
+
+This can also be combined with secure mode or debug mode.
+You can then run your programs as:
+
+```
+> ASAN_OPTIONS=verbosity=1 <myprogram>
+```
+
+When you link a program with an address sanitizer build of mimalloc, you should
+generally compile that program too with the address sanitizer enabled.
+For example, assuming you build mimalloc in `out/debug`:
+
+```
+clang -g -o test-wrong -Iinclude test/test-wrong.c out/debug/libmimalloc-asan-debug.a -lpthread -fsanitize=address -fsanitize-recover=address
+```
+
+Since the address sanitizer redirects the standard allocation functions, on some platforms (macOS for example)
+it is required to compile mimalloc with `-DMI_OVERRIDE=OFF`.
+Address sanitizer support is in its initial development -- please report any issues.
+
+[asan]: https://github.com/google/sanitizers/wiki/AddressSanitizer
+
+### ETW
+
+Event tracing for Windows ([ETW]) provides a high-performance way to capture all allocations through
+mimalloc and analyze them later. To build with ETW support, use the `-DMI_TRACK_ETW=ON` cmake option.
+
+You can then capture an allocation trace using the Windows performance recorder (WPR), using the
+`src/prim/windows/etw-mimalloc.wprp` profile. In an admin prompt, you can use:
+```
+> wpr -start src\prim\windows\etw-mimalloc.wprp -filemode
+> <my_mimalloc_program>
+> wpr -stop <my_mimalloc_program>.etl
+```
+and then open `<my_mimalloc_program>.etl` in the Windows Performance Analyzer (WPA), or
+use a tool like [TraceControl] that is specialized for analyzing mimalloc traces.
+
+[ETW]: https://learn.microsoft.com/en-us/windows-hardware/test/wpt/event-tracing-for-windows
+[TraceControl]: https://github.com/xinglonghe/TraceControl
+
+
# Performance
Last update: 2021-01-30
diff --git a/source/luametatex/source/libraries/mimalloc/src/alloc-aligned.c b/source/luametatex/source/libraries/mimalloc/src/alloc-aligned.c
index 9fe82890f..e79a22208 100644
--- a/source/luametatex/source/libraries/mimalloc/src/alloc-aligned.c
+++ b/source/luametatex/source/libraries/mimalloc/src/alloc-aligned.c
@@ -6,9 +6,10 @@ terms of the MIT license. A copy of the license can be found in the file
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h" // mi_prim_get_default_heap
-#include <string.h> // memset
+#include <string.h> // memset
// ------------------------------------------------------
// Aligned Allocation
@@ -46,7 +47,7 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
oversize = (size <= MI_SMALL_SIZE_MAX ? MI_SMALL_SIZE_MAX + 1 /* ensure we use generic malloc path */ : size);
p = _mi_heap_malloc_zero_ex(heap, oversize, false, alignment); // the page block size should be large enough to align in the single huge page block
// zero afterwards as only the area from the aligned_p may be committed!
- if (p == NULL) return NULL;
+ if (p == NULL) return NULL;
}
else {
// otherwise over-allocate
@@ -61,30 +62,30 @@ static mi_decl_noinline void* mi_heap_malloc_zero_aligned_at_fallback(mi_heap_t*
mi_assert_internal(adjust < alignment);
void* aligned_p = (void*)((uintptr_t)p + adjust);
if (aligned_p != p) {
- mi_page_set_has_aligned(_mi_ptr_page(p), true);
+ mi_page_t* page = _mi_ptr_page(p);
+ mi_page_set_has_aligned(page, true);
+ _mi_padding_shrink(page, (mi_block_t*)p, adjust + size);
}
+ // todo: expand padding if overallocated ?
mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
mi_assert_internal(p == _mi_page_ptr_unalign(_mi_ptr_segment(aligned_p), _mi_ptr_page(aligned_p), aligned_p));
mi_assert_internal(((uintptr_t)aligned_p + offset) % alignment == 0);
- mi_assert_internal(mi_page_usable_block_size(_mi_ptr_page(p)) >= adjust + size);
-
+ mi_assert_internal(mi_usable_size(aligned_p)>=size);
+ mi_assert_internal(mi_usable_size(p) == mi_usable_size(aligned_p)+adjust);
+
// now zero the block if needed
- if (zero && alignment > MI_ALIGNMENT_MAX) {
- const ptrdiff_t diff = (uint8_t*)aligned_p - (uint8_t*)p;
- const ptrdiff_t zsize = mi_page_usable_block_size(_mi_ptr_page(p)) - diff - MI_PADDING_SIZE;
- if (zsize > 0) { _mi_memzero(aligned_p, zsize); }
+ if (alignment > MI_ALIGNMENT_MAX) {
+ // for the tracker, on huge aligned allocations only from the start of the large block is defined
+ mi_track_mem_undefined(aligned_p, size);
+ if (zero) {
+ _mi_memzero(aligned_p, mi_usable_size(aligned_p));
+ }
}
- #if MI_TRACK_ENABLED
if (p != aligned_p) {
- mi_track_free_size(p, oversize);
- mi_track_malloc(aligned_p, size, zero);
- }
- else {
- mi_track_resize(aligned_p, oversize, size);
- }
- #endif
+ mi_track_align(p,aligned_p,adjust,mi_usable_size(aligned_p));
+ }
return aligned_p;
}
@@ -187,27 +188,27 @@ mi_decl_nodiscard mi_decl_restrict void* mi_heap_calloc_aligned(mi_heap_t* heap,
}
mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_malloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
+ return mi_heap_malloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
}
mi_decl_nodiscard mi_decl_restrict void* mi_malloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_malloc_aligned(mi_get_default_heap(), size, alignment);
+ return mi_heap_malloc_aligned(mi_prim_get_default_heap(), size, alignment);
}
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned_at(size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_zalloc_aligned_at(mi_get_default_heap(), size, alignment, offset);
+ return mi_heap_zalloc_aligned_at(mi_prim_get_default_heap(), size, alignment, offset);
}
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_aligned(size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_zalloc_aligned(mi_get_default_heap(), size, alignment);
+ return mi_heap_zalloc_aligned(mi_prim_get_default_heap(), size, alignment);
}
mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned_at(size_t count, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_calloc_aligned_at(mi_get_default_heap(), count, size, alignment, offset);
+ return mi_heap_calloc_aligned_at(mi_prim_get_default_heap(), count, size, alignment, offset);
}
mi_decl_nodiscard mi_decl_restrict void* mi_calloc_aligned(size_t count, size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_calloc_aligned(mi_get_default_heap(), count, size, alignment);
+ return mi_heap_calloc_aligned(mi_prim_get_default_heap(), count, size, alignment);
}
@@ -282,25 +283,25 @@ mi_decl_nodiscard void* mi_heap_recalloc_aligned(mi_heap_t* heap, void* p, size_
}
mi_decl_nodiscard void* mi_realloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_realloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
+ return mi_heap_realloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
}
mi_decl_nodiscard void* mi_realloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
- return mi_heap_realloc_aligned(mi_get_default_heap(), p, newsize, alignment);
+ return mi_heap_realloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
}
mi_decl_nodiscard void* mi_rezalloc_aligned_at(void* p, size_t newsize, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_rezalloc_aligned_at(mi_get_default_heap(), p, newsize, alignment, offset);
+ return mi_heap_rezalloc_aligned_at(mi_prim_get_default_heap(), p, newsize, alignment, offset);
}
mi_decl_nodiscard void* mi_rezalloc_aligned(void* p, size_t newsize, size_t alignment) mi_attr_noexcept {
- return mi_heap_rezalloc_aligned(mi_get_default_heap(), p, newsize, alignment);
+ return mi_heap_rezalloc_aligned(mi_prim_get_default_heap(), p, newsize, alignment);
}
mi_decl_nodiscard void* mi_recalloc_aligned_at(void* p, size_t newcount, size_t size, size_t alignment, size_t offset) mi_attr_noexcept {
- return mi_heap_recalloc_aligned_at(mi_get_default_heap(), p, newcount, size, alignment, offset);
+ return mi_heap_recalloc_aligned_at(mi_prim_get_default_heap(), p, newcount, size, alignment, offset);
}
mi_decl_nodiscard void* mi_recalloc_aligned(void* p, size_t newcount, size_t size, size_t alignment) mi_attr_noexcept {
- return mi_heap_recalloc_aligned(mi_get_default_heap(), p, newcount, size, alignment);
+ return mi_heap_recalloc_aligned(mi_prim_get_default_heap(), p, newcount, size, alignment);
}
diff --git a/source/luametatex/source/libraries/mimalloc/src/alloc-override.c b/source/luametatex/source/libraries/mimalloc/src/alloc-override.c
index 84a0d19df..40098ac58 100644
--- a/source/luametatex/source/libraries/mimalloc/src/alloc-override.c
+++ b/source/luametatex/source/libraries/mimalloc/src/alloc-override.c
@@ -57,7 +57,7 @@ typedef struct mi_nothrow_s { int _tag; } mi_nothrow_t;
// functions that are interposed (or the interposing does not work)
#define MI_OSX_IS_INTERPOSED
- mi_decl_externc static size_t mi_malloc_size_checked(void *p) {
+ mi_decl_externc size_t mi_malloc_size_checked(void *p) {
if (!mi_is_in_heap_region(p)) return 0;
return mi_usable_size(p);
}
diff --git a/source/luametatex/source/libraries/mimalloc/src/alloc-posix.c b/source/luametatex/source/libraries/mimalloc/src/alloc-posix.c
index e6505f290..b6f09d1a1 100644
--- a/source/luametatex/source/libraries/mimalloc/src/alloc-posix.c
+++ b/source/luametatex/source/libraries/mimalloc/src/alloc-posix.c
@@ -10,7 +10,7 @@ terms of the MIT license. A copy of the license can be found in the file
// for convenience and used when overriding these functions.
// ------------------------------------------------------------------------
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
// ------------------------------------------------------
// Posix & Unix functions definitions
@@ -149,7 +149,7 @@ int mi_dupenv_s(char** buf, size_t* size, const char* name) mi_attr_noexcept {
else {
*buf = mi_strdup(p);
if (*buf==NULL) return ENOMEM;
- if (size != NULL) *size = strlen(p);
+ if (size != NULL) *size = _mi_strlen(p);
}
return 0;
}
diff --git a/source/luametatex/source/libraries/mimalloc/src/alloc.c b/source/luametatex/source/libraries/mimalloc/src/alloc.c
index 86453f152..24045162d 100644
--- a/source/luametatex/source/libraries/mimalloc/src/alloc.c
+++ b/source/luametatex/source/libraries/mimalloc/src/alloc.c
@@ -9,12 +9,12 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // _mi_prim_thread_id()
-
-#include <string.h> // memset, strlen
-#include <stdlib.h> // malloc, exit
+#include <string.h> // memset, strlen (for mi_strdup)
+#include <stdlib.h> // malloc, abort
#define MI_IN_ALLOC_C
#include "alloc-override.c"
@@ -40,7 +40,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
// allow use of the block internally
// note: when tracking we need to avoid ever touching the MI_PADDING since
- // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc-track.h`)
+ // that is tracked by valgrind etc. as non-accessible (through the red-zone, see `mimalloc/track.h`)
mi_track_mem_undefined(block, mi_page_usable_block_size(page));
// zero the block? note: we need to zero the full block size (issue #63)
@@ -50,7 +50,7 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
_mi_memzero_aligned(block, zsize - MI_PADDING_SIZE);
}
-#if (MI_DEBUG>0) && !MI_TRACK_ENABLED
+#if (MI_DEBUG>0) && !MI_TRACK_ENABLED && !MI_TSAN
if (!page->is_zero && !zero && !mi_page_is_huge(page)) {
memset(block, MI_DEBUG_UNINIT, mi_page_usable_block_size(page));
}
@@ -70,20 +70,22 @@ extern inline void* _mi_page_malloc(mi_heap_t* heap, mi_page_t* page, size_t siz
}
#endif
-#if (MI_PADDING > 0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
+#if MI_PADDING // && !MI_TRACK_ENABLED
mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + mi_page_usable_block_size(page));
ptrdiff_t delta = ((uint8_t*)padding - (uint8_t*)block - (size - MI_PADDING_SIZE));
- #if (MI_DEBUG>1)
+ #if (MI_DEBUG>=2)
mi_assert_internal(delta >= 0 && mi_page_usable_block_size(page) >= (size - MI_PADDING_SIZE + delta));
mi_track_mem_defined(padding,sizeof(mi_padding_t)); // note: re-enable since mi_page_usable_block_size may set noaccess
#endif
padding->canary = (uint32_t)(mi_ptr_encode(page,block,page->keys));
padding->delta = (uint32_t)(delta);
+ #if MI_PADDING_CHECK
if (!mi_page_is_huge(page)) {
uint8_t* fill = (uint8_t*)padding - delta;
const size_t maxpad = (delta > MI_MAX_ALIGN_SIZE ? MI_MAX_ALIGN_SIZE : delta); // set at most N initial padding bytes
for (size_t i = 0; i < maxpad; i++) { fill[i] = MI_DEBUG_PADDING; }
}
+ #endif
#endif
return block;
@@ -96,21 +98,18 @@ static inline mi_decl_restrict void* mi_heap_malloc_small_zero(mi_heap_t* heap,
mi_assert(heap->thread_id == 0 || heap->thread_id == tid); // heaps are thread local
#endif
mi_assert(size <= MI_SMALL_SIZE_MAX);
-#if (MI_PADDING)
- if (size == 0) {
- size = sizeof(void*);
- }
-#endif
+ #if (MI_PADDING)
+ if (size == 0) { size = sizeof(void*); }
+ #endif
mi_page_t* page = _mi_heap_get_free_small_page(heap, size + MI_PADDING_SIZE);
- void* p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
- mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
-#if MI_STAT>1
+ void* const p = _mi_page_malloc(heap, page, size + MI_PADDING_SIZE, zero);
+ mi_track_malloc(p,size,zero);
+ #if MI_STAT>1
if (p != NULL) {
- if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
+ if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
-#endif
- mi_track_malloc(p,size,zero);
+ #endif
return p;
}
@@ -120,7 +119,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc_small(mi_h
}
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc_small(size_t size) mi_attr_noexcept {
- return mi_heap_malloc_small(mi_get_default_heap(), size);
+ return mi_heap_malloc_small(mi_prim_get_default_heap(), size);
}
// The main allocation function
@@ -133,14 +132,13 @@ extern inline void* _mi_heap_malloc_zero_ex(mi_heap_t* heap, size_t size, bool z
mi_assert(heap!=NULL);
mi_assert(heap->thread_id == 0 || heap->thread_id == _mi_thread_id()); // heaps are thread local
void* const p = _mi_malloc_generic(heap, size + MI_PADDING_SIZE, zero, huge_alignment); // note: size can overflow but it is detected in malloc_generic
- mi_assert_internal(p == NULL || mi_usable_size(p) >= size);
+ mi_track_malloc(p,size,zero);
#if MI_STAT>1
if (p != NULL) {
- if (!mi_heap_is_initialized(heap)) { heap = mi_get_default_heap(); }
+ if (!mi_heap_is_initialized(heap)) { heap = mi_prim_get_default_heap(); }
mi_heap_stat_increase(heap, malloc, mi_usable_size(p));
}
#endif
- mi_track_malloc(p,size,zero);
return p;
}
}
@@ -154,12 +152,12 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_malloc(mi_heap_t*
}
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_malloc(size_t size) mi_attr_noexcept {
- return mi_heap_malloc(mi_get_default_heap(), size);
+ return mi_heap_malloc(mi_prim_get_default_heap(), size);
}
// zero initialized small block
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc_small(size_t size) mi_attr_noexcept {
- return mi_heap_malloc_small_zero(mi_get_default_heap(), size, true);
+ return mi_heap_malloc_small_zero(mi_prim_get_default_heap(), size, true);
}
mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t* heap, size_t size) mi_attr_noexcept {
@@ -167,7 +165,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_zalloc(mi_heap_t*
}
mi_decl_nodiscard mi_decl_restrict void* mi_zalloc(size_t size) mi_attr_noexcept {
- return mi_heap_zalloc(mi_get_default_heap(),size);
+ return mi_heap_zalloc(mi_prim_get_default_heap(),size);
}
@@ -225,7 +223,7 @@ static inline bool mi_check_is_double_free(const mi_page_t* page, const mi_block
// Check for heap block overflow by setting up padding at the end of the block
// ---------------------------------------------------------------------------
-#if (MI_PADDING>0) && defined(MI_ENCODE_FREELIST) && !MI_TRACK_ENABLED
+#if MI_PADDING // && !MI_TRACK_ENABLED
static bool mi_page_decode_padding(const mi_page_t* page, const mi_block_t* block, size_t* delta, size_t* bsize) {
*bsize = mi_page_usable_block_size(page);
const mi_padding_t* const padding = (mi_padding_t*)((uint8_t*)block + *bsize);
@@ -249,6 +247,40 @@ static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* bl
return (ok ? bsize - delta : 0);
}
+// When a non-thread-local block is freed, it becomes part of the thread delayed free
+// list that is freed later by the owning heap. If the exact usable size is too small to
+// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
+// so it will later not trigger an overflow error in `mi_free_block`.
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+ size_t bsize;
+ size_t delta;
+ bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
+ mi_assert_internal(ok);
+ if (!ok || (bsize - delta) >= min_size) return; // usually already enough space
+ mi_assert_internal(bsize >= min_size);
+ if (bsize < min_size) return; // should never happen
+ size_t new_delta = (bsize - min_size);
+ mi_assert_internal(new_delta < bsize);
+ mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
+ mi_track_mem_defined(padding,sizeof(mi_padding_t));
+ padding->delta = (uint32_t)new_delta;
+ mi_track_mem_noaccess(padding,sizeof(mi_padding_t));
+}
+#else
+static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
+ MI_UNUSED(block);
+ return mi_page_usable_block_size(page);
+}
+
+void _mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
+ MI_UNUSED(page);
+ MI_UNUSED(block);
+ MI_UNUSED(min_size);
+}
+#endif
+
+#if MI_PADDING && MI_PADDING_CHECK
+
static bool mi_verify_padding(const mi_page_t* page, const mi_block_t* block, size_t* size, size_t* wrong) {
size_t bsize;
size_t delta;
@@ -281,39 +313,13 @@ static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
}
}
-// When a non-thread-local block is freed, it becomes part of the thread delayed free
-// list that is freed later by the owning heap. If the exact usable size is too small to
-// contain the pointer for the delayed list, then shrink the padding (by decreasing delta)
-// so it will later not trigger an overflow error in `mi_free_block`.
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
- size_t bsize;
- size_t delta;
- bool ok = mi_page_decode_padding(page, block, &delta, &bsize);
- mi_assert_internal(ok);
- if (!ok || (bsize - delta) >= min_size) return; // usually already enough space
- mi_assert_internal(bsize >= min_size);
- if (bsize < min_size) return; // should never happen
- size_t new_delta = (bsize - min_size);
- mi_assert_internal(new_delta < bsize);
- mi_padding_t* padding = (mi_padding_t*)((uint8_t*)block + bsize);
- padding->delta = (uint32_t)new_delta;
-}
#else
+
static void mi_check_padding(const mi_page_t* page, const mi_block_t* block) {
MI_UNUSED(page);
MI_UNUSED(block);
}
-static size_t mi_page_usable_size_of(const mi_page_t* page, const mi_block_t* block) {
- MI_UNUSED(block);
- return mi_page_usable_block_size(page);
-}
-
-static void mi_padding_shrink(const mi_page_t* page, const mi_block_t* block, const size_t min_size) {
- MI_UNUSED(page);
- MI_UNUSED(block);
- MI_UNUSED(min_size);
-}
#endif
// only maintain stats for smaller objects if requested
@@ -377,7 +383,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
// The padding check may access the non-thread-owned page for the key values.
// that is safe as these are constant and the page won't be freed (as the block is not freed yet).
mi_check_padding(page, block);
- mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
+ _mi_padding_shrink(page, block, sizeof(mi_block_t)); // for small size, ensure we can fit the delayed thread pointers without triggering overflow detection
// huge page segments are always abandoned and can be freed immediately
mi_segment_t* segment = _mi_page_segment(page);
@@ -395,7 +401,7 @@ static mi_decl_noinline void _mi_free_block_mt(mi_page_t* page, mi_block_t* bloc
#endif
}
- #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED // note: when tracking, cannot use mi_usable_size with multi-threading
+ #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN // note: when tracking, cannot use mi_usable_size with multi-threading
if (segment->kind != MI_SEGMENT_HUGE) { // not for huge segments as we just reset the content
memset(block, MI_DEBUG_FREED, mi_usable_size(block));
}
@@ -449,7 +455,7 @@ static inline void _mi_free_block(mi_page_t* page, bool local, mi_block_t* block
// owning thread can free a block directly
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
- #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
+ #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
if (!mi_page_is_huge(page)) { // huge page content may be already decommitted
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
}
@@ -481,8 +487,8 @@ mi_block_t* _mi_page_ptr_unalign(const mi_segment_t* segment, const mi_page_t* p
void mi_decl_noinline _mi_free_generic(const mi_segment_t* segment, mi_page_t* page, bool is_local, void* p) mi_attr_noexcept {
mi_block_t* const block = (mi_page_has_aligned(page) ? _mi_page_ptr_unalign(segment, page, p) : (mi_block_t*)p);
- mi_stat_free(page, block); // stat_free may access the padding
- mi_track_free(p);
+ mi_stat_free(page, block); // stat_free may access the padding
+ mi_track_free_size(block, mi_page_usable_size_of(page,block));
_mi_free_block(page, is_local, block);
}
@@ -535,7 +541,7 @@ void mi_free(void* p) mi_attr_noexcept
{
if mi_unlikely(p == NULL) return;
mi_segment_t* const segment = mi_checked_ptr_segment(p,"mi_free");
- const bool is_local= (_mi_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
+ const bool is_local= (_mi_prim_thread_id() == mi_atomic_load_relaxed(&segment->thread_id));
mi_page_t* const page = _mi_segment_page_of(segment, p);
if mi_likely(is_local) { // thread-local free?
@@ -545,10 +551,10 @@ void mi_free(void* p) mi_attr_noexcept
if mi_unlikely(mi_check_is_double_free(page, block)) return;
mi_check_padding(page, block);
mi_stat_free(page, block);
- #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED
+ #if (MI_DEBUG!=0) && !MI_TRACK_ENABLED && !MI_TSAN
memset(block, MI_DEBUG_FREED, mi_page_block_size(page));
#endif
- mi_track_free(p);
+    mi_track_free_size(p, mi_page_usable_size_of(page,block)); // faster than mi_usable_size as we already know the page and that p is unaligned
mi_block_set_next(page, block, page->local_free);
page->local_free = block;
if mi_unlikely(--page->used == 0) { // using this expression generates better code than: page->used--; if (mi_page_all_free(page))
@@ -648,7 +654,7 @@ mi_decl_nodiscard extern inline mi_decl_restrict void* mi_heap_calloc(mi_heap_t*
}
mi_decl_nodiscard mi_decl_restrict void* mi_calloc(size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_calloc(mi_get_default_heap(),count,size);
+ return mi_heap_calloc(mi_prim_get_default_heap(),count,size);
}
// Uninitialized `calloc`
@@ -659,7 +665,7 @@ mi_decl_nodiscard extern mi_decl_restrict void* mi_heap_mallocn(mi_heap_t* heap,
}
mi_decl_nodiscard mi_decl_restrict void* mi_mallocn(size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_mallocn(mi_get_default_heap(),count,size);
+ return mi_heap_mallocn(mi_prim_get_default_heap(),count,size);
}
// Expand (or shrink) in place (or fail)
@@ -682,9 +688,9 @@ void* _mi_heap_realloc_zero(mi_heap_t* heap, void* p, size_t newsize, bool zero)
// (this means that returning NULL always indicates an error, and `p` will not have been freed in that case.)
const size_t size = _mi_usable_size(p,"mi_realloc"); // also works if p == NULL (with size 0)
if mi_unlikely(newsize <= size && newsize >= (size / 2) && newsize > 0) { // note: newsize must be > 0 or otherwise we return NULL for realloc(NULL,0)
- // todo: adjust potential padding to reflect the new size?
- mi_track_free_size(p, size);
- mi_track_malloc(p,newsize,true);
+ mi_assert_internal(p!=NULL);
+ // todo: do not track as the usable size is still the same in the free; adjust potential padding?
+ // mi_track_resize(p,size,newsize)
return p; // reallocation still fits and not more than 50% waste
}
void* newp = mi_heap_malloc(heap,newsize);
@@ -736,24 +742,24 @@ mi_decl_nodiscard void* mi_heap_recalloc(mi_heap_t* heap, void* p, size_t count,
mi_decl_nodiscard void* mi_realloc(void* p, size_t newsize) mi_attr_noexcept {
- return mi_heap_realloc(mi_get_default_heap(),p,newsize);
+ return mi_heap_realloc(mi_prim_get_default_heap(),p,newsize);
}
mi_decl_nodiscard void* mi_reallocn(void* p, size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_reallocn(mi_get_default_heap(),p,count,size);
+ return mi_heap_reallocn(mi_prim_get_default_heap(),p,count,size);
}
// Reallocate but free `p` on errors
mi_decl_nodiscard void* mi_reallocf(void* p, size_t newsize) mi_attr_noexcept {
- return mi_heap_reallocf(mi_get_default_heap(),p,newsize);
+ return mi_heap_reallocf(mi_prim_get_default_heap(),p,newsize);
}
mi_decl_nodiscard void* mi_rezalloc(void* p, size_t newsize) mi_attr_noexcept {
- return mi_heap_rezalloc(mi_get_default_heap(), p, newsize);
+ return mi_heap_rezalloc(mi_prim_get_default_heap(), p, newsize);
}
mi_decl_nodiscard void* mi_recalloc(void* p, size_t count, size_t size) mi_attr_noexcept {
- return mi_heap_recalloc(mi_get_default_heap(), p, count, size);
+ return mi_heap_recalloc(mi_prim_get_default_heap(), p, count, size);
}
@@ -774,7 +780,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_heap_strdup(mi_heap_t* heap, const c
}
mi_decl_nodiscard mi_decl_restrict char* mi_strdup(const char* s) mi_attr_noexcept {
- return mi_heap_strdup(mi_get_default_heap(), s);
+ return mi_heap_strdup(mi_prim_get_default_heap(), s);
}
// `strndup` using mi_malloc
@@ -791,7 +797,7 @@ mi_decl_nodiscard mi_decl_restrict char* mi_heap_strndup(mi_heap_t* heap, const
}
mi_decl_nodiscard mi_decl_restrict char* mi_strndup(const char* s, size_t n) mi_attr_noexcept {
- return mi_heap_strndup(mi_get_default_heap(),s,n);
+ return mi_heap_strndup(mi_prim_get_default_heap(),s,n);
}
#ifndef __wasi__
@@ -860,7 +866,7 @@ char* mi_heap_realpath(mi_heap_t* heap, const char* fname, char* resolved_name)
#endif
mi_decl_nodiscard mi_decl_restrict char* mi_realpath(const char* fname, char* resolved_name) mi_attr_noexcept {
- return mi_heap_realpath(mi_get_default_heap(),fname,resolved_name);
+ return mi_heap_realpath(mi_prim_get_default_heap(),fname,resolved_name);
}
#endif
@@ -927,7 +933,7 @@ static bool mi_try_new_handler(bool nothrow) {
}
#endif
-static mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) {
+mi_decl_export mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool nothrow ) {
void* p = NULL;
while(p == NULL && mi_try_new_handler(nothrow)) {
p = mi_heap_malloc(heap,size);
@@ -936,22 +942,22 @@ static mi_decl_noinline void* mi_heap_try_new(mi_heap_t* heap, size_t size, bool
}
static mi_decl_noinline void* mi_try_new(size_t size, bool nothrow) {
- return mi_heap_try_new(mi_get_default_heap(), size, nothrow);
+ return mi_heap_try_new(mi_prim_get_default_heap(), size, nothrow);
}
-mi_decl_nodiscard mi_decl_restrict extern inline void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new(mi_heap_t* heap, size_t size) {
void* p = mi_heap_malloc(heap,size);
if mi_unlikely(p == NULL) return mi_heap_try_new(heap, size, false);
return p;
}
mi_decl_nodiscard mi_decl_restrict void* mi_new(size_t size) {
- return mi_heap_alloc_new(mi_get_default_heap(), size);
+ return mi_heap_alloc_new(mi_prim_get_default_heap(), size);
}
-mi_decl_nodiscard mi_decl_restrict extern inline void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
+mi_decl_nodiscard mi_decl_restrict void* mi_heap_alloc_new_n(mi_heap_t* heap, size_t count, size_t size) {
size_t total;
if mi_unlikely(mi_count_size_overflow(count, size, &total)) {
mi_try_new_handler(false); // on overflow we invoke the try_new_handler once to potentially throw std::bad_alloc
@@ -963,7 +969,7 @@ mi_decl_nodiscard mi_decl_restrict extern inline void* mi_heap_alloc_new_n(mi_he
}
mi_decl_nodiscard mi_decl_restrict void* mi_new_n(size_t count, size_t size) {
- return mi_heap_alloc_new_n(mi_get_default_heap(), size, count);
+ return mi_heap_alloc_new_n(mi_prim_get_default_heap(), size, count);
}
@@ -1024,8 +1030,8 @@ void* _mi_externs[] = {
(void*)&mi_zalloc_small,
(void*)&mi_heap_malloc,
(void*)&mi_heap_zalloc,
- (void*)&mi_heap_malloc_small,
- (void*)&mi_heap_alloc_new,
- (void*)&mi_heap_alloc_new_n
+ (void*)&mi_heap_malloc_small
+ // (void*)&mi_heap_alloc_new,
+ // (void*)&mi_heap_alloc_new_n
};
#endif
diff --git a/source/luametatex/source/libraries/mimalloc/src/arena.c b/source/luametatex/source/libraries/mimalloc/src/arena.c
index 80dd47869..43defe009 100644
--- a/source/luametatex/source/libraries/mimalloc/src/arena.c
+++ b/source/luametatex/source/libraries/mimalloc/src/arena.c
@@ -11,18 +11,16 @@ large blocks (>= MI_ARENA_MIN_BLOCK_SIZE, 4MiB).
In contrast to the rest of mimalloc, the arenas are shared between
threads and need to be accessed using atomic operations.
-Currently arenas are only used to for huge OS page (1GiB) reservations,
-or direct OS memory reservations -- otherwise it delegates to direct allocation from the OS.
-In the future, we can expose an API to manually add more kinds of arenas
-which is sometimes needed for embedded devices or shared memory for example.
-(We can also employ this with WASI or `sbrk` systems to reserve large arenas
- on demand and be able to reuse them efficiently).
+Arenas are used for huge OS page (1GiB) reservations or for reserving
+OS memory upfront, which can improve performance or is sometimes needed
+on embedded devices. We can also employ this with WASI or `sbrk` systems
+to reserve large arenas upfront and be able to reuse the memory more effectively.
The arena allocation needs to be thread safe and we use an atomic bitmap to allocate.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
#include <string.h> // memset
#include <errno.h> // ENOMEM
@@ -30,17 +28,6 @@ The arena allocation needs to be thread safe and we use an atomic bitmap to allo
#include "bitmap.h" // atomic bitmap
-// os.c
-void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* stats);
-void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats);
-
-void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_secs, size_t* pages_reserved, size_t* psize);
-void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats);
-
-bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
-
-
/* -----------------------------------------------------------
Arena allocation
----------------------------------------------------------- */
@@ -130,6 +117,10 @@ bool _mi_arena_memid_is_suitable(size_t arena_memid, mi_arena_id_t request_arena
return mi_arena_id_is_suitable(id, exclusive, request_arena_id);
}
+bool _mi_arena_is_os_allocated(size_t arena_memid) {
+ return (arena_memid == MI_MEMID_OS);
+}
+
static size_t mi_block_count_of_size(size_t size) {
return _mi_divide_up(size, MI_ARENA_BLOCK_SIZE);
}
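
The new _mi_arena_is_os_allocated helper classifies a memid: MI_MEMID_OS marks memory that bypassed the arenas and came straight from the OS. A hypothetical caller on the free path could branch on it as below; only the two helpers taken from this diff are real, the surrounding function and the exact _mi_arena_free signature are assumptions:

// illustrative free path: memory with an OS memid goes back via _mi_os_free_ex,
// everything else is returned to its arena (assumed entry point and signature)
static void segment_os_free(void* p, size_t size, size_t memid,
                            bool committed, mi_stats_t* stats) {
  if (_mi_arena_is_os_allocated(memid)) {
    _mi_os_free_ex(p, size, committed, stats);         // direct OS reservation
  }
  else {
    _mi_arena_free(p, size, memid, committed, stats);  // hypothetical arena free call
  }
}
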
diff --git a/source/luametatex/source/libraries/mimalloc/src/bitmap.c b/source/luametatex/source/libraries/mimalloc/src/bitmap.c
index 4ea9f4afa..6fe745ac1 100644
--- a/source/luametatex/source/libraries/mimalloc/src/bitmap.c
+++ b/source/luametatex/source/libraries/mimalloc/src/bitmap.c
@@ -18,7 +18,7 @@ between the fields. (This is used in arena allocation)
---------------------------------------------------------------------------- */
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
#include "bitmap.h"
/* -----------------------------------------------------------
diff --git a/source/luametatex/source/libraries/mimalloc/src/bitmap.h b/source/luametatex/source/libraries/mimalloc/src/bitmap.h
index 0c501ec1f..3476ea46b 100644
--- a/source/luametatex/source/libraries/mimalloc/src/bitmap.h
+++ b/source/luametatex/source/libraries/mimalloc/src/bitmap.h
@@ -1,5 +1,5 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2019-2020 Microsoft Research, Daan Leijen
+Copyright (c) 2019-2023 Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
diff --git a/source/luametatex/source/libraries/mimalloc/src/heap.c b/source/luametatex/source/libraries/mimalloc/src/heap.c
index ac2d042bf..7103281f0 100644
--- a/source/luametatex/source/libraries/mimalloc/src/heap.c
+++ b/source/luametatex/source/libraries/mimalloc/src/heap.c
@@ -6,8 +6,9 @@ terms of the MIT license. A copy of the license can be found in the file
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // mi_prim_get_default_heap
#include <string.h> // memset, memcpy
@@ -30,15 +31,18 @@ static bool mi_heap_visit_pages(mi_heap_t* heap, heap_page_visitor_fun* fn, void
// visit all pages
#if MI_DEBUG>1
size_t total = heap->page_count;
- #endif
size_t count = 0;
+ #endif
+
for (size_t i = 0; i <= MI_BIN_FULL; i++) {
mi_page_queue_t* pq = &heap->pages[i];
mi_page_t* page = pq->first;
while(page != NULL) {
mi_page_t* next = page->next; // save next in case the page gets removed from the queue
mi_assert_internal(mi_page_heap(page) == heap);
+ #if MI_DEBUG>1
count++;
+ #endif
if (!fn(heap, pq, page, arg1, arg2)) return false;
page = next; // and continue
}
@@ -178,7 +182,7 @@ void mi_heap_collect(mi_heap_t* heap, bool force) mi_attr_noexcept {
}
void mi_collect(bool force) mi_attr_noexcept {
- mi_heap_collect(mi_get_default_heap(), force);
+ mi_heap_collect(mi_prim_get_default_heap(), force);
}
@@ -188,9 +192,14 @@ void mi_collect(bool force) mi_attr_noexcept {
mi_heap_t* mi_heap_get_default(void) {
mi_thread_init();
- return mi_get_default_heap();
+ return mi_prim_get_default_heap();
+}
+
+static bool mi_heap_is_default(const mi_heap_t* heap) {
+ return (heap == mi_prim_get_default_heap());
}
+
mi_heap_t* mi_heap_get_backing(void) {
mi_heap_t* heap = mi_heap_get_default();
mi_assert_internal(heap!=NULL);
@@ -237,9 +246,6 @@ static void mi_heap_reset_pages(mi_heap_t* heap) {
mi_assert_internal(mi_heap_is_initialized(heap));
// TODO: copy full empty heap instead?
memset(&heap->pages_free_direct, 0, sizeof(heap->pages_free_direct));
-#ifdef MI_MEDIUM_DIRECT
- memset(&heap->pages_free_medium, 0, sizeof(heap->pages_free_medium));
-#endif
_mi_memcpy_aligned(&heap->pages, &_mi_heap_empty.pages, sizeof(heap->pages));
heap->thread_delayed_free = NULL;
heap->page_count = 0;
@@ -330,6 +336,14 @@ void _mi_heap_destroy_pages(mi_heap_t* heap) {
mi_heap_reset_pages(heap);
}
+#if MI_TRACK_HEAP_DESTROY
+static bool mi_cdecl mi_heap_track_block_free(const mi_heap_t* heap, const mi_heap_area_t* area, void* block, size_t block_size, void* arg) {
+ MI_UNUSED(heap); MI_UNUSED(area); MI_UNUSED(arg); MI_UNUSED(block_size);
+ mi_track_free_size(block,mi_usable_size(block));
+ return true;
+}
+#endif
+
void mi_heap_destroy(mi_heap_t* heap) {
mi_assert(heap != NULL);
mi_assert(mi_heap_is_initialized(heap));
@@ -341,6 +355,10 @@ void mi_heap_destroy(mi_heap_t* heap) {
mi_heap_delete(heap);
}
else {
+ // track all blocks as freed
+ #if MI_TRACK_HEAP_DESTROY
+ mi_heap_visit_blocks(heap, true, mi_heap_track_block_free, NULL);
+ #endif
// free all pages
_mi_heap_destroy_pages(heap);
mi_heap_free(heap);
@@ -425,7 +443,7 @@ mi_heap_t* mi_heap_set_default(mi_heap_t* heap) {
mi_assert(mi_heap_is_initialized(heap));
if (heap==NULL || !mi_heap_is_initialized(heap)) return NULL;
mi_assert_expensive(mi_heap_is_valid(heap));
- mi_heap_t* old = mi_get_default_heap();
+ mi_heap_t* old = mi_prim_get_default_heap();
_mi_heap_set_default_direct(heap);
return old;
}
@@ -475,7 +493,7 @@ bool mi_heap_check_owned(mi_heap_t* heap, const void* p) {
}
bool mi_check_owned(const void* p) {
- return mi_heap_check_owned(mi_get_default_heap(), p);
+ return mi_heap_check_owned(mi_prim_get_default_heap(), p);
}
/* -----------------------------------------------------------
@@ -518,9 +536,13 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
uintptr_t free_map[MI_MAX_BLOCKS / sizeof(uintptr_t)];
memset(free_map, 0, sizeof(free_map));
+ #if MI_DEBUG>1
size_t free_count = 0;
+ #endif
for (mi_block_t* block = page->free; block != NULL; block = mi_block_next(page,block)) {
+ #if MI_DEBUG>1
free_count++;
+ #endif
mi_assert_internal((uint8_t*)block >= pstart && (uint8_t*)block < (pstart + psize));
size_t offset = (uint8_t*)block - pstart;
mi_assert_internal(offset % bsize == 0);
@@ -533,7 +555,9 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
mi_assert_internal(page->capacity == (free_count + page->used));
// walk through all blocks skipping the free ones
+ #if MI_DEBUG>1
size_t used_count = 0;
+ #endif
for (size_t i = 0; i < page->capacity; i++) {
size_t bitidx = (i / sizeof(uintptr_t));
size_t bit = i - (bitidx * sizeof(uintptr_t));
@@ -542,7 +566,9 @@ static bool mi_heap_area_visit_blocks(const mi_heap_area_ex_t* xarea, mi_block_v
i += (sizeof(uintptr_t) - 1); // skip a run of free blocks
}
else if ((m & ((uintptr_t)1 << bit)) == 0) {
+ #if MI_DEBUG>1
used_count++;
+ #endif
uint8_t* block = pstart + (i * bsize);
if (!visitor(mi_page_heap(page), area, block, ubsize, arg)) return false;
}
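
The MI_TRACK_HEAP_DESTROY hook above is just a block visitor handed to mimalloc's public mi_heap_visit_blocks, which walks every area of a heap and invokes the callback once per block. A small sketch of the same pattern from application code, counting the live blocks of a heap (the function names other than the mimalloc API are illustrative):

#include <mimalloc.h>
#include <stddef.h>

// same callback shape as mi_heap_track_block_free above
static bool count_block(const mi_heap_t* heap, const mi_heap_area_t* area,
                        void* block, size_t block_size, void* arg) {
  (void)heap; (void)area; (void)block_size;
  if (block != NULL) { (*(size_t*)arg)++; }  // block is NULL for area-only visits
  return true;                               // returning false stops the walk
}

static size_t heap_live_block_count(mi_heap_t* heap) {
  size_t count = 0;
  mi_heap_visit_blocks(heap, true /* visit individual blocks */, &count_block, &count);
  return count;
}
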
diff --git a/source/luametatex/source/libraries/mimalloc/src/init.c b/source/luametatex/source/libraries/mimalloc/src/init.c
index c416208cf..51d42acd9 100644
--- a/source/luametatex/source/libraries/mimalloc/src/init.c
+++ b/source/luametatex/source/libraries/mimalloc/src/init.c
@@ -5,11 +5,13 @@ terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h"
#include <string.h> // memcpy, memset
#include <stdlib.h> // atexit
+
// Empty page used to initialize the small free pages array
const mi_page_t _mi_page_empty = {
0, false, false, false, false,
@@ -22,7 +24,7 @@ const mi_page_t _mi_page_empty = {
0, // used
0, // xblock_size
NULL, // local_free
- #if MI_ENCODE_FREELIST
+ #if (MI_PADDING || MI_ENCODE_FREELIST)
{ 0, 0 },
#endif
MI_ATOMIC_VAR_INIT(0), // xthread_free
@@ -130,6 +132,10 @@ mi_decl_cache_align static const mi_tld_t tld_empty = {
{ MI_STATS_NULL } // stats
};
+mi_threadid_t _mi_thread_id(void) mi_attr_noexcept {
+ return _mi_prim_thread_id();
+}
+
// the thread-local default heap for allocation
mi_decl_thread mi_heap_t* _mi_heap_default = (mi_heap_t*)&_mi_heap_empty;
@@ -259,13 +265,13 @@ static void mi_thread_data_collect(void) {
// Initialize the thread local default heap, called from `mi_thread_init`
static bool _mi_heap_init(void) {
- if (mi_heap_is_initialized(mi_get_default_heap())) return true;
+ if (mi_heap_is_initialized(mi_prim_get_default_heap())) return true;
if (_mi_is_main_thread()) {
// mi_assert_internal(_mi_heap_main.thread_id != 0); // can happen on freeBSD where alloc is called before any initialization
// the main heap is statically allocated
mi_heap_main_init();
_mi_heap_set_default_direct(&_mi_heap_main);
- //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_get_default_heap());
+ //mi_assert_internal(_mi_heap_default->tld->heap_backing == mi_prim_get_default_heap());
}
else {
// use `_mi_os_alloc` to allocate directly from the OS
@@ -363,54 +369,12 @@ static bool _mi_heap_done(mi_heap_t* heap) {
// to set up the thread local keys.
// --------------------------------------------------------
-static void _mi_thread_done(mi_heap_t* default_heap);
-
-#if defined(_WIN32) && defined(MI_SHARED_LIB)
- // nothing to do as it is done in DllMain
-#elif defined(_WIN32) && !defined(MI_SHARED_LIB)
- // use thread local storage keys to detect thread ending
- #include <windows.h>
- #include <fibersapi.h>
- #if (_WIN32_WINNT < 0x600) // before Windows Vista
- WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
- WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex );
- WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
- WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex);
- #endif
- static DWORD mi_fls_key = (DWORD)(-1);
- static void NTAPI mi_fls_done(PVOID value) {
- mi_heap_t* heap = (mi_heap_t*)value;
- if (heap != NULL) {
- _mi_thread_done(heap);
- FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672
- }
- }
-#elif defined(MI_USE_PTHREADS)
- // use pthread local storage keys to detect thread ending
- // (and used with MI_TLS_PTHREADS for the default heap)
- pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1);
- static void mi_pthread_done(void* value) {
- if (value!=NULL) _mi_thread_done((mi_heap_t*)value);
- }
-#elif defined(__wasi__)
-// no pthreads in the WebAssembly Standard Interface
-#else
- #pragma message("define a way to call mi_thread_done when a thread is done")
-#endif
-
// Set up handlers so `mi_thread_done` is called automatically
static void mi_process_setup_auto_thread_done(void) {
static bool tls_initialized = false; // fine if it races
if (tls_initialized) return;
tls_initialized = true;
- #if defined(_WIN32) && defined(MI_SHARED_LIB)
- // nothing to do as it is done in DllMain
- #elif defined(_WIN32) && !defined(MI_SHARED_LIB)
- mi_fls_key = FlsAlloc(&mi_fls_done);
- #elif defined(MI_USE_PTHREADS)
- mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1));
- pthread_key_create(&_mi_heap_default_key, &mi_pthread_done);
- #endif
+ _mi_prim_thread_init_auto_done();
_mi_heap_set_default_direct(&_mi_heap_main);
}
@@ -442,13 +406,26 @@ void mi_thread_init(void) mi_attr_noexcept
}
void mi_thread_done(void) mi_attr_noexcept {
- _mi_thread_done(mi_get_default_heap());
+ _mi_thread_done(NULL);
}
-static void _mi_thread_done(mi_heap_t* heap) {
+void _mi_thread_done(mi_heap_t* heap)
+{
+ // calling with NULL implies using the default heap
+ if (heap == NULL) {
+ heap = mi_prim_get_default_heap();
+ if (heap == NULL) return;
+ }
+
+ // prevent re-entrancy through heap_done/heap_set_default_direct (issue #699)
+ if (!mi_heap_is_initialized(heap)) {
+ return;
+ }
+
+ // adjust stats
mi_atomic_decrement_relaxed(&thread_count);
_mi_stat_decrease(&_mi_stats_main.threads, 1);
-
+
// check thread-id as on Windows shutdown with FLS the main (exit) thread may call this on thread-local heaps...
if (heap->thread_id != _mi_thread_id()) return;
@@ -459,7 +436,7 @@ static void _mi_thread_done(mi_heap_t* heap) {
void _mi_heap_set_default_direct(mi_heap_t* heap) {
mi_assert_internal(heap != NULL);
#if defined(MI_TLS_SLOT)
- mi_tls_slot_set(MI_TLS_SLOT,heap);
+ mi_prim_tls_slot_set(MI_TLS_SLOT,heap);
#elif defined(MI_TLS_PTHREAD_SLOT_OFS)
*mi_tls_pthread_heap_slot() = heap;
#elif defined(MI_TLS_PTHREAD)
@@ -470,16 +447,7 @@ void _mi_heap_set_default_direct(mi_heap_t* heap) {
// ensure the default heap is passed to `_mi_thread_done`
// setting to a non-NULL value also ensures `mi_thread_done` is called.
- #if defined(_WIN32) && defined(MI_SHARED_LIB)
- // nothing to do as it is done in DllMain
- #elif defined(_WIN32) && !defined(MI_SHARED_LIB)
- mi_assert_internal(mi_fls_key != 0);
- FlsSetValue(mi_fls_key, heap);
- #elif defined(MI_USE_PTHREADS)
- if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on freeBSD
- pthread_setspecific(_mi_heap_default_key, heap);
- }
- #endif
+ _mi_prim_thread_associate_default_heap(heap);
}
@@ -492,7 +460,7 @@ static bool os_preloading = true; // true until this module is initialized
static bool mi_redirected = false; // true if malloc redirects to mi_malloc
// Returns true if this module has not been initialized; Don't use C runtime routines until it returns false.
-bool _mi_preloading(void) {
+bool mi_decl_noinline _mi_preloading(void) {
return os_preloading;
}
@@ -535,9 +503,9 @@ static void mi_allocator_done(void) {
// Called once by the process loader
static void mi_process_load(void) {
mi_heap_main_init();
- #if defined(MI_TLS_RECURSE_GUARD)
+ #if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
volatile mi_heap_t* dummy = _mi_heap_default; // access TLS to allocate it before setting tls_initialized to true;
- MI_UNUSED(dummy);
+ if (dummy == NULL) return; // use dummy or otherwise the access may get optimized away (issue #697)
#endif
os_preloading = false;
mi_assert_internal(_mi_is_main_thread());
@@ -568,7 +536,7 @@ static void mi_detect_cpu_features(void) {
// FSRM for fast rep movsb support (AMD Zen3+ (~2020) or Intel Ice Lake+ (~2017))
int32_t cpu_info[4];
__cpuid(cpu_info, 7);
- _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see <https ://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
+ _mi_cpu_has_fsrm = ((cpu_info[3] & (1 << 4)) != 0); // bit 4 of EDX : see <https://en.wikipedia.org/wiki/CPUID#EAX=7,_ECX=0:_Extended_Features>
}
#else
static void mi_detect_cpu_features(void) {
@@ -579,29 +547,34 @@ static void mi_detect_cpu_features(void) {
// Initialize the process; called by thread_init or the process loader
void mi_process_init(void) mi_attr_noexcept {
// ensure we are called once
- if (_mi_process_is_initialized) return;
- _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
+ static mi_atomic_once_t process_init;
+ if (!mi_atomic_once(&process_init)) return;
_mi_process_is_initialized = true;
+ _mi_verbose_message("process init: 0x%zx\n", _mi_thread_id());
mi_process_setup_auto_thread_done();
mi_detect_cpu_features();
_mi_os_init();
mi_heap_main_init();
- #if (MI_DEBUG)
+ #if MI_DEBUG
_mi_verbose_message("debug level : %d\n", MI_DEBUG);
#endif
_mi_verbose_message("secure level: %d\n", MI_SECURE);
_mi_verbose_message("mem tracking: %s\n", MI_TRACK_TOOL);
+ #if MI_TSAN
+ _mi_verbose_message("thread sanitizer enabled\n");
+ #endif
mi_thread_init();
- #if defined(_WIN32) && !defined(MI_SHARED_LIB)
- // When building as a static lib the FLS cleanup happens to early for the main thread.
+ #if defined(_WIN32)
+ // On Windows, when building as a static lib the FLS cleanup happens too early for the main thread.
// To avoid this, set the FLS value for the main thread to NULL so the fls cleanup
// will not call _mi_thread_done on the (still executing) main thread. See issue #508.
- FlsSetValue(mi_fls_key, NULL);
+ _mi_prim_thread_associate_default_heap(NULL);
#endif
mi_stats_reset(); // only call stat reset *after* thread init (or the heap tld == NULL)
+ mi_track_init();
if (mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
size_t pages = mi_option_get_clamp(mi_option_reserve_huge_os_pages, 0, 128*1024);
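
mi_process_init now guards itself with mi_atomic_once instead of a plain boolean, so only the first caller proceeds even under concurrent initialization. A sketch of such an "execute once" guard built from the atomic macros used elsewhere in this diff; the real mi_atomic_once and mi_atomic_once_t live in mimalloc/atomic.h and may be defined differently:

// sketch: true only for the single winning caller
typedef _Atomic(uintptr_t) my_once_t;

static bool my_once(my_once_t* once) {
  if (mi_atomic_load_relaxed(once) != 0) return false;                 // already ran
  uintptr_t expected = 0;
  return mi_atomic_cas_strong_acq_rel(once, &expected, (uintptr_t)1);  // claim the slot
}
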
@@ -629,10 +602,9 @@ static void mi_cdecl mi_process_done(void) {
if (process_done) return;
process_done = true;
- #if defined(_WIN32) && !defined(MI_SHARED_LIB)
- FlsFree(mi_fls_key); // call thread-done on all threads (except the main thread) to prevent dangling callback pointer if statically linked with a DLL; Issue #208
- #endif
-
+ // release any thread specific resources and ensure _mi_thread_done is called on all but the main thread
+ _mi_prim_thread_done_auto_done();
+
#ifndef MI_SKIP_COLLECT_ON_EXIT
#if (MI_DEBUG != 0) || !defined(MI_SHARED_LIB)
// free all memory if possible on process exit. This is not needed for a stand-alone process
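
The platform-specific thread-exit plumbing removed above (Windows FLS keys, pthread destructor keys) now hides behind three prim hooks. A sketch of a pthread-based implementation of those hooks, essentially the deleted code relocated into the prim layer; the shipped src/prim/unix/prim.c may differ in detail:

#include <pthread.h>

// pthread destructor key used to detect thread exit (mirrors the removed code)
static pthread_key_t mi_heap_default_key = (pthread_key_t)(-1);

static void mi_pthread_done(void* value) {
  if (value != NULL) { _mi_thread_done((mi_heap_t*)value); }   // flush the exiting thread's heap
}

void _mi_prim_thread_init_auto_done(void) {
  pthread_key_create(&mi_heap_default_key, &mi_pthread_done);  // register the destructor once
}

void _mi_prim_thread_done_auto_done(void) {
  // nothing to do for pthreads; the key destructor runs per exiting thread
}

void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
  if (mi_heap_default_key != (pthread_key_t)(-1)) {
    pthread_setspecific(mi_heap_default_key, heap);            // a non-NULL value arms mi_pthread_done
  }
}
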
diff --git a/source/luametatex/source/libraries/mimalloc/src/options.c b/source/luametatex/source/libraries/mimalloc/src/options.c
index e53538f5f..c39e20526 100644
--- a/source/luametatex/source/libraries/mimalloc/src/options.c
+++ b/source/luametatex/source/libraries/mimalloc/src/options.c
@@ -5,19 +5,14 @@ terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h" // mi_prim_out_stderr
-#include <stdio.h>
-#include <stdlib.h> // strtol
-#include <string.h> // strncpy, strncat, strlen, strstr
-#include <ctype.h> // toupper
+#include <stdio.h> // FILE
+#include <stdlib.h> // abort
#include <stdarg.h>
-#ifdef _MSC_VER
-#pragma warning(disable:4996) // strncpy, strncat
-#endif
-
static long mi_max_error_count = 16; // stop outputting errors after this (use < 0 for no limit)
static long mi_max_warning_count = 16; // stop outputting warnings after this (use < 0 for no limit)
@@ -28,9 +23,6 @@ int mi_version(void) mi_attr_noexcept {
return MI_MALLOC_VERSION;
}
-#ifdef _WIN32
-#include <conio.h>
-#endif
// --------------------------------------------------------
// Options
@@ -171,41 +163,11 @@ void mi_option_disable(mi_option_t option) {
mi_option_set_enabled(option,false);
}
-
static void mi_cdecl mi_out_stderr(const char* msg, void* arg) {
MI_UNUSED(arg);
- if (msg == NULL) return;
- #ifdef _WIN32
- // on windows with redirection, the C runtime cannot handle locale dependent output
- // after the main thread closes so we use direct console output.
- if (!_mi_preloading()) {
- // _cputs(msg); // _cputs cannot be used at is aborts if it fails to lock the console
- static HANDLE hcon = INVALID_HANDLE_VALUE;
- static bool hconIsConsole;
- if (hcon == INVALID_HANDLE_VALUE) {
- CONSOLE_SCREEN_BUFFER_INFO sbi;
- hcon = GetStdHandle(STD_ERROR_HANDLE);
- hconIsConsole = ((hcon != INVALID_HANDLE_VALUE) && GetConsoleScreenBufferInfo(hcon, &sbi));
- }
- const size_t len = strlen(msg);
- if (len > 0 && len < UINT32_MAX) {
- DWORD written = 0;
- if (hconIsConsole) {
- WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL);
- }
- else if (hcon != INVALID_HANDLE_VALUE) {
- // use direct write if stderr was redirected
- WriteFile(hcon, msg, (DWORD)len, &written, NULL);
- }
- else {
- // finally fall back to fputs after all
- fputs(msg, stderr);
- }
- }
+ if (msg != NULL && msg[0] != 0) {
+ _mi_prim_out_stderr(msg);
}
- #else
- fputs(msg, stderr);
- #endif
}
// Since an output function can be registered earliest in the `main`
@@ -222,7 +184,7 @@ static void mi_cdecl mi_out_buf(const char* msg, void* arg) {
MI_UNUSED(arg);
if (msg==NULL) return;
if (mi_atomic_load_relaxed(&out_len)>=MI_MAX_DELAY_OUTPUT) return;
- size_t n = strlen(msg);
+ size_t n = _mi_strlen(msg);
if (n==0) return;
// claim space
size_t start = mi_atomic_add_acq_rel(&out_len, n);
@@ -314,7 +276,7 @@ static mi_decl_noinline void mi_recurse_exit_prim(void) {
static bool mi_recurse_enter(void) {
#if defined(__APPLE__) || defined(MI_TLS_RECURSE_GUARD)
- if (_mi_preloading()) return true;
+ if (_mi_preloading()) return false;
#endif
return mi_recurse_enter_prim();
}
@@ -359,9 +321,9 @@ void _mi_fprintf( mi_output_fun* out, void* arg, const char* fmt, ... ) {
}
static void mi_vfprintf_thread(mi_output_fun* out, void* arg, const char* prefix, const char* fmt, va_list args) {
- if (prefix != NULL && strlen(prefix) <= 32 && !_mi_is_main_thread()) {
+ if (prefix != NULL && _mi_strnlen(prefix,33) <= 32 && !_mi_is_main_thread()) {
char tprefix[64];
-/* HH */ snprintf(tprefix, sizeof(tprefix), "%sthread 0x%x: ", prefix, (unsigned) _mi_thread_id()); /* HH: %z is unknown */
+ snprintf(tprefix, sizeof(tprefix), "%sthread 0x%llx: ", prefix, (unsigned long long)_mi_thread_id());
mi_vfprintf(out, arg, tprefix, fmt, args);
}
else {
@@ -464,8 +426,20 @@ void _mi_error_message(int err, const char* fmt, ...) {
// --------------------------------------------------------
// Initialize options by checking the environment
// --------------------------------------------------------
+char _mi_toupper(char c) {
+ if (c >= 'a' && c <= 'z') return (c - 'a' + 'A');
+ else return c;
+}
-static void mi_strlcpy(char* dest, const char* src, size_t dest_size) {
+int _mi_strnicmp(const char* s, const char* t, size_t n) {
+ if (n == 0) return 0;
+ for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) {
+ if (_mi_toupper(*s) != _mi_toupper(*t)) break;
+ }
+ return (n == 0 ? 0 : *s - *t);
+}
+
+void _mi_strlcpy(char* dest, const char* src, size_t dest_size) {
if (dest==NULL || src==NULL || dest_size == 0) return;
// copy until end of src, or when dest is (almost) full
while (*src != 0 && dest_size > 1) {
@@ -476,7 +450,7 @@ static void mi_strlcpy(char* dest, const char* src, size_t dest_size) {
*dest = 0;
}
-static void mi_strlcat(char* dest, const char* src, size_t dest_size) {
+void _mi_strlcat(char* dest, const char* src, size_t dest_size) {
if (dest==NULL || src==NULL || dest_size == 0) return;
// find end of string in the dest buffer
while (*dest != 0 && dest_size > 1) {
@@ -484,7 +458,21 @@ static void mi_strlcat(char* dest, const char* src, size_t dest_size) {
dest_size--;
}
// and catenate
- mi_strlcpy(dest, src, dest_size);
+ _mi_strlcpy(dest, src, dest_size);
+}
+
+size_t _mi_strlen(const char* s) {
+ if (s==NULL) return 0;
+ size_t len = 0;
+ while(s[len] != 0) { len++; }
+ return len;
+}
+
+size_t _mi_strnlen(const char* s, size_t max_len) {
+ if (s==NULL) return 0;
+ size_t len = 0;
+ while(s[len] != 0 && len < max_len) { len++; }
+ return len;
}
#ifdef MI_NO_GETENV
@@ -495,93 +483,27 @@ static bool mi_getenv(const char* name, char* result, size_t result_size) {
return false;
}
#else
-#if defined _WIN32
-// On Windows use GetEnvironmentVariable instead of getenv to work
-// reliably even when this is invoked before the C runtime is initialized.
-// i.e. when `_mi_preloading() == true`.
-// Note: on windows, environment names are not case sensitive.
-#include <windows.h>
static bool mi_getenv(const char* name, char* result, size_t result_size) {
- result[0] = 0;
- size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size);
- return (len > 0 && len < result_size);
-}
-#elif !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0)
-// On Posix systemsr use `environ` to acces environment variables
-// even before the C runtime is initialized.
-#if defined(__APPLE__) && defined(__has_include) && __has_include(<crt_externs.h>)
-#include <crt_externs.h>
-static char** mi_get_environ(void) {
- return (*_NSGetEnviron());
-}
-#else
-extern char** environ;
-static char** mi_get_environ(void) {
- return environ;
+ if (name==NULL || result == NULL || result_size < 64) return false;
+ return _mi_prim_getenv(name,result,result_size);
}
#endif
-static int mi_strnicmp(const char* s, const char* t, size_t n) {
- if (n == 0) return 0;
- for (; *s != 0 && *t != 0 && n > 0; s++, t++, n--) {
- if (toupper(*s) != toupper(*t)) break;
- }
- return (n == 0 ? 0 : *s - *t);
-}
-static bool mi_getenv(const char* name, char* result, size_t result_size) {
- if (name==NULL) return false;
- const size_t len = strlen(name);
- if (len == 0) return false;
- char** env = mi_get_environ();
- if (env == NULL) return false;
- // compare up to 256 entries
- for (int i = 0; i < 256 && env[i] != NULL; i++) {
- const char* s = env[i];
- if (mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive
- // found it
- mi_strlcpy(result, s + len + 1, result_size);
- return true;
- }
- }
- return false;
-}
-#else
-// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime
-static bool mi_getenv(const char* name, char* result, size_t result_size) {
- // cannot call getenv() when still initializing the C runtime.
- if (_mi_preloading()) return false;
- const char* s = getenv(name);
- if (s == NULL) {
- // we check the upper case name too.
- char buf[64+1];
- size_t len = strlen(name);
- if (len >= sizeof(buf)) len = sizeof(buf) - 1;
- for (size_t i = 0; i < len; i++) {
- buf[i] = toupper(name[i]);
- }
- buf[len] = 0;
- s = getenv(buf);
- }
- if (s != NULL && strlen(s) < result_size) {
- mi_strlcpy(result, s, result_size);
- return true;
- }
- else {
- return false;
- }
-}
-#endif // !MI_USE_ENVIRON
-#endif // !MI_NO_GETENV
+
+// TODO: implement ourselves to reduce dependencies on the C runtime
+#include <stdlib.h> // strtol
+#include <string.h> // strstr
+
static void mi_option_init(mi_option_desc_t* desc) {
// Read option value from the environment
char s[64+1];
char buf[64+1];
- mi_strlcpy(buf, "mimalloc_", sizeof(buf));
- mi_strlcat(buf, desc->name, sizeof(buf));
+ _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
+ _mi_strlcat(buf, desc->name, sizeof(buf));
bool found = mi_getenv(buf,s,sizeof(s));
if (!found && desc->legacy_name != NULL) {
- mi_strlcpy(buf, "mimalloc_", sizeof(buf));
- mi_strlcat(buf, desc->legacy_name, sizeof(buf));
+ _mi_strlcpy(buf, "mimalloc_", sizeof(buf));
+ _mi_strlcat(buf, desc->legacy_name, sizeof(buf));
found = mi_getenv(buf,s,sizeof(s));
if (found) {
_mi_warning_message("environment option \"mimalloc_%s\" is deprecated -- use \"mimalloc_%s\" instead.\n", desc->legacy_name, desc->name );
@@ -589,10 +511,9 @@ static void mi_option_init(mi_option_desc_t* desc) {
}
if (found) {
- size_t len = strlen(s);
- if (len >= sizeof(buf)) len = sizeof(buf) - 1;
+ size_t len = _mi_strnlen(s,sizeof(buf)-1);
for (size_t i = 0; i < len; i++) {
- buf[i] = (char)toupper(s[i]);
+ buf[i] = _mi_toupper(s[i]);
}
buf[len] = 0;
if (buf[0]==0 || strstr("1;TRUE;YES;ON", buf) != NULL) {
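
The freestanding helpers introduced here (_mi_toupper, _mi_strnicmp, _mi_strlcpy, _mi_strlcat, _mi_strlen, _mi_strnlen) replace libc calls so option parsing keeps working before the C runtime is initialized. A simplified sketch of how they compose for an environment lookup inside options.c; the wrapper function is illustrative, and the real mi_option_init matches the upper-cased value against "1;TRUE;YES;ON" with strstr as shown above:

// illustrative helper using only the new freestanding string routines;
// mi_getenv is the static wrapper above (now backed by _mi_prim_getenv)
static bool my_option_is_on(const char* name) {
  char key[64+1];
  char val[64+1];
  _mi_strlcpy(key, "mimalloc_", sizeof(key));
  _mi_strlcat(key, name, sizeof(key));                  // e.g. "mimalloc_verbose"
  if (!mi_getenv(key, val, sizeof(val))) return false;  // not set in the environment
  const size_t len = _mi_strnlen(val, sizeof(val)-1);
  for (size_t i = 0; i < len; i++) { val[i] = _mi_toupper(val[i]); }
  val[len] = 0;
  return (val[0] == 0 || val[0] == '1' ||
          _mi_strnicmp(val, "ON", 3) == 0 || _mi_strnicmp(val, "TRUE", 5) == 0);
}
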
diff --git a/source/luametatex/source/libraries/mimalloc/src/os.c b/source/luametatex/source/libraries/mimalloc/src/os.c
index 0f9847417..75895c1b1 100644
--- a/source/luametatex/source/libraries/mimalloc/src/os.c
+++ b/source/luametatex/source/libraries/mimalloc/src/os.c
@@ -1,118 +1,48 @@
/* ----------------------------------------------------------------------------
-Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
-#ifndef _DEFAULT_SOURCE
-#define _DEFAULT_SOURCE // ensure mmap flags are defined
-#endif
-
-#if defined(__sun)
-// illumos provides new mman.h api when any of these are defined
-// otherwise the old api based on caddr_t which predates the void pointers one.
-// stock solaris provides only the former, chose to atomically to discard those
-// flags only here rather than project wide tough.
-#undef _XOPEN_SOURCE
-#undef _POSIX_C_SOURCE
-#endif
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
-#include <string.h> // strerror
-
-#ifdef _MSC_VER
-#pragma warning(disable:4996) // strerror
-#endif
-
-#if defined(__wasi__)
-#define MI_USE_SBRK
-#endif
-
-#if defined(_WIN32)
-#include <windows.h>
-#elif defined(__wasi__)
-#include <unistd.h> // sbrk
-#else
-#include <sys/mman.h> // mmap
-#include <unistd.h> // sysconf
-#if defined(__linux__)
-#include <features.h>
-#include <fcntl.h>
-#if defined(__GLIBC__)
-#include <linux/mman.h> // linux mmap flags
-#else
-#include <sys/mman.h>
-#endif
-#endif
-#if defined(__APPLE__)
-#include <TargetConditionals.h>
-#if !TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR
-#include <mach/vm_statistics.h>
-#endif
-#endif
-#if defined(__FreeBSD__) || defined(__DragonFly__)
-#include <sys/param.h>
-#if __FreeBSD_version >= 1200000
-#include <sys/cpuset.h>
-#include <sys/domainset.h>
-#endif
-#include <sys/sysctl.h>
-#endif
-#endif
/* -----------------------------------------------------------
Initialization.
On windows initializes support for aligned allocation and
large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
----------------------------------------------------------- */
-bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
-bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
-static void* mi_align_up_ptr(void* p, size_t alignment) {
- return (void*)_mi_align_up((uintptr_t)p, alignment);
-}
-
-static void* mi_align_down_ptr(void* p, size_t alignment) {
- return (void*)_mi_align_down((uintptr_t)p, alignment);
-}
-
-
-// page size (initialized properly in `os_init`)
-static size_t os_page_size = 4096;
-
-// minimal allocation granularity
-static size_t os_alloc_granularity = 4096;
-
-// if non-zero, use large page allocation
-static size_t large_os_page_size = 0;
-
-// is memory overcommit allowed?
-// set dynamically in _mi_os_init (and if true we use MAP_NORESERVE)
-static bool os_overcommit = true;
+static mi_os_mem_config_t mi_os_mem_config = {
+ 4096, // page size
+ 0, // large page size (usually 2MiB)
+ 4096, // allocation granularity
+ true, // has overcommit? (if true we use MAP_NORESERVE on mmap systems)
+ false // must free whole? (on mmap systems we can free anywhere in a mapped range, but on Windows we must free the entire span)
+};
bool _mi_os_has_overcommit(void) {
- return os_overcommit;
+ return mi_os_mem_config.has_overcommit;
}
// OS (small) page size
size_t _mi_os_page_size(void) {
- return os_page_size;
+ return mi_os_mem_config.page_size;
}
// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
size_t _mi_os_large_page_size(void) {
- return (large_os_page_size != 0 ? large_os_page_size : _mi_os_page_size());
+ return (mi_os_mem_config.large_page_size != 0 ? mi_os_mem_config.large_page_size : _mi_os_page_size());
}
-#if !defined(MI_USE_SBRK) && !defined(__wasi__)
-static bool use_large_os_page(size_t size, size_t alignment) {
+bool _mi_os_use_large_page(size_t size, size_t alignment) {
// if we have access, check the size and alignment requirements
- if (large_os_page_size == 0 || !mi_option_is_enabled(mi_option_large_os_pages)) return false;
- return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0);
+ if (mi_os_mem_config.large_page_size == 0 || !mi_option_is_enabled(mi_option_large_os_pages)) return false;
+ return ((size % mi_os_mem_config.large_page_size) == 0 && (alignment % mi_os_mem_config.large_page_size) == 0);
}
-#endif
// round to a good OS allocation size (bounded by max 12.5% waste)
size_t _mi_os_good_alloc_size(size_t size) {
@@ -126,177 +56,24 @@ size_t _mi_os_good_alloc_size(size_t size) {
return _mi_align_up(size, align_size);
}
-#if defined(_WIN32)
-// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016.
-// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility)
-// NtAllocateVirtualAllocEx is used for huge OS page allocation (1GiB)
-// We define a minimal MEM_EXTENDED_PARAMETER ourselves in order to be able to compile with older SDK's.
-typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E {
- MiMemExtendedParameterInvalidType = 0,
- MiMemExtendedParameterAddressRequirements,
- MiMemExtendedParameterNumaNode,
- MiMemExtendedParameterPartitionHandle,
- MiMemExtendedParameterUserPhysicalHandle,
- MiMemExtendedParameterAttributeFlags,
- MiMemExtendedParameterMax
-} MI_MEM_EXTENDED_PARAMETER_TYPE;
-
-typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S {
- struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type;
- union { DWORD64 ULong64; PVOID Pointer; SIZE_T Size; HANDLE Handle; DWORD ULong; } Arg;
-} MI_MEM_EXTENDED_PARAMETER;
-
-typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S {
- PVOID LowestStartingAddress;
- PVOID HighestEndingAddress;
- SIZE_T Alignment;
-} MI_MEM_ADDRESS_REQUIREMENTS;
-
-#define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010
-
-#include <winternl.h>
-typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG);
-typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG);
-static PVirtualAlloc2 pVirtualAlloc2 = NULL;
-static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
-
-// Similarly, GetNumaProcesorNodeEx is only supported since Windows 7
-typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER;
-
-typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber);
-typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber);
-typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask);
-typedef BOOL (__stdcall *PGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber);
-static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL;
-static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL;
-static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL;
-static PGetNumaProcessorNode pGetNumaProcessorNode = NULL;
-
-static bool mi_win_enable_large_os_pages(void)
-{
- if (large_os_page_size > 0) return true;
-
- // Try to see if large OS pages are supported
- // To use large pages on Windows, we first need access permission
- // Set "Lock pages in memory" permission in the group policy editor
- // <https://devblogs.microsoft.com/oldnewthing/20110128-00/?p=11643>
- unsigned long err = 0;
- HANDLE token = NULL;
- BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
- if (ok) {
- TOKEN_PRIVILEGES tp;
- ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid);
- if (ok) {
- tp.PrivilegeCount = 1;
- tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
- ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
- if (ok) {
- err = GetLastError();
- ok = (err == ERROR_SUCCESS);
- if (ok) {
- large_os_page_size = GetLargePageMinimum();
- }
- }
- }
- CloseHandle(token);
- }
- if (!ok) {
- if (err == 0) err = GetLastError();
- _mi_warning_message("cannot enable large OS page support, error %lu\n", err);
- }
- return (ok!=0);
-}
-
-void _mi_os_init(void)
-{
- os_overcommit = false;
- // get the page size
- SYSTEM_INFO si;
- GetSystemInfo(&si);
- if (si.dwPageSize > 0) os_page_size = si.dwPageSize;
- if (si.dwAllocationGranularity > 0) os_alloc_granularity = si.dwAllocationGranularity;
- // get the VirtualAlloc2 function
- HINSTANCE hDll;
- hDll = LoadLibrary(TEXT("kernelbase.dll"));
- if (hDll != NULL) {
- // use VirtualAlloc2FromApp if possible as it is available to Windows store apps
- pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp");
- if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2");
- FreeLibrary(hDll);
- }
- // NtAllocateVirtualMemoryEx is used for huge page allocation
- hDll = LoadLibrary(TEXT("ntdll.dll"));
- if (hDll != NULL) {
- pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
- FreeLibrary(hDll);
- }
- // Try to use Win7+ numa API
- hDll = LoadLibrary(TEXT("kernel32.dll"));
- if (hDll != NULL) {
- pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx");
- pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx");
- pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx");
- pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode");
- FreeLibrary(hDll);
- }
- if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
- mi_win_enable_large_os_pages();
- }
-}
-#elif defined(__wasi__)
void _mi_os_init(void) {
- os_overcommit = false;
- os_page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB
- os_alloc_granularity = 16;
+ _mi_prim_mem_init(&mi_os_mem_config);
}
-#else // generic unix
-
-static void os_detect_overcommit(void) {
-#if defined(__linux__)
- int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
- if (fd < 0) return;
- char buf[32];
- ssize_t nread = read(fd, &buf, sizeof(buf));
- close(fd);
- // <https://www.kernel.org/doc/Documentation/vm/overcommit-accounting>
- // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE)
- if (nread >= 1) {
- os_overcommit = (buf[0] == '0' || buf[0] == '1');
- }
-#elif defined(__FreeBSD__)
- int val = 0;
- size_t olen = sizeof(val);
- if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) {
- os_overcommit = (val != 0);
- }
-#else
- // default: overcommit is true
-#endif
-}
-void _mi_os_init(void) {
- // get the page size
- long result = sysconf(_SC_PAGESIZE);
- if (result > 0) {
- os_page_size = (size_t)result;
- os_alloc_granularity = os_page_size;
- }
- large_os_page_size = 2*MI_MiB; // TODO: can we query the OS for this?
- os_detect_overcommit();
-}
-#endif
+/* -----------------------------------------------------------
+ Util
+-------------------------------------------------------------- */
+bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
+bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
+static void* mi_align_up_ptr(void* p, size_t alignment) {
+ return (void*)_mi_align_up((uintptr_t)p, alignment);
+}
-#if defined(MADV_NORMAL)
-static int mi_madvise(void* addr, size_t length, int advice) {
- #if defined(__sun)
- return madvise((caddr_t)addr, length, advice); // Solaris needs cast (issue #520)
- #else
- return madvise(addr, length, advice);
- #endif
+static void* mi_align_down_ptr(void* p, size_t alignment) {
+ return (void*)_mi_align_down((uintptr_t)p, alignment);
}
-#endif
/* -----------------------------------------------------------
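
With this hunk all per-platform probing collapses into one call: _mi_os_init simply asks the prim layer to fill the mi_os_mem_config_t shown above. A sketch of a Linux-flavoured _mi_prim_mem_init, reconstructed from the code removed here; the alloc_granularity field name is an assumption, the other field names appear elsewhere in this diff, and the real src/prim/unix/prim.c is more elaborate (FreeBSD sysctl handling, etc.):

#include <unistd.h>    // sysconf, read, close
#include <fcntl.h>     // open

void _mi_prim_mem_init(mi_os_mem_config_t* config) {
  long psize = sysconf(_SC_PAGESIZE);
  if (psize > 0) {
    config->page_size = (size_t)psize;
    config->alloc_granularity = (size_t)psize;   // assumed field name
  }
  config->large_page_size = 2*MI_MiB;            // typical huge/THP page size
  config->must_free_whole = false;               // munmap can free partial ranges
  config->has_overcommit  = true;                // default, refined below on Linux
#if defined(__linux__)
  int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
  if (fd >= 0) {
    char buf[32]; ssize_t nread = read(fd, buf, sizeof(buf)); close(fd);
    // 0: heuristic overcommit, 1: always, 2: never
    if (nread >= 1) { config->has_overcommit = (buf[0] == '0' || buf[0] == '1'); }
  }
#endif
}
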
@@ -319,7 +96,7 @@ static mi_decl_cache_align _Atomic(uintptr_t)aligned_base;
#define MI_HINT_AREA ((uintptr_t)4 << 40) // upto 6TiB (since before win8 there is "only" 8TiB available to processes)
#define MI_HINT_MAX ((uintptr_t)30 << 40) // wrap after 30TiB (area after 32TiB is used for huge OS pages)
-static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size)
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size)
{
if (try_alignment <= 1 || try_alignment > MI_SEGMENT_SIZE) return NULL;
size = _mi_align_up(size, MI_SEGMENT_SIZE);
@@ -332,7 +109,7 @@ static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size)
if (hint == 0 || hint > MI_HINT_MAX) { // wrap or initialize
uintptr_t init = MI_HINT_BASE;
#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of aligned allocations unless in debug mode
- uintptr_t r = _mi_heap_random_next(mi_get_default_heap());
+ uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
init = init + ((MI_SEGMENT_SIZE * ((r>>17) & 0xFFFFF)) % MI_HINT_AREA); // (randomly 20 bits)*4MiB == 0 to 4TiB
#endif
uintptr_t expected = hint + size;
@@ -343,361 +120,39 @@ static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size)
return (void*)hint;
}
#else
-static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
+void* _mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
MI_UNUSED(try_alignment); MI_UNUSED(size);
return NULL;
}
#endif
+
/* -----------------------------------------------------------
Free memory
-------------------------------------------------------------- */
-static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats_t* stats)
-{
- if (addr == NULL || size == 0) return true; // || _mi_os_is_huge_reserved(addr)
- bool err = false;
-#if defined(_WIN32)
- DWORD errcode = 0;
- err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
- if (err) { errcode = GetLastError(); }
- if (errcode == ERROR_INVALID_ADDRESS) {
- // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside
- // the memory region returned by VirtualAlloc; in that case we need to free using
- // the start of the region.
- MEMORY_BASIC_INFORMATION info = { 0 };
- VirtualQuery(addr, &info, sizeof(info));
- if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) {
- errcode = 0;
- err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0);
- if (err) { errcode = GetLastError(); }
- }
- }
- if (errcode != 0) {
- _mi_warning_message("unable to release OS memory: error code 0x%x, addr: %p, size: %zu\n", errcode, addr, size);
- }
-#elif defined(MI_USE_SBRK) || defined(__wasi__)
- err = false; // sbrk heap cannot be shrunk
-#else
- err = (munmap(addr, size) == -1);
- if (err) {
- _mi_warning_message("unable to release OS memory: %s, addr: %p, size: %zu\n", strerror(errno), addr, size);
+static void mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats_t* tld_stats) {
+ MI_UNUSED(tld_stats);
+ mi_assert_internal((size % _mi_os_page_size()) == 0);
+ if (addr == NULL || size == 0) return; // || _mi_os_is_huge_reserved(addr)
+ int err = _mi_prim_free(addr, size);
+ if (err != 0) {
+ _mi_warning_message("unable to free OS memory (error: %d (0x%x), size: 0x%zx bytes, address: %p)\n", err, err, size, addr);
}
-#endif
+ mi_stats_t* stats = &_mi_stats_main;
if (was_committed) { _mi_stat_decrease(&stats->committed, size); }
_mi_stat_decrease(&stats->reserved, size);
- return !err;
-}
-
-
-/* -----------------------------------------------------------
- Raw allocation on Windows (VirtualAlloc)
--------------------------------------------------------------- */
-
-#ifdef _WIN32
-
-#define MEM_COMMIT_RESERVE (MEM_COMMIT|MEM_RESERVE)
-
-static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
-#if (MI_INTPTR_SIZE >= 8)
- // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations
- if (addr == NULL) {
- void* hint = mi_os_get_aligned_hint(try_alignment,size);
- if (hint != NULL) {
- void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
- if (p != NULL) return p;
- _mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags);
- // fall through on error
- }
- }
-#endif
- // on modern Windows try use VirtualAlloc2 for aligned allocation
- if (try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
- MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
- reqs.Alignment = try_alignment;
- MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
- param.Type.Type = MiMemExtendedParameterAddressRequirements;
- param.Arg.Pointer = &reqs;
- void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
- if (p != NULL) return p;
- _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
- // fall through on error
- }
- // last resort
- return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
}
-static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
- mi_assert_internal(!(large_only && !allow_large));
- static _Atomic(size_t) large_page_try_ok; // = 0;
- void* p = NULL;
- // Try to allocate large OS pages (2MiB) if allowed or required.
- if ((large_only || use_large_os_page(size, try_alignment))
- && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
- size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
- if (!large_only && try_ok > 0) {
- // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
- // therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
- mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
- }
- else {
- // large OS pages must always reserve and commit.
- *is_large = true;
- p = mi_win_virtual_allocx(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
- if (large_only) return p;
- // fall back to non-large page allocation on error (`p == NULL`).
- if (p == NULL) {
- mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations
- }
- }
- }
- // Fall back to regular page allocation
- if (p == NULL) {
- *is_large = ((flags&MEM_LARGE_PAGES) != 0);
- p = mi_win_virtual_allocx(addr, size, try_alignment, flags);
- }
- if (p == NULL) {
- _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large);
- }
- return p;
-}
-
-/* -----------------------------------------------------------
- Raw allocation using `sbrk` or `wasm_memory_grow`
--------------------------------------------------------------- */
-
-#elif defined(MI_USE_SBRK) || defined(__wasi__)
-#if defined(MI_USE_SBRK)
- static void* mi_memory_grow( size_t size ) {
- void* p = sbrk(size);
- if (p == (void*)(-1)) return NULL;
- #if !defined(__wasi__) // on wasi this is always zero initialized already (?)
- memset(p,0,size);
- #endif
- return p;
- }
-#elif defined(__wasi__)
- static void* mi_memory_grow( size_t size ) {
- size_t base = (size > 0 ? __builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size()))
- : __builtin_wasm_memory_size(0));
- if (base == SIZE_MAX) return NULL;
- return (void*)(base * _mi_os_page_size());
- }
-#endif
-
-#if defined(MI_USE_PTHREADS)
-static pthread_mutex_t mi_heap_grow_mutex = PTHREAD_MUTEX_INITIALIZER;
-#endif
-
-static void* mi_heap_grow(size_t size, size_t try_alignment) {
- void* p = NULL;
- if (try_alignment <= 1) {
- // `sbrk` is not thread safe in general so try to protect it (we could skip this on WASM but leave it in for now)
- #if defined(MI_USE_PTHREADS)
- pthread_mutex_lock(&mi_heap_grow_mutex);
- #endif
- p = mi_memory_grow(size);
- #if defined(MI_USE_PTHREADS)
- pthread_mutex_unlock(&mi_heap_grow_mutex);
- #endif
- }
- else {
- void* base = NULL;
- size_t alloc_size = 0;
- // to allocate aligned use a lock to try to avoid thread interaction
- // between getting the current size and actual allocation
- // (also, `sbrk` is not thread safe in general)
- #if defined(MI_USE_PTHREADS)
- pthread_mutex_lock(&mi_heap_grow_mutex);
- #endif
- {
- void* current = mi_memory_grow(0); // get current size
- if (current != NULL) {
- void* aligned_current = mi_align_up_ptr(current, try_alignment); // and align from there to minimize wasted space
- alloc_size = _mi_align_up( ((uint8_t*)aligned_current - (uint8_t*)current) + size, _mi_os_page_size());
- base = mi_memory_grow(alloc_size);
- }
- }
- #if defined(MI_USE_PTHREADS)
- pthread_mutex_unlock(&mi_heap_grow_mutex);
- #endif
- if (base != NULL) {
- p = mi_align_up_ptr(base, try_alignment);
- if ((uint8_t*)p + size > (uint8_t*)base + alloc_size) {
- // another thread used wasm_memory_grow/sbrk in-between and we do not have enough
- // space after alignment. Give up (and waste the space as we cannot shrink :-( )
- // (in `mi_os_mem_alloc_aligned` this will fall back to overallocation to align)
- p = NULL;
- }
- }
- }
- if (p == NULL) {
- _mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment);
- errno = ENOMEM;
- return NULL;
- }
- mi_assert_internal( try_alignment == 0 || (uintptr_t)p % try_alignment == 0 );
- return p;
-}
-
-/* -----------------------------------------------------------
- Raw allocation on Unix's (mmap)
--------------------------------------------------------------- */
-#else
-#define MI_OS_USE_MMAP
-static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
- MI_UNUSED(try_alignment);
- #if defined(MAP_ALIGNED) // BSD
- if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
- size_t n = mi_bsr(try_alignment);
- if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
- flags |= MAP_ALIGNED(n);
- void* p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
- if (p!=MAP_FAILED) return p;
- // fall back to regular mmap
- }
- }
- #elif defined(MAP_ALIGN) // Solaris
- if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
- void* p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment
- if (p!=MAP_FAILED) return p;
- // fall back to regular mmap
- }
- #endif
- #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
- // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations
- if (addr == NULL) {
- void* hint = mi_os_get_aligned_hint(try_alignment, size);
- if (hint != NULL) {
- void* p = mmap(hint, size, protect_flags, flags, fd, 0);
- if (p!=MAP_FAILED) return p;
- // fall back to regular mmap
- }
- }
- #endif
- // regular mmap
- void* p = mmap(addr, size, protect_flags, flags, fd, 0);
- if (p!=MAP_FAILED) return p;
- // failed to allocate
- return NULL;
-}
-static int mi_unix_mmap_fd(void) {
-#if defined(VM_MAKE_TAG)
- // macOS: tracking anonymous page with a specific ID. (All up to 98 are taken officially but LLVM sanitizers had taken 99)
- int os_tag = (int)mi_option_get(mi_option_os_tag);
- if (os_tag < 100 || os_tag > 255) os_tag = 100;
- return VM_MAKE_TAG(os_tag);
-#else
- return -1;
-#endif
+void _mi_os_free_ex(void* addr, size_t size, bool was_committed, mi_stats_t* tld_stats) {
+ const size_t csize = _mi_os_good_alloc_size(size);
+ mi_os_mem_free(addr,csize,was_committed,tld_stats);
}
-static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
- void* p = NULL;
- #if !defined(MAP_ANONYMOUS)
- #define MAP_ANONYMOUS MAP_ANON
- #endif
- #if !defined(MAP_NORESERVE)
- #define MAP_NORESERVE 0
- #endif
- const int fd = mi_unix_mmap_fd();
- int flags = MAP_PRIVATE | MAP_ANONYMOUS;
- if (_mi_os_has_overcommit()) {
- flags |= MAP_NORESERVE;
- }
- #if defined(PROT_MAX)
- protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
- #endif
- // huge page allocation
- if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) {
- static _Atomic(size_t) large_page_try_ok; // = 0;
- size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
- if (!large_only && try_ok > 0) {
- // If the OS is not configured for large OS pages, or the user does not have
- // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
- // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times
- // to avoid too many failing calls to mmap.
- mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
- }
- else {
- int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux
- int lfd = fd;
- #ifdef MAP_ALIGNED_SUPER
- lflags |= MAP_ALIGNED_SUPER;
- #endif
- #ifdef MAP_HUGETLB
- lflags |= MAP_HUGETLB;
- #endif
- #ifdef MAP_HUGE_1GB
- static bool mi_huge_pages_available = true;
- if ((size % MI_GiB) == 0 && mi_huge_pages_available) {
- lflags |= MAP_HUGE_1GB;
- }
- else
- #endif
- {
- #ifdef MAP_HUGE_2MB
- lflags |= MAP_HUGE_2MB;
- #endif
- }
- #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
- lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
- #endif
- if (large_only || lflags != flags) {
- // try large OS page allocation
- *is_large = true;
- p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
- #ifdef MAP_HUGE_1GB
- if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) {
- mi_huge_pages_available = false; // don't try huge 1GiB pages again
- _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (error %i)\n", errno);
- lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
- p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
- }
- #endif
- if (large_only) return p;
- if (p == NULL) {
- mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations
- }
- }
- }
- }
- // regular allocation
- if (p == NULL) {
- *is_large = false;
- p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
- if (p != NULL) {
- #if defined(MADV_HUGEPAGE)
- // Many Linux systems don't allow MAP_HUGETLB but they support instead
- // transparent huge pages (THP). Generally, it is not required to call `madvise` with MADV_HUGE
- // though since properly aligned allocations will already use large pages if available
- // in that case -- in particular for our large regions (in `memory.c`).
- // However, some systems only allow THP if called with explicit `madvise`, so
- // when large OS pages are enabled for mimalloc, we call `madvise` anyways.
- if (allow_large && use_large_os_page(size, try_alignment)) {
- if (mi_madvise(p, size, MADV_HUGEPAGE) == 0) {
- *is_large = true; // possibly
- };
- }
- #elif defined(__sun)
- if (allow_large && use_large_os_page(size, try_alignment)) {
- struct memcntl_mha cmd = {0};
- cmd.mha_pagesize = large_os_page_size;
- cmd.mha_cmd = MHA_MAPSIZE_VA;
- if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
- *is_large = true;
- }
- }
- #endif
- }
- }
- if (p == NULL) {
- _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: %i, address: %p, large only: %d, allow large: %d)\n", size, errno, addr, large_only, allow_large);
- }
- return p;
+void _mi_os_free(void* p, size_t size, mi_stats_t* tld_stats) {
+ _mi_os_free_ex(p, size, true, tld_stats);
}
-#endif
/* -----------------------------------------------------------
@@ -711,7 +166,11 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, boo
if (!commit) allow_large = false;
if (try_alignment == 0) try_alignment = 1; // avoid 0 to ensure there will be no divide by zero when aligning
- void* p = NULL;
+ void* p = NULL;
+ int err = _mi_prim_alloc(size, try_alignment, commit, allow_large, is_large, &p);
+ if (err != 0) {
+ _mi_warning_message("unable to allocate OS memory (error: %d (0x%x), size: 0x%zx bytes, align: 0x%zx, commit: %d, allow large: %d)\n", err, err, size, try_alignment, commit, allow_large);
+ }
/*
if (commit && allow_large) {
p = _mi_os_try_alloc_from_huge_reserved(size, try_alignment);
@@ -722,18 +181,6 @@ static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, boo
}
*/
- #if defined(_WIN32)
- int flags = MEM_RESERVE;
- if (commit) { flags |= MEM_COMMIT; }
- p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
- #elif defined(MI_USE_SBRK) || defined(__wasi__)
- MI_UNUSED(allow_large);
- *is_large = false;
- p = mi_heap_grow(size, try_alignment);
- #else
- int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
- p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
- #endif
mi_stat_counter_increase(stats->mmap_calls, 1);
if (p != NULL) {
_mi_stat_increase(&stats->reserved, size);
@@ -760,39 +207,40 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
// if not aligned, free it, overallocate, and unmap around it
if (((uintptr_t)p % alignment != 0)) {
mi_os_mem_free(p, size, commit, stats);
- _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (%zu bytes, address: %p, alignment: %zu, commit: %d)\n", size, p, alignment, commit);
+ _mi_warning_message("unable to allocate aligned OS memory directly, fall back to over-allocation (size: 0x%zx bytes, address: %p, alignment: 0x%zx, commit: %d)\n", size, p, alignment, commit);
if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
const size_t over_size = size + alignment;
-#if _WIN32
- // over-allocate uncommitted (virtual) memory
- p = mi_os_mem_alloc(over_size, 0 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, stats);
- if (p == NULL) return NULL;
+ if (mi_os_mem_config.must_free_whole) { // win32 VirtualAlloc cannot free parts of an allocated block
+ // over-allocate uncommitted (virtual) memory
+ p = mi_os_mem_alloc(over_size, 0 /*alignment*/, false /* commit? */, false /* allow_large */, is_large, stats);
+ if (p == NULL) return NULL;
- // set p to the aligned part in the full region
- // note: this is dangerous on Windows as VirtualFree needs the actual region pointer
- // but in mi_os_mem_free we handle this (hopefully exceptional) situation.
- p = mi_align_up_ptr(p, alignment);
+ // set p to the aligned part in the full region
+ // note: this is dangerous on Windows as VirtualFree needs the actual region pointer
+ // but in mi_os_mem_free we handle this (hopefully exceptional) situation.
+ p = mi_align_up_ptr(p, alignment);
- // explicitly commit only the aligned part
- if (commit) {
- _mi_os_commit(p, size, NULL, stats);
+ // explicitly commit only the aligned part
+ if (commit) {
+ _mi_os_commit(p, size, NULL, stats);
+ }
+ }
+ else { // mmap can free inside an allocation
+ // overallocate...
+ p = mi_os_mem_alloc(over_size, 1, commit, false, is_large, stats);
+ if (p == NULL) return NULL;
+ // and selectively unmap parts around the over-allocated area. (noop on sbrk)
+ void* aligned_p = mi_align_up_ptr(p, alignment);
+ size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
+ size_t mid_size = _mi_align_up(size, _mi_os_page_size());
+ size_t post_size = over_size - pre_size - mid_size;
+ mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
+ if (pre_size > 0) mi_os_mem_free(p, pre_size, commit, stats);
+ if (post_size > 0) mi_os_mem_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats);
+ // we can return the aligned pointer on `mmap` (and sbrk) systems
+ p = aligned_p;
}
-#else
- // overallocate...
- p = mi_os_mem_alloc(over_size, 1, commit, false, is_large, stats);
- if (p == NULL) return NULL;
- // and selectively unmap parts around the over-allocated area. (noop on sbrk)
- void* aligned_p = mi_align_up_ptr(p, alignment);
- size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
- size_t mid_size = _mi_align_up(size, _mi_os_page_size());
- size_t post_size = over_size - pre_size - mid_size;
- mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
- if (pre_size > 0) mi_os_mem_free(p, pre_size, commit, stats);
- if (post_size > 0) mi_os_mem_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats);
- // we can return the aligned pointer on `mmap` (and sbrk) systems
- p = aligned_p;
-#endif
}
mi_assert_internal(p == NULL || (p != NULL && ((uintptr_t)p % alignment) == 0));
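+ // A standalone sketch (editorial example, not part of mimalloc; the helper name is hypothetical)
+ // of the over-allocate-and-trim trick used above on mmap systems: map size+alignment bytes,
+ // round the pointer up, then unmap the unused slack before and after the aligned block.
+ //
+ //   #include <stddef.h>
+ //   #include <stdint.h>
+ //   #include <sys/mman.h>
+ //
+ //   // size is a page multiple; alignment is a power of two and a page multiple
+ //   void* example_aligned_mmap(size_t size, size_t alignment) {
+ //     size_t over = size + alignment;
+ //     uint8_t* p = mmap(NULL, over, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ //     if (p == MAP_FAILED) return NULL;
+ //     uint8_t* aligned = (uint8_t*)(((uintptr_t)p + alignment - 1) & ~(uintptr_t)(alignment - 1));
+ //     size_t pre  = (size_t)(aligned - p);
+ //     size_t post = over - pre - size;
+ //     if (pre  > 0) munmap(p, pre);                // trim the front slack
+ //     if (post > 0) munmap(aligned + size, post);  // trim the back slack
+ //     return aligned;                              // exactly `size` bytes remain mapped
+ //   }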
@@ -801,7 +249,7 @@ static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit,
/* -----------------------------------------------------------
- OS API: alloc, free, alloc_aligned
+ OS API: alloc and alloc_aligned
----------------------------------------------------------- */
void* _mi_os_alloc(size_t size, mi_stats_t* tld_stats) {
@@ -813,21 +261,9 @@ void* _mi_os_alloc(size_t size, mi_stats_t* tld_stats) {
return mi_os_mem_alloc(size, 0, true, false, &is_large, stats);
}
-void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* tld_stats) {
- MI_UNUSED(tld_stats);
- mi_stats_t* stats = &_mi_stats_main;
- if (size == 0 || p == NULL) return;
- size = _mi_os_good_alloc_size(size);
- mi_os_mem_free(p, size, was_committed, stats);
-}
-
-void _mi_os_free(void* p, size_t size, mi_stats_t* stats) {
- _mi_os_free_ex(p, size, true, stats);
-}
-
void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_stats_t* tld_stats)
{
- MI_UNUSED(&mi_os_get_aligned_hint); // suppress unused warnings
+ MI_UNUSED(&_mi_os_get_aligned_hint); // suppress unused warnings
MI_UNUSED(tld_stats);
if (size == 0) return NULL;
size = _mi_os_good_alloc_size(size);
@@ -880,11 +316,11 @@ void _mi_os_free_aligned(void* p, size_t size, size_t alignment, size_t align_of
_mi_os_free_ex(start, size + extra, was_committed, tld_stats);
}
+
/* -----------------------------------------------------------
OS memory API: reset, commit, decommit, protect, unprotect.
----------------------------------------------------------- */
-
// OS page align within a given area, either conservative (pages inside the area only),
// or not (straddling pages outside the area is possible)
static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) {
@@ -909,18 +345,6 @@ static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t*
return mi_os_page_align_areax(true, addr, size, newsize);
}
-static void mi_mprotect_hint(int err) {
-#if defined(MI_OS_USE_MMAP) && (MI_SECURE>=2) // guard page around every mimalloc page
- if (err == ENOMEM) {
- _mi_warning_message("the previous warning may have been caused by a low memory map limit.\n"
- " On Linux this is controlled by the vm.max_map_count. For example:\n"
- " > sudo sysctl -w vm.max_map_count=262144\n");
- }
-#else
- MI_UNUSED(err);
-#endif
-}
-
// Commit/Decommit memory.
// Usually commit is aligned liberal, while decommit is aligned conservative.
// (but not for the reset version where we want commit to be conservative as well)
@@ -930,7 +354,6 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
size_t csize;
void* start = mi_os_page_align_areax(conservative, addr, size, &csize);
if (csize == 0) return true; // || _mi_os_is_huge_reserved(addr))
- int err = 0;
if (commit) {
_mi_stat_increase(&stats->committed, size); // use size for precise commit vs. decommit
_mi_stat_counter_increase(&stats->commit_calls, 1);
@@ -939,56 +362,9 @@ static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservativ
_mi_stat_decrease(&stats->committed, size);
}
- #if defined(_WIN32)
- if (commit) {
- // *is_zero = true; // note: if the memory was already committed, the call succeeds but the memory is not zero'd
- void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE);
- err = (p == start ? 0 : GetLastError());
- }
- else {
- BOOL ok = VirtualFree(start, csize, MEM_DECOMMIT);
- err = (ok ? 0 : GetLastError());
- }
- #elif defined(__wasi__)
- // WebAssembly guests can't control memory protection
- #elif 0 && defined(MAP_FIXED) && !defined(__APPLE__)
- // Linux: disabled for now as mmap fixed seems much more expensive than MADV_DONTNEED (and splits VMA's?)
- if (commit) {
- // commit: just change the protection
- err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
- if (err != 0) { err = errno; }
- }
- else {
- // decommit: use mmap with MAP_FIXED to discard the existing memory (and reduce rss)
- const int fd = mi_unix_mmap_fd();
- void* p = mmap(start, csize, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
- if (p != start) { err = errno; }
- }
- #else
- // Linux, macOSX and others.
- if (commit) {
- // commit: ensure we can access the area
- err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
- if (err != 0) { err = errno; }
- }
- else {
- #if defined(MADV_DONTNEED) && MI_DEBUG == 0 && MI_SECURE == 0
- // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
- // (on the other hand, MADV_FREE would be good enough.. it is just not reflected in the stats :-( )
- err = madvise(start, csize, MADV_DONTNEED);
- #else
- // decommit: just disable access (also used in debug and secure mode to trap on illegal access)
- err = mprotect(start, csize, PROT_NONE);
- if (err != 0) { err = errno; }
- #endif
- //#if defined(MADV_FREE_REUSE)
- // while ((err = mi_madvise(start, csize, MADV_FREE_REUSE)) != 0 && errno == EAGAIN) { errno = 0; }
- //#endif
- }
- #endif
+ int err = _mi_prim_commit(start, csize, commit);
if (err != 0) {
- _mi_warning_message("%s error: start: %p, csize: 0x%zx, err: %i\n", commit ? "commit" : "decommit", start, csize, err);
- mi_mprotect_hint(err);
+ _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", commit ? "commit" : "decommit", err, err, start, csize);
}
mi_assert_internal(err == 0);
return (err == 0);
@@ -1027,45 +403,17 @@ static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats)
else _mi_stat_decrease(&stats->reset, csize);
if (!reset) return true; // nothing to do on unreset!
- #if (MI_DEBUG>1) && !MI_TRACK_ENABLED
+ #if (MI_DEBUG>1) && !MI_TRACK_ENABLED // && !MI_TSAN
if (MI_SECURE==0) {
memset(start, 0, csize); // pretend it is eagerly reset
}
#endif
-#if defined(_WIN32)
- // Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
- void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
- mi_assert_internal(p == start);
- #if 1
- if (p == start && start != NULL) {
- VirtualUnlock(start,csize); // VirtualUnlock after MEM_RESET removes the memory from the working set
- }
- #endif
- if (p != start) return false;
-#else
-#if defined(MADV_FREE)
- static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
- int oadvice = (int)mi_atomic_load_relaxed(&advice);
- int err;
- while ((err = mi_madvise(start, csize, oadvice)) != 0 && errno == EAGAIN) { errno = 0; };
- if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
- // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
- mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED);
- err = mi_madvise(start, csize, MADV_DONTNEED);
- }
-#elif defined(__wasi__)
- int err = 0;
-#else
- int err = mi_madvise(start, csize, MADV_DONTNEED);
-#endif
+ int err = _mi_prim_reset(start, csize);
if (err != 0) {
- _mi_warning_message("madvise reset error: start: %p, csize: 0x%zx, errno: %i\n", start, csize, errno);
+ _mi_warning_message("cannot reset OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", err, err, start, csize);
}
- //mi_assert(err == 0);
- if (err != 0) return false;
-#endif
- return true;
+ return (err == 0);
}
// Signal to the OS that the address range is no longer in use
@@ -1098,20 +446,9 @@ static bool mi_os_protectx(void* addr, size_t size, bool protect) {
_mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
}
*/
- int err = 0;
-#ifdef _WIN32
- DWORD oldprotect = 0;
- BOOL ok = VirtualProtect(start, csize, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect);
- err = (ok ? 0 : GetLastError());
-#elif defined(__wasi__)
- err = 0;
-#else
- err = mprotect(start, csize, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
- if (err != 0) { err = errno; }
-#endif
+ int err = _mi_prim_protect(start,csize,protect);
if (err != 0) {
- _mi_warning_message("mprotect error: start: %p, csize: 0x%zx, err: %i\n", start, csize, err);
- mi_mprotect_hint(err);
+ _mi_warning_message("cannot %s OS memory (error: %d (0x%x), address: %p, size: 0x%zx bytes)\n", (protect ? "protect" : "unprotect"), err, err, start, csize);
}
return (err == 0);
}
@@ -1126,115 +463,12 @@ bool _mi_os_unprotect(void* addr, size_t size) {
-bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
- // page align conservatively within the range
- mi_assert_internal(oldsize > newsize && p != NULL);
- if (oldsize < newsize || p == NULL) return false;
- if (oldsize == newsize) return true;
-
- // oldsize and newsize should be page aligned or we cannot shrink precisely
- void* addr = (uint8_t*)p + newsize;
- size_t size = 0;
- void* start = mi_os_page_align_area_conservative(addr, oldsize - newsize, &size);
- if (size == 0 || start != addr) return false;
-
-#ifdef _WIN32
- // we cannot shrink on windows, but we can decommit
- return _mi_os_decommit(start, size, stats);
-#else
- return mi_os_mem_free(start, size, true, stats);
-#endif
-}
-
-
/* ----------------------------------------------------------------------------
Support for allocating huge OS pages (1Gib) that are reserved up-front
and possibly associated with a specific NUMA node. (use `numa_node>=0`)
-----------------------------------------------------------------------------*/
#define MI_HUGE_OS_PAGE_SIZE (MI_GiB)
-#if defined(_WIN32) && (MI_INTPTR_SIZE >= 8)
-static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node)
-{
- mi_assert_internal(size%MI_GiB == 0);
- mi_assert_internal(addr != NULL);
- const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE;
-
- mi_win_enable_large_os_pages();
-
- MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} };
- // on modern Windows try use NtAllocateVirtualMemoryEx for 1GiB huge pages
- static bool mi_huge_pages_available = true;
- if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) {
- params[0].Type.Type = MiMemExtendedParameterAttributeFlags;
- params[0].Arg.ULong64 = MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
- ULONG param_count = 1;
- if (numa_node >= 0) {
- param_count++;
- params[1].Type.Type = MiMemExtendedParameterNumaNode;
- params[1].Arg.ULong = (unsigned)numa_node;
- }
- SIZE_T psize = size;
- void* base = addr;
- NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count);
- if (err == 0 && base != NULL) {
- return base;
- }
- else {
- // fall back to regular large pages
- mi_huge_pages_available = false; // don't try further huge pages
- _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err);
- }
- }
- // on modern Windows try use VirtualAlloc2 for numa aware large OS page allocation
- if (pVirtualAlloc2 != NULL && numa_node >= 0) {
- params[0].Type.Type = MiMemExtendedParameterNumaNode;
- params[0].Arg.ULong = (unsigned)numa_node;
- return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, params, 1);
- }
-
- // otherwise use regular virtual alloc on older windows
- return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
-}
-
-#elif defined(MI_OS_USE_MMAP) && (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__)
-#include <sys/syscall.h>
-#ifndef MPOL_PREFERRED
-#define MPOL_PREFERRED 1
-#endif
-#if defined(SYS_mbind)
-static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
- return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags);
-}
-#else
-static long mi_os_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
- MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags);
- return 0;
-}
-#endif
-static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
- mi_assert_internal(size%MI_GiB == 0);
- bool is_large = true;
- void* p = mi_unix_mmap(addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large);
- if (p == NULL) return NULL;
- if (numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes
- unsigned long numa_mask = (1UL << numa_node);
- // TODO: does `mbind` work correctly for huge OS pages? should we
- // use `set_mempolicy` before calling mmap instead?
- // see: <https://lkml.org/lkml/2017/2/9/875>
- long err = mi_os_mbind(p, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
- if (err != 0) {
- _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d: %s\n", numa_node, strerror(errno));
- }
- }
- return p;
-}
-#else
-static void* mi_os_alloc_huge_os_pagesx(void* addr, size_t size, int numa_node) {
- MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(numa_node);
- return NULL;
-}
-#endif
#if (MI_INTPTR_SIZE >= 8)
// To ensure proper alignment, use our own area for huge OS pages
@@ -1253,10 +487,10 @@ static uint8_t* mi_os_claim_huge_pages(size_t pages, size_t* total_size) {
if (start == 0) {
// Initialize the start address after the 32TiB area
start = ((uintptr_t)32 << 40); // 32TiB virtual start address
-#if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode
- uintptr_t r = _mi_heap_random_next(mi_get_default_heap());
+ #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode
+ uintptr_t r = _mi_heap_random_next(mi_prim_get_default_heap());
start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x0FFF)); // (randomly 12bits)*1GiB == between 0 to 4TiB
-#endif
+ #endif
}
end = start + size;
mi_assert_internal(end % MI_SEGMENT_SIZE == 0);
@@ -1285,23 +519,29 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
// We allocate one page at the time to be able to abort if it takes too long
// or to at least allocate as many as available on the system.
mi_msecs_t start_t = _mi_clock_start();
- size_t page;
- for (page = 0; page < pages; page++) {
+ size_t page = 0;
+ while (page < pages) {
// allocate a page
void* addr = start + (page * MI_HUGE_OS_PAGE_SIZE);
- void* p = mi_os_alloc_huge_os_pagesx(addr, MI_HUGE_OS_PAGE_SIZE, numa_node);
+ void* p = NULL;
+ int err = _mi_prim_alloc_huge_os_pages(addr, MI_HUGE_OS_PAGE_SIZE, numa_node, &p);
+ if (err != 0) {
+ _mi_warning_message("unable to allocate huge OS page (error: %d (0x%x), address: %p, size: %zx bytes)\n", err, err, addr, MI_HUGE_OS_PAGE_SIZE);
+ break;
+ }
// Did we succeed at a contiguous address?
if (p != addr) {
// no success, issue a warning and break
if (p != NULL) {
- _mi_warning_message("could not allocate contiguous huge page %zu at %p\n", page, addr);
+ _mi_warning_message("could not allocate contiguous huge OS page %zu at %p\n", page, addr);
_mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
}
break;
}
// success, record it
+ page++; // increase before timeout check (see issue #711)
_mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
_mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
@@ -1315,7 +555,7 @@ void* _mi_os_alloc_huge_os_pages(size_t pages, int numa_node, mi_msecs_t max_mse
}
}
if (elapsed > max_msecs) {
- _mi_warning_message("huge page allocation timed out\n");
+ _mi_warning_message("huge OS page allocation timed out (after allocating %zu page(s))\n", page);
break;
}
}
@@ -1341,113 +581,6 @@ void _mi_os_free_huge_pages(void* p, size_t size, mi_stats_t* stats) {
/* ----------------------------------------------------------------------------
Support NUMA aware allocation
-----------------------------------------------------------------------------*/
-#ifdef _WIN32
-static size_t mi_os_numa_nodex(void) {
- USHORT numa_node = 0;
- if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) {
- // Extended API is supported
- MI_PROCESSOR_NUMBER pnum;
- (*pGetCurrentProcessorNumberEx)(&pnum);
- USHORT nnode = 0;
- BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode);
- if (ok) { numa_node = nnode; }
- }
- else if (pGetNumaProcessorNode != NULL) {
- // Vista or earlier, use older API that is limited to 64 processors. Issue #277
- DWORD pnum = GetCurrentProcessorNumber();
- UCHAR nnode = 0;
- BOOL ok = pGetNumaProcessorNode((UCHAR)pnum, &nnode);
- if (ok) { numa_node = nnode; }
- }
- return numa_node;
-}
-
-static size_t mi_os_numa_node_countx(void) {
- ULONG numa_max = 0;
- GetNumaHighestNodeNumber(&numa_max);
- // find the highest node number that has actual processors assigned to it. Issue #282
- while(numa_max > 0) {
- if (pGetNumaNodeProcessorMaskEx != NULL) {
- // Extended API is supported
- GROUP_AFFINITY affinity;
- if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) {
- if (affinity.Mask != 0) break; // found the maximum non-empty node
- }
- }
- else {
- // Vista or earlier, use older API that is limited to 64 processors.
- ULONGLONG mask;
- if (GetNumaNodeProcessorMask((UCHAR)numa_max, &mask)) {
- if (mask != 0) break; // found the maximum non-empty node
- };
- }
- // max node was invalid or had no processor assigned, try again
- numa_max--;
- }
- return ((size_t)numa_max + 1);
-}
-#elif defined(__linux__)
-#include <sys/syscall.h> // getcpu
-#include <stdio.h> // access
-
-static size_t mi_os_numa_nodex(void) {
-#ifdef SYS_getcpu
- unsigned long node = 0;
- unsigned long ncpu = 0;
- long err = syscall(SYS_getcpu, &ncpu, &node, NULL);
- if (err != 0) return 0;
- return node;
-#else
- return 0;
-#endif
-}
-static size_t mi_os_numa_node_countx(void) {
- char buf[128];
- unsigned node = 0;
- for(node = 0; node < 256; node++) {
- // enumerate node entries -- todo: it there a more efficient way to do this? (but ensure there is no allocation)
- snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1);
- if (access(buf,R_OK) != 0) break;
- }
- return (node+1);
-}
-#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000
-static size_t mi_os_numa_nodex(void) {
- domainset_t dom;
- size_t node;
- int policy;
- if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul;
- for (node = 0; node < MAXMEMDOM; node++) {
- if (DOMAINSET_ISSET(node, &dom)) return node;
- }
- return 0ul;
-}
-static size_t mi_os_numa_node_countx(void) {
- size_t ndomains = 0;
- size_t len = sizeof(ndomains);
- if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) return 0ul;
- return ndomains;
-}
-#elif defined(__DragonFly__)
-static size_t mi_os_numa_nodex(void) {
- // TODO: DragonFly does not seem to provide any userland means to get this information.
- return 0ul;
-}
-static size_t mi_os_numa_node_countx(void) {
- size_t ncpus = 0, nvirtcoresperphys = 0;
- size_t len = sizeof(size_t);
- if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul;
- if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul;
- return nvirtcoresperphys * ncpus;
-}
-#else
-static size_t mi_os_numa_nodex(void) {
- return 0;
-}
-static size_t mi_os_numa_node_countx(void) {
- return 1;
-}
-#endif
_Atomic(size_t) _mi_numa_node_count; // = 0 // cache the node count
@@ -1459,7 +592,7 @@ size_t _mi_os_numa_node_count_get(void) {
count = (size_t)ncount;
}
else {
- count = mi_os_numa_node_countx(); // or detect dynamically
+ count = _mi_prim_numa_node_count(); // or detect dynamically
if (count == 0) count = 1;
}
mi_atomic_store_release(&_mi_numa_node_count, count); // save it
@@ -1473,7 +606,7 @@ int _mi_os_numa_node_get(mi_os_tld_t* tld) {
size_t numa_count = _mi_os_numa_node_count();
if (numa_count<=1) return 0; // optimize on single numa node systems: always node 0
// never more than the node count and >= 0
- size_t numa_node = mi_os_numa_nodex();
+ size_t numa_node = _mi_prim_numa_node();
if (numa_node >= numa_count) { numa_node = numa_node % numa_count; }
return (int)numa_node;
}
diff --git a/source/luametatex/source/libraries/mimalloc/src/page.c b/source/luametatex/source/libraries/mimalloc/src/page.c
index 4250ff358..d0da87a1f 100644
--- a/source/luametatex/source/libraries/mimalloc/src/page.c
+++ b/source/luametatex/source/libraries/mimalloc/src/page.c
@@ -12,8 +12,8 @@ terms of the MIT license. A copy of the license can be found in the file
----------------------------------------------------------- */
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
/* -----------------------------------------------------------
Definition of page queues for each block size
@@ -92,10 +92,12 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
}
#endif
+ #if !MI_TRACK_ENABLED && !MI_TSAN
mi_block_t* tfree = mi_page_thread_free(page);
mi_assert_internal(mi_page_list_is_valid(page, tfree));
//size_t tfree_count = mi_page_list_count(page, tfree);
//mi_assert_internal(tfree_count <= page->thread_freed + 1);
+ #endif
size_t free_count = mi_page_list_count(page, page->free) + mi_page_list_count(page, page->local_free);
mi_assert_internal(page->used + free_count == page->capacity);
@@ -103,6 +105,8 @@ static bool mi_page_is_valid_init(mi_page_t* page) {
return true;
}
+extern bool _mi_process_is_initialized; // has mi_process_init been called?
+
bool _mi_page_is_valid(mi_page_t* page) {
mi_assert_internal(mi_page_is_valid_init(page));
#if MI_SECURE
@@ -663,7 +667,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page_size / block_size < (1L<<16));
page->reserved = (uint16_t)(page_size / block_size);
mi_assert_internal(page->reserved > 0);
- #ifdef MI_ENCODE_FREELIST
+ #if (MI_PADDING || MI_ENCODE_FREELIST)
page->keys[0] = _mi_heap_random_next(heap);
page->keys[1] = _mi_heap_random_next(heap);
#endif
@@ -683,7 +687,7 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
mi_assert_internal(page->prev == NULL);
mi_assert_internal(page->retire_expire == 0);
mi_assert_internal(!mi_page_has_aligned(page));
- #if (MI_ENCODE_FREELIST)
+ #if (MI_PADDING || MI_ENCODE_FREELIST)
mi_assert_internal(page->keys[0] != 0);
mi_assert_internal(page->keys[1] != 0);
#endif
@@ -703,12 +707,16 @@ static void mi_page_init(mi_heap_t* heap, mi_page_t* page, size_t block_size, mi
static mi_page_t* mi_page_queue_find_free_ex(mi_heap_t* heap, mi_page_queue_t* pq, bool first_try)
{
// search through the pages in "next fit" order
+ #if MI_STAT
size_t count = 0;
+ #endif
mi_page_t* page = pq->first;
while (page != NULL)
{
mi_page_t* next = page->next; // remember next
+ #if MI_STAT
count++;
+ #endif
// 0. collect freed blocks by us and other threads
_mi_page_free_collect(page, false);
@@ -869,7 +877,9 @@ static mi_page_t* mi_find_page(mi_heap_t* heap, size_t size, size_t huge_alignme
}
else {
// otherwise find a page with free blocks in our size segregated queues
- mi_assert_internal(size >= MI_PADDING_SIZE);
+ #if MI_PADDING
+ mi_assert_internal(size >= MI_PADDING_SIZE);
+ #endif
return mi_find_free_page(heap, size);
}
}
@@ -884,8 +894,7 @@ void* _mi_malloc_generic(mi_heap_t* heap, size_t size, bool zero, size_t huge_al
// initialize if necessary
if mi_unlikely(!mi_heap_is_initialized(heap)) {
- mi_thread_init(); // calls `_mi_heap_init` in turn
- heap = mi_get_default_heap();
+ heap = mi_heap_get_default(); // calls mi_thread_init
if mi_unlikely(!mi_heap_is_initialized(heap)) { return NULL; }
}
mi_assert_internal(mi_heap_is_initialized(heap));
diff --git a/source/luametatex/source/libraries/mimalloc/src/alloc-override-osx.c b/source/luametatex/source/libraries/mimalloc/src/prim/osx/alloc-override-zone.c
index a2819a8bf..80bcfa939 100644
--- a/source/luametatex/source/libraries/mimalloc/src/alloc-override-osx.c
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/osx/alloc-override-zone.c
@@ -6,7 +6,7 @@ terms of the MIT license. A copy of the license can be found in the file
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
#if defined(MI_MALLOC_OVERRIDE)
@@ -420,7 +420,7 @@ __attribute__((constructor(0)))
#else
__attribute__((constructor)) // seems not supported by g++-11 on the M1
#endif
-static void _mi_macos_override_malloc() {
+static void _mi_macos_override_malloc(void) {
malloc_zone_t* purgeable_zone = NULL;
#if defined(MAC_OS_X_VERSION_10_6) && (MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6)
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/osx/prim.c b/source/luametatex/source/libraries/mimalloc/src/prim/osx/prim.c
new file mode 100644
index 000000000..8a2f4e8aa
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/osx/prim.c
@@ -0,0 +1,9 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// We use the unix/prim.c with the mmap API on macOSX
+#include "../unix/prim.c"
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/prim.c b/source/luametatex/source/libraries/mimalloc/src/prim/prim.c
new file mode 100644
index 000000000..9a597d8eb
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/prim.c
@@ -0,0 +1,24 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// Select the implementation of the primitives
+// depending on the OS.
+
+#if defined(_WIN32)
+#include "windows/prim.c" // VirtualAlloc (Windows)
+
+#elif defined(__APPLE__)
+#include "osx/prim.c" // macOSX (actually defers to mmap in unix/prim.c)
+
+#elif defined(__wasi__)
+#define MI_USE_SBRK
+#include "wasi/prim.c" // memory-grow or sbrk (Wasm)
+
+#else
+#include "unix/prim.c" // mmap() (Linux, macOSX, BSD, Illumnos, Haiku, DragonFly, etc.)
+
+#endif
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/readme.md b/source/luametatex/source/libraries/mimalloc/src/prim/readme.md
new file mode 100644
index 000000000..380dd3a71
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/readme.md
@@ -0,0 +1,9 @@
+## Portability Primitives
+
+This is the portability layer where all primitives needed from the OS are defined.
+
+- `include/mimalloc/prim.h`: primitive portability API definition.
+- `prim.c`: Selects one of `unix/prim.c`, `wasi/prim.c`, or `windows/prim.c` depending on the host platform
+ (and on macOS, `osx/prim.c` defers to `unix/prim.c`).
+
+Note: this is still work in progress; there may be places in the sources that still depend on OS ifdefs.
\ No newline at end of file
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/unix/prim.c b/source/luametatex/source/libraries/mimalloc/src/prim/unix/prim.c
new file mode 100644
index 000000000..8d9c7a723
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/unix/prim.c
@@ -0,0 +1,838 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// This file is included in `src/prim/prim.c`
+
+#ifndef _DEFAULT_SOURCE
+#define _DEFAULT_SOURCE // ensure mmap flags and syscall are defined
+#endif
+
+#if defined(__sun)
+// illumos provides the new mman.h api when any of these are defined;
+// otherwise it exposes the old api based on caddr_t, which predates the void-pointer one.
+// stock solaris provides only the former, so we chose to discard those
+// flags only here rather than project wide, though.
+#undef _XOPEN_SOURCE
+#undef _POSIX_C_SOURCE
+#endif
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
+
+#include <sys/mman.h> // mmap
+#include <unistd.h> // sysconf
+
+#if defined(__linux__)
+ #include <features.h>
+ #include <fcntl.h>
+ #if defined(__GLIBC__)
+ #include <linux/mman.h> // linux mmap flags
+ #else
+ #include <sys/mman.h>
+ #endif
+#elif defined(__APPLE__)
+ #include <TargetConditionals.h>
+ #if !TARGET_IOS_IPHONE && !TARGET_IOS_SIMULATOR
+ #include <mach/vm_statistics.h>
+ #endif
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
+ #include <sys/param.h>
+ #if __FreeBSD_version >= 1200000
+ #include <sys/cpuset.h>
+ #include <sys/domainset.h>
+ #endif
+ #include <sys/sysctl.h>
+#endif
+
+#if !defined(__HAIKU__) && !defined(__APPLE__) && !defined(__CYGWIN__)
+ #define MI_HAS_SYSCALL_H
+ #include <sys/syscall.h>
+#endif
+
+//------------------------------------------------------------------------------------
+// Use syscalls for some primitives to allow for libraries that override open/read/close etc.
+// and do allocation themselves; using syscalls prevents recursion when mimalloc is
+// still initializing (issue #713)
+//------------------------------------------------------------------------------------
+
+#if defined(MI_HAS_SYSCALL_H) && defined(SYS_open) && defined(SYS_close) && defined(SYS_read) && defined(SYS_access)
+
+static int mi_prim_open(const char* fpath, int open_flags) {
+ return syscall(SYS_open,fpath,open_flags,0);
+}
+static ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) {
+ return syscall(SYS_read,fd,buf,bufsize);
+}
+static int mi_prim_close(int fd) {
+ return syscall(SYS_close,fd);
+}
+static int mi_prim_access(const char *fpath, int mode) {
+ return syscall(SYS_access,fpath,mode);
+}
+
+#elif !defined(__APPLE__) // avoid unused warnings
+
+static int mi_prim_open(const char* fpath, int open_flags) {
+ return open(fpath,open_flags,0);
+}
+static mi_ssize_t mi_prim_read(int fd, void* buf, size_t bufsize) {
+ return read(fd,buf,bufsize);
+}
+static int mi_prim_close(int fd) {
+ return close(fd);
+}
+static int mi_prim_access(const char *fpath, int mode) {
+ return access(fpath,mode);
+}
+
+#endif
+
+
+
+//---------------------------------------------
+// init
+//---------------------------------------------
+
+static bool unix_detect_overcommit(void) {
+ bool os_overcommit = true;
+#if defined(__linux__)
+ int fd = mi_prim_open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+ if (fd >= 0) {
+ char buf[32];
+ ssize_t nread = mi_prim_read(fd, &buf, sizeof(buf));
+ mi_prim_close(fd);
+ // <https://www.kernel.org/doc/Documentation/vm/overcommit-accounting>
+ // 0: heuristic overcommit, 1: always overcommit, 2: never overcommit (ignore NORESERVE)
+ if (nread >= 1) {
+ os_overcommit = (buf[0] == '0' || buf[0] == '1');
+ }
+ }
+#elif defined(__FreeBSD__)
+ int val = 0;
+ size_t olen = sizeof(val);
+ if (sysctlbyname("vm.overcommit", &val, &olen, NULL, 0) == 0) {
+ os_overcommit = (val != 0);
+ }
+#else
+ // default: overcommit is true
+#endif
+ return os_overcommit;
+}
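+
+// A standalone sketch (editorial example, not part of mimalloc; the program is hypothetical)
+// of the same probe: per the kernel documentation cited above, 0 means heuristic overcommit,
+// 1 always overcommit, and 2 never overcommit (NORESERVE is then ignored).
+//
+//   #include <fcntl.h>
+//   #include <stdio.h>
+//   #include <unistd.h>
+//
+//   int main(void) {
+//     char c = '0';  // default: assume overcommit, as above
+//     int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);
+//     if (fd >= 0) {
+//       if (read(fd, &c, 1) != 1) { c = '0'; }
+//       close(fd);
+//     }
+//     printf("overcommit_memory=%c -> %s\n", c,
+//            (c == '0' || c == '1') ? "overcommit (use MAP_NORESERVE)" : "strict accounting");
+//     return 0;
+//   }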
+
+void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
+ long psize = sysconf(_SC_PAGESIZE);
+ if (psize > 0) {
+ config->page_size = (size_t)psize;
+ config->alloc_granularity = (size_t)psize;
+ }
+ config->large_page_size = 2*MI_MiB; // TODO: can we query the OS for this?
+ config->has_overcommit = unix_detect_overcommit();
+ config->must_free_whole = false; // mmap can free in parts
+}
+
+
+//---------------------------------------------
+// free
+//---------------------------------------------
+
+int _mi_prim_free(void* addr, size_t size ) {
+ bool err = (munmap(addr, size) == -1);
+ return (err ? errno : 0);
+}
+
+
+//---------------------------------------------
+// mmap
+//---------------------------------------------
+
+static int unix_madvise(void* addr, size_t size, int advice) {
+ #if defined(__sun)
+ return madvise((caddr_t)addr, size, advice); // Solaris needs cast (issue #520)
+ #else
+ return madvise(addr, size, advice);
+ #endif
+}
+
+static void* unix_mmap_prim(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
+ MI_UNUSED(try_alignment);
+ void* p = NULL;
+ #if defined(MAP_ALIGNED) // BSD
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
+ size_t n = mi_bsr(try_alignment);
+ if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
+ p = mmap(addr, size, protect_flags, flags | MAP_ALIGNED(n), fd, 0);
+ if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+ int err = errno;
+ _mi_warning_message("unable to directly request aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
+ }
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ }
+ #elif defined(MAP_ALIGN) // Solaris
+ if (addr == NULL && try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0) {
+ p = mmap((void*)try_alignment, size, protect_flags, flags | MAP_ALIGN, fd, 0); // addr parameter is the required alignment
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ #endif
+ #if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
+ // on 64-bit systems, use the virtual address area after 2TiB for 4MiB aligned allocations
+ if (addr == NULL) {
+ void* hint = _mi_os_get_aligned_hint(try_alignment, size);
+ if (hint != NULL) {
+ p = mmap(hint, size, protect_flags, flags, fd, 0);
+ if (p==MAP_FAILED || !_mi_is_aligned(p,try_alignment)) {
+ int err = errno;
+ _mi_warning_message("unable to directly request hinted aligned OS memory (error: %d (0x%x), size: 0x%zx bytes, alignment: 0x%zx, hint address: %p)\n", err, err, size, try_alignment, hint);
+ }
+ if (p!=MAP_FAILED) return p;
+ // fall back to regular mmap
+ }
+ }
+ #endif
+ // regular mmap
+ p = mmap(addr, size, protect_flags, flags, fd, 0);
+ if (p!=MAP_FAILED) return p;
+ // failed to allocate
+ return NULL;
+}
+
+static void* unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
+ void* p = NULL;
+ #if !defined(MAP_ANONYMOUS)
+ #define MAP_ANONYMOUS MAP_ANON
+ #endif
+ #if !defined(MAP_NORESERVE)
+ #define MAP_NORESERVE 0
+ #endif
+ int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+ int fd = -1;
+ if (_mi_os_has_overcommit()) {
+ flags |= MAP_NORESERVE;
+ }
+ #if defined(PROT_MAX)
+ protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
+ #endif
+ #if defined(VM_MAKE_TAG)
+ // macOS: track anonymous pages with a specific ID. (All IDs up to 98 are taken officially, but the LLVM sanitizers took 99.)
+ int os_tag = (int)mi_option_get(mi_option_os_tag);
+ if (os_tag < 100 || os_tag > 255) { os_tag = 100; }
+ fd = VM_MAKE_TAG(os_tag);
+ #endif
+ // huge page allocation
+ if ((large_only || _mi_os_use_large_page(size, try_alignment)) && allow_large) {
+ static _Atomic(size_t) large_page_try_ok; // = 0;
+ size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
+ if (!large_only && try_ok > 0) {
+ // If the OS is not configured for large OS pages, or the user does not have
+ // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
+ // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times
+ // to avoid too many failing calls to mmap.
+ mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
+ }
+ else {
+ int lflags = flags & ~MAP_NORESERVE; // using NORESERVE on huge pages seems to fail on Linux
+ int lfd = fd;
+ #ifdef MAP_ALIGNED_SUPER
+ lflags |= MAP_ALIGNED_SUPER;
+ #endif
+ #ifdef MAP_HUGETLB
+ lflags |= MAP_HUGETLB;
+ #endif
+ #ifdef MAP_HUGE_1GB
+ static bool mi_huge_pages_available = true;
+ if ((size % MI_GiB) == 0 && mi_huge_pages_available) {
+ lflags |= MAP_HUGE_1GB;
+ }
+ else
+ #endif
+ {
+ #ifdef MAP_HUGE_2MB
+ lflags |= MAP_HUGE_2MB;
+ #endif
+ }
+ #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
+ lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
+ #endif
+ if (large_only || lflags != flags) {
+ // try large OS page allocation
+ *is_large = true;
+ p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
+ #ifdef MAP_HUGE_1GB
+ if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) {
+ mi_huge_pages_available = false; // don't try huge 1GiB pages again
+ _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (errno: %i)\n", errno);
+ lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
+ p = unix_mmap_prim(addr, size, try_alignment, protect_flags, lflags, lfd);
+ }
+ #endif
+ if (large_only) return p;
+ if (p == NULL) {
+ mi_atomic_store_release(&large_page_try_ok, (size_t)8); // on error, don't try again for the next N allocations
+ }
+ }
+ }
+ }
+ // regular allocation
+ if (p == NULL) {
+ *is_large = false;
+ p = unix_mmap_prim(addr, size, try_alignment, protect_flags, flags, fd);
+ if (p != NULL) {
+ #if defined(MADV_HUGEPAGE)
+ // Many Linux systems don't allow MAP_HUGETLB but instead support
+ // transparent huge pages (THP). Generally, calling `madvise` with MADV_HUGEPAGE is
+ // not required since properly aligned allocations will already use large pages
+ // if available -- in particular for our large regions (in `memory.c`).
+ // However, some systems only allow THP after an explicit `madvise`, so
+ // when large OS pages are enabled for mimalloc, we call `madvise` anyway.
+ if (allow_large && _mi_os_use_large_page(size, try_alignment)) {
+ if (unix_madvise(p, size, MADV_HUGEPAGE) == 0) {
+ *is_large = true; // possibly
+ };
+ }
+ #elif defined(__sun)
+ if (allow_large && _mi_os_use_large_page(size, try_alignment)) {
+ struct memcntl_mha cmd = {0};
+ cmd.mha_pagesize = large_os_page_size;
+ cmd.mha_cmd = MHA_MAPSIZE_VA;
+ if (memcntl((caddr_t)p, size, MC_HAT_ADVISE, (caddr_t)&cmd, 0, 0) == 0) {
+ *is_large = true;
+ }
+ }
+ #endif
+ }
+ }
+ return p;
+}
+
+// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, void** addr) {
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(commit || !allow_large);
+ mi_assert_internal(try_alignment > 0);
+
+ int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
+ *addr = unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
+ return (*addr != NULL ? 0 : errno);
+}
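+
+// A usage sketch (editorial example, not part of mimalloc; the helper name is hypothetical)
+// of how the generic OS layer drives this primitive pair: request committed memory, check
+// the error code, and release it again with _mi_prim_free. The size must be a multiple of
+// the OS page size, as asserted above.
+//
+//   static void example_roundtrip(void) {
+//     bool is_large = false;
+//     void* p = NULL;
+//     int err = _mi_prim_alloc(64 * 1024, 4096, /*commit*/ true, /*allow_large*/ false, &is_large, &p);
+//     if (err == 0 && p != NULL) {
+//       // ... use the 64 KiB block ...
+//       _mi_prim_free(p, 64 * 1024);
+//     }
+//   }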
+
+
+//---------------------------------------------
+// Commit/Reset
+//---------------------------------------------
+
+static void unix_mprotect_hint(int err) {
+ #if defined(__linux__) && (MI_SECURE>=2) // guard page around every mimalloc page
+ if (err == ENOMEM) {
+ _mi_warning_message("The next warning may be caused by a low memory map limit.\n"
+ " On Linux this is controlled by the vm.max_map_count -- maybe increase it?\n"
+ " For example: sudo sysctl -w vm.max_map_count=262144\n");
+ }
+ #else
+ MI_UNUSED(err);
+ #endif
+}
+
+
+int _mi_prim_commit(void* start, size_t size, bool commit) {
+ /*
+ #if 0 && defined(MAP_FIXED) && !defined(__APPLE__)
+ // Linux: disabled for now as mmap fixed seems much more expensive than MADV_DONTNEED (and splits VMA's?)
+ if (commit) {
+ // commit: just change the protection
+ err = mprotect(start, csize, (PROT_READ | PROT_WRITE));
+ if (err != 0) { err = errno; }
+ }
+ else {
+ // decommit: use mmap with MAP_FIXED to discard the existing memory (and reduce rss)
+ const int fd = mi_unix_mmap_fd();
+ void* p = mmap(start, csize, PROT_NONE, (MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE), fd, 0);
+ if (p != start) { err = errno; }
+ }
+ #else
+ */
+ int err = 0;
+ if (commit) {
+ // commit: ensure we can access the area
+ err = mprotect(start, size, (PROT_READ | PROT_WRITE));
+ if (err != 0) { err = errno; }
+ }
+ else {
+ #if defined(MADV_DONTNEED) && MI_DEBUG == 0 && MI_SECURE == 0
+ // decommit: use MADV_DONTNEED as it decreases rss immediately (unlike MADV_FREE)
+ // (on the other hand, MADV_FREE would be good enough.. it is just not reflected in the stats :-( )
+ err = unix_madvise(start, size, MADV_DONTNEED);
+ #else
+ // decommit: just disable access (also used in debug and secure mode to trap on illegal access)
+ err = mprotect(start, size, PROT_NONE);
+ if (err != 0) { err = errno; }
+ #endif
+ }
+ unix_mprotect_hint(err);
+ return err;
+}
+
+int _mi_prim_reset(void* start, size_t size) {
+ #if defined(MADV_FREE)
+ static _Atomic(size_t) advice = MI_ATOMIC_VAR_INIT(MADV_FREE);
+ int oadvice = (int)mi_atomic_load_relaxed(&advice);
+ int err;
+ while ((err = unix_madvise(start, size, oadvice)) != 0 && errno == EAGAIN) { errno = 0; };
+ if (err != 0 && errno == EINVAL && oadvice == MADV_FREE) {
+ // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
+ mi_atomic_store_release(&advice, (size_t)MADV_DONTNEED);
+ err = unix_madvise(start, size, MADV_DONTNEED);
+ }
+ #else
+ int err = unix_madvise(start, size, MADV_DONTNEED);
+ #endif
+ return err;
+}
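+
+// A standalone sketch (editorial example, not part of mimalloc; the helper name is
+// hypothetical) of what "reset" amounts to on Linux: after madvise(MADV_DONTNEED) on a
+// private anonymous mapping the pages leave the resident set and read back as zero.
+//
+//   #include <assert.h>
+//   #include <string.h>
+//   #include <sys/mman.h>
+//
+//   static void example_reset(void) {
+//     size_t len = 1 << 20;
+//     unsigned char* p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+//     if (p == MAP_FAILED) return;
+//     memset(p, 0xAB, len);              // fault the pages in
+//     madvise(p, len, MADV_DONTNEED);    // "reset": drop the backing pages
+//     assert(p[0] == 0);                 // private anonymous pages are zero on next touch
+//     munmap(p, len);
+//   }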
+
+int _mi_prim_protect(void* start, size_t size, bool protect) {
+ int err = mprotect(start, size, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
+ if (err != 0) { err = errno; }
+ unix_mprotect_hint(err);
+ return err;
+}
+
+
+
+//---------------------------------------------
+// Huge page allocation
+//---------------------------------------------
+
+#if (MI_INTPTR_SIZE >= 8) && !defined(__HAIKU__) && !defined(__CYGWIN__)
+
+#ifndef MPOL_PREFERRED
+#define MPOL_PREFERRED 1
+#endif
+
+#if defined(MI_HAS_SYSCALL_H) && defined(SYS_mbind)
+static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
+ return syscall(SYS_mbind, start, len, mode, nmask, maxnode, flags);
+}
+#else
+static long mi_prim_mbind(void* start, unsigned long len, unsigned long mode, const unsigned long* nmask, unsigned long maxnode, unsigned flags) {
+ MI_UNUSED(start); MI_UNUSED(len); MI_UNUSED(mode); MI_UNUSED(nmask); MI_UNUSED(maxnode); MI_UNUSED(flags);
+ return 0;
+}
+#endif
+
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, void** addr) {
+ bool is_large = true;
+ *addr = unix_mmap(hint_addr, size, MI_SEGMENT_SIZE, PROT_READ | PROT_WRITE, true, true, &is_large);
+ if (*addr != NULL && numa_node >= 0 && numa_node < 8*MI_INTPTR_SIZE) { // at most 64 nodes
+ unsigned long numa_mask = (1UL << numa_node);
+ // TODO: does `mbind` work correctly for huge OS pages? should we
+ // use `set_mempolicy` before calling mmap instead?
+ // see: <https://lkml.org/lkml/2017/2/9/875>
+ long err = mi_prim_mbind(*addr, size, MPOL_PREFERRED, &numa_mask, 8*MI_INTPTR_SIZE, 0);
+ if (err != 0) {
+ err = errno;
+ _mi_warning_message("failed to bind huge (1GiB) pages to numa node %d (error: %d (0x%x))\n", numa_node, err, err);
+ }
+ }
+ return (*addr != NULL ? 0 : errno);
+}
+
+#else
+
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, void** addr) {
+ MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node);
+ *addr = NULL;
+ return ENOMEM;
+}
+
+#endif
+
+//---------------------------------------------
+// NUMA nodes
+//---------------------------------------------
+
+#if defined(__linux__)
+
+#include <stdio.h> // snprintf
+
+size_t _mi_prim_numa_node(void) {
+ #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getcpu)
+ unsigned long node = 0;
+ unsigned long ncpu = 0;
+ long err = syscall(SYS_getcpu, &ncpu, &node, NULL);
+ if (err != 0) return 0;
+ return node;
+ #else
+ return 0;
+ #endif
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ char buf[128];
+ unsigned node = 0;
+ for(node = 0; node < 256; node++) {
+ // enumerate node entries -- todo: is there a more efficient way to do this? (but ensure there is no allocation)
+ snprintf(buf, 127, "/sys/devices/system/node/node%u", node + 1);
+ if (mi_prim_access(buf,R_OK) != 0) break;
+ }
+ return (node+1);
+}
+
+#elif defined(__FreeBSD__) && __FreeBSD_version >= 1200000
+
+size_t _mi_prim_numa_node(void) {
+ domainset_t dom;
+ size_t node;
+ int policy;
+ if (cpuset_getdomain(CPU_LEVEL_CPUSET, CPU_WHICH_PID, -1, sizeof(dom), &dom, &policy) == -1) return 0ul;
+ for (node = 0; node < MAXMEMDOM; node++) {
+ if (DOMAINSET_ISSET(node, &dom)) return node;
+ }
+ return 0ul;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ size_t ndomains = 0;
+ size_t len = sizeof(ndomains);
+ if (sysctlbyname("vm.ndomains", &ndomains, &len, NULL, 0) == -1) return 0ul;
+ return ndomains;
+}
+
+#elif defined(__DragonFly__)
+
+size_t _mi_prim_numa_node(void) {
+ // TODO: DragonFly does not seem to provide any userland means to get this information.
+ return 0ul;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ size_t ncpus = 0, nvirtcoresperphys = 0;
+ size_t len = sizeof(size_t);
+ if (sysctlbyname("hw.ncpu", &ncpus, &len, NULL, 0) == -1) return 0ul;
+ if (sysctlbyname("hw.cpu_topology_ht_ids", &nvirtcoresperphys, &len, NULL, 0) == -1) return 0ul;
+ return nvirtcoresperphys * ncpus;
+}
+
+#else
+
+size_t _mi_prim_numa_node(void) {
+ return 0;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ return 1;
+}
+
+#endif
+
+// ----------------------------------------------------------------
+// Clock
+// ----------------------------------------------------------------
+
+#include <time.h>
+
+#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)
+
+mi_msecs_t _mi_prim_clock_now(void) {
+ struct timespec t;
+ #ifdef CLOCK_MONOTONIC
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ #else
+ clock_gettime(CLOCK_REALTIME, &t);
+ #endif
+ return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000);
+}
+
+#else
+
+// low resolution timer
+mi_msecs_t _mi_prim_clock_now(void) {
+ #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
+ return (mi_msecs_t)clock();
+ #elif (CLOCKS_PER_SEC < 1000)
+ return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
+ #else
+ return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
+ #endif
+}
+
+#endif
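+
+// A small usage sketch (editorial example, not part of mimalloc; the helper name is
+// hypothetical): this millisecond clock is only used for coarse timing, for example to
+// bound how long huge-page reservation may take in _mi_os_alloc_huge_os_pages.
+//
+//   static mi_msecs_t example_elapsed(mi_msecs_t start) {
+//     return _mi_prim_clock_now() - start;   // compare against a max_msecs budget
+//   }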
+
+
+
+
+//----------------------------------------------------------------
+// Process info
+//----------------------------------------------------------------
+
+#if defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__)
+#include <stdio.h>
+#include <unistd.h>
+#include <sys/resource.h>
+
+#if defined(__APPLE__)
+#include <mach/mach.h>
+#endif
+
+#if defined(__HAIKU__)
+#include <kernel/OS.h>
+#endif
+
+static mi_msecs_t timeval_secs(const struct timeval* tv) {
+ return ((mi_msecs_t)tv->tv_sec * 1000L) + ((mi_msecs_t)tv->tv_usec / 1000L);
+}
+
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ struct rusage rusage;
+ getrusage(RUSAGE_SELF, &rusage);
+ pinfo->utime = timeval_secs(&rusage.ru_utime);
+ pinfo->stime = timeval_secs(&rusage.ru_stime);
+#if !defined(__HAIKU__)
+ pinfo->page_faults = rusage.ru_majflt;
+#endif
+#if defined(__HAIKU__)
+ // Haiku does not have (yet?) a way to
+ // get these stats per process
+ thread_info tid;
+ area_info mem;
+ ssize_t c;
+ get_thread_info(find_thread(0), &tid);
+ while (get_next_area_info(tid.team, &c, &mem) == B_OK) {
+ pinfo->peak_rss += mem.ram_size;
+ }
+ pinfo->page_faults = 0;
+#elif defined(__APPLE__)
+ pinfo->peak_rss = rusage.ru_maxrss; // macos reports in bytes
+ struct mach_task_basic_info info;
+ mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT;
+ if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) {
+ pinfo->current_rss = (size_t)info.resident_size;
+ }
+#else
+ pinfo->peak_rss = rusage.ru_maxrss * 1024; // Linux/BSD report in KiB
+#endif
+ // use defaults for commit
+}
+
+#else
+
+#ifndef __wasi__
+// WebAssembly instances are not processes
+#pragma message("define a way to get process info")
+#endif
+
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ // use defaults
+ MI_UNUSED(pinfo);
+}
+
+#endif
+
+
+//----------------------------------------------------------------
+// Output
+//----------------------------------------------------------------
+
+void _mi_prim_out_stderr( const char* msg ) {
+ fputs(msg,stderr);
+}
+
+
+//----------------------------------------------------------------
+// Environment
+//----------------------------------------------------------------
+
+#if !defined(MI_USE_ENVIRON) || (MI_USE_ENVIRON!=0)
+// On Posix systems use `environ` to access environment variables
+// even before the C runtime is initialized.
+#if defined(__APPLE__) && defined(__has_include) && __has_include(<crt_externs.h>)
+#include <crt_externs.h>
+static char** mi_get_environ(void) {
+ return (*_NSGetEnviron());
+}
+#else
+extern char** environ;
+static char** mi_get_environ(void) {
+ return environ;
+}
+#endif
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ if (name==NULL) return false;
+ const size_t len = _mi_strlen(name);
+ if (len == 0) return false;
+ char** env = mi_get_environ();
+ if (env == NULL) return false;
+ // compare up to 10000 entries
+ for (int i = 0; i < 10000 && env[i] != NULL; i++) {
+ const char* s = env[i];
+ if (_mi_strnicmp(name, s, len) == 0 && s[len] == '=') { // case insensitive
+ // found it
+ _mi_strlcpy(result, s + len + 1, result_size);
+ return true;
+ }
+ }
+ return false;
+}
+#else
+// fallback: use standard C `getenv` but this cannot be used while initializing the C runtime
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ // cannot call getenv() when still initializing the C runtime.
+ if (_mi_preloading()) return false;
+ const char* s = getenv(name);
+ if (s == NULL) {
+ // we check the upper case name too.
+ char buf[64+1];
+ size_t len = _mi_strnlen(name,sizeof(buf)-1);
+ for (size_t i = 0; i < len; i++) {
+ buf[i] = _mi_toupper(name[i]);
+ }
+ buf[len] = 0;
+ s = getenv(buf);
+ }
+ if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false;
+ _mi_strlcpy(result, s, result_size);
+ return true;
+}
+#endif // !MI_USE_ENVIRON
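+
+// A usage sketch (editorial example, not part of mimalloc; the helper name is hypothetical):
+// lookups go through this primitive, and on the `environ` path above the match is
+// case-insensitive on the name, so "MIMALLOC_VERBOSE=1" and "mimalloc_verbose=1" both match.
+//
+//   static bool example_verbose(void) {
+//     char value[32];
+//     return _mi_prim_getenv("MIMALLOC_VERBOSE", value, sizeof(value)) && value[0] == '1';
+//   }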
+
+
+//----------------------------------------------------------------
+// Random
+//----------------------------------------------------------------
+
+#if defined(__APPLE__)
+
+#include <AvailabilityMacros.h>
+#if defined(MAC_OS_X_VERSION_10_10) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_10
+#include <CommonCrypto/CommonCryptoError.h>
+#include <CommonCrypto/CommonRandom.h>
+#endif
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
+ // We prefer CCRandomGenerateBytes as it returns an error code, while arc4random_buf
+ // may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
+ return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
+ #else
+ // fall back on older macOS
+ arc4random_buf(buf, buf_len);
+ return true;
+ #endif
+}
+
+#elif defined(__ANDROID__) || defined(__DragonFly__) || \
+ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
+ defined(__sun)
+
+#include <stdlib.h>
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ arc4random_buf(buf, buf_len);
+ return true;
+}
+
+#elif defined(__linux__) || defined(__HAIKU__)
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ // Modern Linux provides `getrandom` but different distributions either use `sys/random.h` or `linux/random.h`
+ // and for the latter the actual `getrandom` call is not always defined.
+ // (see <https://stackoverflow.com/questions/45237324/why-doesnt-getrandom-compile>)
+ // We therefore use a syscall directly and fall back dynamically to /dev/urandom when needed.
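+ // (once the `getrandom` probe fails with ENOSYS, the /dev/urandom path is used for the rest of the run)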
+ #if defined(MI_HAS_SYSCALL_H) && defined(SYS_getrandom)
+ #ifndef GRND_NONBLOCK
+ #define GRND_NONBLOCK (1)
+ #endif
+ static _Atomic(uintptr_t) no_getrandom; // = 0
+ if (mi_atomic_load_acquire(&no_getrandom)==0) {
+ ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK);
+ if (ret >= 0) return (buf_len == (size_t)ret);
+ if (errno != ENOSYS) return false;
+ mi_atomic_store_release(&no_getrandom, (uintptr_t)1); // don't call again, and fall back to /dev/urandom
+ }
+ #endif
+ int flags = O_RDONLY;
+ #if defined(O_CLOEXEC)
+ flags |= O_CLOEXEC;
+ #endif
+ int fd = mi_prim_open("/dev/urandom", flags);
+ if (fd < 0) return false;
+ size_t count = 0;
+ while(count < buf_len) {
+ ssize_t ret = mi_prim_read(fd, (char*)buf + count, buf_len - count);
+ if (ret<=0) {
+ if (errno!=EAGAIN && errno!=EINTR) break;
+ }
+ else {
+ count += ret;
+ }
+ }
+ mi_prim_close(fd);
+ return (count==buf_len);
+}
+
+#else
+
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ return false;
+}
+
+#endif
+
+
+//----------------------------------------------------------------
+// Thread init/done
+//----------------------------------------------------------------
+
+#if defined(MI_USE_PTHREADS)
+
+// use pthread local storage keys to detect thread ending
+// (and used with MI_TLS_PTHREADS for the default heap)
+pthread_key_t _mi_heap_default_key = (pthread_key_t)(-1);
+
+static void mi_pthread_done(void* value) {
+ if (value!=NULL) {
+ _mi_thread_done((mi_heap_t*)value);
+ }
+}
+
+void _mi_prim_thread_init_auto_done(void) {
+ mi_assert_internal(_mi_heap_default_key == (pthread_key_t)(-1));
+ pthread_key_create(&_mi_heap_default_key, &mi_pthread_done);
+}
+
+void _mi_prim_thread_done_auto_done(void) {
+ // nothing to do
+}
+
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ if (_mi_heap_default_key != (pthread_key_t)(-1)) { // can happen during recursive invocation on FreeBSD
+ pthread_setspecific(_mi_heap_default_key, heap);
+ }
+}
+
+#else
+
+void _mi_prim_thread_init_auto_done(void) {
+ // nothing
+}
+
+void _mi_prim_thread_done_auto_done(void) {
+ // nothing
+}
+
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+}
+
+#endif
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/wasi/prim.c b/source/luametatex/source/libraries/mimalloc/src/prim/wasi/prim.c
new file mode 100644
index 000000000..cb3ce1a7f
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/wasi/prim.c
@@ -0,0 +1,265 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// This file is included in `src/prim/prim.c`
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
+
+//---------------------------------------------
+// Initialize
+//---------------------------------------------
+
+void _mi_prim_mem_init( mi_os_mem_config_t* config ) {
+ config->page_size = 64*MI_KiB; // WebAssembly has a fixed page size: 64KiB
+ config->alloc_granularity = 16;
+ config->has_overcommit = false;
+ config->must_free_whole = true;
+}
+
+//---------------------------------------------
+// Free
+//---------------------------------------------
+
+int _mi_prim_free(void* addr, size_t size ) {
+ MI_UNUSED(addr); MI_UNUSED(size);
+ // wasi heap cannot be shrunk
+ return 0;
+}
+
+
+//---------------------------------------------
+// Allocation: sbrk or memory_grow
+//---------------------------------------------
+
+#if defined(MI_USE_SBRK)
+ static void* mi_memory_grow( size_t size ) {
+ void* p = sbrk(size);
+ if (p == (void*)(-1)) return NULL;
+ #if !defined(__wasi__) // on wasi this is always zero initialized already (?)
+ memset(p,0,size);
+ #endif
+ return p;
+ }
+#elif defined(__wasi__)
+ static void* mi_memory_grow( size_t size ) {
+ size_t base = (size > 0 ? __builtin_wasm_memory_grow(0,_mi_divide_up(size, _mi_os_page_size()))
+ : __builtin_wasm_memory_size(0));
+ if (base == SIZE_MAX) return NULL;
+ return (void*)(base * _mi_os_page_size());
+ }
+#endif
+
+#if defined(MI_USE_PTHREADS)
+static pthread_mutex_t mi_heap_grow_mutex = PTHREAD_MUTEX_INITIALIZER;
+#endif
+
+static void* mi_prim_mem_grow(size_t size, size_t try_alignment) {
+ void* p = NULL;
+ if (try_alignment <= 1) {
+ // `sbrk` is not thread safe in general so try to protect it (we could skip this on WASM but leave it in for now)
+ #if defined(MI_USE_PTHREADS)
+ pthread_mutex_lock(&mi_heap_grow_mutex);
+ #endif
+ p = mi_memory_grow(size);
+ #if defined(MI_USE_PTHREADS)
+ pthread_mutex_unlock(&mi_heap_grow_mutex);
+ #endif
+ }
+ else {
+ void* base = NULL;
+ size_t alloc_size = 0;
+ // to allocate aligned use a lock to try to avoid thread interaction
+ // between getting the current size and actual allocation
+ // (also, `sbrk` is not thread safe in general)
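+ // For example (hypothetical numbers): with the current break at 0x10010,
+ // try_alignment 0x1000 and size 0x2000, `aligned_current` is 0x11000 and
+ // `alloc_size` is (0xFF0 + 0x2000) rounded up to the 64KiB page size; as long
+ // as no other thread grows memory in between, the aligned pointer plus size
+ // stays within [base, base + alloc_size).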
+ #if defined(MI_USE_PTHREADS)
+ pthread_mutex_lock(&mi_heap_grow_mutex);
+ #endif
+ {
+ void* current = mi_memory_grow(0); // get current size
+ if (current != NULL) {
+ void* aligned_current = mi_align_up_ptr(current, try_alignment); // and align from there to minimize wasted space
+ alloc_size = _mi_align_up( ((uint8_t*)aligned_current - (uint8_t*)current) + size, _mi_os_page_size());
+ base = mi_memory_grow(alloc_size);
+ }
+ }
+ #if defined(MI_USE_PTHREADS)
+ pthread_mutex_unlock(&mi_heap_grow_mutex);
+ #endif
+ if (base != NULL) {
+ p = mi_align_up_ptr(base, try_alignment);
+ if ((uint8_t*)p + size > (uint8_t*)base + alloc_size) {
+ // another thread used wasm_memory_grow/sbrk in-between and we do not have enough
+ // space after alignment. Give up (and waste the space as we cannot shrink :-( )
+ // (in `mi_os_mem_alloc_aligned` this will fall back to overallocation to align)
+ p = NULL;
+ }
+ }
+ }
+ /*
+ if (p == NULL) {
+ _mi_warning_message("unable to allocate sbrk/wasm_memory_grow OS memory (%zu bytes, %zu alignment)\n", size, try_alignment);
+ errno = ENOMEM;
+ return NULL;
+ }
+ */
+ mi_assert_internal( p == NULL || try_alignment == 0 || (uintptr_t)p % try_alignment == 0 );
+ return p;
+}
+
+// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
+int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, void** addr) {
+ MI_UNUSED(allow_large); MI_UNUSED(commit);
+ *is_large = false;
+ *addr = mi_prim_mem_grow(size, try_alignment);
+ return (*addr != NULL ? 0 : ENOMEM);
+}
+
+
+//---------------------------------------------
+// Commit/Reset/Protect
+//---------------------------------------------
+
+int _mi_prim_commit(void* addr, size_t size, bool commit) {
+ MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(commit);
+ return 0;
+}
+
+int _mi_prim_reset(void* addr, size_t size) {
+ MI_UNUSED(addr); MI_UNUSED(size);
+ return 0;
+}
+
+int _mi_prim_protect(void* addr, size_t size, bool protect) {
+ MI_UNUSED(addr); MI_UNUSED(size); MI_UNUSED(protect);
+ return 0;
+}
+
+
+//---------------------------------------------
+// Huge pages and NUMA nodes
+//---------------------------------------------
+
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, void** addr) {
+ MI_UNUSED(hint_addr); MI_UNUSED(size); MI_UNUSED(numa_node);
+ *addr = NULL;
+ return ENOSYS;
+}
+
+size_t _mi_prim_numa_node(void) {
+ return 0;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ return 1;
+}
+
+
+//----------------------------------------------------------------
+// Clock
+//----------------------------------------------------------------
+
+#include <time.h>
+
+#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)
+
+mi_msecs_t _mi_prim_clock_now(void) {
+ struct timespec t;
+ #ifdef CLOCK_MONOTONIC
+ clock_gettime(CLOCK_MONOTONIC, &t);
+ #else
+ clock_gettime(CLOCK_REALTIME, &t);
+ #endif
+ return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000);
+}
+
+#else
+
+// low resolution timer
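+// (e.g. with CLOCKS_PER_SEC == 1000000, the tick count is divided by 1000 to yield milliseconds)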
+mi_msecs_t _mi_prim_clock_now(void) {
+ #if !defined(CLOCKS_PER_SEC) || (CLOCKS_PER_SEC == 1000) || (CLOCKS_PER_SEC == 0)
+ return (mi_msecs_t)clock();
+ #elif (CLOCKS_PER_SEC < 1000)
+ return (mi_msecs_t)clock() * (1000 / (mi_msecs_t)CLOCKS_PER_SEC);
+ #else
+ return (mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000);
+ #endif
+}
+
+#endif
+
+
+//----------------------------------------------------------------
+// Process info
+//----------------------------------------------------------------
+
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ // use defaults
+ MI_UNUSED(pinfo);
+}
+
+
+//----------------------------------------------------------------
+// Output
+//----------------------------------------------------------------
+
+void _mi_prim_out_stderr( const char* msg ) {
+ fputs(msg,stderr);
+}
+
+
+//----------------------------------------------------------------
+// Environment
+//----------------------------------------------------------------
+
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ // cannot call getenv() when still initializing the C runtime.
+ if (_mi_preloading()) return false;
+ const char* s = getenv(name);
+ if (s == NULL) {
+ // we check the upper case name too.
+ char buf[64+1];
+ size_t len = _mi_strnlen(name,sizeof(buf)-1);
+ for (size_t i = 0; i < len; i++) {
+ buf[i] = _mi_toupper(name[i]);
+ }
+ buf[len] = 0;
+ s = getenv(buf);
+ }
+ if (s == NULL || _mi_strnlen(s,result_size) >= result_size) return false;
+ _mi_strlcpy(result, s, result_size);
+ return true;
+}
+
+
+//----------------------------------------------------------------
+// Random
+//----------------------------------------------------------------
+
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ return false;
+}
+
+
+//----------------------------------------------------------------
+// Thread init/done
+//----------------------------------------------------------------
+
+void _mi_prim_thread_init_auto_done(void) {
+ // nothing
+}
+
+void _mi_prim_thread_done_auto_done(void) {
+ // nothing
+}
+
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+}
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw-mimalloc.wprp b/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw-mimalloc.wprp
new file mode 100644
index 000000000..b00cd7adf
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw-mimalloc.wprp
@@ -0,0 +1,61 @@
+<WindowsPerformanceRecorder Version="1.0">
+ <Profiles>
+ <SystemCollector Id="WPR_initiated_WprApp_WPR_System_Collector" Name="WPR_initiated_WprApp_WPR System Collector">
+ <BufferSize Value="1024" />
+ <Buffers Value="100" />
+ </SystemCollector>
+ <EventCollector Id="Mimalloc_Collector" Name="Mimalloc Collector">
+ <BufferSize Value="1024" />
+ <Buffers Value="100" />
+ </EventCollector>
+ <SystemProvider Id="WPR_initiated_WprApp_WPR_System_Collector_Provider">
+ <Keywords>
+ <Keyword Value="Loader" />
+ </Keywords>
+ </SystemProvider>
+ <EventProvider Id="MimallocEventProvider" Name="138f4dbb-ee04-4899-aa0a-572ad4475779" NonPagedMemory="true" Stack="true">
+ <EventFilters FilterIn="true">
+ <EventId Value="100" />
+ <EventId Value="101" />
+ </EventFilters>
+ </EventProvider>
+ <Profile Id="CustomHeap.Verbose.File" Name="CustomHeap" Description="RunningProfile:CustomHeap.Verbose.File" LoggingMode="File" DetailLevel="Verbose">
+ <ProblemCategories>
+ <ProblemCategory Value="Resource Analysis" />
+ </ProblemCategories>
+ <Collectors>
+ <SystemCollectorId Value="WPR_initiated_WprApp_WPR_System_Collector">
+ <SystemProviderId Value="WPR_initiated_WprApp_WPR_System_Collector_Provider" />
+ </SystemCollectorId>
+ <EventCollectorId Value="Mimalloc_Collector">
+ <EventProviders>
+ <EventProviderId Value="MimallocEventProvider" >
+ <Keywords>
+ <Keyword Value="100"/>
+ <Keyword Value="101"/>
+ </Keywords>
+ </EventProviderId>
+ </EventProviders>
+ </EventCollectorId>
+ </Collectors>
+ <TraceMergeProperties>
+ <TraceMergeProperty Id="BaseVerboseTraceMergeProperties" Name="BaseTraceMergeProperties">
+ <DeletePreMergedTraceFiles Value="true" />
+ <FileCompression Value="false" />
+ <InjectOnly Value="false" />
+ <SkipMerge Value="false" />
+ <CustomEvents>
+ <CustomEvent Value="ImageId" />
+ <CustomEvent Value="BuildInfo" />
+ <CustomEvent Value="VolumeMapping" />
+ <CustomEvent Value="EventMetadata" />
+ <CustomEvent Value="PerfTrackMetadata" />
+ <CustomEvent Value="WinSAT" />
+ <CustomEvent Value="NetworkInterface" />
+ </CustomEvents>
+ </TraceMergeProperty>
+ </TraceMergeProperties>
+ </Profile>
+ </Profiles>
+</WindowsPerformanceRecorder>
+
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.h b/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.h
new file mode 100644
index 000000000..4e0a092a1
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.h
@@ -0,0 +1,905 @@
+//**********************************************************************`
+//* This is an include file generated by Message Compiler. *`
+//* *`
+//* Copyright (c) Microsoft Corporation. All Rights Reserved. *`
+//**********************************************************************`
+#pragma once
+
+//*****************************************************************************
+//
+// Notes on the ETW event code generated by MC:
+//
+// - Structures and arrays of structures are treated as an opaque binary blob.
+// The caller is responsible for packing the data for the structure into a
+// single region of memory, with no padding between values. The macro will
+// have an extra parameter for the length of the blob.
+// - Arrays of nul-terminated strings must be packed by the caller into a
+// single binary blob containing the correct number of strings, with a nul
+// after each string. The size of the blob is specified in characters, and
+// includes the final nul.
+// - Arrays of SID are treated as a single binary blob. The caller is
+// responsible for packing the SID values into a single region of memory with
+// no padding.
+// - The length attribute on the data element in the manifest is significant
+// for values with intype win:UnicodeString, win:AnsiString, or win:Binary.
+// The length attribute must be specified for win:Binary, and is optional for
+// win:UnicodeString and win:AnsiString (if no length is given, the strings
+// are assumed to be nul-terminated). For win:UnicodeString, the length is
+// measured in characters, not bytes.
+// - For an array of win:UnicodeString, win:AnsiString, or win:Binary, the
+// length attribute applies to every value in the array, so every value in
+// the array must have the same length. The values in the array are provided
+// to the macro via a single pointer -- the caller is responsible for packing
+// all of the values into a single region of memory with no padding between
+// values.
+// - Values of type win:CountedUnicodeString, win:CountedAnsiString, and
+// win:CountedBinary can be generated and collected on Vista or later.
+// However, they may not decode properly without the Windows 10 2018 Fall
+// Update.
+// - Arrays of type win:CountedUnicodeString, win:CountedAnsiString, and
+// win:CountedBinary must be packed by the caller into a single region of
+// memory. The format for each item is a UINT16 byte-count followed by that
+// many bytes of data. When providing the array to the generated macro, you
+// must provide the total size of the packed array data, including the UINT16
+// sizes for each item. In the case of win:CountedUnicodeString, the data
+// size is specified in WCHAR (16-bit) units. In the case of
+// win:CountedAnsiString and win:CountedBinary, the data size is specified in
+// bytes.
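+// - Example (illustrative): an array of the two nul-terminated strings "ab"
+// and "c" must be packed by the caller as the five characters
+// { 'a','b',0,'c',0 }, and the blob size passed to the generated macro is 5
+// (in characters, including the final nul).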
+//
+//*****************************************************************************
+
+#include <wmistr.h>
+#include <evntrace.h>
+#include <evntprov.h>
+
+#ifndef ETW_INLINE
+ #ifdef _ETW_KM_
+ // In kernel mode, save stack space by never inlining templates.
+ #define ETW_INLINE DECLSPEC_NOINLINE __inline
+ #else
+ // In user mode, save code size by inlining templates as appropriate.
+ #define ETW_INLINE __inline
+ #endif
+#endif // ETW_INLINE
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+//
+// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro:
+// Define this macro to have the compiler skip the generated functions in this
+// header.
+//
+#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION
+
+//
+// MCGEN_USE_KERNEL_MODE_APIS macro:
+// Controls whether the generated code uses kernel-mode or user-mode APIs.
+// - Set to 0 to use Windows user-mode APIs such as EventRegister.
+// - Set to 1 to use Windows kernel-mode APIs such as EtwRegister.
+// Default is based on whether the _ETW_KM_ macro is defined (i.e. by wdm.h).
+// Note that the APIs can also be overridden directly, e.g. by setting the
+// MCGEN_EVENTWRITETRANSFER or MCGEN_EVENTREGISTER macros.
+//
+#ifndef MCGEN_USE_KERNEL_MODE_APIS
+ #ifdef _ETW_KM_
+ #define MCGEN_USE_KERNEL_MODE_APIS 1
+ #else
+ #define MCGEN_USE_KERNEL_MODE_APIS 0
+ #endif
+#endif // MCGEN_USE_KERNEL_MODE_APIS
+
+//
+// MCGEN_HAVE_EVENTSETINFORMATION macro:
+// Controls how McGenEventSetInformation uses the EventSetInformation API.
+// - Set to 0 to disable the use of EventSetInformation
+// (McGenEventSetInformation will always return an error).
+// - Set to 1 to directly invoke MCGEN_EVENTSETINFORMATION.
+// - Set to 2 to locate EventSetInformation at runtime via GetProcAddress
+// (user-mode) or MmGetSystemRoutineAddress (kernel-mode).
+// Default is determined as follows:
+// - If MCGEN_EVENTSETINFORMATION has been customized, set to 1
+// (i.e. use MCGEN_EVENTSETINFORMATION).
+// - Else if the target OS version has EventSetInformation, set to 1
+// (i.e. use MCGEN_EVENTSETINFORMATION).
+// - Else set to 2 (i.e. try to dynamically locate EventSetInformation).
+// Note that an McGenEventSetInformation function will only be generated if one
+// or more provider in a manifest has provider traits.
+//
+#ifndef MCGEN_HAVE_EVENTSETINFORMATION
+ #ifdef MCGEN_EVENTSETINFORMATION // if MCGEN_EVENTSETINFORMATION has been customized,
+ #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...).
+ #elif MCGEN_USE_KERNEL_MODE_APIS // else if using kernel-mode APIs,
+ #if NTDDI_VERSION >= 0x06040000 // if target OS is Windows 10 or later,
+ #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...).
+ #else // else
+ #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EtwSetInformation" via MmGetSystemRoutineAddress.
+ #endif // else (using user-mode APIs)
+ #else // if target OS and SDK is Windows 8 or later,
+ #if WINVER >= 0x0602 && defined(EVENT_FILTER_TYPE_SCHEMATIZED)
+ #define MCGEN_HAVE_EVENTSETINFORMATION 1 // directly invoke MCGEN_EVENTSETINFORMATION(...).
+ #else // else
+ #define MCGEN_HAVE_EVENTSETINFORMATION 2 // find "EventSetInformation" via GetModuleHandleExW/GetProcAddress.
+ #endif
+ #endif
+#endif // MCGEN_HAVE_EVENTSETINFORMATION
+
+//
+// MCGEN Override Macros
+//
+// The following override macros may be defined before including this header
+// to control the APIs used by this header:
+//
+// - MCGEN_EVENTREGISTER
+// - MCGEN_EVENTUNREGISTER
+// - MCGEN_EVENTSETINFORMATION
+// - MCGEN_EVENTWRITETRANSFER
+//
+// If the macro is undefined, the MC implementation will default to the
+// corresponding ETW APIs. For example, if the MCGEN_EVENTREGISTER macro is
+// undefined, the EventRegister[MyProviderName] macro will use EventRegister
+// in user mode and will use EtwRegister in kernel mode.
+//
+// To prevent issues from conflicting definitions of these macros, the value
+// of the override macro will be used as a suffix in certain internal function
+// names. Because of this, the override macros must follow certain rules:
+//
+// - The macro must be defined before any MC-generated header is included and
+// must not be undefined or redefined after any MC-generated header is
+// included. Different translation units (i.e. different .c or .cpp files)
+// may set the macros to different values, but within a translation unit
+// (within a single .c or .cpp file), the macro must be set once and not
+// changed.
+// - The override must be an object-like macro, not a function-like macro
+// (i.e. the override macro must not have a parameter list).
+// - The override macro's value must be a simple identifier, i.e. must be
+// something that starts with a letter or '_' and contains only letters,
+// numbers, and '_' characters.
+// - If the override macro's value is the name of a second object-like macro,
+// the second object-like macro must follow the same rules. (The override
+// macro's value can also be the name of a function-like macro, in which
+// case the function-like macro does not need to follow the same rules.)
+//
+// For example, the following will cause compile errors:
+//
+// #define MCGEN_EVENTWRITETRANSFER MyNamespace::MyClass::MyFunction // Value has non-identifier characters (colon).
+// #define MCGEN_EVENTWRITETRANSFER GetEventWriteFunctionPointer(7) // Value has non-identifier characters (parentheses).
+// #define MCGEN_EVENTWRITETRANSFER(h,e,a,r,c,d) EventWrite(h,e,c,d) // Override is defined as a function-like macro.
+// #define MY_OBJECT_LIKE_MACRO MyNamespace::MyClass::MyEventWriteFunction
+// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // Evaluates to something with non-identifier characters (colon).
+//
+// The following would be ok:
+//
+// #define MCGEN_EVENTWRITETRANSFER MyEventWriteFunction1 // OK, suffix will be "MyEventWriteFunction1".
+// #define MY_OBJECT_LIKE_MACRO MyEventWriteFunction2
+// #define MCGEN_EVENTWRITETRANSFER MY_OBJECT_LIKE_MACRO // OK, suffix will be "MyEventWriteFunction2".
+// #define MY_FUNCTION_LIKE_MACRO(h,e,a,r,c,d) MyNamespace::MyClass::MyEventWriteFunction3(h,e,c,d)
+// #define MCGEN_EVENTWRITETRANSFER MY_FUNCTION_LIKE_MACRO // OK, suffix will be "MY_FUNCTION_LIKE_MACRO".
+//
+#ifndef MCGEN_EVENTREGISTER
+ #if MCGEN_USE_KERNEL_MODE_APIS
+ #define MCGEN_EVENTREGISTER EtwRegister
+ #else
+ #define MCGEN_EVENTREGISTER EventRegister
+ #endif
+#endif // MCGEN_EVENTREGISTER
+#ifndef MCGEN_EVENTUNREGISTER
+ #if MCGEN_USE_KERNEL_MODE_APIS
+ #define MCGEN_EVENTUNREGISTER EtwUnregister
+ #else
+ #define MCGEN_EVENTUNREGISTER EventUnregister
+ #endif
+#endif // MCGEN_EVENTUNREGISTER
+#ifndef MCGEN_EVENTSETINFORMATION
+ #if MCGEN_USE_KERNEL_MODE_APIS
+ #define MCGEN_EVENTSETINFORMATION EtwSetInformation
+ #else
+ #define MCGEN_EVENTSETINFORMATION EventSetInformation
+ #endif
+#endif // MCGEN_EVENTSETINFORMATION
+#ifndef MCGEN_EVENTWRITETRANSFER
+ #if MCGEN_USE_KERNEL_MODE_APIS
+ #define MCGEN_EVENTWRITETRANSFER EtwWriteTransfer
+ #else
+ #define MCGEN_EVENTWRITETRANSFER EventWriteTransfer
+ #endif
+#endif // MCGEN_EVENTWRITETRANSFER
+
+//
+// MCGEN_EVENT_ENABLED macro:
+// Override to control how the EventWrite[EventName] macros determine whether
+// an event is enabled. The default behavior is for EventWrite[EventName] to
+// use the EventEnabled[EventName] macros.
+//
+#ifndef MCGEN_EVENT_ENABLED
+#define MCGEN_EVENT_ENABLED(EventName) EventEnabled##EventName()
+#endif
+
+//
+// MCGEN_EVENT_ENABLED_FORCONTEXT macro:
+// Override to control how the EventWrite[EventName]_ForContext macros
+// determine whether an event is enabled. The default behavior is for
+// EventWrite[EventName]_ForContext to use the
+// EventEnabled[EventName]_ForContext macros.
+//
+#ifndef MCGEN_EVENT_ENABLED_FORCONTEXT
+#define MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, EventName) EventEnabled##EventName##_ForContext(pContext)
+#endif
+
+//
+// MCGEN_ENABLE_CHECK macro:
+// Determines whether the specified event would be considered as enabled
+// based on the state of the specified context. Slightly faster than calling
+// McGenEventEnabled directly.
+//
+#ifndef MCGEN_ENABLE_CHECK
+#define MCGEN_ENABLE_CHECK(Context, Descriptor) (Context.IsEnabled && McGenEventEnabled(&Context, &Descriptor))
+#endif
+
+#if !defined(MCGEN_TRACE_CONTEXT_DEF)
+#define MCGEN_TRACE_CONTEXT_DEF
+// This structure is for use by MC-generated code and should not be used directly.
+typedef struct _MCGEN_TRACE_CONTEXT
+{
+ TRACEHANDLE RegistrationHandle;
+ TRACEHANDLE Logger; // Used as pointer to provider traits.
+ ULONGLONG MatchAnyKeyword;
+ ULONGLONG MatchAllKeyword;
+ ULONG Flags;
+ ULONG IsEnabled;
+ UCHAR Level;
+ UCHAR Reserve;
+ USHORT EnableBitsCount;
+ PULONG EnableBitMask;
+ const ULONGLONG* EnableKeyWords;
+ const UCHAR* EnableLevel;
+} MCGEN_TRACE_CONTEXT, *PMCGEN_TRACE_CONTEXT;
+#endif // MCGEN_TRACE_CONTEXT_DEF
+
+#if !defined(MCGEN_LEVEL_KEYWORD_ENABLED_DEF)
+#define MCGEN_LEVEL_KEYWORD_ENABLED_DEF
+//
+// Determines whether an event with a given Level and Keyword would be
+// considered as enabled based on the state of the specified context.
+// Note that you may want to use MCGEN_ENABLE_CHECK instead of calling this
+// function directly.
+//
+FORCEINLINE
+BOOLEAN
+McGenLevelKeywordEnabled(
+ _In_ PMCGEN_TRACE_CONTEXT EnableInfo,
+ _In_ UCHAR Level,
+ _In_ ULONGLONG Keyword
+ )
+{
+ //
+ // Check if the event Level is lower than the level at which
+ // the channel is enabled.
+ // If the event Level is 0 or the channel is enabled at level 0,
+ // all levels are enabled.
+ //
+
+ if ((Level <= EnableInfo->Level) || // This also covers the case of Level == 0.
+ (EnableInfo->Level == 0)) {
+
+ //
+ // Check if Keyword is enabled
+ //
+
+ if ((Keyword == (ULONGLONG)0) ||
+ ((Keyword & EnableInfo->MatchAnyKeyword) &&
+ ((Keyword & EnableInfo->MatchAllKeyword) == EnableInfo->MatchAllKeyword))) {
+ return TRUE;
+ }
+ }
+
+ return FALSE;
+}
+#endif // MCGEN_LEVEL_KEYWORD_ENABLED_DEF
+
+#if !defined(MCGEN_EVENT_ENABLED_DEF)
+#define MCGEN_EVENT_ENABLED_DEF
+//
+// Determines whether the specified event would be considered as enabled based
+// on the state of the specified context. Note that you may want to use
+// MCGEN_ENABLE_CHECK instead of calling this function directly.
+//
+FORCEINLINE
+BOOLEAN
+McGenEventEnabled(
+ _In_ PMCGEN_TRACE_CONTEXT EnableInfo,
+ _In_ PCEVENT_DESCRIPTOR EventDescriptor
+ )
+{
+ return McGenLevelKeywordEnabled(EnableInfo, EventDescriptor->Level, EventDescriptor->Keyword);
+}
+#endif // MCGEN_EVENT_ENABLED_DEF
+
+#if !defined(MCGEN_CONTROL_CALLBACK)
+#define MCGEN_CONTROL_CALLBACK
+
+// This function is for use by MC-generated code and should not be used directly.
+DECLSPEC_NOINLINE __inline
+VOID
+__stdcall
+McGenControlCallbackV2(
+ _In_ LPCGUID SourceId,
+ _In_ ULONG ControlCode,
+ _In_ UCHAR Level,
+ _In_ ULONGLONG MatchAnyKeyword,
+ _In_ ULONGLONG MatchAllKeyword,
+ _In_opt_ PEVENT_FILTER_DESCRIPTOR FilterData,
+ _Inout_opt_ PVOID CallbackContext
+ )
+/*++
+
+Routine Description:
+
+ This is the notification callback for Windows Vista and later.
+
+Arguments:
+
+ SourceId - The GUID that identifies the session that enabled the provider.
+
+ ControlCode - The parameter indicates whether the provider
+ is being enabled or disabled.
+
+ Level - The level at which the event is enabled.
+
+ MatchAnyKeyword - The bitmask of keywords that the provider uses to
+ determine the category of events that it writes.
+
+ MatchAllKeyword - This bitmask additionally restricts the category
+ of events that the provider writes.
+
+ FilterData - The provider-defined data.
+
+ CallbackContext - The context of the callback that is defined when the provider
+ called EtwRegister to register itself.
+
+Remarks:
+
+ ETW calls this function to notify provider of enable/disable
+
+--*/
+{
+ PMCGEN_TRACE_CONTEXT Ctx = (PMCGEN_TRACE_CONTEXT)CallbackContext;
+ ULONG Ix;
+#ifndef MCGEN_PRIVATE_ENABLE_CALLBACK_V2
+ UNREFERENCED_PARAMETER(SourceId);
+ UNREFERENCED_PARAMETER(FilterData);
+#endif
+
+ if (Ctx == NULL) {
+ return;
+ }
+
+ switch (ControlCode) {
+
+ case EVENT_CONTROL_CODE_ENABLE_PROVIDER:
+ Ctx->Level = Level;
+ Ctx->MatchAnyKeyword = MatchAnyKeyword;
+ Ctx->MatchAllKeyword = MatchAllKeyword;
+ Ctx->IsEnabled = EVENT_CONTROL_CODE_ENABLE_PROVIDER;
+
+ for (Ix = 0; Ix < Ctx->EnableBitsCount; Ix += 1) {
+ if (McGenLevelKeywordEnabled(Ctx, Ctx->EnableLevel[Ix], Ctx->EnableKeyWords[Ix]) != FALSE) {
+ Ctx->EnableBitMask[Ix >> 5] |= (1 << (Ix % 32));
+ } else {
+ Ctx->EnableBitMask[Ix >> 5] &= ~(1 << (Ix % 32));
+ }
+ }
+ break;
+
+ case EVENT_CONTROL_CODE_DISABLE_PROVIDER:
+ Ctx->IsEnabled = EVENT_CONTROL_CODE_DISABLE_PROVIDER;
+ Ctx->Level = 0;
+ Ctx->MatchAnyKeyword = 0;
+ Ctx->MatchAllKeyword = 0;
+ if (Ctx->EnableBitsCount > 0) {
+#pragma warning(suppress: 26451) // Arithmetic overflow cannot occur, no matter the value of EnableBitsCount
+ RtlZeroMemory(Ctx->EnableBitMask, (((Ctx->EnableBitsCount - 1) / 32) + 1) * sizeof(ULONG));
+ }
+ break;
+
+ default:
+ break;
+ }
+
+#ifdef MCGEN_PRIVATE_ENABLE_CALLBACK_V2
+ //
+ // Call user defined callback
+ //
+ MCGEN_PRIVATE_ENABLE_CALLBACK_V2(
+ SourceId,
+ ControlCode,
+ Level,
+ MatchAnyKeyword,
+ MatchAllKeyword,
+ FilterData,
+ CallbackContext
+ );
+#endif // MCGEN_PRIVATE_ENABLE_CALLBACK_V2
+
+ return;
+}
+
+#endif // MCGEN_CONTROL_CALLBACK
+
+#ifndef _mcgen_PENABLECALLBACK
+ #if MCGEN_USE_KERNEL_MODE_APIS
+ #define _mcgen_PENABLECALLBACK PETWENABLECALLBACK
+ #else
+ #define _mcgen_PENABLECALLBACK PENABLECALLBACK
+ #endif
+#endif // _mcgen_PENABLECALLBACK
+
+#if !defined(_mcgen_PASTE2)
+// This macro is for use by MC-generated code and should not be used directly.
+#define _mcgen_PASTE2(a, b) _mcgen_PASTE2_imp(a, b)
+#define _mcgen_PASTE2_imp(a, b) a##b
+#endif // _mcgen_PASTE2
+
+#if !defined(_mcgen_PASTE3)
+// This macro is for use by MC-generated code and should not be used directly.
+#define _mcgen_PASTE3(a, b, c) _mcgen_PASTE3_imp(a, b, c)
+#define _mcgen_PASTE3_imp(a, b, c) a##b##_##c
+#endif // _mcgen_PASTE3
+
+//
+// Macro validation
+//
+
+// Validate MCGEN_EVENTREGISTER:
+
+// Trigger an error if MCGEN_EVENTREGISTER is not an unqualified (simple) identifier:
+struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER);
+
+// Trigger an error if MCGEN_EVENTREGISTER is redefined:
+typedef struct _mcgen_PASTE2(MCGEN_EVENTREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTREGISTER)
+ MCGEN_EVENTREGISTER_must_not_be_redefined_between_headers;
+
+// Trigger an error if MCGEN_EVENTREGISTER is defined as a function-like macro:
+typedef void MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTREGISTER;
+typedef int _mcgen_PASTE2(MCGEN_EVENTREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTREGISTER);
+
+// Validate MCGEN_EVENTUNREGISTER:
+
+// Trigger an error if MCGEN_EVENTUNREGISTER is not an unqualified (simple) identifier:
+struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER);
+
+// Trigger an error if MCGEN_EVENTUNREGISTER is redefined:
+typedef struct _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTUNREGISTER)
+ MCGEN_EVENTUNREGISTER_must_not_be_redefined_between_headers;
+
+// Trigger an error if MCGEN_EVENTUNREGISTER is defined as a function-like macro:
+typedef void MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_MCGEN_EVENTUNREGISTER;
+typedef int _mcgen_PASTE2(MCGEN_EVENTUNREGISTER_must_not_be_a_functionLike_macro_, MCGEN_EVENTUNREGISTER);
+
+// Validate MCGEN_EVENTSETINFORMATION:
+
+// Trigger an error if MCGEN_EVENTSETINFORMATION is not an unqualified (simple) identifier:
+struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION);
+
+// Trigger an error if MCGEN_EVENTSETINFORMATION is redefined:
+typedef struct _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTSETINFORMATION)
+ MCGEN_EVENTSETINFORMATION_must_not_be_redefined_between_headers;
+
+// Trigger an error if MCGEN_EVENTSETINFORMATION is defined as a function-like macro:
+typedef void MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_MCGEN_EVENTSETINFORMATION;
+typedef int _mcgen_PASTE2(MCGEN_EVENTSETINFORMATION_must_not_be_a_functionLike_macro_, MCGEN_EVENTSETINFORMATION);
+
+// Validate MCGEN_EVENTWRITETRANSFER:
+
+// Trigger an error if MCGEN_EVENTWRITETRANSFER is not an unqualified (simple) identifier:
+struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER);
+
+// Trigger an error if MCGEN_EVENTWRITETRANSFER is redefined:
+typedef struct _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_definition_must_be_an_unqualified_identifier_, MCGEN_EVENTWRITETRANSFER)
+ MCGEN_EVENTWRITETRANSFER_must_not_be_redefined_between_headers;
+
+// Trigger an error if MCGEN_EVENTWRITETRANSFER is defined as a function-like macro:
+typedef void MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_MCGEN_EVENTWRITETRANSFER;
+typedef int _mcgen_PASTE2(MCGEN_EVENTWRITETRANSFER_must_not_be_a_functionLike_macro_, MCGEN_EVENTWRITETRANSFER);
+
+#ifndef McGenEventWrite_def
+#define McGenEventWrite_def
+
+// This macro is for use by MC-generated code and should not be used directly.
+#define McGenEventWrite _mcgen_PASTE2(McGenEventWrite_, MCGEN_EVENTWRITETRANSFER)
+
+// This function is for use by MC-generated code and should not be used directly.
+DECLSPEC_NOINLINE __inline
+ULONG __stdcall
+McGenEventWrite(
+ _In_ PMCGEN_TRACE_CONTEXT Context,
+ _In_ PCEVENT_DESCRIPTOR Descriptor,
+ _In_opt_ LPCGUID ActivityId,
+ _In_range_(1, 128) ULONG EventDataCount,
+ _Pre_cap_(EventDataCount) EVENT_DATA_DESCRIPTOR* EventData
+ )
+{
+ const USHORT UNALIGNED* Traits;
+
+ // Some customized MCGEN_EVENTWRITETRANSFER macros might ignore ActivityId.
+ UNREFERENCED_PARAMETER(ActivityId);
+
+ Traits = (const USHORT UNALIGNED*)(UINT_PTR)Context->Logger;
+
+ if (Traits == NULL) {
+ EventData[0].Ptr = 0;
+ EventData[0].Size = 0;
+ EventData[0].Reserved = 0;
+ } else {
+ EventData[0].Ptr = (ULONG_PTR)Traits;
+ EventData[0].Size = *Traits;
+ EventData[0].Reserved = 2; // EVENT_DATA_DESCRIPTOR_TYPE_PROVIDER_METADATA
+ }
+
+ return MCGEN_EVENTWRITETRANSFER(
+ Context->RegistrationHandle,
+ Descriptor,
+ ActivityId,
+ NULL,
+ EventDataCount,
+ EventData);
+}
+#endif // McGenEventWrite_def
+
+#if !defined(McGenEventRegisterUnregister)
+#define McGenEventRegisterUnregister
+
+// This macro is for use by MC-generated code and should not be used directly.
+#define McGenEventRegister _mcgen_PASTE2(McGenEventRegister_, MCGEN_EVENTREGISTER)
+
+#pragma warning(push)
+#pragma warning(disable:6103)
+// This function is for use by MC-generated code and should not be used directly.
+DECLSPEC_NOINLINE __inline
+ULONG __stdcall
+McGenEventRegister(
+ _In_ LPCGUID ProviderId,
+ _In_opt_ _mcgen_PENABLECALLBACK EnableCallback,
+ _In_opt_ PVOID CallbackContext,
+ _Inout_ PREGHANDLE RegHandle
+ )
+/*++
+
+Routine Description:
+
+ This function registers the provider with ETW.
+
+Arguments:
+
+ ProviderId - Provider ID to register with ETW.
+
+ EnableCallback - Callback to be used.
+
+ CallbackContext - Context for the callback.
+
+ RegHandle - Pointer to registration handle.
+
+Remarks:
+
+ Should not be called if the provider is already registered (i.e. should not
+ be called if *RegHandle != 0). Repeatedly registering a provider is a bug
+ and may indicate a race condition. However, for compatibility with previous
+ behavior, this function will return SUCCESS in this case.
+
+--*/
+{
+ ULONG Error;
+
+ if (*RegHandle != 0)
+ {
+ Error = 0; // ERROR_SUCCESS
+ }
+ else
+ {
+ Error = MCGEN_EVENTREGISTER(ProviderId, EnableCallback, CallbackContext, RegHandle);
+ }
+
+ return Error;
+}
+#pragma warning(pop)
+
+// This macro is for use by MC-generated code and should not be used directly.
+#define McGenEventUnregister _mcgen_PASTE2(McGenEventUnregister_, MCGEN_EVENTUNREGISTER)
+
+// This function is for use by MC-generated code and should not be used directly.
+DECLSPEC_NOINLINE __inline
+ULONG __stdcall
+McGenEventUnregister(_Inout_ PREGHANDLE RegHandle)
+/*++
+
+Routine Description:
+
+ Unregister from ETW and set *RegHandle = 0.
+
+Arguments:
+
+ RegHandle - the pointer to the provider registration handle
+
+Remarks:
+
+ If provider has not been registered (i.e. if *RegHandle == 0),
+ return SUCCESS. It is safe to call McGenEventUnregister even if the
+ call to McGenEventRegister returned an error.
+
+--*/
+{
+ ULONG Error;
+
+ if(*RegHandle == 0)
+ {
+ Error = 0; // ERROR_SUCCESS
+ }
+ else
+ {
+ Error = MCGEN_EVENTUNREGISTER(*RegHandle);
+ *RegHandle = (REGHANDLE)0;
+ }
+
+ return Error;
+}
+
+#endif // McGenEventRegisterUnregister
+
+#ifndef _mcgen_EVENT_BIT_SET
+ #if defined(_M_IX86) || defined(_M_X64)
+ // This macro is for use by MC-generated code and should not be used directly.
+ #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((((const unsigned char*)EnableBits)[BitPosition >> 3] & (1u << (BitPosition & 7))) != 0)
+ #else // CPU type
+ // This macro is for use by MC-generated code and should not be used directly.
+ #define _mcgen_EVENT_BIT_SET(EnableBits, BitPosition) ((EnableBits[BitPosition >> 5] & (1u << (BitPosition & 31))) != 0)
+ #endif // CPU type
+#endif // _mcgen_EVENT_BIT_SET
+
+#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION
+
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+// Provider "microsoft-windows-mimalloc" event count 2
+//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+// Provider GUID = 138f4dbb-ee04-4899-aa0a-572ad4475779
+EXTERN_C __declspec(selectany) const GUID ETW_MI_Provider = {0x138f4dbb, 0xee04, 0x4899, {0xaa, 0x0a, 0x57, 0x2a, 0xd4, 0x47, 0x57, 0x79}};
+
+#ifndef ETW_MI_Provider_Traits
+#define ETW_MI_Provider_Traits NULL
+#endif // ETW_MI_Provider_Traits
+
+//
+// Event Descriptors
+//
+EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_ALLOC = {0x64, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0};
+#define ETW_MI_ALLOC_value 0x64
+EXTERN_C __declspec(selectany) const EVENT_DESCRIPTOR ETW_MI_FREE = {0x65, 0x1, 0x0, 0x4, 0x0, 0x0, 0x0};
+#define ETW_MI_FREE_value 0x65
+
+//
+// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro:
+// Define this macro to have the compiler skip the generated functions in this
+// header.
+//
+#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION
+
+//
+// Event Enablement Bits
+// These variables are for use by MC-generated code and should not be used directly.
+//
+EXTERN_C __declspec(selectany) DECLSPEC_CACHEALIGN ULONG microsoft_windows_mimallocEnableBits[1];
+EXTERN_C __declspec(selectany) const ULONGLONG microsoft_windows_mimallocKeywords[1] = {0x0};
+EXTERN_C __declspec(selectany) const unsigned char microsoft_windows_mimallocLevels[1] = {4};
+
+//
+// Provider context
+//
+EXTERN_C __declspec(selectany) MCGEN_TRACE_CONTEXT ETW_MI_Provider_Context = {0, (ULONG_PTR)ETW_MI_Provider_Traits, 0, 0, 0, 0, 0, 0, 1, microsoft_windows_mimallocEnableBits, microsoft_windows_mimallocKeywords, microsoft_windows_mimallocLevels};
+
+//
+// Provider REGHANDLE
+//
+#define microsoft_windows_mimallocHandle (ETW_MI_Provider_Context.RegistrationHandle)
+
+//
+// This macro is set to 0, indicating that the EventWrite[Name] macros do not
+// have an Activity parameter. This is controlled by the -km and -um options.
+//
+#define ETW_MI_Provider_EventWriteActivity 0
+
+//
+// Register with ETW using the control GUID specified in the manifest.
+// Invoke this macro during module initialization (i.e. program startup,
+// DLL process attach, or driver load) to initialize the provider.
+// Note that if this function returns an error, the error means that ETW
+// will not work, but no action needs to be taken -- even if EventRegister
+// returns an error, it is generally safe to use EventWrite and
+// EventUnregister macros (they will be no-ops if EventRegister failed).
+//
+#ifndef EventRegistermicrosoft_windows_mimalloc
+#define EventRegistermicrosoft_windows_mimalloc() McGenEventRegister(&ETW_MI_Provider, McGenControlCallbackV2, &ETW_MI_Provider_Context, &microsoft_windows_mimallocHandle)
+#endif
+
+//
+// Register with ETW using a specific control GUID (i.e. a GUID other than what
+// is specified in the manifest). Advanced scenarios only.
+//
+#ifndef EventRegisterByGuidmicrosoft_windows_mimalloc
+#define EventRegisterByGuidmicrosoft_windows_mimalloc(Guid) McGenEventRegister(&(Guid), McGenControlCallbackV2, &ETW_MI_Provider_Context, &microsoft_windows_mimallocHandle)
+#endif
+
+//
+// Unregister with ETW and close the provider.
+// Invoke this macro during module shutdown (i.e. program exit, DLL process
+// detach, or driver unload) to unregister the provider.
+// Note that you MUST call EventUnregister before DLL or driver unload
+// (not optional): failure to unregister a provider before DLL or driver unload
+// will result in crashes.
+//
+#ifndef EventUnregistermicrosoft_windows_mimalloc
+#define EventUnregistermicrosoft_windows_mimalloc() McGenEventUnregister(&microsoft_windows_mimallocHandle)
+#endif
+
+//
+// MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION macro:
+// Define this macro to enable support for caller-allocated provider context.
+//
+#ifdef MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION
+
+//
+// Advanced scenarios: Caller-allocated provider context.
+// Use when multiple differently-configured provider handles are needed,
+// e.g. for container-aware drivers, one context per container.
+//
+// Usage:
+//
+// - Caller enables the feature before including this header, e.g.
+// #define MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION 1
+// - Caller allocates memory, e.g. pContext = malloc(sizeof(McGenContext_microsoft_windows_mimalloc));
+// - Caller registers the provider, e.g. EventRegistermicrosoft_windows_mimalloc_ForContext(pContext);
+// - Caller writes events, e.g. EventWriteMyEvent_ForContext(pContext, ...);
+// - Caller unregisters, e.g. EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext);
+// - Caller frees memory, e.g. free(pContext);
+//
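+// A minimal sketch of the sequence above (hypothetical call site; the _ForContext
+// write macros are defined further down in this header):
+//
+//   McGenContext_microsoft_windows_mimalloc* pContext =
+//       (McGenContext_microsoft_windows_mimalloc*)malloc(sizeof(*pContext));
+//   EventRegistermicrosoft_windows_mimalloc_ForContext(pContext);
+//   EventWriteETW_MI_ALLOC_ForContext(pContext, (unsigned __int64)(uintptr_t)p, (unsigned __int64)size);
+//   EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext);
+//   free(pContext);
+//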
+
+typedef struct tagMcGenContext_microsoft_windows_mimalloc {
+ // The fields of this structure are subject to change and should
+ // not be accessed directly. To access the provider's REGHANDLE,
+ // use microsoft_windows_mimallocHandle_ForContext(pContext).
+ MCGEN_TRACE_CONTEXT Context;
+ ULONG EnableBits[1];
+} McGenContext_microsoft_windows_mimalloc;
+
+#define EventRegistermicrosoft_windows_mimalloc_ForContext(pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&ETW_MI_Provider, pContext)
+#define EventRegisterByGuidmicrosoft_windows_mimalloc_ForContext(Guid, pContext) _mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(&(Guid), pContext)
+#define EventUnregistermicrosoft_windows_mimalloc_ForContext(pContext) McGenEventUnregister(&(pContext)->Context.RegistrationHandle)
+
+//
+// Provider REGHANDLE for caller-allocated context.
+//
+#define microsoft_windows_mimallocHandle_ForContext(pContext) ((pContext)->Context.RegistrationHandle)
+
+// This function is for use by MC-generated code and should not be used directly.
+// Initialize and register the caller-allocated context.
+__inline
+ULONG __stdcall
+_mcgen_PASTE2(_mcgen_RegisterForContext_microsoft_windows_mimalloc_, MCGEN_EVENTREGISTER)(
+ _In_ LPCGUID pProviderId,
+ _Out_ McGenContext_microsoft_windows_mimalloc* pContext)
+{
+ RtlZeroMemory(pContext, sizeof(*pContext));
+ pContext->Context.Logger = (ULONG_PTR)ETW_MI_Provider_Traits;
+ pContext->Context.EnableBitsCount = 1;
+ pContext->Context.EnableBitMask = pContext->EnableBits;
+ pContext->Context.EnableKeyWords = microsoft_windows_mimallocKeywords;
+ pContext->Context.EnableLevel = microsoft_windows_mimallocLevels;
+ return McGenEventRegister(
+ pProviderId,
+ McGenControlCallbackV2,
+ &pContext->Context,
+ &pContext->Context.RegistrationHandle);
+}
+
+// This function is for use by MC-generated code and should not be used directly.
+// Trigger a compile error if called with the wrong parameter type.
+FORCEINLINE
+_Ret_ McGenContext_microsoft_windows_mimalloc*
+_mcgen_CheckContextType_microsoft_windows_mimalloc(_In_ McGenContext_microsoft_windows_mimalloc* pContext)
+{
+ return pContext;
+}
+
+#endif // MCGEN_ENABLE_FORCONTEXT_CODE_GENERATION
+
+//
+// Enablement check macro for event "ETW_MI_ALLOC"
+//
+#define EventEnabledETW_MI_ALLOC() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0)
+#define EventEnabledETW_MI_ALLOC_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0)
+
+//
+// Event write macros for event "ETW_MI_ALLOC"
+//
+#define EventWriteETW_MI_ALLOC(Address, Size) \
+ MCGEN_EVENT_ENABLED(ETW_MI_ALLOC) \
+ ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size) : 0
+#define EventWriteETW_MI_ALLOC_AssumeEnabled(Address, Size) \
+ _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&ETW_MI_Provider_Context, &ETW_MI_ALLOC, Address, Size)
+#define EventWriteETW_MI_ALLOC_ForContext(pContext, Address, Size) \
+ MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_ALLOC) \
+ ? _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&(pContext)->Context, &ETW_MI_ALLOC, Address, Size) : 0
+#define EventWriteETW_MI_ALLOC_ForContextAssumeEnabled(pContext, Address, Size) \
+ _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_ALLOC, Address, Size)
+
+// This macro is for use by MC-generated code and should not be used directly.
+#define _mcgen_TEMPLATE_FOR_ETW_MI_ALLOC _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER)
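+
+// A hypothetical call site, once the provider has been registered with
+// EventRegistermicrosoft_windows_mimalloc(), would emit the event as
+//   EventWriteETW_MI_ALLOC((unsigned __int64)(uintptr_t)p, (unsigned __int64)size);
+// and the write is a no-op whenever the provider is not enabled.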
+
+//
+// Enablement check macro for event "ETW_MI_FREE"
+//
+#define EventEnabledETW_MI_FREE() _mcgen_EVENT_BIT_SET(microsoft_windows_mimallocEnableBits, 0)
+#define EventEnabledETW_MI_FREE_ForContext(pContext) _mcgen_EVENT_BIT_SET(_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->EnableBits, 0)
+
+//
+// Event write macros for event "ETW_MI_FREE"
+//
+#define EventWriteETW_MI_FREE(Address, Size) \
+ MCGEN_EVENT_ENABLED(ETW_MI_FREE) \
+ ? _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size) : 0
+#define EventWriteETW_MI_FREE_AssumeEnabled(Address, Size) \
+ _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&ETW_MI_Provider_Context, &ETW_MI_FREE, Address, Size)
+#define EventWriteETW_MI_FREE_ForContext(pContext, Address, Size) \
+ MCGEN_EVENT_ENABLED_FORCONTEXT(pContext, ETW_MI_FREE) \
+ ? _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&(pContext)->Context, &ETW_MI_FREE, Address, Size) : 0
+#define EventWriteETW_MI_FREE_ForContextAssumeEnabled(pContext, Address, Size) \
+ _mcgen_TEMPLATE_FOR_ETW_MI_FREE(&_mcgen_CheckContextType_microsoft_windows_mimalloc(pContext)->Context, &ETW_MI_FREE, Address, Size)
+
+// This macro is for use by MC-generated code and should not be used directly.
+#define _mcgen_TEMPLATE_FOR_ETW_MI_FREE _mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER)
+
+#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION
+
+//
+// MCGEN_DISABLE_PROVIDER_CODE_GENERATION macro:
+// Define this macro to have the compiler skip the generated functions in this
+// header.
+//
+#ifndef MCGEN_DISABLE_PROVIDER_CODE_GENERATION
+
+//
+// Template Functions
+//
+
+//
+// Function for template "ETW_CUSTOM_HEAP_ALLOC_DATA" (and possibly others).
+// This function is for use by MC-generated code and should not be used directly.
+//
+#ifndef McTemplateU0xx_def
+#define McTemplateU0xx_def
+ETW_INLINE
+ULONG
+_mcgen_PASTE2(McTemplateU0xx_, MCGEN_EVENTWRITETRANSFER)(
+ _In_ PMCGEN_TRACE_CONTEXT Context,
+ _In_ PCEVENT_DESCRIPTOR Descriptor,
+ _In_ const unsigned __int64 _Arg0,
+ _In_ const unsigned __int64 _Arg1
+ )
+{
+#define McTemplateU0xx_ARGCOUNT 2
+
+ EVENT_DATA_DESCRIPTOR EventData[McTemplateU0xx_ARGCOUNT + 1];
+
+ EventDataDescCreate(&EventData[1],&_Arg0, sizeof(const unsigned __int64) );
+
+ EventDataDescCreate(&EventData[2],&_Arg1, sizeof(const unsigned __int64) );
+
+ return McGenEventWrite(Context, Descriptor, NULL, McTemplateU0xx_ARGCOUNT + 1, EventData);
+}
+#endif // McTemplateU0xx_def
+
+#endif // MCGEN_DISABLE_PROVIDER_CODE_GENERATION
+
+#if defined(__cplusplus)
+}
+#endif
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.man b/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.man
new file mode 100644
index 000000000..cfd1f8a9e
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/windows/etw.man
Binary files differ
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/windows/prim.c b/source/luametatex/source/libraries/mimalloc/src/prim/windows/prim.c
new file mode 100644
index 000000000..e3dc33e32
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/windows/prim.c
@@ -0,0 +1,607 @@
+/* ----------------------------------------------------------------------------
+Copyright (c) 2018-2023, Microsoft Research, Daan Leijen
+This is free software; you can redistribute it and/or modify it under the
+terms of the MIT license. A copy of the license can be found in the file
+"LICENSE" at the root of this distribution.
+-----------------------------------------------------------------------------*/
+
+// This file is included in `src/prim/prim.c`
+
+#include "mimalloc.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
+#include <stdio.h> // fputs, stderr
+
+
+//---------------------------------------------
+// Dynamically bind Windows API entry points for portability
+//---------------------------------------------
+
+// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016.
+// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility)
+// NtAllocateVirtualMemoryEx is used for huge OS page allocation (1GiB)
+// We define a minimal MEM_EXTENDED_PARAMETER ourselves in order to be able to compile with older SDK's.
+typedef enum MI_MEM_EXTENDED_PARAMETER_TYPE_E {
+ MiMemExtendedParameterInvalidType = 0,
+ MiMemExtendedParameterAddressRequirements,
+ MiMemExtendedParameterNumaNode,
+ MiMemExtendedParameterPartitionHandle,
+ MiMemExtendedParameterUserPhysicalHandle,
+ MiMemExtendedParameterAttributeFlags,
+ MiMemExtendedParameterMax
+} MI_MEM_EXTENDED_PARAMETER_TYPE;
+
+typedef struct DECLSPEC_ALIGN(8) MI_MEM_EXTENDED_PARAMETER_S {
+ struct { DWORD64 Type : 8; DWORD64 Reserved : 56; } Type;
+ union { DWORD64 ULong64; PVOID Pointer; SIZE_T Size; HANDLE Handle; DWORD ULong; } Arg;
+} MI_MEM_EXTENDED_PARAMETER;
+
+typedef struct MI_MEM_ADDRESS_REQUIREMENTS_S {
+ PVOID LowestStartingAddress;
+ PVOID HighestEndingAddress;
+ SIZE_T Alignment;
+} MI_MEM_ADDRESS_REQUIREMENTS;
+
+#define MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE 0x00000010
+
+#include <winternl.h>
+typedef PVOID (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG);
+typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, MI_MEM_EXTENDED_PARAMETER*, ULONG);
+static PVirtualAlloc2 pVirtualAlloc2 = NULL;
+static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;
+
+// Similarly, GetNumaProcessorNodeEx is only supported since Windows 7
+typedef struct MI_PROCESSOR_NUMBER_S { WORD Group; BYTE Number; BYTE Reserved; } MI_PROCESSOR_NUMBER;
+
+typedef VOID (__stdcall *PGetCurrentProcessorNumberEx)(MI_PROCESSOR_NUMBER* ProcNumber);
+typedef BOOL (__stdcall *PGetNumaProcessorNodeEx)(MI_PROCESSOR_NUMBER* Processor, PUSHORT NodeNumber);
+typedef BOOL (__stdcall* PGetNumaNodeProcessorMaskEx)(USHORT Node, PGROUP_AFFINITY ProcessorMask);
+typedef BOOL (__stdcall *PGetNumaProcessorNode)(UCHAR Processor, PUCHAR NodeNumber);
+static PGetCurrentProcessorNumberEx pGetCurrentProcessorNumberEx = NULL;
+static PGetNumaProcessorNodeEx pGetNumaProcessorNodeEx = NULL;
+static PGetNumaNodeProcessorMaskEx pGetNumaNodeProcessorMaskEx = NULL;
+static PGetNumaProcessorNode pGetNumaProcessorNode = NULL;
+
+//---------------------------------------------
+// Enable large page support dynamically (if possible)
+//---------------------------------------------
+
+static bool win_enable_large_os_pages(size_t* large_page_size)
+{
+ static bool large_initialized = false;
+ if (large_initialized) return (_mi_os_large_page_size() > 0);
+ large_initialized = true;
+
+ // Try to see if large OS pages are supported
+ // To use large pages on Windows, we first need access permission
+ // Set "Lock pages in memory" permission in the group policy editor
+ // <https://devblogs.microsoft.com/oldnewthing/20110128-00/?p=11643>
+ unsigned long err = 0;
+ HANDLE token = NULL;
+ BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
+ if (ok) {
+ TOKEN_PRIVILEGES tp;
+ ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid);
+ if (ok) {
+ tp.PrivilegeCount = 1;
+ tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
+ ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
+ if (ok) {
+ err = GetLastError();
+ ok = (err == ERROR_SUCCESS);
+ if (ok && large_page_size != NULL) {
+ *large_page_size = GetLargePageMinimum();
+ }
+ }
+ }
+ CloseHandle(token);
+ }
+ if (!ok) {
+ if (err == 0) err = GetLastError();
+ _mi_warning_message("cannot enable large OS page support, error %lu\n", err);
+ }
+ return (ok!=0);
+}
+
+
+//---------------------------------------------
+// Initialize
+//---------------------------------------------
+
+void _mi_prim_mem_init( mi_os_mem_config_t* config )
+{
+ config->has_overcommit = false;
+ config->must_free_whole = true;
+ // get the page size
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ if (si.dwPageSize > 0) { config->page_size = si.dwPageSize; }
+ if (si.dwAllocationGranularity > 0) { config->alloc_granularity = si.dwAllocationGranularity; }
+ // get the VirtualAlloc2 function
+ HINSTANCE hDll;
+ hDll = LoadLibrary(TEXT("kernelbase.dll"));
+ if (hDll != NULL) {
+ // use VirtualAlloc2FromApp if possible as it is available to Windows store apps
+ pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp");
+ if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2");
+ FreeLibrary(hDll);
+ }
+ // NtAllocateVirtualMemoryEx is used for huge page allocation
+ hDll = LoadLibrary(TEXT("ntdll.dll"));
+ if (hDll != NULL) {
+ pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
+ FreeLibrary(hDll);
+ }
+ // Try to use Win7+ numa API
+ hDll = LoadLibrary(TEXT("kernel32.dll"));
+ if (hDll != NULL) {
+ pGetCurrentProcessorNumberEx = (PGetCurrentProcessorNumberEx)(void (*)(void))GetProcAddress(hDll, "GetCurrentProcessorNumberEx");
+ pGetNumaProcessorNodeEx = (PGetNumaProcessorNodeEx)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNodeEx");
+ pGetNumaNodeProcessorMaskEx = (PGetNumaNodeProcessorMaskEx)(void (*)(void))GetProcAddress(hDll, "GetNumaNodeProcessorMaskEx");
+ pGetNumaProcessorNode = (PGetNumaProcessorNode)(void (*)(void))GetProcAddress(hDll, "GetNumaProcessorNode");
+ FreeLibrary(hDll);
+ }
+ if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
+ win_enable_large_os_pages(&config->large_page_size);
+ }
+}
+
+
+//---------------------------------------------
+// Free
+//---------------------------------------------
+
+int _mi_prim_free(void* addr, size_t size ) {
+ MI_UNUSED(size);
+ DWORD errcode = 0;
+ bool err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
+ if (err) { errcode = GetLastError(); }
+ if (errcode == ERROR_INVALID_ADDRESS) {
+ // In mi_os_mem_alloc_aligned the fallback path may have returned a pointer inside
+ // the memory region returned by VirtualAlloc; in that case we need to free using
+ // the start of the region.
+ MEMORY_BASIC_INFORMATION info = { 0 };
+ VirtualQuery(addr, &info, sizeof(info));
+ if (info.AllocationBase < addr && ((uint8_t*)addr - (uint8_t*)info.AllocationBase) < (ptrdiff_t)MI_SEGMENT_SIZE) {
+ errcode = 0;
+ err = (VirtualFree(info.AllocationBase, 0, MEM_RELEASE) == 0);
+ if (err) { errcode = GetLastError(); }
+ }
+ }
+ return (int)errcode;
+}
+
+
+//---------------------------------------------
+// VirtualAlloc
+//---------------------------------------------
+
+static void* win_virtual_alloc_prim(void* addr, size_t size, size_t try_alignment, DWORD flags) {
+ #if (MI_INTPTR_SIZE >= 8)
+ // on 64-bit systems, try to use the virtual address area after 2TiB for 4MiB aligned allocations
+ if (addr == NULL) {
+ void* hint = _mi_os_get_aligned_hint(try_alignment,size);
+ if (hint != NULL) {
+ void* p = VirtualAlloc(hint, size, flags, PAGE_READWRITE);
+ if (p != NULL) return p;
+ _mi_verbose_message("warning: unable to allocate hinted aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), hint, try_alignment, flags);
+ // fall through on error
+ }
+ }
+ #endif
+ // on modern Windows, try to use VirtualAlloc2 for aligned allocation
+ if (try_alignment > 1 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
+ MI_MEM_ADDRESS_REQUIREMENTS reqs = { 0, 0, 0 };
+ reqs.Alignment = try_alignment;
+ MI_MEM_EXTENDED_PARAMETER param = { {0, 0}, {0} };
+ param.Type.Type = MiMemExtendedParameterAddressRequirements;
+ param.Arg.Pointer = &reqs;
+ void* p = (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
+ if (p != NULL) return p;
+ _mi_warning_message("unable to allocate aligned OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x)\n", size, GetLastError(), addr, try_alignment, flags);
+ // fall through on error
+ }
+ // last resort
+ return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
+}
+
+static void* win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
+ mi_assert_internal(!(large_only && !allow_large));
+ static _Atomic(size_t) large_page_try_ok; // = 0;
+ void* p = NULL;
+ // Try to allocate large OS pages (2MiB) if allowed or required.
+ if ((large_only || _mi_os_use_large_page(size, try_alignment))
+ && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
+ size_t try_ok = mi_atomic_load_acquire(&large_page_try_ok);
+ if (!large_only && try_ok > 0) {
+ // if a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
+ // therefore, once a large page allocation fails, we don't try again for the next `large_page_try_ok` allocations.
+ mi_atomic_cas_strong_acq_rel(&large_page_try_ok, &try_ok, try_ok - 1);
+ }
+ else {
+ // large OS pages must always reserve and commit.
+ *is_large = true;
+ p = win_virtual_alloc_prim(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
+ if (large_only) return p;
+ // fall back to non-large page allocation on error (`p == NULL`).
+ if (p == NULL) {
+ mi_atomic_store_release(&large_page_try_ok,10UL); // on error, don't try again for the next N allocations
+ }
+ }
+ }
+ // Fall back to regular page allocation
+ if (p == NULL) {
+ *is_large = ((flags&MEM_LARGE_PAGES) != 0);
+ p = win_virtual_alloc_prim(addr, size, try_alignment, flags);
+ }
+ //if (p == NULL) { _mi_warning_message("unable to allocate OS memory (%zu bytes, error code: 0x%x, address: %p, alignment: %zu, flags: 0x%x, large only: %d, allow large: %d)\n", size, GetLastError(), addr, try_alignment, flags, large_only, allow_large); }
+ return p;
+}
+
+int _mi_prim_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, void** addr) {
+ mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
+ mi_assert_internal(commit || !allow_large);
+ mi_assert_internal(try_alignment > 0);
+ int flags = MEM_RESERVE;
+ if (commit) { flags |= MEM_COMMIT; }
+ *addr = win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
+ return (*addr != NULL ? 0 : (int)GetLastError());
+}
+
+
+//---------------------------------------------
+// Commit/Reset/Protect
+//---------------------------------------------
+#ifdef _MSC_VER
+#pragma warning(disable:6250) // suppress warning calling VirtualFree without MEM_RELEASE (for decommit)
+#endif
+
+int _mi_prim_commit(void* addr, size_t size, bool commit) {
+ if (commit) {
+ void* p = VirtualAlloc(addr, size, MEM_COMMIT, PAGE_READWRITE);
+ return (p == addr ? 0 : (int)GetLastError());
+ }
+ else {
+ BOOL ok = VirtualFree(addr, size, MEM_DECOMMIT);
+ return (ok ? 0 : (int)GetLastError());
+ }
+}
+
+int _mi_prim_reset(void* addr, size_t size) {
+ void* p = VirtualAlloc(addr, size, MEM_RESET, PAGE_READWRITE);
+ mi_assert_internal(p == addr);
+ #if 1
+ if (p == addr && addr != NULL) {
+ VirtualUnlock(addr,size); // VirtualUnlock after MEM_RESET removes the memory from the working set
+ }
+ #endif
+ return (p == addr ? 0 : (int)GetLastError());
+}
+
+int _mi_prim_protect(void* addr, size_t size, bool protect) {
+ DWORD oldprotect = 0;
+ BOOL ok = VirtualProtect(addr, size, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect);
+ return (ok ? 0 : (int)GetLastError());
+}
+
+
+//---------------------------------------------
+// Huge page allocation
+//---------------------------------------------
+
+static void* _mi_prim_alloc_huge_os_pagesx(void* hint_addr, size_t size, int numa_node)
+{
+ const DWORD flags = MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE;
+
+ win_enable_large_os_pages(NULL);
+
+ MI_MEM_EXTENDED_PARAMETER params[3] = { {{0,0},{0}},{{0,0},{0}},{{0,0},{0}} };
+ // on modern Windows, try to use NtAllocateVirtualMemoryEx for 1GiB huge pages
+ static bool mi_huge_pages_available = true;
+ if (pNtAllocateVirtualMemoryEx != NULL && mi_huge_pages_available) {
+ params[0].Type.Type = MiMemExtendedParameterAttributeFlags;
+ params[0].Arg.ULong64 = MI_MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
+ ULONG param_count = 1;
+ if (numa_node >= 0) {
+ param_count++;
+ params[1].Type.Type = MiMemExtendedParameterNumaNode;
+ params[1].Arg.ULong = (unsigned)numa_node;
+ }
+ SIZE_T psize = size;
+ void* base = hint_addr;
+ NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, params, param_count);
+ if (err == 0 && base != NULL) {
+ return base;
+ }
+ else {
+ // fall back to regular large pages
+ mi_huge_pages_available = false; // don't try further huge pages
+ _mi_warning_message("unable to allocate using huge (1GiB) pages, trying large (2MiB) pages instead (status 0x%lx)\n", err);
+ }
+ }
+ // on modern Windows, try to use VirtualAlloc2 for NUMA-aware large OS page allocation
+ if (pVirtualAlloc2 != NULL && numa_node >= 0) {
+ params[0].Type.Type = MiMemExtendedParameterNumaNode;
+ params[0].Arg.ULong = (unsigned)numa_node;
+ return (*pVirtualAlloc2)(GetCurrentProcess(), hint_addr, size, flags, PAGE_READWRITE, params, 1);
+ }
+
+ // otherwise use a regular VirtualAlloc on older Windows
+ return VirtualAlloc(hint_addr, size, flags, PAGE_READWRITE);
+}
+
+int _mi_prim_alloc_huge_os_pages(void* hint_addr, size_t size, int numa_node, void** addr) {
+ *addr = _mi_prim_alloc_huge_os_pagesx(hint_addr,size,numa_node);
+ return (*addr != NULL ? 0 : (int)GetLastError());
+}
+
+
+//---------------------------------------------
+// Numa nodes
+//---------------------------------------------
+
+size_t _mi_prim_numa_node(void) {
+ USHORT numa_node = 0;
+ if (pGetCurrentProcessorNumberEx != NULL && pGetNumaProcessorNodeEx != NULL) {
+ // Extended API is supported
+ MI_PROCESSOR_NUMBER pnum;
+ (*pGetCurrentProcessorNumberEx)(&pnum);
+ USHORT nnode = 0;
+ BOOL ok = (*pGetNumaProcessorNodeEx)(&pnum, &nnode);
+ if (ok) { numa_node = nnode; }
+ }
+ else if (pGetNumaProcessorNode != NULL) {
+ // Vista or earlier, use older API that is limited to 64 processors. Issue #277
+ DWORD pnum = GetCurrentProcessorNumber();
+ UCHAR nnode = 0;
+ BOOL ok = pGetNumaProcessorNode((UCHAR)pnum, &nnode);
+ if (ok) { numa_node = nnode; }
+ }
+ return numa_node;
+}
+
+size_t _mi_prim_numa_node_count(void) {
+ ULONG numa_max = 0;
+ GetNumaHighestNodeNumber(&numa_max);
+ // find the highest node number that has actual processors assigned to it. Issue #282
+ while(numa_max > 0) {
+ if (pGetNumaNodeProcessorMaskEx != NULL) {
+ // Extended API is supported
+ GROUP_AFFINITY affinity;
+ if ((*pGetNumaNodeProcessorMaskEx)((USHORT)numa_max, &affinity)) {
+ if (affinity.Mask != 0) break; // found the maximum non-empty node
+ }
+ }
+ else {
+ // Vista or earlier, use older API that is limited to 64 processors.
+ ULONGLONG mask;
+ if (GetNumaNodeProcessorMask((UCHAR)numa_max, &mask)) {
+ if (mask != 0) break; // found the maximum non-empty node
+ };
+ }
+ // max node was invalid or had no processor assigned, try again
+ numa_max--;
+ }
+ return ((size_t)numa_max + 1);
+}
+
+
+//----------------------------------------------------------------
+// Clock
+//----------------------------------------------------------------
+
+static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) {
+ static LARGE_INTEGER mfreq; // = 0
+ if (mfreq.QuadPart == 0LL) {
+ LARGE_INTEGER f;
+ QueryPerformanceFrequency(&f);
+ mfreq.QuadPart = f.QuadPart/1000LL;
+ if (mfreq.QuadPart == 0) mfreq.QuadPart = 1;
+ }
+ return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart);
+}
+
+mi_msecs_t _mi_prim_clock_now(void) {
+ LARGE_INTEGER t;
+ QueryPerformanceCounter(&t);
+ return mi_to_msecs(t);
+}
+
+
+//----------------------------------------------------------------
+// Process Info
+//----------------------------------------------------------------
+
+#include <windows.h>
+#include <psapi.h>
+
+static mi_msecs_t filetime_msecs(const FILETIME* ftime) {
+ ULARGE_INTEGER i;
+ i.LowPart = ftime->dwLowDateTime;
+ i.HighPart = ftime->dwHighDateTime;
+ mi_msecs_t msecs = (i.QuadPart / 10000); // FILETIME is in 100-nanosecond units
+ return msecs;
+}
+
+typedef BOOL (WINAPI *PGetProcessMemoryInfo)(HANDLE, PPROCESS_MEMORY_COUNTERS, DWORD);
+static PGetProcessMemoryInfo pGetProcessMemoryInfo = NULL;
+
+void _mi_prim_process_info(mi_process_info_t* pinfo)
+{
+ FILETIME ct;
+ FILETIME ut;
+ FILETIME st;
+ FILETIME et;
+ GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut);
+ pinfo->utime = filetime_msecs(&ut);
+ pinfo->stime = filetime_msecs(&st);
+
+ // load psapi on demand
+ if (pGetProcessMemoryInfo == NULL) {
+ HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll"));
+ if (hDll != NULL) {
+ pGetProcessMemoryInfo = (PGetProcessMemoryInfo)(void (*)(void))GetProcAddress(hDll, "GetProcessMemoryInfo");
+ }
+ }
+
+ // get process info
+ PROCESS_MEMORY_COUNTERS info;
+ memset(&info, 0, sizeof(info));
+ if (pGetProcessMemoryInfo != NULL) {
+ pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info));
+ }
+ pinfo->current_rss = (size_t)info.WorkingSetSize;
+ pinfo->peak_rss = (size_t)info.PeakWorkingSetSize;
+ pinfo->current_commit = (size_t)info.PagefileUsage;
+ pinfo->peak_commit = (size_t)info.PeakPagefileUsage;
+ pinfo->page_faults = (size_t)info.PageFaultCount;
+}
+
+//----------------------------------------------------------------
+// Output
+//----------------------------------------------------------------
+
+void _mi_prim_out_stderr( const char* msg )
+{
+ // on Windows with redirection, the C runtime cannot handle locale-dependent output
+ // after the main thread closes, so we use direct console output.
+ if (!_mi_preloading()) {
+ // _cputs(msg); // _cputs cannot be used as it aborts if it fails to lock the console
+ static HANDLE hcon = INVALID_HANDLE_VALUE;
+ static bool hconIsConsole;
+ if (hcon == INVALID_HANDLE_VALUE) {
+ CONSOLE_SCREEN_BUFFER_INFO sbi;
+ hcon = GetStdHandle(STD_ERROR_HANDLE);
+ hconIsConsole = ((hcon != INVALID_HANDLE_VALUE) && GetConsoleScreenBufferInfo(hcon, &sbi));
+ }
+ const size_t len = _mi_strlen(msg);
+ if (len > 0 && len < UINT32_MAX) {
+ DWORD written = 0;
+ if (hconIsConsole) {
+ WriteConsoleA(hcon, msg, (DWORD)len, &written, NULL);
+ }
+ else if (hcon != INVALID_HANDLE_VALUE) {
+ // use direct write if stderr was redirected
+ WriteFile(hcon, msg, (DWORD)len, &written, NULL);
+ }
+ else {
+ // finally fall back to fputs after all
+ fputs(msg, stderr);
+ }
+ }
+ }
+}
+
+
+//----------------------------------------------------------------
+// Environment
+//----------------------------------------------------------------
+
+// On Windows use GetEnvironmentVariable instead of getenv to work
+// reliably even when this is invoked before the C runtime is initialized.
+// i.e. when `_mi_preloading() == true`.
+// Note: on Windows, environment names are not case-sensitive.
+bool _mi_prim_getenv(const char* name, char* result, size_t result_size) {
+ result[0] = 0;
+ size_t len = GetEnvironmentVariableA(name, result, (DWORD)result_size);
+ return (len > 0 && len < result_size);
+}
+
+
+
+//----------------------------------------------------------------
+// Random
+//----------------------------------------------------------------
+
+#if defined(MI_USE_RTLGENRANDOM) // || defined(__cplusplus)
+// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using
+// dynamic overriding, we observed it can raise an exception when compiled with C++, and
+// sometimes deadlocks when also running under the VS debugger.
+// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom.
+// To be continued..
+#pragma comment (lib,"advapi32.lib")
+#define RtlGenRandom SystemFunction036
+mi_decl_externc BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer, ULONG RandomBufferLength);
+
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ return (RtlGenRandom(buf, (ULONG)buf_len) != 0);
+}
+
+#else
+
+#ifndef BCRYPT_USE_SYSTEM_PREFERRED_RNG
+#define BCRYPT_USE_SYSTEM_PREFERRED_RNG 0x00000002
+#endif
+
+typedef LONG (NTAPI *PBCryptGenRandom)(HANDLE, PUCHAR, ULONG, ULONG);
+static PBCryptGenRandom pBCryptGenRandom = NULL;
+
+bool _mi_prim_random_buf(void* buf, size_t buf_len) {
+ if (pBCryptGenRandom == NULL) {
+ HINSTANCE hDll = LoadLibrary(TEXT("bcrypt.dll"));
+ if (hDll != NULL) {
+ pBCryptGenRandom = (PBCryptGenRandom)(void (*)(void))GetProcAddress(hDll, "BCryptGenRandom");
+ }
+ if (pBCryptGenRandom == NULL) return false;
+ }
+ return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0);
+}
+
+#endif // MI_USE_RTLGENRANDOM
+
+//----------------------------------------------------------------
+// Thread init/done
+//----------------------------------------------------------------
+
+#if !defined(MI_SHARED_LIB)
+
+// use fiber-local storage (FLS) keys to detect thread ending
+#include <fibersapi.h>
+#if (_WIN32_WINNT < 0x600) // before Windows Vista
+WINBASEAPI DWORD WINAPI FlsAlloc( _In_opt_ PFLS_CALLBACK_FUNCTION lpCallback );
+WINBASEAPI PVOID WINAPI FlsGetValue( _In_ DWORD dwFlsIndex );
+WINBASEAPI BOOL WINAPI FlsSetValue( _In_ DWORD dwFlsIndex, _In_opt_ PVOID lpFlsData );
+WINBASEAPI BOOL WINAPI FlsFree(_In_ DWORD dwFlsIndex);
+#endif
+
+static DWORD mi_fls_key = (DWORD)(-1);
+
+static void NTAPI mi_fls_done(PVOID value) {
+ mi_heap_t* heap = (mi_heap_t*)value;
+ if (heap != NULL) {
+ _mi_thread_done(heap);
+ FlsSetValue(mi_fls_key, NULL); // prevent recursion as _mi_thread_done may set it back to the main heap, issue #672
+ }
+}
+
+void _mi_prim_thread_init_auto_done(void) {
+ mi_fls_key = FlsAlloc(&mi_fls_done);
+}
+
+void _mi_prim_thread_done_auto_done(void) {
+ // call thread-done on all threads (except the main thread) to prevent
+ // dangling callback pointer if statically linked with a DLL; Issue #208
+ FlsFree(mi_fls_key);
+}
+
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ mi_assert_internal(mi_fls_key != (DWORD)(-1));
+ FlsSetValue(mi_fls_key, heap);
+}
+
+#else
+
+// DLL: nothing to do, as thread_done is handled through the DLL_THREAD_DETACH event in that case.
+
+void _mi_prim_thread_init_auto_done(void) {
+}
+
+void _mi_prim_thread_done_auto_done(void) {
+}
+
+void _mi_prim_thread_associate_default_heap(mi_heap_t* heap) {
+ MI_UNUSED(heap);
+}
+
+#endif
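Aside (not part of the patch): the statically linked branch above hinges on fiber-local storage callbacks to notice thread exit. A minimal, self-contained sketch of that mechanism using only the documented Win32 calls (`FlsAlloc`, `FlsSetValue`, `FlsFree`); the names `on_thread_done` and `worker` are illustrative, not part of mimalloc.

```
#include <windows.h>
#include <stdio.h>

static DWORD fls_key;

static void NTAPI on_thread_done(PVOID value) {
    // invoked by the OS when a thread that stored a non-NULL value on the key exits
    printf("thread exit callback, value=%p\n", value);
}

static DWORD WINAPI worker(LPVOID arg) {
    FlsSetValue(fls_key, arg);   // associate per-thread state with the key
    return 0;                    // returning ends the thread and fires on_thread_done
}

int main(void) {
    fls_key = FlsAlloc(&on_thread_done);
    HANDLE h = CreateThread(NULL, 0, worker, (LPVOID)(ULONG_PTR)1, 0, NULL);
    WaitForSingleObject(h, INFINITE);
    CloseHandle(h);
    FlsFree(fls_key);            // releases the key; pending callbacks also run here
    return 0;
}
```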
diff --git a/source/luametatex/source/libraries/mimalloc/src/prim/windows/readme.md b/source/luametatex/source/libraries/mimalloc/src/prim/windows/readme.md
new file mode 100644
index 000000000..217c3d174
--- /dev/null
+++ b/source/luametatex/source/libraries/mimalloc/src/prim/windows/readme.md
@@ -0,0 +1,17 @@
+## Primitives:
+
+- `prim.c` contains Windows primitives for OS allocation.
+
+## Event Tracing for Windows (ETW)
+
+- `etw.h` is generated from `etw.man`, which contains the manifest for mimalloc events
+  (event 100 is an allocation, event 101 a free).
+
+- `etw-mimalloc.wprp` is a profile for the Windows Performance Recorder (WPR).
+ In an admin prompt, you can use:
+ ```
+ > wpr -start src\prim\windows\etw-mimalloc.wprp -filemode
+ > <my mimalloc program>
+ > wpr -stop test.etl
+ ```
+ and then open `test.etl` in the Windows Performance Analyzer (WPA). \ No newline at end of file
diff --git a/source/luametatex/source/libraries/mimalloc/src/random.c b/source/luametatex/source/libraries/mimalloc/src/random.c
index 06d4ba4ad..4fc8b2f8f 100644
--- a/source/luametatex/source/libraries/mimalloc/src/random.c
+++ b/source/luametatex/source/libraries/mimalloc/src/random.c
@@ -4,14 +4,10 @@ This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
-#ifndef _DEFAULT_SOURCE
-#define _DEFAULT_SOURCE // for syscall() on Linux
-#endif
-
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-
-#include <string.h> // memset
+#include "mimalloc/internal.h"
+#include "mimalloc/prim.h" // _mi_prim_random_buf
+#include <string.h> // memset
/* ----------------------------------------------------------------------------
We use our own PRNG to keep predictable performance of random number generation
@@ -158,159 +154,13 @@ uintptr_t _mi_random_next(mi_random_ctx_t* ctx) {
/* ----------------------------------------------------------------------------
-To initialize a fresh random context we rely on the OS:
-- Windows : BCryptGenRandom (or RtlGenRandom)
-- macOS : CCRandomGenerateBytes, arc4random_buf
-- bsd,wasi : arc4random_buf
-- Linux : getrandom,/dev/urandom
+To initialize a fresh random context we rely on the OS primitive `_mi_prim_random_buf` (see `mimalloc/prim.h`).
If we cannot get good randomness, we fall back to weak randomness based on a timer and ASLR.
-----------------------------------------------------------------------------*/
-#if defined(_WIN32)
-
-#if defined(MI_USE_RTLGENRANDOM) // || defined(__cplusplus)
-// We prefer to use BCryptGenRandom instead of (the unofficial) RtlGenRandom but when using
-// dynamic overriding, we observed it can raise an exception when compiled with C++, and
-// sometimes deadlocks when also running under the VS debugger.
-// In contrast, issue #623 implies that on Windows Server 2019 we need to use BCryptGenRandom.
-// To be continued..
-#pragma comment (lib,"advapi32.lib")
-#define RtlGenRandom SystemFunction036
-#ifdef __cplusplus
-extern "C" {
-#endif
-BOOLEAN NTAPI RtlGenRandom(PVOID RandomBuffer, ULONG RandomBufferLength);
-#ifdef __cplusplus
-}
-#endif
-static bool os_random_buf(void* buf, size_t buf_len) {
- return (RtlGenRandom(buf, (ULONG)buf_len) != 0);
-}
-#else
-
-#ifndef BCRYPT_USE_SYSTEM_PREFERRED_RNG
-#define BCRYPT_USE_SYSTEM_PREFERRED_RNG 0x00000002
-#endif
-
-typedef LONG (NTAPI *PBCryptGenRandom)(HANDLE, PUCHAR, ULONG, ULONG);
-static PBCryptGenRandom pBCryptGenRandom = NULL;
-
-static bool os_random_buf(void* buf, size_t buf_len) {
- if (pBCryptGenRandom == NULL) {
- HINSTANCE hDll = LoadLibrary(TEXT("bcrypt.dll"));
- if (hDll != NULL) {
- pBCryptGenRandom = (PBCryptGenRandom)(void (*)(void))GetProcAddress(hDll, "BCryptGenRandom");
- }
- }
- if (pBCryptGenRandom == NULL) {
- return false;
- }
- else {
- return (pBCryptGenRandom(NULL, (PUCHAR)buf, (ULONG)buf_len, BCRYPT_USE_SYSTEM_PREFERRED_RNG) >= 0);
- }
-}
-#endif
-
-#elif defined(__APPLE__)
-#include <AvailabilityMacros.h>
-#if defined(MAC_OS_X_VERSION_10_10) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_10
-#include <CommonCrypto/CommonCryptoError.h>
-#include <CommonCrypto/CommonRandom.h>
-#endif
-static bool os_random_buf(void* buf, size_t buf_len) {
- #if defined(MAC_OS_X_VERSION_10_15) && MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_15
- // We prefere CCRandomGenerateBytes as it returns an error code while arc4random_buf
- // may fail silently on macOS. See PR #390, and <https://opensource.apple.com/source/Libc/Libc-1439.40.11/gen/FreeBSD/arc4random.c.auto.html>
- return (CCRandomGenerateBytes(buf, buf_len) == kCCSuccess);
- #else
- // fall back on older macOS
- arc4random_buf(buf, buf_len);
- return true;
- #endif
-}
-
-#elif defined(__ANDROID__) || defined(__DragonFly__) || \
- defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) || \
- defined(__sun) // todo: what to use with __wasi__?
-#include <stdlib.h>
-static bool os_random_buf(void* buf, size_t buf_len) {
- arc4random_buf(buf, buf_len);
- return true;
-}
-#elif defined(__linux__) || defined(__HAIKU__)
-#if defined(__linux__)
-#include <sys/syscall.h>
-#endif
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <errno.h>
-static bool os_random_buf(void* buf, size_t buf_len) {
- // Modern Linux provides `getrandom` but different distributions either use `sys/random.h` or `linux/random.h`
- // and for the latter the actual `getrandom` call is not always defined.
- // (see <https://stackoverflow.com/questions/45237324/why-doesnt-getrandom-compile>)
- // We therefore use a syscall directly and fall back dynamically to /dev/urandom when needed.
-#ifdef SYS_getrandom
- #ifndef GRND_NONBLOCK
- #define GRND_NONBLOCK (1)
- #endif
- static _Atomic(uintptr_t) no_getrandom; // = 0
- if (mi_atomic_load_acquire(&no_getrandom)==0) {
- ssize_t ret = syscall(SYS_getrandom, buf, buf_len, GRND_NONBLOCK);
- if (ret >= 0) return (buf_len == (size_t)ret);
- if (errno != ENOSYS) return false;
- mi_atomic_store_release(&no_getrandom, 1UL); // don't call again, and fall back to /dev/urandom
- }
-#endif
- int flags = O_RDONLY;
- #if defined(O_CLOEXEC)
- flags |= O_CLOEXEC;
- #endif
- int fd = open("/dev/urandom", flags, 0);
- if (fd < 0) return false;
- size_t count = 0;
- while(count < buf_len) {
- ssize_t ret = read(fd, (char*)buf + count, buf_len - count);
- if (ret<=0) {
- if (errno!=EAGAIN && errno!=EINTR) break;
- }
- else {
- count += ret;
- }
- }
- close(fd);
- return (count==buf_len);
-}
-#else
-static bool os_random_buf(void* buf, size_t buf_len) {
- return false;
-}
-#endif
-
-#if defined(_WIN32)
-#include <windows.h>
-#elif defined(__APPLE__)
-#include <mach/mach_time.h>
-#else
-#include <time.h>
-#endif
-
uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
uintptr_t x = (uintptr_t)&_mi_os_random_weak ^ extra_seed; // ASLR makes the address random
-
- #if defined(_WIN32)
- LARGE_INTEGER pcount;
- QueryPerformanceCounter(&pcount);
- x ^= (uintptr_t)(pcount.QuadPart);
- #elif defined(__APPLE__)
- x ^= (uintptr_t)mach_absolute_time();
- #else
- struct timespec time;
- clock_gettime(CLOCK_MONOTONIC, &time);
- x ^= (uintptr_t)time.tv_sec;
- x ^= (uintptr_t)time.tv_nsec;
- #endif
+ x ^= _mi_prim_clock_now();
// and do a few randomization steps
uintptr_t max = ((x ^ (x >> 17)) & 0x0F) + 1;
for (uintptr_t i = 0; i < max; i++) {
@@ -322,7 +172,7 @@ uintptr_t _mi_os_random_weak(uintptr_t extra_seed) {
static void mi_random_init_ex(mi_random_ctx_t* ctx, bool use_weak) {
uint8_t key[32];
- if (use_weak || !os_random_buf(key, sizeof(key))) {
+ if (use_weak || !_mi_prim_random_buf(key, sizeof(key))) {
// if we fail to get random data from the OS, we fall back to a
// weak random source based on the current time
#if !defined(__wasi__)
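Aside (not part of the patch): the hunk above keeps the fallback to weak randomness from a timer and ASLR when `_mi_prim_random_buf` fails. A rough standalone sketch of that idea; the name `weak_seed` is hypothetical, `time()` stands in for `_mi_prim_clock_now`, and the scrambling rounds approximate `_mi_random_shuffle`.

```
#include <stdint.h>
#include <time.h>
#include <stdio.h>

static uintptr_t weak_seed(uintptr_t extra) {
    uintptr_t x = (uintptr_t)&weak_seed ^ extra;  // ASLR makes the code address hard to guess
    x ^= (uintptr_t)time(NULL);                   // stir in a (coarse) clock value
    for (int i = 0; i < 8; i++) {                 // a few xorshift-style scrambling rounds
        x ^= x << 13; x ^= x >> 7; x ^= x << 17;
    }
    return x;
}

int main(void) {
    printf("weak seed: %zx\n", (size_t)weak_seed(42));
    return 0;
}
```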
diff --git a/source/luametatex/source/libraries/mimalloc/src/region.c b/source/luametatex/source/libraries/mimalloc/src/region.c
index 3571abb60..6c8ffb79c 100644
--- a/source/luametatex/source/libraries/mimalloc/src/region.c
+++ b/source/luametatex/source/libraries/mimalloc/src/region.c
@@ -32,30 +32,15 @@ Possible issues:
do this better without adding too much complexity?
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
#include <string.h> // memset
#include "bitmap.h"
-// Internal raw OS interface
-size_t _mi_os_large_page_size(void);
-bool _mi_os_protect(void* addr, size_t size);
-bool _mi_os_unprotect(void* addr, size_t size);
-bool _mi_os_commit(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
-bool _mi_os_decommit(void* p, size_t size, mi_stats_t* stats);
-bool _mi_os_reset(void* p, size_t size, mi_stats_t* stats);
-bool _mi_os_unreset(void* p, size_t size, bool* is_zero, mi_stats_t* stats);
-bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats);
-
-// arena.c
-mi_arena_id_t _mi_arena_id_none(void);
-void _mi_arena_free(void* p, size_t size, size_t alignment, size_t align_offset, size_t memid, bool all_committed, mi_stats_t* stats);
-void* _mi_arena_alloc(size_t size, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-void* _mi_arena_alloc_aligned(size_t size, size_t alignment, size_t align_offset, bool* commit, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t req_arena_id, size_t* memid, mi_os_tld_t* tld);
-
-
+// os.c
+bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* tld_stats);
// Constants
#if (MI_INTPTR_SIZE==8)
@@ -330,7 +315,7 @@ static void* mi_region_try_alloc(size_t blocks, bool* commit, bool* large, bool*
}
mi_assert_internal(!_mi_bitmap_is_any_claimed(&region->reset, 1, blocks, bit_idx));
- #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
+ #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED // && !MI_TSAN
if (*commit) { ((uint8_t*)p)[0] = 0; }
#endif
@@ -376,7 +361,7 @@ void* _mi_mem_alloc_aligned(size_t size, size_t alignment, size_t align_offset,
if (p != NULL) {
mi_assert_internal(((uintptr_t)p + align_offset) % alignment == 0);
- #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED
+ #if (MI_DEBUG>=2) && !MI_TRACK_ENABLED // && !MI_TSAN
if (*commit) { ((uint8_t*)p)[0] = 0; } // ensure the memory is committed
#endif
}
diff --git a/source/luametatex/source/libraries/mimalloc/src/segment-cache.c b/source/luametatex/source/libraries/mimalloc/src/segment-cache.c
index d93fd6441..eeae1b508 100644
--- a/source/luametatex/source/libraries/mimalloc/src/segment-cache.c
+++ b/source/luametatex/source/libraries/mimalloc/src/segment-cache.c
@@ -11,10 +11,10 @@ terms of the MIT license. A copy of the license can be found in the file
The full memory map of all segments is also implemented here.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
-#include "bitmap.h" // atomic bitmap
+#include "./bitmap.h" // atomic bitmap
//#define MI_CACHE_DISABLE 1 // define to completely disable the segment cache
@@ -35,8 +35,8 @@ typedef struct mi_cache_slot_s {
static mi_decl_cache_align mi_cache_slot_t cache[MI_CACHE_MAX]; // = 0
-static mi_decl_cache_align mi_bitmap_field_t cache_available[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET }; // zero bit = available!
-static mi_decl_cache_align mi_bitmap_field_t cache_available_large[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET };
+static mi_decl_cache_align mi_bitmap_field_t cache_unavailable[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET }; // zero bit = available!
+static mi_decl_cache_align mi_bitmap_field_t cache_unavailable_large[MI_CACHE_FIELDS] = { MI_CACHE_BITS_SET };
static mi_decl_cache_align mi_bitmap_field_t cache_inuse[MI_CACHE_FIELDS]; // zero bit = free
static bool mi_cdecl mi_segment_cache_is_suitable(mi_bitmap_index_t bitidx, void* arg) {
@@ -48,7 +48,8 @@ static bool mi_cdecl mi_segment_cache_is_suitable(mi_bitmap_index_t bitidx, void
mi_decl_noinline static void* mi_segment_cache_pop_ex(
bool all_suitable,
size_t size, mi_commit_mask_t* commit_mask,
- mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero,
+ mi_commit_mask_t* decommit_mask, bool large_allowed,
+ bool* large, bool* is_pinned, bool* is_zero,
mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
#ifdef MI_CACHE_DISABLE
@@ -66,23 +67,28 @@ mi_decl_noinline static void* mi_segment_cache_pop_ex(
if (start_field >= MI_CACHE_FIELDS) start_field = 0;
}
- // find an available slot
+ // find an available slot and make it unavailable
mi_bitmap_index_t bitidx = 0;
bool claimed = false;
mi_arena_id_t req_arena_id = _req_arena_id;
mi_bitmap_pred_fun_t pred_fun = (all_suitable ? NULL : &mi_segment_cache_is_suitable); // cannot pass NULL as the arena may be exclusive itself; todo: do not put exclusive arenas in the cache?
- if (*large) { // large allowed?
- claimed = _mi_bitmap_try_find_from_claim_pred(cache_available_large, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
+ if (large_allowed) { // large allowed?
+ claimed = _mi_bitmap_try_find_from_claim_pred(cache_unavailable_large, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
if (claimed) *large = true;
}
if (!claimed) {
- claimed = _mi_bitmap_try_find_from_claim_pred (cache_available, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
+ claimed = _mi_bitmap_try_find_from_claim_pred (cache_unavailable, MI_CACHE_FIELDS, start_field, 1, pred_fun, &req_arena_id, &bitidx);
if (claimed) *large = false;
}
if (!claimed) return NULL;
+ // no longer available but still in-use
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_unavailable, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_unavailable_large, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx));
+
// found a slot
mi_cache_slot_t* slot = &cache[mi_bitmap_index_bit(bitidx)];
void* p = slot->p;
@@ -95,16 +101,15 @@ mi_decl_noinline static void* mi_segment_cache_pop_ex(
mi_atomic_storei64_release(&slot->expire,(mi_msecs_t)0);
// mark the slot as free again
- mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx));
_mi_bitmap_unclaim(cache_inuse, MI_CACHE_FIELDS, 1, bitidx);
return p;
#endif
}
-mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
+mi_decl_noinline void* _mi_segment_cache_pop(size_t size, mi_commit_mask_t* commit_mask, mi_commit_mask_t* decommit_mask, bool large_allowed, bool* large, bool* is_pinned, bool* is_zero, mi_arena_id_t _req_arena_id, size_t* memid, mi_os_tld_t* tld)
{
- return mi_segment_cache_pop_ex(false, size, commit_mask, decommit_mask, large, is_pinned, is_zero, _req_arena_id, memid, tld);
+ return mi_segment_cache_pop_ex(false, size, commit_mask, decommit_mask, large_allowed, large, is_pinned, is_zero, _req_arena_id, memid, tld);
}
static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, void* p, size_t total, mi_stats_t* stats)
@@ -113,10 +118,11 @@ static mi_decl_noinline void mi_commit_mask_decommit(mi_commit_mask_t* cmask, vo
// nothing
}
else if (mi_commit_mask_is_full(cmask)) {
+ // decommit the whole in one call
_mi_os_decommit(p, total, stats);
}
else {
- // todo: one call to decommit the whole at once?
+ // decommit parts
mi_assert_internal((total%MI_COMMIT_MASK_BITS)==0);
size_t part = total/MI_COMMIT_MASK_BITS;
size_t idx;
@@ -148,21 +154,25 @@ static mi_decl_noinline void mi_segment_cache_purge(bool visit_all, bool force,
if (expire != 0 && (force || now >= expire)) { // racy read
// seems expired, first claim it from available
purged++;
- mi_bitmap_index_t bitidx = mi_bitmap_index_create_from_bit(idx);
- if (_mi_bitmap_claim(cache_available, MI_CACHE_FIELDS, 1, bitidx, NULL)) {
- // was available, we claimed it
+ mi_bitmap_index_t bitidx = mi_bitmap_index_create_from_bit(idx);
+ if (_mi_bitmap_claim(cache_unavailable, MI_CACHE_FIELDS, 1, bitidx, NULL)) { // no need to check large as those cannot be decommitted anyway
+ // it was available, we claimed it (and made it unavailable)
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_unavailable, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_unavailable_large, MI_CACHE_FIELDS, 1, bitidx));
+ // we can now access it safely
expire = mi_atomic_loadi64_acquire(&slot->expire);
if (expire != 0 && (force || now >= expire)) { // safe read
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_inuse, MI_CACHE_FIELDS, 1, bitidx));
// still expired, decommit it
mi_atomic_storei64_relaxed(&slot->expire,(mi_msecs_t)0);
- mi_assert_internal(!mi_commit_mask_is_empty(&slot->commit_mask) && _mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(!mi_commit_mask_is_empty(&slot->commit_mask));
_mi_abandoned_await_readers(); // wait until safe to decommit
// decommit committed parts
// TODO: instead of decommit, we could also free to the OS?
mi_commit_mask_decommit(&slot->commit_mask, slot->p, MI_SEGMENT_SIZE, tld->stats);
mi_commit_mask_create_empty(&slot->decommit_mask);
}
- _mi_bitmap_unclaim(cache_available, MI_CACHE_FIELDS, 1, bitidx); // make it available again for a pop
+ _mi_bitmap_unclaim(cache_unavailable, MI_CACHE_FIELDS, 1, bitidx); // make it available again for a pop
}
if (!visit_all && purged > MI_MAX_PURGE_PER_PUSH) break; // bound to no more than N purge tries per push
}
@@ -184,23 +194,20 @@ void _mi_segment_cache_free_all(mi_os_tld_t* tld) {
mi_commit_mask_t decommit_mask;
bool is_pinned;
bool is_zero;
+ bool is_large;
size_t memid;
const size_t size = MI_SEGMENT_SIZE;
- // iterate twice: first large pages, then regular memory
- for (int i = 0; i < 2; i++) {
- void* p;
- do {
- // keep popping and freeing the memory
- bool large = (i == 0);
- p = mi_segment_cache_pop_ex(true /* all */, size, &commit_mask, &decommit_mask,
- &large, &is_pinned, &is_zero, _mi_arena_id_none(), &memid, tld);
- if (p != NULL) {
- size_t csize = _mi_commit_mask_committed_size(&commit_mask, size);
- if (csize > 0 && !is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
- _mi_arena_free(p, size, MI_SEGMENT_ALIGN, 0, memid, is_pinned /* pretend not committed to not double count decommits */, tld->stats);
- }
- } while (p != NULL);
- }
+ void* p;
+ do {
+ // keep popping and freeing the memory
+ p = mi_segment_cache_pop_ex(true /* all */, size, &commit_mask, &decommit_mask,
+ true /* allow large */, &is_large, &is_pinned, &is_zero, _mi_arena_id_none(), &memid, tld);
+ if (p != NULL) {
+ size_t csize = _mi_commit_mask_committed_size(&commit_mask, size);
+ if (csize > 0 && !is_pinned) { _mi_stat_decrease(&_mi_stats_main.committed, csize); }
+ _mi_arena_free(p, size, MI_SEGMENT_ALIGN, 0, memid, is_pinned /* pretend not committed to not double count decommits */, tld->stats);
+ }
+ } while (p != NULL);
}
mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t memid, const mi_commit_mask_t* commit_mask, const mi_commit_mask_t* decommit_mask, bool is_large, bool is_pinned, mi_os_tld_t* tld)
@@ -209,27 +216,34 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
return false;
#else
- // only for normal segment blocks
+ // purge expired entries
+ mi_segment_cache_purge(false /* limit purges to a constant N */, false /* don't force unexpired */, tld);
+
+ // only cache normal segment blocks
if (size != MI_SEGMENT_SIZE || ((uintptr_t)start % MI_SEGMENT_ALIGN) != 0) return false;
+ // Also do not cache arena-allocated segments that cannot be decommitted (as arena allocation is fast).
+ // This is a common case with reserved huge OS pages.
+ //
+ // (note: we could also allow segments that are already fully decommitted but that never happens
+ // as the first slice is always committed (for the segment metadata))
+ if (!_mi_arena_is_os_allocated(memid) && is_pinned) return false;
+
// numa node determines start field
int numa_node = _mi_os_numa_node(NULL);
size_t start_field = 0;
if (numa_node > 0) {
- start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count())*numa_node;
+ start_field = (MI_CACHE_FIELDS / _mi_os_numa_node_count()) * numa_node;
if (start_field >= MI_CACHE_FIELDS) start_field = 0;
}
- // purge expired entries
- mi_segment_cache_purge(false /* limit purges to a constant N */, false /* don't force unexpired */, tld);
-
// find an available slot
mi_bitmap_index_t bitidx;
bool claimed = _mi_bitmap_try_find_from_claim(cache_inuse, MI_CACHE_FIELDS, start_field, 1, &bitidx);
if (!claimed) return false;
- mi_assert_internal(_mi_bitmap_is_claimed(cache_available, MI_CACHE_FIELDS, 1, bitidx));
- mi_assert_internal(_mi_bitmap_is_claimed(cache_available_large, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_unavailable, MI_CACHE_FIELDS, 1, bitidx));
+ mi_assert_internal(_mi_bitmap_is_claimed(cache_unavailable_large, MI_CACHE_FIELDS, 1, bitidx));
#if MI_DEBUG>1
if (is_pinned || is_large) {
mi_assert_internal(mi_commit_mask_is_full(commit_mask));
@@ -257,7 +271,7 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
}
// make it available
- _mi_bitmap_unclaim((is_large ? cache_available_large : cache_available), MI_CACHE_FIELDS, 1, bitidx);
+ _mi_bitmap_unclaim((is_large ? cache_unavailable_large : cache_unavailable), MI_CACHE_FIELDS, 1, bitidx);
return true;
#endif
}
@@ -273,7 +287,7 @@ mi_decl_noinline bool _mi_segment_cache_push(void* start, size_t size, size_t me
#if (MI_INTPTR_SIZE==8)
-#define MI_MAX_ADDRESS ((size_t)20 << 40) // 20TB
+#define MI_MAX_ADDRESS ((size_t)40 << 40) // 40TB
#else
#define MI_MAX_ADDRESS ((size_t)2 << 30) // 2Gb
#endif
diff --git a/source/luametatex/source/libraries/mimalloc/src/segment.c b/source/luametatex/source/libraries/mimalloc/src/segment.c
index dc98e3e7b..3e56d50f5 100644
--- a/source/luametatex/source/libraries/mimalloc/src/segment.c
+++ b/source/luametatex/source/libraries/mimalloc/src/segment.c
@@ -5,8 +5,8 @@ terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
#include <string.h> // memset
#include <stdio.h>
@@ -316,7 +316,13 @@ static uint8_t* _mi_segment_page_start_from_slice(const mi_segment_t* segment, c
ptrdiff_t idx = slice - segment->slices;
size_t psize = (size_t)slice->slice_count * MI_SEGMENT_SLICE_SIZE;
// make the start not OS page aligned for smaller blocks to avoid page/cache effects
- size_t start_offset = (xblock_size >= MI_INTPTR_SIZE && xblock_size <= 1024 ? 3*MI_MAX_ALIGN_GUARANTEE : 0);
+ // note: the offset must always be an xblock_size multiple since we assume small allocations
+ // are aligned (see `mi_heap_malloc_aligned`).
+ size_t start_offset = 0;
+ if (xblock_size >= MI_INTPTR_SIZE) {
+ if (xblock_size <= 64) { start_offset = 3*xblock_size; }
+ else if (xblock_size <= 512) { start_offset = xblock_size; }
+ }
if (page_size != NULL) { *page_size = psize - start_offset; }
return (uint8_t*)segment + ((idx*MI_SEGMENT_SLICE_SIZE) + start_offset);
}
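Aside (not part of the patch): a small worked example of the new `start_offset` rule introduced above, reproduced as a standalone helper with `MI_INTPTR_SIZE` approximated by `sizeof(intptr_t)`. For instance, a 64-byte block class now starts 192 bytes into the slice, a 512-byte class starts 512 bytes in, and a 1 KiB class starts at offset 0.

```
#include <stdio.h>
#include <stdint.h>

// mirrors the rule in the hunk above
static size_t start_offset(size_t xblock_size) {
    size_t off = 0;
    if (xblock_size >= sizeof(intptr_t)) {
        if (xblock_size <= 64)       { off = 3 * xblock_size; } // small classes: skip three blocks
        else if (xblock_size <= 512) { off = xblock_size;      } // medium classes: skip one block
    }
    return off;                                                  // larger classes start at offset 0
}

int main(void) {
    const size_t sizes[] = { 8, 64, 128, 512, 1024 };
    for (size_t i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++) {
        printf("xblock_size %4zu -> start_offset %4zu\n", sizes[i], start_offset(sizes[i]));
    }
    return 0;
}
```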
@@ -391,8 +397,10 @@ static void mi_segment_os_free(mi_segment_t* segment, mi_segments_tld_t* tld) {
if (size != MI_SEGMENT_SIZE || segment->mem_align_offset != 0 || segment->kind == MI_SEGMENT_HUGE || // only push regular segments on the cache
!_mi_segment_cache_push(segment, size, segment->memid, &segment->commit_mask, &segment->decommit_mask, segment->mem_is_large, segment->mem_is_pinned, tld->os))
{
- const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
- if (csize > 0 && !segment->mem_is_pinned) _mi_stat_decrease(&_mi_stats_main.committed, csize);
+ if (!segment->mem_is_pinned) {
+ const size_t csize = _mi_commit_mask_committed_size(&segment->commit_mask, size);
+ if (csize > 0) { _mi_stat_decrease(&_mi_stats_main.committed, csize); }
+ }
_mi_abandoned_await_readers(); // wait until safe to free
_mi_arena_free(segment, mi_segment_size(segment), segment->mem_alignment, segment->mem_align_offset, segment->memid, segment->mem_is_pinned /* pretend not committed to not double count decommits */, tld->stats);
}
@@ -503,6 +511,7 @@ static bool mi_segment_ensure_committed(mi_segment_t* segment, uint8_t* p, size_
mi_assert_internal(mi_commit_mask_all_set(&segment->commit_mask, &segment->decommit_mask));
// note: assumes commit_mask is always full for huge segments as otherwise the commit mask bits can overflow
if (mi_commit_mask_is_full(&segment->commit_mask) && mi_commit_mask_is_empty(&segment->decommit_mask)) return true; // fully committed
+ mi_assert_internal(segment->kind != MI_SEGMENT_HUGE);
return mi_segment_commitx(segment,true,p,size,stats);
}
@@ -632,7 +641,8 @@ static mi_slice_t* mi_segment_span_free_coalesce(mi_slice_t* slice, mi_segments_
// for huge pages, just mark as free but don't add to the queues
if (segment->kind == MI_SEGMENT_HUGE) {
- mi_assert_internal(segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
+ // issue #691: segment->used can be 0 if the huge page block was freed while abandoned (reclaim will get here in that case)
+ mi_assert_internal((segment->used==0 && slice->xblock_size==0) || segment->used == 1); // decreased right after this call in `mi_segment_page_clear`
slice->xblock_size = 0; // mark as free anyways
// we should mark the last slice `xblock_size=0` now to maintain invariants but we skip it to
// avoid a possible cache miss (and the segment is about to be freed)
@@ -795,15 +805,13 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
const size_t extra = align_offset - info_size;
// recalculate due to potential guard pages
*psegment_slices = mi_segment_calculate_slices(required + extra, ppre_size, pinfo_slices);
- //segment_size += _mi_align_up(align_offset - info_size, MI_SEGMENT_SLICE_SIZE);
- //segment_slices = segment_size / MI_SEGMENT_SLICE_SIZE;
}
const size_t segment_size = (*psegment_slices) * MI_SEGMENT_SLICE_SIZE;
mi_segment_t* segment = NULL;
// get from cache?
if (page_alignment == 0) {
- segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, pcommit_mask, pdecommit_mask, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
+ segment = (mi_segment_t*)_mi_segment_cache_pop(segment_size, pcommit_mask, pdecommit_mask, mem_large, &mem_large, &is_pinned, is_zero, req_arena_id, &memid, os_tld);
}
// get from OS
@@ -830,7 +838,10 @@ static mi_segment_t* mi_segment_os_alloc( size_t required, size_t page_alignment
if (!ok) return NULL; // failed to commit
mi_commit_mask_set(pcommit_mask, &commit_needed_mask);
}
- mi_track_mem_undefined(segment,commit_needed);
+ else if (*is_zero) {
+ // track zero initialization for valgrind
+ mi_track_mem_defined(segment, commit_needed * MI_COMMIT_SIZE);
+ }
segment->memid = memid;
segment->mem_is_pinned = is_pinned;
segment->mem_is_large = mem_large;
@@ -874,10 +885,13 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
// zero the segment info? -- not always needed as it may be zero initialized from the OS
mi_atomic_store_ptr_release(mi_segment_t, &segment->abandoned_next, NULL); // tsan
- if (!is_zero) {
- ptrdiff_t ofs = offsetof(mi_segment_t, next);
+ {
+ ptrdiff_t ofs = offsetof(mi_segment_t, next);
size_t prefix = offsetof(mi_segment_t, slices) - ofs;
- memset((uint8_t*)segment+ofs, 0, prefix + sizeof(mi_slice_t)*(segment_slices+1)); // one more
+ size_t zsize = prefix + (sizeof(mi_slice_t) * (segment_slices + 1)); // one more
+ if (!is_zero) {
+ memset((uint8_t*)segment + ofs, 0, zsize);
+ }
}
segment->commit_mask = commit_mask; // on lazy commit, the initial part is always committed
@@ -893,6 +907,10 @@ static mi_segment_t* mi_segment_alloc(size_t required, size_t page_alignment, mi
mi_assert_internal(!mi_commit_mask_any_set(&segment->decommit_mask, &commit_needed_mask));
#endif
}
+ else {
+ segment->decommit_expire = 0;
+ mi_commit_mask_create_empty( &segment->decommit_mask );
+ }
// initialize segment info
const size_t slice_entries = (segment_slices > MI_SLICES_PER_SEGMENT ? MI_SLICES_PER_SEGMENT : segment_slices);
@@ -954,7 +972,9 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
// Remove the free pages
mi_slice_t* slice = &segment->slices[0];
const mi_slice_t* end = mi_segment_slices_end(segment);
+ #if MI_DEBUG>1
size_t page_count = 0;
+ #endif
while (slice < end) {
mi_assert_internal(slice->slice_count > 0);
mi_assert_internal(slice->slice_offset == 0);
@@ -962,7 +982,9 @@ static void mi_segment_free(mi_segment_t* segment, bool force, mi_segments_tld_t
if (slice->xblock_size == 0 && segment->kind != MI_SEGMENT_HUGE) {
mi_segment_span_remove_from_queue(slice, tld);
}
+ #if MI_DEBUG>1
page_count++;
+ #endif
slice = slice + slice->slice_count;
}
mi_assert_internal(page_count == 2); // first page is allocated by the segment itself
@@ -1048,7 +1070,7 @@ We maintain a global list of abandoned segments that are
reclaimed on demand. Since this is shared among threads
the implementation needs to avoid the A-B-A problem on
popping abandoned segments: <https://en.wikipedia.org/wiki/ABA_problem>
-We use tagged pointers to avoid accidentially identifying
+We use tagged pointers to avoid accidentally identifying
reused segments, much like stamped references in Java.
Secondly, we maintain a reader counter to avoid resetting
or decommitting segments that have a pending read operation.
diff --git a/source/luametatex/source/libraries/mimalloc/src/static.c b/source/luametatex/source/libraries/mimalloc/src/static.c
index 5b34ddbb6..d992f4daf 100644
--- a/source/luametatex/source/libraries/mimalloc/src/static.c
+++ b/source/luametatex/source/libraries/mimalloc/src/static.c
@@ -14,26 +14,27 @@ terms of the MIT license. A copy of the license can be found in the file
#endif
#include "mimalloc.h"
-#include "mimalloc-internal.h"
+#include "mimalloc/internal.h"
// For a static override we create a single object file
// containing the whole library. If it is linked first
// it will override all the standard library allocation
// functions (on Unix's).
-#include "stats.c"
-#include "random.c"
-#include "os.c"
-#include "bitmap.c"
-#include "arena.c"
-#include "segment-cache.c"
-#include "segment.c"
-#include "page.c"
-#include "heap.c"
-#include "alloc.c"
+#include "alloc.c" // includes alloc-override.c
#include "alloc-aligned.c"
#include "alloc-posix.c"
-#if MI_OSX_ZONE
-#include "alloc-override-osx.c"
-#endif
+#include "arena.c"
+#include "bitmap.c"
+#include "heap.c"
#include "init.c"
#include "options.c"
+#include "os.c"
+#include "page.c" // includes page-queue.c
+#include "random.c"
+#include "segment.c"
+#include "segment-cache.c"
+#include "stats.c"
+#include "prim/prim.c"
+#if MI_OSX_ZONE
+#include "prim/osx/alloc-override-zone.c"
+#endif
diff --git a/source/luametatex/source/libraries/mimalloc/src/stats.c b/source/luametatex/source/libraries/mimalloc/src/stats.c
index 2a8b9404f..d2a316818 100644
--- a/source/luametatex/source/libraries/mimalloc/src/stats.c
+++ b/source/luametatex/source/libraries/mimalloc/src/stats.c
@@ -5,10 +5,11 @@ terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#include "mimalloc.h"
-#include "mimalloc-internal.h"
-#include "mimalloc-atomic.h"
+#include "mimalloc/internal.h"
+#include "mimalloc/atomic.h"
+#include "mimalloc/prim.h"
-#include <stdio.h> // fputs, stderr
+#include <stdio.h> // snprintf
#include <string.h> // memset
#if defined(_MSC_VER) && (_MSC_VER < 1920)
@@ -291,8 +292,6 @@ static void mi_cdecl mi_buffered_out(const char* msg, void* arg) {
// Print statistics
//------------------------------------------------------------
-static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults);
-
static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0) mi_attr_noexcept {
// wrap the output function to be line buffered
char buf[256];
@@ -337,15 +336,15 @@ static void _mi_stats_print(mi_stats_t* stats, mi_output_fun* out0, void* arg0)
mi_stat_counter_print_avg(&stats->searches, "searches", out, arg);
_mi_fprintf(out, arg, "%10s: %7zu\n", "numa nodes", _mi_os_numa_node_count());
- mi_msecs_t elapsed;
- mi_msecs_t user_time;
- mi_msecs_t sys_time;
+ size_t elapsed;
+ size_t user_time;
+ size_t sys_time;
size_t current_rss;
size_t peak_rss;
size_t current_commit;
size_t peak_commit;
size_t page_faults;
- mi_stat_process_info(&elapsed, &user_time, &sys_time, &current_rss, &peak_rss, &current_commit, &peak_commit, &page_faults);
+ mi_process_info(&elapsed, &user_time, &sys_time, &current_rss, &peak_rss, &current_commit, &peak_commit, &page_faults);
_mi_fprintf(out, arg, "%10s: %7ld.%03ld s\n", "elapsed", elapsed/1000, elapsed%1000);
_mi_fprintf(out, arg, "%10s: user: %ld.%03ld s, system: %ld.%03ld s, faults: %lu, rss: ", "process",
user_time/1000, user_time%1000, sys_time/1000, sys_time%1000, (unsigned long)page_faults );
@@ -404,46 +403,12 @@ void mi_thread_stats_print_out(mi_output_fun* out, void* arg) mi_attr_noexcept {
// ----------------------------------------------------------------
// Basic timer for convenience; use milli-seconds to avoid doubles
// ----------------------------------------------------------------
-#ifdef _WIN32
-#include <windows.h>
-static mi_msecs_t mi_to_msecs(LARGE_INTEGER t) {
- static LARGE_INTEGER mfreq; // = 0
- if (mfreq.QuadPart == 0LL) {
- LARGE_INTEGER f;
- QueryPerformanceFrequency(&f);
- mfreq.QuadPart = f.QuadPart/1000LL;
- if (mfreq.QuadPart == 0) mfreq.QuadPart = 1;
- }
- return (mi_msecs_t)(t.QuadPart / mfreq.QuadPart);
-}
+
+static mi_msecs_t mi_clock_diff;
mi_msecs_t _mi_clock_now(void) {
- LARGE_INTEGER t;
- QueryPerformanceCounter(&t);
- return mi_to_msecs(t);
-}
-#else
-#include <time.h>
-#if defined(CLOCK_REALTIME) || defined(CLOCK_MONOTONIC)
-mi_msecs_t _mi_clock_now(void) {
- struct timespec t;
- #ifdef CLOCK_MONOTONIC
- clock_gettime(CLOCK_MONOTONIC, &t);
- #else
- clock_gettime(CLOCK_REALTIME, &t);
- #endif
- return ((mi_msecs_t)t.tv_sec * 1000) + ((mi_msecs_t)t.tv_nsec / 1000000);
-}
-#else
-// low resolution timer
-mi_msecs_t _mi_clock_now(void) {
- return ((mi_msecs_t)clock() / ((mi_msecs_t)CLOCKS_PER_SEC / 1000));
+ return _mi_prim_clock_now();
}
-#endif
-#endif
-
-
-static mi_msecs_t mi_clock_diff;
mi_msecs_t _mi_clock_start(void) {
if (mi_clock_diff == 0.0) {
@@ -463,156 +428,27 @@ mi_msecs_t _mi_clock_end(mi_msecs_t start) {
// Basic process statistics
// --------------------------------------------------------
-#if defined(_WIN32)
-#include <windows.h>
-
-static mi_msecs_t filetime_msecs(const FILETIME* ftime) {
- ULARGE_INTEGER i;
- i.LowPart = ftime->dwLowDateTime;
- i.HighPart = ftime->dwHighDateTime;
- mi_msecs_t msecs = (i.QuadPart / 10000); // FILETIME is in 100 nano seconds
- return msecs;
-}
-
-typedef struct _PROCESS_MEMORY_COUNTERS {
- DWORD cb;
- DWORD PageFaultCount;
- SIZE_T PeakWorkingSetSize;
- SIZE_T WorkingSetSize;
- SIZE_T QuotaPeakPagedPoolUsage;
- SIZE_T QuotaPagedPoolUsage;
- SIZE_T QuotaPeakNonPagedPoolUsage;
- SIZE_T QuotaNonPagedPoolUsage;
- SIZE_T PagefileUsage;
- SIZE_T PeakPagefileUsage;
-} PROCESS_MEMORY_COUNTERS;
-typedef PROCESS_MEMORY_COUNTERS* PPROCESS_MEMORY_COUNTERS;
-typedef BOOL (WINAPI *PGetProcessMemoryInfo)(HANDLE, PPROCESS_MEMORY_COUNTERS, DWORD);
-static PGetProcessMemoryInfo pGetProcessMemoryInfo = NULL;
-
-static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults)
-{
- *elapsed = _mi_clock_end(mi_process_start);
- FILETIME ct;
- FILETIME ut;
- FILETIME st;
- FILETIME et;
- GetProcessTimes(GetCurrentProcess(), &ct, &et, &st, &ut);
- *utime = filetime_msecs(&ut);
- *stime = filetime_msecs(&st);
-
- // load psapi on demand
- if (pGetProcessMemoryInfo == NULL) {
- HINSTANCE hDll = LoadLibrary(TEXT("psapi.dll"));
- if (hDll != NULL) {
- pGetProcessMemoryInfo = (PGetProcessMemoryInfo)(void (*)(void))GetProcAddress(hDll, "GetProcessMemoryInfo");
- }
- }
-
- // get process info
- PROCESS_MEMORY_COUNTERS info;
- memset(&info, 0, sizeof(info));
- if (pGetProcessMemoryInfo != NULL) {
- pGetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info));
- }
- *current_rss = (size_t)info.WorkingSetSize;
- *peak_rss = (size_t)info.PeakWorkingSetSize;
- *current_commit = (size_t)info.PagefileUsage;
- *peak_commit = (size_t)info.PeakPagefileUsage;
- *page_faults = (size_t)info.PageFaultCount;
-}
-
-#elif !defined(__wasi__) && (defined(__unix__) || defined(__unix) || defined(unix) || defined(__APPLE__) || defined(__HAIKU__))
-#include <stdio.h>
-#include <unistd.h>
-#include <sys/resource.h>
-
-#if defined(__APPLE__)
-#include <mach/mach.h>
-#endif
-
-#if defined(__HAIKU__)
-#include <kernel/OS.h>
-#endif
-
-static mi_msecs_t timeval_secs(const struct timeval* tv) {
- return ((mi_msecs_t)tv->tv_sec * 1000L) + ((mi_msecs_t)tv->tv_usec / 1000L);
-}
-
-static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults)
-{
- *elapsed = _mi_clock_end(mi_process_start);
- struct rusage rusage;
- getrusage(RUSAGE_SELF, &rusage);
- *utime = timeval_secs(&rusage.ru_utime);
- *stime = timeval_secs(&rusage.ru_stime);
-#if !defined(__HAIKU__)
- *page_faults = rusage.ru_majflt;
-#endif
- // estimate commit using our stats
- *peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak));
- *current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current));
- *current_rss = *current_commit; // estimate
-#if defined(__HAIKU__)
- // Haiku does not have (yet?) a way to
- // get these stats per process
- thread_info tid;
- area_info mem;
- ssize_t c;
- get_thread_info(find_thread(0), &tid);
- while (get_next_area_info(tid.team, &c, &mem) == B_OK) {
- *peak_rss += mem.ram_size;
- }
- *page_faults = 0;
-#elif defined(__APPLE__)
- *peak_rss = rusage.ru_maxrss; // BSD reports in bytes
- struct mach_task_basic_info info;
- mach_msg_type_number_t infoCount = MACH_TASK_BASIC_INFO_COUNT;
- if (task_info(mach_task_self(), MACH_TASK_BASIC_INFO, (task_info_t)&info, &infoCount) == KERN_SUCCESS) {
- *current_rss = (size_t)info.resident_size;
- }
-#else
- *peak_rss = rusage.ru_maxrss * 1024; // Linux reports in KiB
-#endif
-}
-
-#else
-#ifndef __wasi__
-// WebAssembly instances are not processes
-#pragma message("define a way to get process info")
-#endif
-
-static void mi_stat_process_info(mi_msecs_t* elapsed, mi_msecs_t* utime, mi_msecs_t* stime, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults)
-{
- *elapsed = _mi_clock_end(mi_process_start);
- *peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak));
- *current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current));
- *peak_rss = *peak_commit;
- *current_rss = *current_commit;
- *page_faults = 0;
- *utime = 0;
- *stime = 0;
-}
-#endif
-
-
mi_decl_export void mi_process_info(size_t* elapsed_msecs, size_t* user_msecs, size_t* system_msecs, size_t* current_rss, size_t* peak_rss, size_t* current_commit, size_t* peak_commit, size_t* page_faults) mi_attr_noexcept
{
- mi_msecs_t elapsed = 0;
- mi_msecs_t utime = 0;
- mi_msecs_t stime = 0;
- size_t current_rss0 = 0;
- size_t peak_rss0 = 0;
- size_t current_commit0 = 0;
- size_t peak_commit0 = 0;
- size_t page_faults0 = 0;
- mi_stat_process_info(&elapsed,&utime, &stime, &current_rss0, &peak_rss0, &current_commit0, &peak_commit0, &page_faults0);
- if (elapsed_msecs!=NULL) *elapsed_msecs = (elapsed < 0 ? 0 : (elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)elapsed : PTRDIFF_MAX));
- if (user_msecs!=NULL) *user_msecs = (utime < 0 ? 0 : (utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)utime : PTRDIFF_MAX));
- if (system_msecs!=NULL) *system_msecs = (stime < 0 ? 0 : (stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)stime : PTRDIFF_MAX));
- if (current_rss!=NULL) *current_rss = current_rss0;
- if (peak_rss!=NULL) *peak_rss = peak_rss0;
- if (current_commit!=NULL) *current_commit = current_commit0;
- if (peak_commit!=NULL) *peak_commit = peak_commit0;
- if (page_faults!=NULL) *page_faults = page_faults0;
+ mi_process_info_t pinfo;
+ _mi_memzero(&pinfo,sizeof(pinfo));
+ pinfo.elapsed = _mi_clock_end(mi_process_start);
+ pinfo.current_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.current));
+ pinfo.peak_commit = (size_t)(mi_atomic_loadi64_relaxed((_Atomic(int64_t)*)&_mi_stats_main.committed.peak));
+ pinfo.current_rss = pinfo.current_commit;
+ pinfo.peak_rss = pinfo.peak_commit;
+ pinfo.utime = 0;
+ pinfo.stime = 0;
+ pinfo.page_faults = 0;
+
+ _mi_prim_process_info(&pinfo);
+
+ if (elapsed_msecs!=NULL) *elapsed_msecs = (pinfo.elapsed < 0 ? 0 : (pinfo.elapsed < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.elapsed : PTRDIFF_MAX));
+ if (user_msecs!=NULL) *user_msecs = (pinfo.utime < 0 ? 0 : (pinfo.utime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.utime : PTRDIFF_MAX));
+ if (system_msecs!=NULL) *system_msecs = (pinfo.stime < 0 ? 0 : (pinfo.stime < (mi_msecs_t)PTRDIFF_MAX ? (size_t)pinfo.stime : PTRDIFF_MAX));
+ if (current_rss!=NULL) *current_rss = pinfo.current_rss;
+ if (peak_rss!=NULL) *peak_rss = pinfo.peak_rss;
+ if (current_commit!=NULL) *current_commit = pinfo.current_commit;
+ if (peak_commit!=NULL) *peak_commit = pinfo.peak_commit;
+ if (page_faults!=NULL) *page_faults = pinfo.page_faults;
}
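
The rewrite drops the platform specific #ifdef blocks here: the structure is preseeded with the commit based estimates and _mi_prim_process_info fills in whatever the platform primitive layer knows, so only the NULL handling and the clamping to PTRDIFF_MAX remain in this file. A minimal caller sketch (standalone and illustrative, assuming <mimalloc.h> is on the include path; any output pointer may be NULL):

    #include <stdio.h>
    #include <mimalloc.h>

    int main(void) {
        size_t elapsed = 0, user = 0, sys = 0;
        size_t current_rss = 0, peak_rss = 0, page_faults = 0;
        /* the commit figures are not needed here, so pass NULL for them */
        mi_process_info(&elapsed, &user, &sys,
                        &current_rss, &peak_rss,
                        NULL, NULL,
                        &page_faults);
        printf("elapsed %zu ms, rss %zu bytes (peak %zu), page faults %zu\n",
               elapsed, current_rss, peak_rss, page_faults);
        return 0;
    }
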
diff --git a/source/luametatex/source/lua/lmtinterface.h b/source/luametatex/source/lua/lmtinterface.h
index d98b55294..2636ea2d7 100644
--- a/source/luametatex/source/lua/lmtinterface.h
+++ b/source/luametatex/source/lua/lmtinterface.h
@@ -405,7 +405,13 @@ extern lmt_interface_info lmt_interface;
# define lmt_name_of_math_indirect(n) lmt_interface.math_indirect_values [n].name
# define lmt_name_of_field_type(n) lmt_interface.field_type_values [n].name
-/*tex This list will be made smaller because not all values need the boost. */
+/*tex
+ This list will be made smaller because not all values need the boost. Before we define the
+ lot we undefine some possibly conflicting snippets. Actually, we don't really define the
+ keys here but assemble more complex references to registry indices and variables.
+*/
+
+# undef quad /* CYGWIN */
# define declare_shared_lua_keys(L) \
/* */\
@@ -872,6 +878,7 @@ make_lua_key(L, mathcontrol);\
make_lua_key(L, mathdir);\
make_lua_key(L, mathfence);\
make_lua_key(L, mathfraction);\
+make_lua_key(L, mathkern);\
make_lua_key(L, mathkerns);\
make_lua_key(L, MathLeading);\
make_lua_key(L, mathoperator);\
diff --git a/source/luametatex/source/lua/lmttexlib.c b/source/luametatex/source/lua/lmttexlib.c
index 0ce5bfd08..e87a88292 100644
--- a/source/luametatex/source/lua/lmttexlib.c
+++ b/source/luametatex/source/lua/lmttexlib.c
@@ -1037,6 +1037,16 @@ static const char *texlib_aux_scan_dimen_part(lua_State * L, const char *ss, int
numerator = 49838;
denominator = 7739;
goto CONVERSION;
+ } else if (strncmp(str, "es", 2) == 0) {
+ str += 2;
+ numerator = 9176;
+ denominator = 129;
+ goto CONVERSION;
+ } else if (strncmp(str, "ts", 2) == 0) {
+ str += 2;
+ numerator = 4588;
+ denominator = 645;
+ goto CONVERSION;
} else if (strncmp(str, "em", 2) == 0) {
str += 2;
special = tex_get_font_em_width(cur_font_par);
@@ -1045,6 +1055,11 @@ static const char *texlib_aux_scan_dimen_part(lua_State * L, const char *ss, int
str += 2;
special = tex_get_font_ex_height(cur_font_par);
goto SPECIAL;
+ } else if (strncmp(str, "eu", 2) == 0) {
+ str += 2;
+ numerator = 9176 * eu_factor_par;
+ denominator = 129 * 10;
+ goto CONVERSION;
} else if (strncmp(str, "px", 2) == 0) {
str += 2;
special = px_dimen_par;
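
As with the existing units, the pair is applied as value * numerator / denominator relative to the TeX point, so 1es comes out at 9176/129 pt (roughly 71.13 pt), 1ts is exactly one tenth of that, and eu scales ts by the integer \eufactor parameter that is introduced further down (with \eufactor equal to 10, 1eu equals 1es). A standalone arithmetic check of those ratios (plain C, not engine code):

    #include <stdio.h>

    int main(void) {
        double es = 9176.0 / 129.0;                      /* about 71.13 pt         */
        double ts = 4588.0 / 645.0;                      /* es / 10, about 7.11 pt */
        int    eufactor = 10;                            /* value of \eufactor     */
        double eu = 9176.0 * eufactor / (129.0 * 10.0);  /* equals es for 10       */
        printf("1es = %.4f pt\n1ts = %.4f pt\n1eu = %.4f pt\n", es, ts, eu);
        return 0;
    }
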
diff --git a/source/luametatex/source/luacore/lua54/src/lcode.c b/source/luametatex/source/luacore/lua54/src/lcode.c
index 911dbd5f1..1a371ca94 100644
--- a/source/luametatex/source/luacore/lua54/src/lcode.c
+++ b/source/luametatex/source/luacore/lua54/src/lcode.c
@@ -1352,6 +1352,35 @@ static int constfolding (FuncState *fs, int op, expdesc *e1,
/*
+** Convert a BinOpr to an OpCode (ORDER OPR - ORDER OP)
+*/
+l_sinline OpCode binopr2op (BinOpr opr, BinOpr baser, OpCode base) {
+ lua_assert(baser <= opr &&
+ ((baser == OPR_ADD && opr <= OPR_SHR) ||
+ (baser == OPR_LT && opr <= OPR_LE)));
+ return cast(OpCode, (cast_int(opr) - cast_int(baser)) + cast_int(base));
+}
+
+
+/*
+** Convert a UnOpr to an OpCode (ORDER OPR - ORDER OP)
+*/
+l_sinline OpCode unopr2op (UnOpr opr) {
+ return cast(OpCode, (cast_int(opr) - cast_int(OPR_MINUS)) +
+ cast_int(OP_UNM));
+}
+
+
+/*
+** Convert a BinOpr to a tag method (ORDER OPR - ORDER TM)
+*/
+l_sinline TMS binopr2TM (BinOpr opr) {
+ lua_assert(OPR_ADD <= opr && opr <= OPR_SHR);
+ return cast(TMS, (cast_int(opr) - cast_int(OPR_ADD)) + cast_int(TM_ADD));
+}
+
+
+/*
** Emit code for unary expressions that "produce values"
** (everything but 'not').
** Expression to produce final result will be encoded in 'e'.
@@ -1389,15 +1418,15 @@ static void finishbinexpval (FuncState *fs, expdesc *e1, expdesc *e2,
** Emit code for binary expressions that "produce values" over
** two registers.
*/
-static void codebinexpval (FuncState *fs, OpCode op,
+static void codebinexpval (FuncState *fs, BinOpr opr,
expdesc *e1, expdesc *e2, int line) {
+ OpCode op = binopr2op(opr, OPR_ADD, OP_ADD);
int v2 = luaK_exp2anyreg(fs, e2); /* make sure 'e2' is in a register */
/* 'e1' must be already in a register or it is a constant */
lua_assert((VNIL <= e1->k && e1->k <= VKSTR) ||
e1->k == VNONRELOC || e1->k == VRELOC);
lua_assert(OP_ADD <= op && op <= OP_SHR);
- finishbinexpval(fs, e1, e2, op, v2, 0, line, OP_MMBIN,
- cast(TMS, (op - OP_ADD) + TM_ADD));
+ finishbinexpval(fs, e1, e2, op, v2, 0, line, OP_MMBIN, binopr2TM(opr));
}
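
These helpers rely on the BinOpr, OpCode and TMS enumerations keeping their operators in the same relative order (the "ORDER OPR - ORDER OP" remark above); the conversion is nothing more than an offset shift. A toy illustration with stand-in enums (the X prefixed names are made up for the example and are not Lua's):

    #include <assert.h>

    typedef enum { XOPR_ADD, XOPR_SUB, XOPR_MUL } XBinOpr;
    typedef enum { XOP_ADD = 34, XOP_SUB, XOP_MUL } XOpCode;

    /* same arithmetic as binopr2op: shift the operator by the distance between the bases */
    static XOpCode xbinopr2op(XBinOpr opr, XBinOpr baser, XOpCode base) {
        return (XOpCode) (((int) opr - (int) baser) + (int) base);
    }

    int main(void) {
        assert(xbinopr2op(XOPR_MUL, XOPR_ADD, XOP_ADD) == XOP_MUL);
        return 0;
    }
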
@@ -1418,9 +1447,9 @@ static void codebini (FuncState *fs, OpCode op,
*/
static void codebinK (FuncState *fs, BinOpr opr,
expdesc *e1, expdesc *e2, int flip, int line) {
- TMS event = cast(TMS, opr + TM_ADD);
+ TMS event = binopr2TM(opr);
int v2 = e2->u.info; /* K index */
- OpCode op = cast(OpCode, opr + OP_ADDK);
+ OpCode op = binopr2op(opr, OPR_ADD, OP_ADDK);
finishbinexpval(fs, e1, e2, op, v2, flip, line, OP_MMBINK, event);
}
@@ -1457,10 +1486,9 @@ static void swapexps (expdesc *e1, expdesc *e2) {
*/
static void codebinNoK (FuncState *fs, BinOpr opr,
expdesc *e1, expdesc *e2, int flip, int line) {
- OpCode op = cast(OpCode, opr + OP_ADD);
if (flip)
swapexps(e1, e2); /* back to original order */
- codebinexpval(fs, op, e1, e2, line); /* use standard operators */
+ codebinexpval(fs, opr, e1, e2, line); /* use standard operators */
}
@@ -1490,7 +1518,7 @@ static void codecommutative (FuncState *fs, BinOpr op,
flip = 1;
}
if (op == OPR_ADD && isSCint(e2)) /* immediate operand? */
- codebini(fs, cast(OpCode, OP_ADDI), e1, e2, flip, line, TM_ADD);
+ codebini(fs, OP_ADDI, e1, e2, flip, line, TM_ADD);
else
codearith(fs, op, e1, e2, flip, line);
}
@@ -1518,25 +1546,27 @@ static void codebitwise (FuncState *fs, BinOpr opr,
** Emit code for order comparisons. When using an immediate operand,
** 'isfloat' tells whether the original value was a float.
*/
-static void codeorder (FuncState *fs, OpCode op, expdesc *e1, expdesc *e2) {
+static void codeorder (FuncState *fs, BinOpr opr, expdesc *e1, expdesc *e2) {
int r1, r2;
int im;
int isfloat = 0;
+ OpCode op;
if (isSCnumber(e2, &im, &isfloat)) {
/* use immediate operand */
r1 = luaK_exp2anyreg(fs, e1);
r2 = im;
- op = cast(OpCode, (op - OP_LT) + OP_LTI);
+ op = binopr2op(opr, OPR_LT, OP_LTI);
}
else if (isSCnumber(e1, &im, &isfloat)) {
/* transform (A < B) to (B > A) and (A <= B) to (B >= A) */
r1 = luaK_exp2anyreg(fs, e2);
r2 = im;
- op = (op == OP_LT) ? OP_GTI : OP_GEI;
+ op = binopr2op(opr, OPR_LT, OP_GTI);
}
else { /* regular case, compare two registers */
r1 = luaK_exp2anyreg(fs, e1);
r2 = luaK_exp2anyreg(fs, e2);
+ op = binopr2op(opr, OPR_LT, OP_LT);
}
freeexps(fs, e1, e2);
e1->u.info = condjump(fs, op, r1, r2, isfloat, 1);
@@ -1579,16 +1609,16 @@ static void codeeq (FuncState *fs, BinOpr opr, expdesc *e1, expdesc *e2) {
/*
** Apply prefix operation 'op' to expression 'e'.
*/
-void luaK_prefix (FuncState *fs, UnOpr op, expdesc *e, int line) {
+void luaK_prefix (FuncState *fs, UnOpr opr, expdesc *e, int line) {
static const expdesc ef = {VKINT, {0}, NO_JUMP, NO_JUMP};
luaK_dischargevars(fs, e);
- switch (op) {
+ switch (opr) {
case OPR_MINUS: case OPR_BNOT: /* use 'ef' as fake 2nd operand */
- if (constfolding(fs, op + LUA_OPUNM, e, &ef))
+ if (constfolding(fs, opr + LUA_OPUNM, e, &ef))
break;
/* else */ /* FALLTHROUGH */
case OPR_LEN:
- codeunexpval(fs, cast(OpCode, op + OP_UNM), e, line);
+ codeunexpval(fs, unopr2op(opr), e, line);
break;
case OPR_NOT: codenot(fs, e); break;
default: lua_assert(0);
@@ -1718,30 +1748,27 @@ void luaK_posfix (FuncState *fs, BinOpr opr,
/* coded as (r1 >> -I) */;
}
else /* regular case (two registers) */
- codebinexpval(fs, OP_SHL, e1, e2, line);
+ codebinexpval(fs, opr, e1, e2, line);
break;
}
case OPR_SHR: {
if (isSCint(e2))
codebini(fs, OP_SHRI, e1, e2, 0, line, TM_SHR); /* r1 >> I */
else /* regular case (two registers) */
- codebinexpval(fs, OP_SHR, e1, e2, line);
+ codebinexpval(fs, opr, e1, e2, line);
break;
}
case OPR_EQ: case OPR_NE: {
codeeq(fs, opr, e1, e2);
break;
}
- case OPR_LT: case OPR_LE: {
- OpCode op = cast(OpCode, (opr - OPR_EQ) + OP_EQ);
- codeorder(fs, op, e1, e2);
- break;
- }
case OPR_GT: case OPR_GE: {
/* '(a > b)' <=> '(b < a)'; '(a >= b)' <=> '(b <= a)' */
- OpCode op = cast(OpCode, (opr - OPR_NE) + OP_EQ);
swapexps(e1, e2);
- codeorder(fs, op, e1, e2);
+ opr = cast(BinOpr, (opr - OPR_GT) + OPR_LT);
+ } /* FALLTHROUGH */
+ case OPR_LT: case OPR_LE: {
+ codeorder(fs, opr, e1, e2);
break;
}
default: lua_assert(0);
diff --git a/source/luametatex/source/luacore/lua54/src/ldebug.c b/source/luametatex/source/luacore/lua54/src/ldebug.c
index 3fae5cf25..7a61a780e 100644
--- a/source/luametatex/source/luacore/lua54/src/ldebug.c
+++ b/source/luametatex/source/luacore/lua54/src/ldebug.c
@@ -656,18 +656,19 @@ static const char *funcnamefromcall (lua_State *L, CallInfo *ci,
/*
-** Check whether pointer 'o' points to some value in the stack
-** frame of the current function. Because 'o' may not point to a
-** value in this stack, we cannot compare it with the region
-** boundaries (undefined behaviour in ISO C).
+** Check whether pointer 'o' points to some value in the stack frame of
+** the current function and, if so, returns its index. Because 'o' may
+** not point to a value in this stack, we cannot compare it with the
+** region boundaries (undefined behaviour in ISO C).
*/
-static int isinstack (CallInfo *ci, const TValue *o) {
- StkId pos;
- for (pos = ci->func.p + 1; pos < ci->top.p; pos++) {
- if (o == s2v(pos))
- return 1;
+static int instack (CallInfo *ci, const TValue *o) {
+ int pos;
+ StkId base = ci->func.p + 1;
+ for (pos = 0; base + pos < ci->top.p; pos++) {
+ if (o == s2v(base + pos))
+ return pos;
}
- return 0; /* not found */
+ return -1; /* not found */
}
@@ -708,9 +709,11 @@ static const char *varinfo (lua_State *L, const TValue *o) {
const char *kind = NULL;
if (isLua(ci)) {
kind = getupvalname(ci, o, &name); /* check whether 'o' is an upvalue */
- if (!kind && isinstack(ci, o)) /* no? try a register */
- kind = getobjname(ci_func(ci)->p, currentpc(ci),
- cast_int(cast(StkId, o) - (ci->func.p + 1)), &name);
+ if (!kind) { /* not an upvalue? */
+ int reg = instack(ci, o); /* try a register */
+ if (reg >= 0) /* is 'o' a register? */
+ kind = getobjname(ci_func(ci)->p, currentpc(ci), reg, &name);
+ }
}
return formatvarinfo(L, kind, name);
}
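
Returning the register index (or -1) from instack means that varinfo no longer reconstructs the index by subtracting two pointers that may not belong to the same array, which is exactly the undefined behaviour the comment warns about. The pattern in isolation (illustrative types, not Lua's):

    #include <stddef.h>

    /* walk the frame and hand back the slot index instead of a boolean, so the
       caller never needs pointer arithmetic on 'candidate' */
    static int index_in_frame(const int *slots, size_t nslots, const int *candidate) {
        for (size_t i = 0; i < nslots; i++) {
            if (candidate == &slots[i]) {
                return (int) i;
            }
        }
        return -1; /* not found */
    }
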
diff --git a/source/luametatex/source/luacore/lua54/src/ldump.c b/source/luametatex/source/luacore/lua54/src/ldump.c
index f848b669c..f231691b7 100644
--- a/source/luametatex/source/luacore/lua54/src/ldump.c
+++ b/source/luametatex/source/luacore/lua54/src/ldump.c
@@ -10,6 +10,7 @@
#include "lprefix.h"
+#include <limits.h>
#include <stddef.h>
#include "lua.h"
@@ -55,8 +56,11 @@ static void dumpByte (DumpState *D, int y) {
}
-/* dumpInt Buff Size */
-#define DIBS ((sizeof(size_t) * 8 / 7) + 1)
+/*
+** 'dumpSize' buffer size: each byte can store up to 7 bits. (The "+6"
+** rounds up the division.)
+*/
+#define DIBS ((sizeof(size_t) * CHAR_BIT + 6) / 7)
static void dumpSize (DumpState *D, size_t x) {
lu_byte buff[DIBS];
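
Each byte that dumpSize writes carries 7 payload bits, so the buffer needs ceil(bits in size_t / 7) entries and the "+6" performs the rounding up: for a 64-bit size_t that is (64 + 6) / 7 = 10 bytes, the same as the old approximation but computed exactly. A standalone check:

    #include <limits.h>
    #include <stdio.h>

    #define DIBS ((sizeof(size_t) * CHAR_BIT + 6) / 7)

    int main(void) {
        printf("size_t has %zu bits, the dump buffer needs %zu bytes\n",
               sizeof(size_t) * CHAR_BIT, (size_t) DIBS);
        return 0;
    }
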
diff --git a/source/luametatex/source/luacore/lua54/src/lundump.c b/source/luametatex/source/luacore/lua54/src/lundump.c
index aba93f828..02aed64fb 100644
--- a/source/luametatex/source/luacore/lua54/src/lundump.c
+++ b/source/luametatex/source/luacore/lua54/src/lundump.c
@@ -248,6 +248,8 @@ static void loadDebug (LoadState *S, Proto *f) {
f->locvars[i].endpc = loadInt(S);
}
n = loadInt(S);
+ if (n != 0) /* does it have debug information? */
+ n = f->sizeupvalues; /* must be this many */
for (i = 0; i < n; i++)
f->upvalues[i].name = loadStringN(S, f);
}
diff --git a/source/luametatex/source/luametatex.h b/source/luametatex/source/luametatex.h
index 05c40321d..a318342c7 100644
--- a/source/luametatex/source/luametatex.h
+++ b/source/luametatex/source/luametatex.h
@@ -92,7 +92,7 @@
# define luametatex_version 210
# define luametatex_revision 8
# define luametatex_version_string "2.10.08"
-# define luametatex_development_id 20230320
+# define luametatex_development_id 20230331
# define luametatex_name_camelcase "LuaMetaTeX"
# define luametatex_name_lowercase "luametatex"
diff --git a/source/luametatex/source/luarest/lmtfilelib.c b/source/luametatex/source/luarest/lmtfilelib.c
index f78c05e64..66fe652f3 100644
--- a/source/luametatex/source/luarest/lmtfilelib.c
+++ b/source/luametatex/source/luarest/lmtfilelib.c
@@ -134,6 +134,16 @@
applied toy the times. I couldn't figure it out and don't want to waste more time on it.
*/
+ /*
+ A windows path should not end with a / so maybe we should check for that and remove it when
+ we have one. Even better is to add a period.
+
+ size_t l = wcslen(w) - 1;
+ if (w[l] == L'/') {
+ w[l] = L'\0';
+ }
+ */
+
typedef struct dir_data {
intptr_t handle;
int closed;
diff --git a/source/luametatex/source/tex/texcommands.c b/source/luametatex/source/tex/texcommands.c
index 76b171789..0eb084296 100644
--- a/source/luametatex/source/tex/texcommands.c
+++ b/source/luametatex/source/tex/texcommands.c
@@ -277,7 +277,8 @@ void tex_initialize_commands(void)
tex_primitive(luatex_command, "alignmentwrapsource", internal_int_cmd, alignment_wrap_source_code, internal_int_base);
/* tex_primitive(luatex_command, "pageboundarypenalty", internal_int_cmd, page_boundary_penalty_code, internal_int_base); */
tex_primitive(luatex_command, "linebreakcriterium", internal_int_cmd, line_break_criterium_code, internal_int_base);
-
+ tex_primitive(luatex_command, "eufactor", internal_int_cmd, eu_factor_code, internal_int_base);
+
/*tex dimensions */
tex_primitive(tex_command, "boxmaxdepth", internal_dimen_cmd, box_max_depth_code, internal_dimen_base);
diff --git a/source/luametatex/source/tex/texdumpdata.h b/source/luametatex/source/tex/texdumpdata.h
index 847bfa46f..a7b51e924 100644
--- a/source/luametatex/source/tex/texdumpdata.h
+++ b/source/luametatex/source/tex/texdumpdata.h
@@ -55,7 +55,7 @@
*/
-# define luametatex_format_fingerprint 686
+# define luametatex_format_fingerprint 687
/* These end up in the string pool. */
diff --git a/source/luametatex/source/tex/texequivalents.h b/source/luametatex/source/tex/texequivalents.h
index cc7f74ffc..c93c1dd04 100644
--- a/source/luametatex/source/tex/texequivalents.h
+++ b/source/luametatex/source/tex/texequivalents.h
@@ -578,6 +578,7 @@ typedef enum int_codes {
compatible anyway. Lesson learned.
*/
variable_family_code,
+ eu_factor_code,
/* those below these are not interfaced via primitives */
internal_par_state_code,
internal_dir_state_code,
@@ -607,7 +608,7 @@ typedef enum int_codes {
} int_codes;
# define first_int_code pre_tolerance_code
-# define last_int_code variable_family_code
+# define last_int_code eu_factor_code
typedef enum dimen_codes {
par_indent_code, /*tex indentation of paragraphs */
@@ -1372,6 +1373,7 @@ extern void tex_forced_word_define (int g, halfword p, singleword flag, halfword
# define cur_fam_par count_parameter(family_code)
# define variable_family_par count_parameter(variable_family_code)
+# define eu_factor_par count_parameter(eu_factor_code)
# define pre_display_direction_par count_parameter(pre_display_direction_code)
# define pre_display_penalty_par count_parameter(pre_display_penalty_code)
# define post_display_penalty_par count_parameter(post_display_penalty_code)
diff --git a/source/luametatex/source/tex/texfont.c b/source/luametatex/source/tex/texfont.c
index 0f1cf6117..b274b3ae9 100644
--- a/source/luametatex/source/tex/texfont.c
+++ b/source/luametatex/source/tex/texfont.c
@@ -1887,6 +1887,20 @@ extinfo *tex_char_extensible_recipe_from_font(halfword f, halfword c)
return ci->math ? ci->math->extensible_recipe : NULL;
}
+extinfo *tex_char_extensible_recipe_front_last(halfword f, halfword c)
+{
+ charinfo *ci = tex_aux_char_info(f, c);
+ while (ci) {
+ halfword next = ci->math ? ci->math->next : -1;
+ if (next > 0) { // no zero
+ ci = tex_aux_char_info(f, next); // move on to the last one in the chain
+ } else {
+ return ci->math ? ci->math->extensible_recipe : NULL;
+ }
+ }
+ return NULL;
+}
+
scaled tex_char_left_margin_from_font(halfword f, halfword c)
{
charinfo *ci = tex_aux_char_info(f, c);
diff --git a/source/luametatex/source/tex/texfont.h b/source/luametatex/source/tex/texfont.h
index 0d3fa9fb3..2adadf45d 100644
--- a/source/luametatex/source/tex/texfont.h
+++ b/source/luametatex/source/tex/texfont.h
@@ -600,6 +600,7 @@ extern scaled tex_char_bottom_margin_from_font (halfword f, halfword c)
extern scaled tex_char_top_overshoot_from_font (halfword f, halfword c);
extern scaled tex_char_bottom_overshoot_from_font (halfword f, halfword c);
extern extinfo *tex_char_extensible_recipe_from_font (halfword f, halfword c);
+extern extinfo *tex_char_extensible_recipe_front_last (halfword f, halfword c);
extern halfword tex_char_unchecked_top_anchor_from_font (halfword f, halfword c);
extern halfword tex_char_unchecked_bottom_anchor_from_font (halfword f, halfword c);
diff --git a/source/luametatex/source/tex/texmaincontrol.c b/source/luametatex/source/tex/texmaincontrol.c
index 8af8020a7..e901906c4 100644
--- a/source/luametatex/source/tex/texmaincontrol.c
+++ b/source/luametatex/source/tex/texmaincontrol.c
@@ -5938,6 +5938,14 @@ void tex_assign_internal_int_value(int a, halfword p, int val)
}
goto DEFINE;
*/
+ case eu_factor_code:
+ if (val < 1) {
+ val = 1;
+ } else if (val > 50) {
+ val = 50;
+ }
+ tex_word_define(a, p, val);
+ break;
default:
DEFINE:
tex_word_define(a, p, val);
@@ -6576,6 +6584,7 @@ void tex_initialize_variables(void)
math_font_control_par = assumed_math_control;
math_eqno_gap_step_par = default_eqno_gap_step;
px_dimen_par = one_bp;
+ eu_factor_par = 1000;
show_node_details_par = 2; /*tex $>1$: |[subtype]| $>2$: |[attributes]| */
ex_hyphen_char_par = '-';
escape_char_par = '\\';
diff --git a/source/luametatex/source/tex/texmath.c b/source/luametatex/source/tex/texmath.c
index 0e52f7c70..7ace4f113 100644
--- a/source/luametatex/source/tex/texmath.c
+++ b/source/luametatex/source/tex/texmath.c
@@ -1763,10 +1763,10 @@ static int tex_aux_scan_active_math_char(mathcodeval *mval, int where)
return 0;
case active_char_cmd:
/*tex
- We reset the code so that we don't get a loop, whuich means that the macro that
+ We reset the code so that we don't get a loop, which means that the macro that
gets invoked has to set the amcode again if needed.
*/
- tex_set_am_code(character, other_char_cmd, 0);
+ tex_set_am_code(character, other_char_cmd, cur_level);
cur_cs = tex_active_to_cs(cur_chr, 1);
cur_cmd = eq_type(cur_cs);
cur_chr = eq_value(cur_cs);
@@ -1783,7 +1783,7 @@ static int tex_aux_scan_active_math_char(mathcodeval *mval, int where)
return 1;
}
} else if (mval->class_value == active_math_class_value) {
- /*tex We might eventually drop tthis feature in favor of the amcode. */
+ /*tex We might eventually drop this feature in favor of the amcode. */
cur_cs = tex_active_to_cs(cur_chr, 1);
cur_cmd = eq_type(cur_cs);
cur_chr = eq_value(cur_cs);
diff --git a/source/luametatex/source/tex/texmlist.c b/source/luametatex/source/tex/texmlist.c
index 3b7734ccf..53a0bd198 100644
--- a/source/luametatex/source/tex/texmlist.c
+++ b/source/luametatex/source/tex/texmlist.c
@@ -631,6 +631,31 @@ static void tex_aux_fake_delimiter(halfword result)
}
/*tex
+ A few helpers:
+*/
+
+inline static int tex_aux_has_delimiter(halfword delimiter, halfword size)
+{
+ return (
+ delimiter && (
+ (tex_fam_fnt(delimiter_small_family(delimiter), size) && delimiter_small_character(delimiter)) ||
+ (tex_fam_fnt(delimiter_large_family(delimiter), size) && delimiter_large_character(delimiter))
+ )
+ );
+}
+
+static inline int tex_aux_has_extensible(halfword delimiter, halfword size)
+{
+ if (delimiter && delimiter_small_character(delimiter)) {
+ halfword curfnt = tex_fam_fnt(delimiter_small_family(delimiter), size);
+ if (curfnt != null_font) {
+ return tex_char_extensible_recipe_front_last(curfnt, delimiter_small_character(delimiter)) ? 1 : 0;
+ }
+ }
+ return 0;
+}
+
+/*tex
A variant on a suggestion on the list based on analysis by Ulrik Vieth it in the mean
adapted. We keep these 500 and 2 because then we can use similar values.
*/
@@ -772,7 +797,7 @@ static halfword tex_aux_make_delimiter(halfword target, halfword delimiter, int
static halfword tex_aux_overbar(halfword box, scaled gap, scaled height, scaled krn, halfword att, quarterword index, halfword size, halfword fam, halfword topdelimiter, halfword style)
{
- halfword rule = topdelimiter
+ halfword rule = (topdelimiter && tex_aux_has_extensible(topdelimiter, size))
? tex_aux_make_delimiter(null, topdelimiter, size, box_width(box), 1, style, 0, NULL, NULL, 0, 0, NULL, 0)
: tex_aux_fraction_rule(box_width(box), height, att, index, size, fam);
/*tex Safeguard: */
@@ -801,9 +826,11 @@ static halfword tex_aux_overbar(halfword box, scaled gap, scaled height, scaled
return rule;
}
-static halfword tex_aux_underbar(halfword box, scaled gap, scaled height, scaled krn, halfword att, quarterword index, halfword size, halfword fam)
+static halfword tex_aux_underbar(halfword box, scaled gap, scaled height, scaled krn, halfword att, quarterword index, halfword size, halfword fam, halfword botdelimiter, halfword style)
{
- halfword rule = tex_aux_fraction_rule(box_width(box), height, att, index, size, fam);
+ halfword rule = (botdelimiter && tex_aux_has_extensible(botdelimiter, size))
+ ? tex_aux_make_delimiter(null, botdelimiter, size, box_width(box), 1, style, 0, NULL, NULL, 0, 0, NULL, 0)
+ : tex_aux_fraction_rule(box_width(box), height, att, index, size, fam);
if (gap) {
halfword kern = tex_new_kern_node(gap, vertical_math_kern_subtype);
tex_attach_attribute_list_attribute(kern, att);
@@ -1403,8 +1430,7 @@ static halfword tex_aux_make_delimiter(halfword target, halfword delimiter, int
extremes->height = 0;
extremes->depth = 0;
}
- if (delimiter && ! delimiter_small_family(delimiter) && ! delimiter_small_character(delimiter)
- && ! delimiter_large_family(delimiter) && ! delimiter_large_character(delimiter)) {
+ if (! tex_aux_has_delimiter(delimiter, size)) {
halfword result = tex_new_null_box_node(hlist_node, math_v_delimiter_list);
tex_attach_attribute_list_copy(result, delimiter);
if (! flat) {
@@ -2248,7 +2274,8 @@ static void tex_aux_make_under(halfword target, halfword style, halfword size, h
halfword result = tex_aux_underbar(
tex_aux_clean_box(noad_nucleus(target), tex_math_style_variant(style, math_parameter_under_line_variant), style, math_nucleus_list, 0, NULL),
vgap, thickness, kern,
- get_attribute_list(noad_nucleus(target)), math_under_rule_subtype, size, fam
+ get_attribute_list(noad_nucleus(target)), math_under_rule_subtype, size, fam,
+ null, style
);
node_subtype(result) = math_over_list;
kernel_math_list(noad_nucleus(target)) = result;
@@ -3566,42 +3593,6 @@ static halfword tex_aux_make_skewed_fraction(halfword target, int style, int siz
return fraction;
}
-static halfword tex_aux_make_stretched_fraction(halfword target, int style, int size, kernset *kerns)
-{
- halfword middle = null;
- halfword numerator = null;
- halfword denominator = null;
- scaled shift_up = 0;
- scaled shift_down = 0;
- scaled delta = 0;
- halfword middle_delimiter = fraction_middle_delimiter(target);
- halfword thickness = tex_aux_check_fraction_rule(target, style, size, stretched_fraction_subtype, NULL);
- halfword fraction = tex_new_null_box_node(vlist_node, math_fraction_list);
- (void) kerns;
- tex_attach_attribute_list_copy(fraction, target);
- tex_aux_wrap_fraction_parts(target, style, size, &numerator, &denominator, 1);
- tex_aux_calculate_fraction_shifts_normal(target, style, size, numerator, denominator, &shift_up, &shift_down, &delta);
- tex_aux_apply_fraction_shifts(fraction, numerator, denominator, shift_up, shift_down);
- middle = tex_aux_make_delimiter(target, middle_delimiter, size, box_width(fraction), 1, style, 0, NULL, NULL, 0, 0, NULL, 0);
- if (box_width(middle) < box_width(fraction)) {
- /*tex It's always in the details: */
- scaled delta = (box_width(fraction) - box_width(middle)) / 2;
- tex_aux_prepend_hkern_to_box_list(middle, delta, horizontal_math_kern_subtype, "narrow delimiter");
- tex_aux_append_hkern_to_box_list(middle, delta, horizontal_math_kern_subtype, "narrow delimiter");
- box_width(middle) = box_width(fraction);
- } else if (box_width(middle) > box_width(fraction)) {
- scaled delta = (box_width(middle) - box_width(fraction)) / 2;
- tex_aux_prepend_hkern_to_box_list(numerator, delta, horizontal_math_kern_subtype, "wide delimiter");
- tex_aux_append_hkern_to_box_list(numerator, delta, horizontal_math_kern_subtype, "wide delimiter");
- tex_aux_prepend_hkern_to_box_list(denominator, delta, horizontal_math_kern_subtype, "wide delimiter");
- tex_aux_append_hkern_to_box_list(denominator, delta, horizontal_math_kern_subtype, "wide delimiter");
- box_width(fraction) = box_width(middle);
- }
- tex_aux_compensate_fraction_rule(target, fraction, middle, thickness);
- box_list(fraction) = tex_aux_assemble_fraction(target, style, size, numerator, denominator, middle, delta, shift_up, shift_down);
- return fraction;
-}
-
static halfword tex_aux_make_ruled_fraction(halfword target, int style, int size, kernset *kerns, int fractiontype)
{
halfword numerator = null;
@@ -3630,6 +3621,46 @@ static halfword tex_aux_make_ruled_fraction(halfword target, int style, int size
return fraction;
}
+static halfword tex_aux_make_stretched_fraction(halfword target, int style, int size, kernset *kerns)
+{
+ halfword middle_delimiter = fraction_middle_delimiter(target);
+ if (tex_aux_has_extensible(middle_delimiter, size)) {
+ halfword middle = null;
+ halfword numerator = null;
+ halfword denominator = null;
+ scaled shift_up = 0;
+ scaled shift_down = 0;
+ scaled delta = 0;
+ halfword thickness = tex_aux_check_fraction_rule(target, style, size, stretched_fraction_subtype, NULL);
+ halfword fraction = tex_new_null_box_node(vlist_node, math_fraction_list);
+ (void) kerns;
+ tex_attach_attribute_list_copy(fraction, target);
+ tex_aux_wrap_fraction_parts(target, style, size, &numerator, &denominator, 1);
+ tex_aux_calculate_fraction_shifts_normal(target, style, size, numerator, denominator, &shift_up, &shift_down, &delta);
+ tex_aux_apply_fraction_shifts(fraction, numerator, denominator, shift_up, shift_down);
+ middle = tex_aux_make_delimiter(target, middle_delimiter, size, box_width(fraction), 1, style, 0, NULL, NULL, 0, 0, NULL, 0);
+ if (box_width(middle) < box_width(fraction)) {
+ /*tex It's always in the details: */
+ scaled delta = (box_width(fraction) - box_width(middle)) / 2;
+ tex_aux_prepend_hkern_to_box_list(middle, delta, horizontal_math_kern_subtype, "narrow delimiter");
+ tex_aux_append_hkern_to_box_list(middle, delta, horizontal_math_kern_subtype, "narrow delimiter");
+ box_width(middle) = box_width(fraction);
+ } else if (box_width(middle) > box_width(fraction)) {
+ scaled delta = (box_width(middle) - box_width(fraction)) / 2;
+ tex_aux_prepend_hkern_to_box_list(numerator, delta, horizontal_math_kern_subtype, "wide delimiter");
+ tex_aux_append_hkern_to_box_list(numerator, delta, horizontal_math_kern_subtype, "wide delimiter");
+ tex_aux_prepend_hkern_to_box_list(denominator, delta, horizontal_math_kern_subtype, "wide delimiter");
+ tex_aux_append_hkern_to_box_list(denominator, delta, horizontal_math_kern_subtype, "wide delimiter");
+ box_width(fraction) = box_width(middle);
+ }
+ tex_aux_compensate_fraction_rule(target, fraction, middle, thickness);
+ box_list(fraction) = tex_aux_assemble_fraction(target, style, size, numerator, denominator, middle, delta, shift_up, shift_down);
+ return fraction;
+ } else {
+ return tex_aux_make_ruled_fraction(target, style, size, kerns, over_fraction_subtype);
+ }
+}
+
/*tex
We intercept bad nodes created at the \LUA\ end but only partially. The fraction handler is
quite complex and uses a lot of parameters. You shouldn't mess with \TEX.
@@ -7146,7 +7177,7 @@ static void tex_mlist_to_hlist_finalize_list(mliststate *state)
tex_couple_nodes(p, box_list(l));
box_list(l) = null;
tex_flush_node(l);
- } else if (current_type == simple_noad && (current_subtype == math_end_class) || (current_subtype == math_begin_class)) {
+ } else if (current_type == simple_noad && (current_subtype == math_end_class || current_subtype == math_begin_class)) {
if (noad_new_hlist(current)) {
tex_flush_node(noad_new_hlist(current));
noad_new_hlist(current) = null;
diff --git a/source/luametatex/source/tex/texnodes.c b/source/luametatex/source/tex/texnodes.c
index 09aa74d57..49a5b8fa7 100644
--- a/source/luametatex/source/tex/texnodes.c
+++ b/source/luametatex/source/tex/texnodes.c
@@ -186,7 +186,7 @@ void lmt_nodelib_initialize(void) {
set_value_entry_key(subtypes_kern, italic_kern_subtype, italiccorrection)
set_value_entry_key(subtypes_kern, left_margin_kern_subtype, leftmarginkern)
set_value_entry_key(subtypes_kern, right_margin_kern_subtype, rightmarginkern)
- set_value_entry_key(subtypes_kern, explicit_math_kern_subtype, mathkerns)
+ set_value_entry_key(subtypes_kern, explicit_math_kern_subtype, mathkern)
set_value_entry_key(subtypes_kern, math_shape_kern_subtype, mathshapekern)
set_value_entry_key(subtypes_kern, horizontal_math_kern_subtype, horizontalmathkern)
set_value_entry_key(subtypes_kern, vertical_math_kern_subtype, verticalmathkern)
diff --git a/source/luametatex/source/tex/texscanning.c b/source/luametatex/source/tex/texscanning.c
index 23fc29d1c..aae30c6f0 100644
--- a/source/luametatex/source/tex/texscanning.c
+++ b/source/luametatex/source/tex/texscanning.c
@@ -2339,7 +2339,7 @@ typedef enum scanned_unit {
static int tex_aux_scan_unit(halfword *num, halfword *denom, halfword *value, halfword *order)
{
- AGAIN: /* only for true */
+//AGAIN: /* only for true */
do {
tex_get_x_token();
} while (cur_cmd == spacer_cmd);
@@ -2436,15 +2436,21 @@ static int tex_aux_scan_unit(halfword *num, halfword *denom, halfword *value, ha
}
break;
case 't': case 'T':
- if (order) {
- switch (chrtwo) {
- case 'r': case 'R':
- if (tex_scan_mandate_keyword("true", 2)) {
- /*tex This is now a bogus prefix that might get dropped! */
- goto AGAIN;
- }
- }
+ switch (chrtwo) {
+ case 's': case 'S':
+ *num = 4588;
+ *denom = 645;
+ return normal_unit_scanned;
}
+ // if (order) {
+ // switch (chrtwo) {
+ // case 'r': case 'R':
+ // if (tex_scan_mandate_keyword("true", 2)) {
+ // /*tex This is now a bogus prefix that might get dropped! */
+ // goto AGAIN;
+ // }
+ // }
+ // }
break;
case 'e': case 'E':
switch (chrtwo) {
@@ -2454,6 +2460,14 @@ static int tex_aux_scan_unit(halfword *num, halfword *denom, halfword *value, ha
case 'x': case 'X':
*value = tex_get_scaled_ex_height(cur_font_par);
return relative_unit_scanned;
+ case 's': case 'S':
+ *num = 9176;
+ *denom = 129;
+ return normal_unit_scanned;
+ case 'u': case 'U':
+ *num = 9176 * eu_factor_par;
+ *denom = 129 * 10;
+ return normal_unit_scanned;
}
break;
case 'f': case 'F':
@@ -2486,7 +2500,7 @@ static int tex_aux_scan_unit(halfword *num, halfword *denom, halfword *value, ha
/*tex
When we drop |true| support we can use the next variant which is a bit more efficient
- and also handles optional units. LAter we will see a more limited variant that also
+ and also handles optional units. Later we will see a more limited variant that also
includes the scaler.
*/
@@ -2557,6 +2571,17 @@ static int tex_aux_scan_unit_new(halfword *num, halfword *denom, halfword *value
}
}
break;
+ case 't': case 'T':
+ tex_get_x_token();
+ if (cur_cmd == letter_cmd || cur_cmd == other_char_cmd) {
+ switch (cur_chr) {
+ case 's': case 'S':
+ *num = 4588;
+ *denom = 645;
+ return normal_unit_scanned;
+ }
+ }
+ break;
case 'b': case 'B':
tex_get_x_token();
if (cur_cmd == letter_cmd || cur_cmd == other_char_cmd) {
@@ -2600,6 +2625,14 @@ static int tex_aux_scan_unit_new(halfword *num, halfword *denom, halfword *value
case 'x': case 'X':
*value = tex_get_scaled_ex_height(cur_font_par);
return relative_unit_scanned;
+ case 's': case 'S':
+ *num = 9176;
+ *denom = 129;
+ return normal_unit_scanned;
+ case 'u': case 'U':
+ *num = 9176 * eu_factor_par;
+ *denom = 129 * 10;
+ return normal_unit_scanned;
}
}
break;
@@ -4801,6 +4834,17 @@ static halfword tex_aux_scan_unit_applied(halfword value, halfword fraction, int
}
}
break;
+ case 't': case 'T':
+ tex_get_x_token();
+ if (cur_cmd == letter_cmd || cur_cmd == other_char_cmd) {
+ switch (cur_chr) {
+ case 's': case 'S':
+ num = 4588;
+ denom = 645;
+ goto NORMALUNIT;
+ }
+ }
+ break;
case 'b': case 'B':
tex_get_x_token();
if (cur_cmd == letter_cmd || cur_cmd == other_char_cmd) {
@@ -4842,6 +4886,14 @@ static halfword tex_aux_scan_unit_applied(halfword value, halfword fraction, int
return tex_get_scaled_em_width(cur_font_par);
case 'x': case 'X':
return tex_get_scaled_ex_height(cur_font_par);
+ case 's': case 'S':
+ num = 9176;
+ denom = 129;
+ goto NORMALUNIT;
+ case 'u': case 'U':
+ num = 9176 * eu_factor_par;
+ denom = 129 * 10;
+ goto NORMALUNIT;
}
}
break;
diff --git a/source/luametatex/source/tex/textypes.h b/source/luametatex/source/tex/textypes.h
index 2ca761e59..399839227 100644
--- a/source/luametatex/source/tex/textypes.h
+++ b/source/luametatex/source/tex/textypes.h
@@ -576,6 +576,47 @@ typedef union tokenword {
# define stp_language_size 250
/*tex
+ Units. At some point these will be used in texscanning and lmtexlib.
+*/
+
+
+# define bp_numerator 7227 // base point
+# define bp_denominator 7200
+
+# define cc_numerator 14856 // cicero
+# define cc_denominator 1157
+
+# define cm_numerator 7227 // centimeter
+# define cm_denominator 254
+
+# define dd_numerator 1238 // didot
+# define dd_denominator 1157
+
+# define dk_numerator 49838 // knuth
+# define dk_denominator 7739
+
+# define es_numerator 9176 // edith
+# define es_denominator 129
+
+# define in_numerator 7227 // inch
+# define in_denominator 100
+
+# define mm_numerator 7227 // millimeter
+# define mm_denominator 2540
+
+# define pc_numerator 12 // pica
+# define pc_denominator 1
+
+# define pt_numerator 1 // point
+# define pt_denominator 1
+
+# define sp_numerator 1 // scaled point
+# define sp_denominator 1
+
+# define ts_numerator 4588 // tove
+# define ts_denominator 645
+
+/*tex
These are used in the code, so when we want them to adapt, which is needed when we make them
configurable, we need to change this.
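
One possible shape for sharing these pairs with the scanners in texscanning.c and lmttexlib.c (purely a sketch, not part of this patch) is a small table keyed on the two letter unit name, so the literal ratios stop being repeated in every branch; the values are the ones defined above:

    #include <stddef.h>

    typedef struct unit_ratio {
        const char *name;
        int         numerator;
        int         denominator;
    } unit_ratio;

    static const unit_ratio unit_ratios[] = {
        { "bp",  7227,  7200 }, { "cc", 14856, 1157 }, { "cm", 7227,  254 },
        { "dd",  1238,  1157 }, { "dk", 49838, 7739 }, { "es", 9176,  129 },
        { "in",  7227,   100 }, { "mm",  7227, 2540 }, { "pc",   12,    1 },
        { "pt",     1,     1 }, { "sp",     1,    1 }, { "ts", 4588,  645 },
    };

    /* linear lookup is fine for a dozen entries */
    static const unit_ratio *find_unit(const char *two) {
        for (size_t i = 0; i < sizeof(unit_ratios) / sizeof(unit_ratios[0]); i++) {
            if (unit_ratios[i].name[0] == two[0] && unit_ratios[i].name[1] == two[1]) {
                return &unit_ratios[i];
            }
        }
        return NULL;
    }
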
diff --git a/tex/context/base/mkii/cont-new.mkii b/tex/context/base/mkii/cont-new.mkii
index ec168c472..4eb526804 100644
--- a/tex/context/base/mkii/cont-new.mkii
+++ b/tex/context/base/mkii/cont-new.mkii
@@ -11,7 +11,7 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\newcontextversion{2023.03.20 15:42}
+\newcontextversion{2023.04.01 09:28}
%D This file is loaded at runtime, thereby providing an
%D excellent place for hacks, patches, extensions and new
diff --git a/tex/context/base/mkii/context.mkii b/tex/context/base/mkii/context.mkii
index 9e6c53624..a09091440 100644
--- a/tex/context/base/mkii/context.mkii
+++ b/tex/context/base/mkii/context.mkii
@@ -20,7 +20,7 @@
%D your styles an modules.
\edef\contextformat {\jobname}
-\edef\contextversion{2023.03.20 15:42}
+\edef\contextversion{2023.04.01 09:28}
%D For those who want to use this:
diff --git a/tex/context/base/mkiv/anch-pos.lua b/tex/context/base/mkiv/anch-pos.lua
index 77f55964f..cf3ed87fc 100644
--- a/tex/context/base/mkiv/anch-pos.lua
+++ b/tex/context/base/mkiv/anch-pos.lua
@@ -6,12 +6,9 @@ if not modules then modules = { } end modules ['anch-pos'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We save positional information in the main utility table. Not only
-can we store much more information in <l n='lua'/> but it's also
-more efficient.</p>
---ldx]]--
-
+-- We save positional information in the main utility table. Not only can we store
+-- much more information in Lua but it's also more efficient.
+--
-- plus (extra) is obsolete but we will keep it for a while
--
-- maybe replace texsp by our own converter (stay at the lua end)
diff --git a/tex/context/base/mkiv/attr-ini.lua b/tex/context/base/mkiv/attr-ini.lua
index b05c343e5..cd1a0c549 100644
--- a/tex/context/base/mkiv/attr-ini.lua
+++ b/tex/context/base/mkiv/attr-ini.lua
@@ -9,10 +9,8 @@ if not modules then modules = { } end modules ['attr-ini'] = {
local next, type = next, type
local osexit = os.exit
---[[ldx--
-<p>We start with a registration system for atributes so that we can use the
-symbolic names later on.</p>
---ldx]]--
+-- We start with a registration system for attributes so that we can use the symbolic
+-- names later on.
local nodes = nodes
local context = context
@@ -54,17 +52,13 @@ storage.register("attributes/list", list, "attributes.list")
-- end
-- end
---[[ldx--
-<p>We reserve this one as we really want it to be always set (faster).</p>
---ldx]]--
+-- We reserve this one as we really want it to be always set (faster).
names[0], numbers["fontdynamic"] = "fontdynamic", 0
---[[ldx--
-<p>private attributes are used by the system and public ones are for users. We use dedicated
-ranges of numbers for them. Of course a the <l n='context'/> end a private attribute can be
-accessible too, so a private attribute can have a public appearance.</p>
---ldx]]--
+-- Private attributes are used by the system and public ones are for users. We use
+-- dedicated ranges of numbers for them. Of course at the TeX end a private attribute
+-- can be accessible too, so a private attribute can have a public appearance.
sharedstorage.attributes_last_private = sharedstorage.attributes_last_private or 15 -- very private
sharedstorage.attributes_last_public = sharedstorage.attributes_last_public or 1024 -- less private
diff --git a/tex/context/base/mkiv/bibl-bib.lua b/tex/context/base/mkiv/bibl-bib.lua
index baeb3d2f9..b7e478004 100644
--- a/tex/context/base/mkiv/bibl-bib.lua
+++ b/tex/context/base/mkiv/bibl-bib.lua
@@ -6,11 +6,9 @@ if not modules then modules = { } end modules ['bibl-bib'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is a prelude to integrated bibliography support. This file just loads
-bibtex files and converts them to xml so that the we access the content
-in a convenient way. Actually handling the data takes place elsewhere.</p>
---ldx]]--
+-- This is a prelude to integrated bibliography support. This file just loads bibtex
+-- files and converts them to xml so that we can access the content in a convenient
+-- way. Actually handling the data takes place elsewhere.
local lower, format, gsub, concat = string.lower, string.format, string.gsub, table.concat
local next = next
diff --git a/tex/context/base/mkiv/char-def.lua b/tex/context/base/mkiv/char-def.lua
index 5e9d7d05a..1d4c130e9 100644
--- a/tex/context/base/mkiv/char-def.lua
+++ b/tex/context/base/mkiv/char-def.lua
@@ -67155,6 +67155,7 @@ characters.data={
description="MINUS SIGN",
direction="es",
linebreak="pr",
+ mathextensible="h",
mathgroup="binary arithmetic",
mathspec={
{
diff --git a/tex/context/base/mkiv/char-ini.lua b/tex/context/base/mkiv/char-ini.lua
index db1b85cc5..627ba072c 100644
--- a/tex/context/base/mkiv/char-ini.lua
+++ b/tex/context/base/mkiv/char-ini.lua
@@ -36,20 +36,16 @@ local trace_defining = false trackers.register("characters.defining", fu
local report_defining = logs.reporter("characters")
---[[ldx--
-<p>This module implements some methods and creates additional datastructured
-from the big character table that we use for all kind of purposes:
-<type>char-def.lua</type>.</p>
-
-<p>We assume that at this point <type>characters.data</type> is already
-loaded!</p>
---ldx]]--
-
+-- This module implements some methods and creates additional data structures from
+-- the big character table that we use for all kinds of purposes: 'char-def.lua'.
+--
+-- We assume that at this point 'characters.data' is already populated!
+--
-- todo: in 'char-def.lua' assume defaults:
--
--- directions = l
--- cjkwd = a
--- linebreak = al
+-- directions = l
+-- cjkwd = a
+-- linebreak = al
characters = characters or { }
local characters = characters
@@ -62,9 +58,7 @@ else
os.exit()
end
---[[ldx--
-Extending the table.
---ldx]]--
+-- Extending the table.
if context and CONTEXTLMTXMODE == 0 then
@@ -84,9 +78,7 @@ if context and CONTEXTLMTXMODE == 0 then
end
---[[ldx--
-<p>This converts a string (if given) into a number.</p>
---ldx]]--
+-- This converts a string (if given) into a number.
local pattern = (P("0x") + P("U+")) * ((R("09","AF")^1 * P(-1)) / function(s) return tonumber(s,16) end)
@@ -957,10 +949,8 @@ characters.bidi = allocate {
on = "Other Neutrals",
}
---[[ldx--
-<p>At this point we assume that the big data table is loaded. From this
-table we derive a few more.</p>
---ldx]]--
+-- At this point we assume that the big data table is loaded. From this table we
+-- derive a few more.
if not characters.fallbacks then
@@ -1037,10 +1027,8 @@ setmetatableindex(characters.textclasses,function(t,k)
return false
end)
---[[ldx--
-<p>Next comes a whole series of helper methods. These are (will be) part
-of the official <l n='api'/>.</p>
---ldx]]--
+-- Next comes a whole series of helper methods. These are (will be) part of the
+-- official API.
-- we could make them virtual: characters.contextnames[n]
@@ -1433,9 +1421,7 @@ function characters.lettered(str,spacing)
return concat(new)
end
---[[ldx--
-<p>Requesting lower and uppercase codes:</p>
---ldx]]--
+-- Requesting lower and uppercase codes:
function characters.uccode(n) return uccodes[n] end -- obsolete
function characters.lccode(n) return lccodes[n] end -- obsolete
diff --git a/tex/context/base/mkiv/char-tex.lua b/tex/context/base/mkiv/char-tex.lua
index 7f544b147..09547d005 100644
--- a/tex/context/base/mkiv/char-tex.lua
+++ b/tex/context/base/mkiv/char-tex.lua
@@ -42,17 +42,14 @@ local trace_defining = false trackers.register("characters.defining", fu
local report_defining = logs.reporter("characters")
---[[ldx--
-<p>In order to deal with 8-bit output, we need to find a way to go from <l n='utf'/> to
-8-bit. This is handled in the <l n='luatex'/> engine itself.</p>
-
-<p>This leaves us problems with characters that are specific to <l n='tex'/> like
-<type>{}</type>, <type>$</type> and alike. We can remap some chars that tex input files
-are sensitive for to a private area (while writing to a utility file) and revert then
-to their original slot when we read in such a file. Instead of reverting, we can (when
-we resolve characters to glyphs) map them to their right glyph there. For this purpose
-we can use the private planes 0x0F0000 and 0x100000.</p>
---ldx]]--
+-- In order to deal with 8-bit output, we need to find a way to go from UTF to
+-- 8-bit. This is handled in the 32 bit engine itself. This leaves us problems with
+-- characters that are specific to TeX, like curly braces and dollars. We can remap
+-- some chars that tex input files are sensitive to into a private area (while
+-- writing to a utility file) and revert them to their original slot when we read in
+-- such a file. Instead of reverting, we can (when we resolve characters to glyphs)
+-- map them to their right glyph there. For this purpose we can use the private
+-- planes 0x0F0000 and 0x100000.
local low = allocate()
local high = allocate()
@@ -102,21 +99,6 @@ private.escape = utf.remapper(escapes) -- maybe: ,"dynamic"
private.replace = utf.remapper(low) -- maybe: ,"dynamic"
private.revert = utf.remapper(high) -- maybe: ,"dynamic"
---[[ldx--
-<p>We get a more efficient variant of this when we integrate
-replacements in collapser. This more or less renders the previous
-private code redundant. The following code is equivalent but the
-first snippet uses the relocated dollars.</p>
-
-<typing>
-[󰀤x󰀤] [$x$]
-</typing>
---ldx]]--
-
--- using the tree-lpeg-mapper would be nice but we also need to deal with end-of-string
--- cases: "\"\i" and don't want "\relax" to be seen as \r e lax" (for which we need to mess
--- with spaces
-
local accentmapping = allocate {
['"'] = { [""] = "¨",
A = "Ä", a = "ä",
@@ -452,10 +434,8 @@ implement { -- a waste of scanner but consistent
actions = texcharacters.defineaccents
}
---[[ldx--
-<p>Instead of using a <l n='tex'/> file to define the named glyphs, we
-use the table. After all, we have this information available anyway.</p>
---ldx]]--
+-- Instead of using a TeX file to define the named glyphs, we use the table. After
+-- all, we have this information available anyway.
function commands.makeactive(n,name) -- not used
contextsprint(ctxcatcodes,format("\\catcode%s=13\\unexpanded\\def %s{\\%s}",n,utfchar(n),name))
@@ -747,9 +727,7 @@ function characters.setactivecatcodes(cct)
tex.catcodetable = saved
end
---[[ldx--
-<p>Setting the lccodes is also done in a loop over the data table.</p>
---ldx]]--
+-- -- Setting the lccodes is also done in a loop over the data table.
-- function characters.setcodes() -- we could loop over csletters
-- if trace_defining then
diff --git a/tex/context/base/mkiv/char-utf.lua b/tex/context/base/mkiv/char-utf.lua
index e230370b5..f9cba36ca 100644
--- a/tex/context/base/mkiv/char-utf.lua
+++ b/tex/context/base/mkiv/char-utf.lua
@@ -6,21 +6,19 @@ if not modules then modules = { } end modules ['char-utf'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>When a sequence of <l n='utf'/> characters enters the application, it may be
-neccessary to collapse subsequences into their composed variant.</p>
-
-<p>This module implements methods for collapsing and expanding <l n='utf'/>
-sequences. We also provide means to deal with characters that are special to
-<l n='tex'/> as well as 8-bit characters that need to end up in special kinds
-of output (for instance <l n='pdf'/>).</p>
-
-<p>We implement these manipulations as filters. One can run multiple filters
-over a string.</p>
-
-<p>The old code has now been moved to char-obs.lua which we keep around for
-educational purposes.</p>
---ldx]]--
+-- When a sequence of UTF characters enters the application, it may be
+-- necessary to collapse subsequences into their composed variant.
+--
+-- This module implements methods for collapsing and expanding UTF sequences. We
+-- also provide means to deal with characters that are special to TeX as well as
+-- 8-bit characters that need to end up in special kinds of output (for instance
+-- PDF).
+--
+-- We implement these manipulations as filters. One can run multiple filters over a
+-- string.
+--
+-- The old code has now been moved to char-obs.lua which we keep around for
+-- educational purposes.
local next, type = next, type
local gsub, find = string.gsub, string.find
@@ -55,10 +53,8 @@ characters.filters.utf = utffilters
local data = characters.data
---[[ldx--
-<p>It only makes sense to collapse at runtime, since we don't expect source code
-to depend on collapsing.</p>
---ldx]]--
+-- It only makes sense to collapse at runtime, since we don't expect source code to
+-- depend on collapsing.
-- for the moment, will be entries in char-def.lua .. this is just a subset that for
-- typographic (font) reasons we want to have split ... if we decompose all, we get
diff --git a/tex/context/base/mkiv/chem-ini.lua b/tex/context/base/mkiv/chem-ini.lua
index f7d10ffa2..06049807a 100644
--- a/tex/context/base/mkiv/chem-ini.lua
+++ b/tex/context/base/mkiv/chem-ini.lua
@@ -19,11 +19,9 @@ local cpatterns = patterns.context
chemistry = chemistry or { }
local chemistry = chemistry
---[[
-<p>The next code started out as adaptation of code from Wolfgang Schuster as
-posted on the mailing list. The current version supports nested braces and
-unbraced integers as scripts.</p>
-]]--
+-- The next code started out as an adaptation of code from Wolfgang Schuster as posted
+-- on the mailing list. The current version supports nested braces and unbraced
+-- integers as scripts.
local moleculeparser = cpatterns.scripted
chemistry.moleculeparser = moleculeparser
diff --git a/tex/context/base/mkiv/cont-new.mkiv b/tex/context/base/mkiv/cont-new.mkiv
index 684cf24c8..f0fd15f3b 100644
--- a/tex/context/base/mkiv/cont-new.mkiv
+++ b/tex/context/base/mkiv/cont-new.mkiv
@@ -13,7 +13,7 @@
% \normalend % uncomment this to get the real base runtime
-\newcontextversion{2023.03.20 15:42}
+\newcontextversion{2023.04.01 09:28}
%D This file is loaded at runtime, thereby providing an excellent place for hacks,
%D patches, extensions and new features. There can be local overloads in cont-loc
diff --git a/tex/context/base/mkiv/context.mkiv b/tex/context/base/mkiv/context.mkiv
index 9b89b9bdf..c2735fa5e 100644
--- a/tex/context/base/mkiv/context.mkiv
+++ b/tex/context/base/mkiv/context.mkiv
@@ -49,7 +49,7 @@
%D {YYYY.MM.DD HH:MM} format.
\edef\contextformat {\jobname}
-\edef\contextversion{2023.03.20 15:42}
+\edef\contextversion{2023.04.01 09:28}
%D Kind of special:
diff --git a/tex/context/base/mkiv/core-con.lua b/tex/context/base/mkiv/core-con.lua
index f57eb6ef8..d3e108a7a 100644
--- a/tex/context/base/mkiv/core-con.lua
+++ b/tex/context/base/mkiv/core-con.lua
@@ -8,13 +8,9 @@ if not modules then modules = { } end modules ['core-con'] = {
-- todo: split into lang-con.lua and core-con.lua
---[[ldx--
-<p>This module implements a bunch of conversions. Some are more
-efficient than their <l n='tex'/> counterpart, some are even
-slower but look nicer this way.</p>
-
-<p>Some code may move to a module in the language namespace.</p>
---ldx]]--
+-- This module implements a bunch of conversions. Some are more efficient than their
+-- TeX counterpart, some are even slower but look nicer this way. Some code may move
+-- to a module in the language namespace.
local floor = math.floor
local osdate, ostime, ostimezone = os.date, os.time, os.timezone
diff --git a/tex/context/base/mkiv/core-dat.lua b/tex/context/base/mkiv/core-dat.lua
index b58a801d9..89521b185 100644
--- a/tex/context/base/mkiv/core-dat.lua
+++ b/tex/context/base/mkiv/core-dat.lua
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['core-dat'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This module provides a (multipass) container for arbitrary data. It
-replaces the twopass data mechanism.</p>
---ldx]]--
+-- This module provides a (multipass) container for arbitrary data. It replaces the
+-- twopass data mechanism.
local tonumber, tostring, type = tonumber, tostring, type
@@ -231,9 +229,7 @@ implement {
actions = datasetvariablefromjob
}
---[[ldx--
-<p>We also provide an efficient variant for page states.</p>
---ldx]]--
+-- We also provide an efficient variant for page states.
local collected = allocate()
local tobesaved = allocate()
@@ -250,13 +246,9 @@ local function initializer()
tobesaved = pagestates.tobesaved
end
-job.register('job.pagestates.collected', tobesaved, initializer, nil)
+job.register("job.pagestates.collected", tobesaved, initializer, nil)
-table.setmetatableindex(tobesaved, function(t,k)
- local v = { }
- t[k] = v
- return v
-end)
+table.setmetatableindex(tobesaved, "table")
local function setstate(settings)
local name = settings.name
diff --git a/tex/context/base/mkiv/core-two.lua b/tex/context/base/mkiv/core-two.lua
index 3ab2112b9..da37a6170 100644
--- a/tex/context/base/mkiv/core-two.lua
+++ b/tex/context/base/mkiv/core-two.lua
@@ -6,15 +6,14 @@ if not modules then modules = { } end modules ['core-two'] = {
license = "see context related readme files"
}
+-- This is actually one of the oldest MkIV files and basically a port of MkII but
+-- the old usage has long been phased out. Also, the public part is now handled by
+-- datasets which makes this a more private store.
+
local next = next
local remove, concat = table.remove, table.concat
local allocate = utilities.storage.allocate
---[[ldx--
-<p>We save multi-pass information in the main utility table. This is a
-bit of a mess because we support old and new methods.</p>
---ldx]]--
-
local collected = allocate()
local tobesaved = allocate()
diff --git a/tex/context/base/mkiv/core-uti.lua b/tex/context/base/mkiv/core-uti.lua
index 887ef9a75..e8a28c187 100644
--- a/tex/context/base/mkiv/core-uti.lua
+++ b/tex/context/base/mkiv/core-uti.lua
@@ -6,16 +6,13 @@ if not modules then modules = { } end modules ['core-uti'] = {
license = "see context related readme files"
}
--- todo: keep track of changes here (hm, track access, and only true when
--- accessed and changed)
-
---[[ldx--
-<p>A utility file has always been part of <l n='context'/> and with
-the move to <l n='luatex'/> we also moved a lot of multi-pass info
-to a <l n='lua'/> table. Instead of loading a <l n='tex'/> based
-utility file under different setups, we now load a table once. This
-saves much runtime but at the cost of more memory usage.</p>
---ldx]]--
+-- A utility file has always been part of ConTeXt and with the move to LuaTeX we
+-- also moved a lot of multi-pass info to a Lua table. Instead of loading a TeX
+-- based utility file under different setups, we now load a table once. This saves
+-- much runtime but at the cost of more memory usage.
+--
+-- In the meantime the overhead is a bit more due to the amount of data being saved
+-- and more aggressive compacting.
local math = math
local format, match = string.format, string.match
@@ -46,14 +43,9 @@ local job = job
job.version = 1.32
job.packversion = 1.02
--- some day we will implement loading of other jobs and then we need
--- job.jobs
-
---[[ldx--
-<p>Variables are saved using in the previously defined table and passed
-onto <l n='tex'/> using the following method. Of course one can also
-directly access the variable using a <l n='lua'/> call.</p>
---ldx]]--
+-- Variables are saved in the previously defined table and passed on to TeX
+-- using the following method. Of course one can also directly access the variable
+-- using a Lua call.
local savelist, comment = { }, { }
diff --git a/tex/context/base/mkiv/data-con.lua b/tex/context/base/mkiv/data-con.lua
index 51e0ce856..d7d3c7d46 100644
--- a/tex/context/base/mkiv/data-con.lua
+++ b/tex/context/base/mkiv/data-con.lua
@@ -13,19 +13,17 @@ local trace_cache = false trackers.register("resolvers.cache", functi
local trace_containers = false trackers.register("resolvers.containers", function(v) trace_containers = v end)
local trace_storage = false trackers.register("resolvers.storage", function(v) trace_storage = v end)
---[[ldx--
-<p>Once we found ourselves defining similar cache constructs several times,
-containers were introduced. Containers are used to collect tables in memory and
-reuse them when possible based on (unique) hashes (to be provided by the calling
-function).</p>
-
-<p>Caching to disk is disabled by default. Version numbers are stored in the
-saved table which makes it possible to change the table structures without
-bothering about the disk cache.</p>
-
-<p>Examples of usage can be found in the font related code. This code is not
-ideal but we need it in generic too so we compromise.</p>
---ldx]]--
+-- Once we found ourselves defining similar cache constructs several times,
+-- containers were introduced. Containers are used to collect tables in memory and
+-- reuse them when possible based on (unique) hashes (to be provided by the calling
+-- function).
+--
+-- Caching to disk is disabled by default. Version numbers are stored in the saved
+-- table which makes it possible to change the table structures without bothering
+-- about the disk cache.
+--
+-- Examples of usage can be found in the font related code. This code is not ideal
+-- but we need it in generic too so we compromise.
containers = containers or { }
local containers = containers
diff --git a/tex/context/base/mkiv/data-res.lua b/tex/context/base/mkiv/data-res.lua
index 8afc09b97..11e67f785 100644
--- a/tex/context/base/mkiv/data-res.lua
+++ b/tex/context/base/mkiv/data-res.lua
@@ -135,16 +135,35 @@ local criticalvars = {
-- we also report weird ones, with weird being: (1) duplicate /texmf or (2) no /web2c in
-- the names.
+-- if environment.default_texmfcnf then
+-- resolvers.luacnfspec = "home:texmf/web2c;" .. environment.default_texmfcnf -- texlive + home: for taco etc
+-- else
+-- resolvers.luacnfspec = concat ( {
+-- "home:texmf/web2c",
+-- "selfautoparent:/texmf-local/web2c",
+-- "selfautoparent:/texmf-context/web2c",
+-- "selfautoparent:/texmf-dist/web2c",
+-- "selfautoparent:/texmf/web2c",
+-- }, ";")
+-- end
+
if environment.default_texmfcnf then
+ -- this will go away (but then also no more checking in mtxrun.lua itself)
resolvers.luacnfspec = "home:texmf/web2c;" .. environment.default_texmfcnf -- texlive + home: for taco etc
else
- resolvers.luacnfspec = concat ( {
- "home:texmf/web2c",
- "selfautoparent:/texmf-local/web2c",
- "selfautoparent:/texmf-context/web2c",
- "selfautoparent:/texmf-dist/web2c",
- "selfautoparent:/texmf/web2c",
- }, ";")
+ local texroot = environment.texroot
+ resolvers.luacnfspec = "home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-context/web2c;selfautoparent:/texmf/web2c"
+ if texroot and isdir(texroot .. "/texmf-context") then
+ -- we're okay and run the lean and mean installation
+ elseif texroot and isdir(texroot .. "/texmf-dist") then
+ -- we're in texlive where texmf-dist is leading
+ resolvers.luacnfspec = "home:texmf/web2c;selfautoparent:/texmf-local/web2c;selfautoparent:/texmf-dist/web2c;selfautoparent:/texmf/web2c"
+ elseif ostype ~= "windows" and isdir("/etc/texmf/web2c") then
+ -- we have some linux distribution that does it its own way
+ resolvers.luacnfspec = "home:texmf/web2c;/etc/texmf/web2c;selfautodir:/share/texmf/web2c"
+ else
+ -- we stick to the reference specification
+ end
end
local unset_variable = "unset"
diff --git a/tex/context/base/mkiv/data-tar.lua b/tex/context/base/mkiv/data-tar.lua
index 45de749b6..b2416330f 100644
--- a/tex/context/base/mkiv/data-tar.lua
+++ b/tex/context/base/mkiv/data-tar.lua
@@ -12,14 +12,10 @@ local trace_locating = false trackers.register("resolvers.locating", function(v
local report_tar = logs.reporter("resolvers","tar")
---[[ldx--
-<p>We use a url syntax for accessing the tar file itself and file in it:</p>
-
-<typing>
-tar:///oeps.tar?name=bla/bla.tex
-tar:///oeps.tar?tree=tex/texmf-local
-</typing>
---ldx]]--
+-- We use a url syntax for accessing the tar file itself and file in it:
+--
+-- tar:///oeps.tar?name=bla/bla.tex
+-- tar:///oeps.tar?tree=tex/texmf-local
local resolvers = resolvers
local findfile = resolvers.findfile
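Such a url splits naturally into an archive path plus a name= or tree= query. A stand-alone sketch of that split (not the resolver code, function name invented):

    -- illustrative only: split "tar:///oeps.tar?name=bla/bla.tex" into parts
    local function splitarchiveurl(url)
        local scheme, archive, query = string.match(url,"^(%a+)://(/[^?]+)%?(.*)$")
        if scheme then
            local kind, value = string.match(query,"^(%a+)=(.*)$")
            return scheme, archive, kind, value
        end
    end

    print(splitarchiveurl("tar:///oeps.tar?name=bla/bla.tex"))
    -- tar     /oeps.tar       name    bla/bla.tex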
diff --git a/tex/context/base/mkiv/data-tmp.lua b/tex/context/base/mkiv/data-tmp.lua
index 1948f1ea5..21e0d1f4f 100644
--- a/tex/context/base/mkiv/data-tmp.lua
+++ b/tex/context/base/mkiv/data-tmp.lua
@@ -6,20 +6,15 @@ if not modules then modules = { } end modules ['data-tmp'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This module deals with caching data. It sets up the paths and implements
-loaders and savers for tables. Best is to set the following variable. When not
-set, the usual paths will be checked. Personally I prefer the (users) temporary
-path.</p>
-
-</code>
-TEXMFCACHE=$TMP;$TEMP;$TMPDIR;$TEMPDIR;$HOME;$TEXMFVAR;$VARTEXMF;.
-</code>
-
-<p>Currently we do no locking when we write files. This is no real problem
-because most caching involves fonts and the chance of them being written at the
-same time is small. We also need to extend luatools with a recache feature.</p>
---ldx]]--
+-- This module deals with caching data. It sets up the paths and implements loaders
+-- and savers for tables. Best is to set the following variable. When not set, the
+-- usual paths will be checked. Personally I prefer the (users) temporary path.
+--
+-- TEXMFCACHE=$TMP;$TEMP;$TMPDIR;$TEMPDIR;$HOME;$TEXMFVAR;$VARTEXMF;.
+--
+-- Currently we do no locking when we write files. This is no real problem because
+-- most caching involves fonts and the chance of them being written at the same time
+-- is small. We also need to extend luatools with a recache feature.
local next, type = next, type
local pcall, loadfile, collectgarbage = pcall, loadfile, collectgarbage
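Resolving such a list amounts to expanding the variables and taking the first usable path. A minimal sketch, not the actual data-tmp logic, with a made-up helper name:

    local lfs = lfs or require("lfs")

    -- pick the first existing directory from a TEXMFCACHE style list
    local function firstcachepath(spec)
        for entry in string.gmatch(spec,"[^;]+") do
            local path = string.gsub(entry,"%$(%w+)",function(name)
                return os.getenv(name) or ""
            end)
            if path ~= "" and lfs.attributes(path,"mode") == "directory" then
                return path
            end
        end
    end

    print(firstcachepath("$TMP;$TEMP;$TMPDIR;$HOME;."))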
diff --git a/tex/context/base/mkiv/data-zip.lua b/tex/context/base/mkiv/data-zip.lua
index 1a9310f17..40f38c855 100644
--- a/tex/context/base/mkiv/data-zip.lua
+++ b/tex/context/base/mkiv/data-zip.lua
@@ -14,17 +14,13 @@ local trace_locating = false trackers.register("resolvers.locating", function(v
local report_zip = logs.reporter("resolvers","zip")
---[[ldx--
-<p>We use a url syntax for accessing the zip file itself and file in it:</p>
-
-<typing>
-zip:///oeps.zip?name=bla/bla.tex
-zip:///oeps.zip?tree=tex/texmf-local
-zip:///texmf.zip?tree=/tex/texmf
-zip:///texmf.zip?tree=/tex/texmf-local
-zip:///texmf-mine.zip?tree=/tex/texmf-projects
-</typing>
---ldx]]--
+-- We use a url syntax for accessing the zip file itself and file in it:
+--
+-- zip:///oeps.zip?name=bla/bla.tex
+-- zip:///oeps.zip?tree=tex/texmf-local
+-- zip:///texmf.zip?tree=/tex/texmf
+-- zip:///texmf.zip?tree=/tex/texmf-local
+-- zip:///texmf-mine.zip?tree=/tex/texmf-projects
local resolvers = resolvers
local findfile = resolvers.findfile
diff --git a/tex/context/base/mkiv/file-ini.lua b/tex/context/base/mkiv/file-ini.lua
index 2a0271a9d..01bedeeeb 100644
--- a/tex/context/base/mkiv/file-ini.lua
+++ b/tex/context/base/mkiv/file-ini.lua
@@ -6,11 +6,8 @@ if not modules then modules = { } end modules ['file-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>It's more convenient to manipulate filenames (paths) in <l n='lua'/> than in
-<l n='tex'/>. These methods have counterparts at the <l n='tex'/> end.</p>
---ldx]]--
-
+-- It's more convenient to manipulate filenames (paths) in Lua than in TeX. These
+-- methods have counterparts at the TeX end.
local implement = interfaces.implement
local setmacro = interfaces.setmacro
diff --git a/tex/context/base/mkiv/file-mod.lua b/tex/context/base/mkiv/file-mod.lua
index d392887ec..ac9ad938a 100644
--- a/tex/context/base/mkiv/file-mod.lua
+++ b/tex/context/base/mkiv/file-mod.lua
@@ -6,17 +6,11 @@ if not modules then modules = { } end modules ['file-mod'] = {
license = "see context related readme files"
}
--- This module will be redone! For instance, the prefixes will move to data-*
--- as they arr sort of generic along with home:// etc/.
-
--- context is not defined yet! todo! (we need to load tupp-fil after cld)
--- todo: move startreadingfile to lua and push regime there
-
---[[ldx--
-<p>It's more convenient to manipulate filenames (paths) in
-<l n='lua'/> than in <l n='tex'/>. These methods have counterparts
-at the <l n='tex'/> side.</p>
---ldx]]--
+-- This module will be redone! For instance, the prefixes will move to data-* as
+-- they are sort of generic along with home:// etc.
+--
+-- It is more convenient to manipulate filenames (paths) in Lua than in TeX. The
+-- methods below have counterparts at the TeX end.
local format, find, concat, tonumber = string.format, string.find, table.concat, tonumber
local sortedhash = table.sortedhash
diff --git a/tex/context/base/mkiv/font-afk.lua b/tex/context/base/mkiv/font-afk.lua
index 761016d34..250c17e77 100644
--- a/tex/context/base/mkiv/font-afk.lua
+++ b/tex/context/base/mkiv/font-afk.lua
@@ -7,11 +7,9 @@ if not modules then modules = { } end modules ['font-afk'] = {
dataonly = true,
}
---[[ldx--
-<p>For ligatures, only characters with a code smaller than 128 make sense,
-anything larger is encoding dependent. An interesting complication is that a
-character can be in an encoding twice but is hashed once.</p>
---ldx]]--
+-- For ligatures, only characters with a code smaller than 128 make sense, anything
+-- larger is encoding dependent. An interesting complication is that a character can
+-- be in an encoding twice but is hashed once.
local allocate = utilities.storage.allocate
diff --git a/tex/context/base/mkiv/font-con.lua b/tex/context/base/mkiv/font-con.lua
index 066ea33ed..77708ee08 100644
--- a/tex/context/base/mkiv/font-con.lua
+++ b/tex/context/base/mkiv/font-con.lua
@@ -22,11 +22,9 @@ local trace_scaling = false trackers.register("fonts.scaling", function(v)
local report_defining = logs.reporter("fonts","defining")
--- watch out: no negative depths and negative eights permitted in regular fonts
-
---[[ldx--
-<p>Here we only implement a few helper functions.</p>
---ldx]]--
+-- Watch out: no negative depths and negative heights are permitted in regular
+-- fonts. Also, the code in LMTX is a bit different. Here we only implement a
+-- few helper functions.
local fonts = fonts
local constructors = fonts.constructors or { }
@@ -59,11 +57,9 @@ constructors.designsizes = designsizes
local loadedfonts = allocate()
constructors.loadedfonts = loadedfonts
---[[ldx--
-<p>We need to normalize the scale factor (in scaled points). This has to
-do with the fact that <l n='tex'/> uses a negative multiple of 1000 as
-a signal for a font scaled based on the design size.</p>
---ldx]]--
+-- We need to normalize the scale factor (in scaled points). This has to do with the
+-- fact that TeX uses a negative multiple of 1000 as a signal for a font scaled
+-- based on the design size.
local factors = {
pt = 65536.0,
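The negative-multiple-of-1000 signal mentioned above is the classic TeX "scaled" convention: a request like "scaled 1440" reaches the loader as -1440 and means 1.44 times the design size. A small stand-alone illustration, not the constructors code, with a made-up function name:

    -- sizes in scaled points, 65536sp = 1pt
    local function normalizedsize(requested,designsize)
        if requested < 0 then
            return designsize * (- requested) / 1000
        else
            return requested
        end
    end

    print(normalizedsize(-1440,      10 * 65536) / 65536) -- 14.4 (pt)
    print(normalizedsize(12 * 65536, 10 * 65536) / 65536) -- 12   (pt)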
@@ -118,22 +114,18 @@ function constructors.getmathparameter(tfmdata,name)
end
end
---[[ldx--
-<p>Beware, the boundingbox is passed as reference so we may not overwrite it
-in the process; numbers are of course copies. Here 65536 equals 1pt. (Due to
-excessive memory usage in CJK fonts, we no longer pass the boundingbox.)</p>
---ldx]]--
-
--- The scaler is only used for otf and afm and virtual fonts. If a virtual font has italic
--- correction make sure to set the hasitalics flag. Some more flags will be added in the
--- future.
-
---[[ldx--
-<p>The reason why the scaler was originally split, is that for a while we experimented
-with a helper function. However, in practice the <l n='api'/> calls are too slow to
-make this profitable and the <l n='lua'/> based variant was just faster. A days
-wasted day but an experience richer.</p>
---ldx]]--
+-- Beware, the boundingbox is passed as reference so we may not overwrite it in the
+-- process; numbers are of course copies. Here 65536 equals 1pt. (Due to excessive
+-- memory usage in CJK fonts, we no longer pass the boundingbox.)
+--
+-- The scaler is only used for OTF and AFM and virtual fonts. If a virtual font has
+-- italic correction make sure to set the hasitalics flag. Some more flags will be
+-- added in the future.
+--
+-- The reason why the scaler was originally split, is that for a while we
+-- experimented with a helper function. However, in practice the API calls are too
+-- slow to make this profitable and the Lua based variant was just faster. A day
+-- wasted but an experience richer.
function constructors.cleanuptable(tfmdata)
-- This no longer makes sense because the addition of font.getcopy and its
@@ -1093,9 +1085,7 @@ function constructors.finalize(tfmdata)
return tfmdata
end
---[[ldx--
-<p>A unique hash value is generated by:</p>
---ldx]]--
+-- A unique hash value is generated by:
local hashmethods = { }
constructors.hashmethods = hashmethods
@@ -1154,13 +1144,11 @@ hashmethods.normal = function(list)
end
end
---[[ldx--
-<p>In principle we can share tfm tables when we are in need for a font, but then
-we need to define a font switch as an id/attr switch which is no fun, so in that
-case users can best use dynamic features ... so, we will not use that speedup. Okay,
-when we get rid of base mode we can optimize even further by sharing, but then we
-loose our testcases for <l n='luatex'/>.</p>
---ldx]]--
+-- In principle we can share tfm tables when we are in need for a font, but then we
+-- need to define a font switch as an id/attr switch which is no fun, so in that
+-- case users can best use dynamic features ... so, we will not use that speedup.
+-- Okay, when we get rid of base mode we can optimize even further by sharing, but
+-- then we lose our testcases for LuaTeX.
function constructors.hashinstance(specification,force)
local hash = specification.hash
@@ -1516,10 +1504,7 @@ do
end
---[[ldx--
-<p>We need to check for default features. For this we provide
-a helper function.</p>
---ldx]]--
+-- We need to check for default features. For this we provide a helper function.
function constructors.checkedfeatures(what,features)
local defaults = handlers[what].features.defaults
diff --git a/tex/context/base/mkiv/font-ctx.lua b/tex/context/base/mkiv/font-ctx.lua
index 2c56b5613..f9ad475ac 100644
--- a/tex/context/base/mkiv/font-ctx.lua
+++ b/tex/context/base/mkiv/font-ctx.lua
@@ -528,26 +528,19 @@ do
end
---[[ldx--
-<p>So far we haven't really dealt with features (or whatever we want
-to pass along with the font definition. We distinguish the following
-situations:</p>
-situations:</p>
-
-<code>
-name:xetex like specs
-name@virtual font spec
-name*context specification
-</code>
---ldx]]--
-
--- currently fonts are scaled while constructing the font, so we
--- have to do scaling of commands in the vf at that point using e.g.
--- "local scale = g.parameters.factor or 1" after all, we need to
--- work with copies anyway and scaling needs to be done at some point;
--- however, when virtual tricks are used as feature (makes more
--- sense) we scale the commands in fonts.constructors.scale (and set the
--- factor there)
+-- So far we haven't really dealt with features (or whatever we want to pass along
+-- with the font definition). We distinguish the following situations:
+--
+-- name:xetex like specs
+-- name@virtual font spec
+-- name*context specification
+--
+-- Currently fonts are scaled while constructing the font, so we have to do scaling
+-- of commands in the vf at that point using e.g. "local scale = g.parameters.factor
+-- or 1" after all, we need to work with copies anyway and scaling needs to be done
+-- at some point; however, when virtual tricks are used as feature (makes more
+-- sense) we scale the commands in fonts.constructors.scale (and set the factor
+-- there).
local loadfont = definers.loadfont
@@ -2385,10 +2378,8 @@ dimenfactors.em = nil
dimenfactors["%"] = nil
dimenfactors.pct = nil
---[[ldx--
-<p>Before a font is passed to <l n='tex'/> we scale it. Here we also need
-to scale virtual characters.</p>
---ldx]]--
+-- Before a font is passed to TeX we scale it. Here we also need to scale virtual
+-- characters.
do
diff --git a/tex/context/base/mkiv/font-def.lua b/tex/context/base/mkiv/font-def.lua
index b752b3258..01bced6d1 100644
--- a/tex/context/base/mkiv/font-def.lua
+++ b/tex/context/base/mkiv/font-def.lua
@@ -24,10 +24,9 @@ trackers.register("fonts.loading", "fonts.defining", "otf.loading", "afm.loading
local report_defining = logs.reporter("fonts","defining")
---[[ldx--
-<p>Here we deal with defining fonts. We do so by intercepting the
-default loader that only handles <l n='tfm'/>.</p>
---ldx]]--
+-- Here we deal with defining fonts. We do so by intercepting the default loader
+-- that only handles TFM files. We started out that way but in the meantime we can
+-- hardly speak of TFM any more.
local fonts = fonts
local fontdata = fonts.hashes.identifiers
@@ -53,25 +52,18 @@ local designsizes = constructors.designsizes
local resolvefile = fontgoodies and fontgoodies.filenames and fontgoodies.filenames.resolve or function(s) return s end
---[[ldx--
-<p>We hardly gain anything when we cache the final (pre scaled)
-<l n='tfm'/> table. But it can be handy for debugging, so we no
-longer carry this code along. Also, we now have quite some reference
-to other tables so we would end up with lots of catches.</p>
---ldx]]--
-
---[[ldx--
-<p>We can prefix a font specification by <type>name:</type> or
-<type>file:</type>. The first case will result in a lookup in the
-synonym table.</p>
-
-<typing>
-[ name: | file: ] identifier [ separator [ specification ] ]
-</typing>
-
-<p>The following function split the font specification into components
-and prepares a table that will move along as we proceed.</p>
---ldx]]--
+-- We hardly gain anything when we cache the final (pre scaled) TFM table. But it
+-- can be handy for debugging, so we no longer carry this code along. Also, we now
+-- have quite some reference to other tables so we would end up with lots of
+-- catches.
+--
+-- We can prefix a font specification by "name:" or "file:". The first case will
+-- result in a lookup in the synonym table.
+--
+-- [ name: | file: ] identifier [ separator [ specification ] ]
+--
+-- The following function splits the font specification into components and prepares
+-- a table that will move along as we proceed.
-- beware, we discard additional specs
--
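As a rough illustration of that shape (not definers.analyze itself, and with an assumed default lookup), such a specification can be split with a couple of patterns:

    -- illustrative splitter for [ name: | file: ] identifier [ sep [ spec ] ]
    local function splitspecification(str)
        local lookup, rest = string.match(str,"^(%a+):(.*)$")
        if not lookup then
            lookup, rest = "file", str -- assumed default lookup
        end
        local name, method, detail = string.match(rest,"^([^*@]+)([*@]?)(.*)$")
        return lookup, name, method ~= "" and method or nil, detail ~= "" and detail or nil
    end

    print(splitspecification("file:lmroman10-regular*default"))
    -- file    lmroman10-regular       *       default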
@@ -164,9 +156,7 @@ if context then
end
---[[ldx--
-<p>We can resolve the filename using the next function:</p>
---ldx]]--
+-- We can resolve the filename using the next function:
definers.resolvers = definers.resolvers or { }
local resolvers = definers.resolvers
@@ -258,23 +248,17 @@ function definers.resolve(specification)
return specification
end
---[[ldx--
-<p>The main read function either uses a forced reader (as determined by
-a lookup) or tries to resolve the name using the list of readers.</p>
-
-<p>We need to cache when possible. We do cache raw tfm data (from <l
-n='tfm'/>, <l n='afm'/> or <l n='otf'/>). After that we can cache based
-on specificstion (name) and size, that is, <l n='tex'/> only needs a number
-for an already loaded fonts. However, it may make sense to cache fonts
-before they're scaled as well (store <l n='tfm'/>'s with applied methods
-and features). However, there may be a relation between the size and
-features (esp in virtual fonts) so let's not do that now.</p>
-
-<p>Watch out, here we do load a font, but we don't prepare the
-specification yet.</p>
---ldx]]--
-
--- very experimental:
+-- The main read function either uses a forced reader (as determined by a lookup) or
+-- tries to resolve the name using the list of readers.
+--
+-- We need to cache when possible. We do cache raw tfm data (from TFM, AFM or OTF).
+-- After that we can cache based on specification (name) and size, that is, TeX only
+-- needs a number for an already loaded font. However, it may make sense to cache
+-- fonts before they're scaled as well (store TFM's with applied methods and
+-- features). However, there may be a relation between the size and features (esp in
+-- virtual fonts) so let's not do that now.
+--
+-- Watch out, here we do load a font, but we don't prepare the specification yet.
function definers.applypostprocessors(tfmdata)
local postprocessors = tfmdata.postprocessors
@@ -439,17 +423,13 @@ function constructors.readanddefine(name,size) -- no id -- maybe a dummy first
return fontdata[id], id
end
---[[ldx--
-<p>So far the specifiers. Now comes the real definer. Here we cache
-based on id's. Here we also intercept the virtual font handler. Since
-it evolved stepwise I may rewrite this bit (combine code).</p>
-
-In the previously defined reader (the one resulting in a <l n='tfm'/>
-table) we cached the (scaled) instances. Here we cache them again, but
-this time based on id. We could combine this in one cache but this does
-not gain much. By the way, passing id's back to in the callback was
-introduced later in the development.</p>
---ldx]]--
+-- So far the specifiers. Now comes the real definer. Here we cache based on id's.
+-- Here we also intercept the virtual font handler.
+--
+-- In the previously defined reader (the one resulting in a TFM table) we cached the
+-- (scaled) instances. Here we cache them again, but this time based on id. We could
+-- combine this in one cache but this does not gain much. By the way, passing id's
+-- back in the callback was introduced later in the development.
function definers.registered(hash)
local id = internalized[hash]
@@ -522,9 +502,7 @@ function font.getfont(id)
return fontdata[id] -- otherwise issues
end
---[[ldx--
-<p>We overload the <l n='tfm'/> reader.</p>
---ldx]]--
+-- We overload the TFM reader.
if not context then
callbacks.register('define_font', definers.read, "definition of fonts (tfmdata preparation)")
diff --git a/tex/context/base/mkiv/font-enc.lua b/tex/context/base/mkiv/font-enc.lua
index f2f0595dd..732bc8907 100644
--- a/tex/context/base/mkiv/font-enc.lua
+++ b/tex/context/base/mkiv/font-enc.lua
@@ -16,10 +16,8 @@ local setmetatableindex = table.setmetatableindex
local allocate = utilities.storage.allocate
local mark = utilities.storage.mark
---[[ldx--
-<p>Because encodings are going to disappear, we don't bother defining
-them in tables. But we may do so some day, for consistency.</p>
---ldx]]--
+-- Because encodings are going to disappear, we don't bother defining them in
+-- tables. But we may do so some day, for consistency.
local report_encoding = logs.reporter("fonts","encoding")
@@ -43,24 +41,19 @@ function encodings.is_known(encoding)
return containers.is_valid(encodings.cache,encoding)
end
---[[ldx--
-<p>An encoding file looks like this:</p>
-
-<typing>
-/TeXnANSIEncoding [
-/.notdef
-/Euro
-...
-/ydieresis
-] def
-</typing>
-
-<p>Beware! The generic encoding files don't always apply to the ones that
-ship with fonts. This has to do with the fact that names follow (slightly)
-different standards. However, the fonts where this applies to (for instance
-Latin Modern or <l n='tex'> Gyre) come in OpenType variants too, so these
-will be used.</p>
---ldx]]--
+-- An encoding file looks like this:
+--
+-- /TeXnANSIEncoding [
+-- /.notdef
+-- /Euro
+-- ...
+-- /ydieresis
+-- ] def
+--
+-- Beware! The generic encoding files don't always apply to the ones that ship with
+-- fonts. This has to do with the fact that names follow (slightly) different
+-- standards. However, the fonts this applies to (for instance Latin Modern or TeX
+-- Gyre) come in OpenType variants too, so these will be used.
local enccodes = characters.enccodes or { }
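Pulling the glyph names out of such a vector mostly means scanning the slash-prefixed tokens between the brackets. A rough sketch, not encodings.load:

    -- illustrative only: collect /names from an encoding vector blob
    local function parsevector(blob)
        local vector = { }
        local body = string.match(blob,"%[(.-)%]%s*def")
        if body then
            for name in string.gmatch(body,"/([^%s/]+)") do
                vector[#vector+1] = name
            end
        end
        return vector
    end

    local v = parsevector("/TeXnANSIEncoding [ /.notdef /Euro /ydieresis ] def")
    print(#v, v[1], v[3]) -- 3       .notdef ydieresis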
@@ -120,10 +113,7 @@ function encodings.load(filename)
return containers.write(encodings.cache, name, data)
end
---[[ldx--
-<p>There is no unicode encoding but for practical purposes we define
-one.</p>
---ldx]]--
+-- There is no unicode encoding but for practical purposes we define one.
-- maybe make this a function:
diff --git a/tex/context/base/mkiv/font-fbk.lua b/tex/context/base/mkiv/font-fbk.lua
index b6c9a430d..da04b50a8 100644
--- a/tex/context/base/mkiv/font-fbk.lua
+++ b/tex/context/base/mkiv/font-fbk.lua
@@ -10,10 +10,6 @@ local cos, tan, rad, format = math.cos, math.tan, math.rad, string.format
local utfbyte, utfchar = utf.byte, utf.char
local next = next
---[[ldx--
-<p>This is very experimental code!</p>
---ldx]]--
-
local trace_visualize = false trackers.register("fonts.composing.visualize", function(v) trace_visualize = v end)
local trace_define = false trackers.register("fonts.composing.define", function(v) trace_define = v end)
diff --git a/tex/context/base/mkiv/font-imp-tex.lua b/tex/context/base/mkiv/font-imp-tex.lua
index b4b9a7b69..87a1ae3aa 100644
--- a/tex/context/base/mkiv/font-imp-tex.lua
+++ b/tex/context/base/mkiv/font-imp-tex.lua
@@ -13,36 +13,31 @@ local otf = fonts.handlers.otf
local registerotffeature = otf.features.register
local addotffeature = otf.addfeature
--- tlig (we need numbers for some fonts so ...)
+-- We provide a few old and obsolete compatibility input features. We need numbers
+-- for some fonts so no names here. Do we also need them for afm fonts?
-local specification = {
+local tlig = {
type = "ligature",
order = { "tlig" },
prepend = true,
data = {
- -- endash = "hyphen hyphen",
- -- emdash = "hyphen hyphen hyphen",
- [0x2013] = { 0x002D, 0x002D },
- [0x2014] = { 0x002D, 0x002D, 0x002D },
- -- quotedblleft = "quoteleft quoteleft",
- -- quotedblright = "quoteright quoteright",
- -- quotedblleft = "grave grave",
- -- quotedblright = "quotesingle quotesingle",
- -- quotedblbase = "comma comma",
+ [0x2013] = { 0x002D, 0x002D },
+ [0x2014] = { 0x002D, 0x002D, 0x002D },
},
}
-addotffeature("tlig",specification)
-
-registerotffeature {
- -- this makes it a known feature (in tables)
- name = "tlig",
- description = "tex ligatures",
+local tquo = {
+ type = "ligature",
+ order = { "tquo" },
+ prepend = true,
+ data = {
+ [0x201C] = { 0x0060, 0x0060 },
+ [0x201D] = { 0x0027, 0x0027 },
+ [0x201E] = { 0x002C, 0x002C },
+ },
}
--- trep
-
-local specification = {
+local trep = {
type = "substitution",
order = { "trep" },
prepend = true,
@@ -53,13 +48,13 @@ local specification = {
},
}
-addotffeature("trep",specification)
+addotffeature("trep",trep) -- last
+addotffeature("tlig",tlig)
+addotffeature("tquo",tquo) -- first
-registerotffeature {
- -- this makes it a known feature (in tables)
- name = "trep",
- description = "tex replacements",
-}
+registerotffeature { name = "tlig", description = "tex ligatures" }
+registerotffeature { name = "tquo", description = "tex quotes" }
+registerotffeature { name = "trep", description = "tex replacements" }
-- some day this will be moved to font-imp-scripts.lua
diff --git a/tex/context/base/mkiv/font-ini.lua b/tex/context/base/mkiv/font-ini.lua
index 8bab6d902..201cc69f4 100644
--- a/tex/context/base/mkiv/font-ini.lua
+++ b/tex/context/base/mkiv/font-ini.lua
@@ -6,9 +6,7 @@ if not modules then modules = { } end modules ['font-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Not much is happening here.</p>
---ldx]]--
+-- Not much is happening here.
local allocate = utilities.storage.allocate
local sortedhash = table.sortedhash
diff --git a/tex/context/base/mkiv/font-log.lua b/tex/context/base/mkiv/font-log.lua
index 092b5a62e..96b5864fd 100644
--- a/tex/context/base/mkiv/font-log.lua
+++ b/tex/context/base/mkiv/font-log.lua
@@ -19,12 +19,9 @@ fonts.loggers = loggers
local usedfonts = utilities.storage.allocate()
----- loadedfonts = utilities.storage.allocate()
---[[ldx--
-<p>The following functions are used for reporting about the fonts
-used. The message itself is not that useful in regular runs but since
-we now have several readers it may be handy to know what reader is
-used for which font.</p>
---ldx]]--
+-- The following functions are used for reporting about the fonts used. The message
+-- itself is not that useful in regular runs but since we now have several readers
+-- it may be handy to know what reader is used for which font.
function loggers.onetimemessage(font,char,message,reporter)
local tfmdata = fonts.hashes.identifiers[font]
diff --git a/tex/context/base/mkiv/font-nod.lua b/tex/context/base/mkiv/font-nod.lua
index a7dcfd9b0..1e39784d9 100644
--- a/tex/context/base/mkiv/font-nod.lua
+++ b/tex/context/base/mkiv/font-nod.lua
@@ -7,11 +7,6 @@ if not modules then modules = { } end modules ['font-nod'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is rather experimental. We need more control and some of this
-might become a runtime module instead. This module will be cleaned up!</p>
---ldx]]--
-
local utfchar = utf.char
local concat, fastcopy = table.concat, table.fastcopy
local match, rep = string.match, string.rep
diff --git a/tex/context/base/mkiv/font-one.lua b/tex/context/base/mkiv/font-one.lua
index 829f52ea0..25efc2a04 100644
--- a/tex/context/base/mkiv/font-one.lua
+++ b/tex/context/base/mkiv/font-one.lua
@@ -7,18 +7,16 @@ if not modules then modules = { } end modules ['font-one'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Some code may look a bit obscure but this has to do with the fact that we also use
-this code for testing and much code evolved in the transition from <l n='tfm'/> to
-<l n='afm'/> to <l n='otf'/>.</p>
-
-<p>The following code still has traces of intermediate font support where we handles
-font encodings. Eventually font encoding went away but we kept some code around in
-other modules.</p>
-
-<p>This version implements a node mode approach so that users can also more easily
-add features.</p>
---ldx]]--
+-- Some code may look a bit obscure but this has to do with the fact that we also
+-- use this code for testing and much code evolved in the transition from TFM to AFM
+-- to OTF.
+--
+-- The following code still has traces of intermediate font support where we handled
+-- font encodings. Eventually font encoding went away but we kept some code around
+-- in other modules.
+--
+-- This version implements a node mode approach so that users can also more easily
+-- add features.
local fonts, logs, trackers, containers, resolvers = fonts, logs, trackers, containers, resolvers
@@ -71,15 +69,13 @@ local overloads = fonts.mappings.overloads
local applyruntimefixes = fonts.treatments and fonts.treatments.applyfixes
---[[ldx--
-<p>We cache files. Caching is taken care of in the loader. We cheat a bit by adding
-ligatures and kern information to the afm derived data. That way we can set them faster
-when defining a font.</p>
-
-<p>We still keep the loading two phased: first we load the data in a traditional
-fashion and later we transform it to sequences. Then we apply some methods also
-used in opentype fonts (like <t>tlig</t>).</p>
---ldx]]--
+-- We cache files. Caching is taken care of in the loader. We cheat a bit by adding
+-- ligatures and kern information to the afm derived data. That way we can set them
+-- faster when defining a font.
+--
+-- We still keep the loading two phased: first we load the data in a traditional
+-- fashion and later we transform it to sequences. Then we apply some methods also
+-- used in opentype fonts (like tlig).
function afm.load(filename)
filename = resolvers.findfile(filename,'afm') or ""
@@ -312,10 +308,8 @@ local function enhance_fix_names(data)
end
end
---[[ldx--
-<p>These helpers extend the basic table with extra ligatures, texligatures
-and extra kerns. This saves quite some lookups later.</p>
---ldx]]--
+-- These helpers extend the basic table with extra ligatures, texligatures and extra
+-- kerns. This saves quite some lookups later.
local addthem = function(rawdata,ligatures)
if ligatures then
@@ -349,17 +343,14 @@ local function enhance_add_ligatures(rawdata)
addthem(rawdata,afm.helpdata.ligatures)
end
---[[ldx--
-<p>We keep the extra kerns in separate kerning tables so that we can use
-them selectively.</p>
---ldx]]--
-
--- This is rather old code (from the beginning when we had only tfm). If
--- we unify the afm data (now we have names all over the place) then
--- we can use shcodes but there will be many more looping then. But we
--- could get rid of the tables in char-cmp then. Als, in the generic version
--- we don't use the character database. (Ok, we can have a context specific
--- variant).
+-- We keep the extra kerns in separate kerning tables so that we can use them
+-- selectively.
+--
+-- This is rather old code (from the beginning when we had only tfm). If we unify
+-- the afm data (now we have names all over the place) then we can use shcodes but
+-- there will be much more looping then. But we could get rid of the tables in
+-- char-cmp then. Also, in the generic version we don't use the character database.
+-- (Ok, we can have a context specific variant).
local function enhance_add_extra_kerns(rawdata) -- using shcodes is not robust here
local descriptions = rawdata.descriptions
@@ -440,9 +431,7 @@ local function enhance_add_extra_kerns(rawdata) -- using shcodes is not robust h
do_it_copy(afm.helpdata.rightkerned)
end
---[[ldx--
-<p>The copying routine looks messy (and is indeed a bit messy).</p>
---ldx]]--
+-- The copying routine looks messy (and is indeed a bit messy).
local function adddimensions(data) -- we need to normalize afm to otf i.e. indexed table instead of name
if data then
@@ -619,11 +608,9 @@ end
return nil
end
---[[ldx--
-<p>Originally we had features kind of hard coded for <l n='afm'/> files but since I
-expect to support more font formats, I decided to treat this fontformat like any
-other and handle features in a more configurable way.</p>
---ldx]]--
+-- Originally we had features kind of hard coded for AFM files but since I expect to
+-- support more font formats, I decided to treat this fontformat like any other and
+-- handle features in a more configurable way.
function afm.setfeatures(tfmdata,features)
local okay = constructors.initializefeatures("afm",tfmdata,features,trace_features,report_afm)
@@ -715,13 +702,10 @@ local function afmtotfm(specification)
end
end
---[[ldx--
-<p>As soon as we could intercept the <l n='tfm'/> reader, I implemented an
-<l n='afm'/> reader. Since traditional <l n='pdftex'/> could use <l n='opentype'/>
-fonts with <l n='afm'/> companions, the following method also could handle
-those cases, but now that we can handle <l n='opentype'/> directly we no longer
-need this features.</p>
---ldx]]--
+-- As soon as we could intercept the TFM reader, I implemented an AFM reader. Since
+-- traditional pdfTeX could use OpenType fonts with AFM companions, the following
+-- method also could handle those cases, but now that we can handle OpenType
+-- directly we no longer need this feature.
local function read_from_afm(specification)
local tfmdata = afmtotfm(specification)
@@ -736,9 +720,7 @@ local function read_from_afm(specification)
return tfmdata
end
---[[ldx--
-<p>We have the usual two modes and related features initializers and processors.</p>
---ldx]]--
+-- We have the usual two modes and related features initializers and processors.
registerafmfeature {
name = "mode",
diff --git a/tex/context/base/mkiv/font-onr.lua b/tex/context/base/mkiv/font-onr.lua
index 9e5a012bd..6234742a3 100644
--- a/tex/context/base/mkiv/font-onr.lua
+++ b/tex/context/base/mkiv/font-onr.lua
@@ -7,18 +7,16 @@ if not modules then modules = { } end modules ['font-onr'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Some code may look a bit obscure but this has to do with the fact that we also use
-this code for testing and much code evolved in the transition from <l n='tfm'/> to
-<l n='afm'/> to <l n='otf'/>.</p>
-
-<p>The following code still has traces of intermediate font support where we handles
-font encodings. Eventually font encoding went away but we kept some code around in
-other modules.</p>
-
-<p>This version implements a node mode approach so that users can also more easily
-add features.</p>
---ldx]]--
+-- Some code may look a bit obscure but this has to do with the fact that we also
+-- use this code for testing and much code evolved in the transition from TFM to AFM
+-- to OTF.
+--
+-- The following code still has traces of intermediate font support where we handled
+-- font encodings. Eventually font encoding went away but we kept some code around
+-- in other modules.
+--
+-- This version implements a node mode approach so that users can also more easily
+-- add features.
local fonts, logs, trackers, resolvers = fonts, logs, trackers, resolvers
@@ -44,12 +42,9 @@ afm.readers = readers
afm.version = 1.513 -- incrementing this number one up will force a re-cache
---[[ldx--
-<p>We start with the basic reader which we give a name similar to the built in <l n='tfm'/>
-and <l n='otf'/> reader.</p>
-<p>We use a new (unfinished) pfb loader but I see no differences between the old
-and new vectors (we actually had one bad vector with the old loader).</p>
---ldx]]--
+-- We start with the basic reader which we give a name similar to the built in TFM
+-- and OTF reader. We use a PFB loader but I see no differences between the old and
+-- new vectors (we actually had one bad vector with the old loader).
local get_indexes, get_shapes
@@ -305,11 +300,10 @@ do
end
---[[ldx--
-<p>We start with the basic reader which we give a name similar to the built in <l n='tfm'/>
-and <l n='otf'/> reader. We only need data that is relevant for our use. We don't support
-more complex arrangements like multiple master (obsolete), direction specific kerning, etc.</p>
---ldx]]--
+-- We start with the basic reader which we give a name similar to the built in TFM
+-- and OTF reader. We only need data that is relevant for our use. We don't support
+-- more complex arrangements like multiple master (obsolete), direction specific
+-- kerning, etc.
local spacer = patterns.spacer
local whitespace = patterns.whitespace
diff --git a/tex/context/base/mkiv/font-ota.lua b/tex/context/base/mkiv/font-ota.lua
index a8f9f0047..160d0d0ed 100644
--- a/tex/context/base/mkiv/font-ota.lua
+++ b/tex/context/base/mkiv/font-ota.lua
@@ -54,10 +54,8 @@ local chardata = characters and characters.data
local otffeatures = fonts.constructors.features.otf
local registerotffeature = otffeatures.register
---[[ldx--
-<p>Analyzers run per script and/or language and are needed in order to
-process features right.</p>
---ldx]]--
+-- Analyzers run per script and/or language and are needed in order to process
+-- features right.
local setstate = nuts.setstate
local getstate = nuts.getstate
diff --git a/tex/context/base/mkiv/font-ots.lua b/tex/context/base/mkiv/font-ots.lua
index 6d7c5fb25..48f85c365 100644
--- a/tex/context/base/mkiv/font-ots.lua
+++ b/tex/context/base/mkiv/font-ots.lua
@@ -7,92 +7,90 @@ if not modules then modules = { } end modules ['font-ots'] = { -- sequences
license = "see context related readme files",
}
---[[ldx--
-<p>I need to check the description at the microsoft site ... it has been improved
-so maybe there are some interesting details there. Most below is based on old and
-incomplete documentation and involved quite a bit of guesswork (checking with the
-abstract uniscribe of those days. But changing things is tricky!</p>
-
-<p>This module is a bit more split up that I'd like but since we also want to test
-with plain <l n='tex'/> it has to be so. This module is part of <l n='context'/>
-and discussion about improvements and functionality mostly happens on the
-<l n='context'/> mailing list.</p>
-
-<p>The specification of OpenType is (or at least decades ago was) kind of vague.
-Apart from a lack of a proper free specifications there's also the problem that
-Microsoft and Adobe may have their own interpretation of how and in what order to
-apply features. In general the Microsoft website has more detailed specifications
-and is a better reference. There is also some information in the FontForge help
-files. In the end we rely most on the Microsoft specification.</p>
-
-<p>Because there is so much possible, fonts might contain bugs and/or be made to
-work with certain rederers. These may evolve over time which may have the side
-effect that suddenly fonts behave differently. We don't want to catch all font
-issues.</p>
-
-<p>After a lot of experiments (mostly by Taco, me and Idris) the first implementation
-was already quite useful. When it did most of what we wanted, a more optimized version
-evolved. Of course all errors are mine and of course the code can be improved. There
-are quite some optimizations going on here and processing speed is currently quite
-acceptable and has been improved over time. Many complex scripts are not yet supported
-yet, but I will look into them as soon as <l n='context'/> users ask for it.</p>
-
-<p>The specification leaves room for interpretation. In case of doubt the Microsoft
-implementation is the reference as it is the most complete one. As they deal with
-lots of scripts and fonts, Kai and Ivo did a lot of testing of the generic code and
-their suggestions help improve the code. I'm aware that not all border cases can be
-taken care of, unless we accept excessive runtime, and even then the interference
-with other mechanisms (like hyphenation) are not trivial.</p>
-
-<p>Especially discretionary handling has been improved much by Kai Eigner who uses complex
-(latin) fonts. The current implementation is a compromis between his patches and my code
-and in the meantime performance is quite ok. We cannot check all border cases without
-compromising speed but so far we're okay. Given good test cases we can probably improve
-it here and there. Especially chain lookups are non trivial with discretionaries but
-things got much better over time thanks to Kai.</p>
-
-<p>Glyphs are indexed not by unicode but in their own way. This is because there is no
-relationship with unicode at all, apart from the fact that a font might cover certain
-ranges of characters. One character can have multiple shapes. However, at the
-<l n='tex'/> end we use unicode so and all extra glyphs are mapped into a private
-space. This is needed because we need to access them and <l n='tex'/> has to include
-then in the output eventually.</p>
-
-<p>The initial data table is rather close to the open type specification and also not
-that different from the one produced by <l n='fontforge'/> but we uses hashes instead.
-In <l n='context'/> that table is packed (similar tables are shared) and cached on disk
-so that successive runs can use the optimized table (after loading the table is
-unpacked).</p>
-
-<p>This module is sparsely documented because it is has been a moving target. The
-table format of the reader changed a bit over time and we experiment a lot with
-different methods for supporting features. By now the structures are quite stable</p>
-
-<p>Incrementing the version number will force a re-cache. We jump the number by one
-when there's a fix in the reader or processing code that can result in different
-results.</p>
-
-<p>This code is also used outside context but in context it has to work with other
-mechanisms. Both put some constraints on the code here.</p>
-
---ldx]]--
-
--- Remark: We assume that cursives don't cross discretionaries which is okay because it
--- is only used in semitic scripts.
+-- I need to check the description at the Microsoft site ... it has been improved so
+-- maybe there are some interesting details there. Most below is based on old and
+-- incomplete documentation and involved quite a bit of guesswork (checking with the
+-- abstract uniscribe of those days). But changing things is tricky!
+--
+-- This module is a bit more split up than I'd like but since we also want to test
+-- with plain TeX it has to be so. This module is part of ConTeXt and discussion
+-- about improvements and functionality mostly happens on the ConTeXt mailing list.
+--
+-- The specification of OpenType is (or at least decades ago was) kind of vague.
+-- Apart from a lack of proper free specifications there's also the problem that
+-- Microsoft and Adobe may have their own interpretation of how and in what order to
+-- apply features. In general the Microsoft website has more detailed specifications
+-- and is a better reference. There is also some information in the FontForge help
+-- files. In the end we rely most on the Microsoft specification.
+--
+-- Because there is so much possible, fonts might contain bugs and/or be made to
+-- work with certain renderers. These may evolve over time which may have the side
+-- effect that suddenly fonts behave differently. We don't want to catch all font
+-- issues.
+--
+-- After a lot of experiments (mostly by Taco, me and Idris) the first
+-- implementation was already quite useful. When it did most of what we wanted, a
+-- more optimized version evolved. Of course all errors are mine and of course the
+-- code can be improved. There are quite some optimizations going on here and
+-- processing speed is currently quite acceptable and has been improved over time.
+-- Many complex scripts are not supported yet, but I will look into them as soon
+-- as ConTeXt users ask for it.
+--
+-- The specification leaves room for interpretation. In case of doubt the Microsoft
+-- implementation is the reference as it is the most complete one. As they deal with
+-- lots of scripts and fonts, Kai and Ivo did a lot of testing of the generic code
+-- and their suggestions help improve the code. I'm aware that not all border cases
+-- can be taken care of, unless we accept excessive runtime, and even then the
+-- interference with other mechanisms (like hyphenation) is not trivial.
+--
+-- Especially discretionary handling has been improved much by Kai Eigner who uses
+-- complex (latin) fonts. The current implementation is a compromise between his
+-- patches and my code and in the meantime performance is quite ok. We cannot check
+-- all border cases without compromising speed but so far we're okay. Given good
+-- test cases we can probably improve it here and there. Especially chain lookups
+-- are non trivial with discretionaries but things got much better over time thanks
+-- to Kai.
+--
+-- Glyphs are indexed not by unicode but in their own way. This is because there is
+-- no relationship with unicode at all, apart from the fact that a font might cover
+-- certain ranges of characters. One character can have multiple shapes. However, at
+-- the TeX end we use unicode and all extra glyphs are mapped into a private
+-- space. This is needed because we need to access them and TeX has to include them
+-- in the output eventually.
+--
+-- The initial data table is rather close to the open type specification and also
+-- not that different from the one produced by FontForge but we use hashes instead.
+-- In ConTeXt that table is packed (similar tables are shared) and cached on disk so
+-- that successive runs can use the optimized table (after loading the table is
+-- unpacked).
+--
+-- This module is sparsely documented because it has been a moving target. The
+-- table format of the reader changed a bit over time and we experiment a lot with
+-- different methods for supporting features. By now the structures are quite stable.
+--
+-- Incrementing the version number will force a re-cache. We jump the number by one
+-- when there's a fix in the reader or processing code that can result in different
+-- results.
+--
+-- This code is also used outside ConTeXt but in ConTeXt it has to work with other
+-- mechanisms. Both put some constraints on the code here.
+--
+-- Remark: We assume that cursives don't cross discretionaries which is okay because
+-- it is only used in semitic scripts.
--
-- Remark: We assume that marks precede base characters.
--
--- Remark: When complex ligatures extend into discs nodes we can get side effects. Normally
--- this doesn't happen; ff\d{l}{l}{l} in lm works but ff\d{f}{f}{f}.
+-- Remark: When complex ligatures extend into discs nodes we can get side effects.
+-- Normally this doesn't happen; ff\d{l}{l}{l} in lm works but ff\d{f}{f}{f}.
--
-- Todo: check if we copy attributes to disc nodes if needed.
--
--- Todo: it would be nice if we could get rid of components. In other places we can use
--- the unicode properties. We can just keep a lua table.
+-- Todo: it would be nice if we could get rid of components. In other places we can
+-- use the unicode properties. We can just keep a lua table.
--
--- Remark: We do some disc juggling where we need to keep in mind that the pre, post and
--- replace fields can have prev pointers to a nesting node ... I wonder if that is still
--- needed.
+-- Remark: We do some disc juggling where we need to keep in mind that the pre, post
+-- and replace fields can have prev pointers to a nesting node ... I wonder if that
+-- is still needed.
--
-- Remark: This is not possible:
--
@@ -1038,10 +1036,8 @@ function handlers.gpos_pair(head,start,dataset,sequence,kerns,rlmode,skiphash,st
end
end
---[[ldx--
-<p>We get hits on a mark, but we're not sure if the it has to be applied so
-we need to explicitly test for basechar, baselig and basemark entries.</p>
---ldx]]--
+-- We get hits on a mark, but we're not sure if it has to be applied so we need
+-- to explicitly test for basechar, baselig and basemark entries.
function handlers.gpos_mark2base(head,start,dataset,sequence,markanchors,rlmode,skiphash)
local markchar = getchar(start)
@@ -1236,10 +1232,8 @@ function handlers.gpos_cursive(head,start,dataset,sequence,exitanchors,rlmode,sk
return head, start, false
end
---[[ldx--
-<p>I will implement multiple chain replacements once I run into a font that uses
-it. It's not that complex to handle.</p>
---ldx]]--
+-- I will implement multiple chain replacements once I run into a font that uses it.
+-- It's not that complex to handle.
local chainprocs = { }
@@ -1292,29 +1286,22 @@ end
chainprocs.reversesub = reversesub
---[[ldx--
-<p>This chain stuff is somewhat tricky since we can have a sequence of actions to be
-applied: single, alternate, multiple or ligature where ligature can be an invalid
-one in the sense that it will replace multiple by one but not neccessary one that
-looks like the combination (i.e. it is the counterpart of multiple then). For
-example, the following is valid:</p>
-
-<typing>
-<line>xxxabcdexxx [single a->A][multiple b->BCD][ligature cde->E] xxxABCDExxx</line>
-</typing>
-
-<p>Therefore we we don't really do the replacement here already unless we have the
-single lookup case. The efficiency of the replacements can be improved by deleting
-as less as needed but that would also make the code even more messy.</p>
---ldx]]--
-
---[[ldx--
-<p>Here we replace start by a single variant.</p>
---ldx]]--
-
--- To be done (example needed): what if > 1 steps
-
--- this is messy: do we need this disc checking also in alternates?
+-- This chain stuff is somewhat tricky since we can have a sequence of actions to be
+-- applied: single, alternate, multiple or ligature where ligature can be an invalid
+-- one in the sense that it will replace multiple by one but not neccessary one that
+-- looks like the combination (i.e. it is the counterpart of multiple then). For
+-- example, the following is valid:
+--
+-- xxxabcdexxx [single a->A][multiple b->BCD][ligature cde->E] xxxABCDExxx
+--
+-- Therefore we don't really do the replacement here already unless we have the
+-- single lookup case. The efficiency of the replacements can be improved by
+-- deleting as little as needed but that would also make the code even more messy.
+--
+-- Here we replace start by a single variant.
+--
+-- To be done: what if > 1 steps (example needed).
+-- This is messy: do we need this disc checking also in alternates?
local function reportzerosteps(dataset,sequence)
logwarning("%s: no steps",cref(dataset,sequence))
@@ -1390,9 +1377,7 @@ function chainprocs.gsub_single(head,start,stop,dataset,sequence,currentlookup,r
return head, start, false
end
---[[ldx--
-<p>Here we replace start by new glyph. First we delete the rest of the match.</p>
---ldx]]--
+-- Here we replace start by new glyph. First we delete the rest of the match.
-- char_1 mark_1 -> char_x mark_1 (ignore marks)
-- char_1 mark_1 -> char_x
@@ -1444,9 +1429,7 @@ function chainprocs.gsub_alternate(head,start,stop,dataset,sequence,currentlooku
return head, start, false
end
---[[ldx--
-<p>Here we replace start by a sequence of new glyphs.</p>
---ldx]]--
+-- Here we replace start by a sequence of new glyphs.
function chainprocs.gsub_multiple(head,start,stop,dataset,sequence,currentlookup,rlmode,skiphash,chainindex)
local mapping = currentlookup.mapping
@@ -1470,11 +1453,9 @@ function chainprocs.gsub_multiple(head,start,stop,dataset,sequence,currentlookup
return head, start, false
end
---[[ldx--
-<p>When we replace ligatures we use a helper that handles the marks. I might change
-this function (move code inline and handle the marks by a separate function). We
-assume rather stupid ligatures (no complex disc nodes).</p>
---ldx]]--
+-- When we replace ligatures we use a helper that handles the marks. I might change
+-- this function (move code inline and handle the marks by a separate function). We
+-- assume rather stupid ligatures (no complex disc nodes).
-- compare to handlers.gsub_ligature which is more complex ... why
@@ -2532,7 +2513,7 @@ local function handle_contextchain(head,start,dataset,sequence,contexts,rlmode,s
-- fonts can have many steps (each doing one check) or many contexts
-- todo: make a per-char cache so that we have small contexts (when we have a context
- -- n == 1 and otherwise it can be more so we can even distingish n == 1 or more)
+ -- n == 1 and otherwise it can be more so we can even distinguish n == 1 or more)
local nofcontexts = contexts.n -- #contexts
diff --git a/tex/context/base/mkiv/font-syn.lua b/tex/context/base/mkiv/font-syn.lua
index e80d57f41..9fba3d8d4 100644
--- a/tex/context/base/mkiv/font-syn.lua
+++ b/tex/context/base/mkiv/font-syn.lua
@@ -56,10 +56,8 @@ local trace_rejections = false trackers.register("fonts.rejections", fu
local report_names = logs.reporter("fonts","names")
---[[ldx--
-<p>This module implements a name to filename resolver. Names are resolved
-using a table that has keys filtered from the font related files.</p>
---ldx]]--
+-- This module implements a name to filename resolver. Names are resolved using a
+-- table that has keys filtered from the font related files.
fonts = fonts or { } -- also used elsewhere
@@ -88,10 +86,6 @@ local autoreload = true
directives.register("fonts.autoreload", function(v) autoreload = toboolean(v) end)
directives.register("fonts.usesystemfonts", function(v) usesystemfonts = toboolean(v) end)
---[[ldx--
-<p>A few helpers.</p>
---ldx]]--
-
-- -- what to do with these -- --
--
-- thin -> thin
@@ -305,10 +299,8 @@ local function analyzespec(somename)
end
end
---[[ldx--
-<p>It would make sense to implement the filters in the related modules,
-but to keep the overview, we define them here.</p>
---ldx]]--
+-- It would make sense to implement the filters in the related modules, but to keep
+-- the overview, we define them here.
filters.afm = fonts.handlers.afm.readers.getinfo
filters.otf = fonts.handlers.otf.readers.getinfo
@@ -412,11 +404,9 @@ filters.ttc = filters.otf
-- end
-- end
---[[ldx--
-<p>The scanner loops over the filters using the information stored in
-the file databases. Watch how we check not only for the names, but also
-for combination with the weight of a font.</p>
---ldx]]--
+-- The scanner loops over the filters using the information stored in the file
+-- databases. Watch how we check not only for the names, but also for combination
+-- with the weight of a font.
filters.list = {
"otf", "ttf", "ttc", "afm", -- no longer dfont support (for now)
@@ -1402,11 +1392,8 @@ local function is_reloaded()
end
end
---[[ldx--
-<p>The resolver also checks if the cached names are loaded. Being clever
-here is for testing purposes only (it deals with names prefixed by an
-encoding name).</p>
---ldx]]--
+-- The resolver also checks if the cached names are loaded. Being clever here is for
+-- testing purposes only (it deals with names prefixed by an encoding name).
local function fuzzy(mapping,sorted,name,sub) -- no need for reverse sorted here
local condensed = gsub(name,"[^%a%d]","")
diff --git a/tex/context/base/mkiv/font-tfm.lua b/tex/context/base/mkiv/font-tfm.lua
index 945421a42..81f94532b 100644
--- a/tex/context/base/mkiv/font-tfm.lua
+++ b/tex/context/base/mkiv/font-tfm.lua
@@ -50,21 +50,18 @@ constructors.resolvevirtualtoo = false -- wil be set in font-ctx.lua
fonts.formats.tfm = "type1" -- we need to have at least a value here
fonts.formats.ofm = "type1" -- we need to have at least a value here
---[[ldx--
-<p>The next function encapsulates the standard <l n='tfm'/> loader as
-supplied by <l n='luatex'/>.</p>
---ldx]]--
-
--- this might change: not scaling and then apply features and do scaling in the
--- usual way with dummy descriptions but on the other hand .. we no longer use
--- tfm so why bother
-
--- ofm directive blocks local path search unless set; btw, in context we
--- don't support ofm files anyway as this format is obsolete
-
--- we need to deal with nested virtual fonts, but because we load in the
--- frontend we also need to make sure we don't nest too deep (esp when sizes
--- get large)
+-- The next function encapsulates the standard TFM loader as supplied by LuaTeX.
+--
+-- This might change: not scaling and then applying features and doing the scaling
+-- in the usual way with dummy descriptions. However, we no longer use TFM (except
+-- for the JMN math fonts) so why bother.
+--
+-- The ofm directive blocks a local path search unless set. Actually, in ConTeXt we
+-- never had to deal with OFM files anyway as this format is obsolete (there are
+-- hardly any fonts in that format that are of use).
+--
+-- We need to deal with nested virtual fonts, but because we load in the frontend we
+-- also need to make sure we don't nest too deep (esp when sizes get large)
--
-- (VTITLE Example of a recursion)
-- (MAPFONT D 0 (FONTNAME recurse)(FONTAT D 2))
@@ -72,7 +69,7 @@ supplied by <l n='luatex'/>.</p>
-- (CHARACTER C B (CHARWD D 2)(CHARHT D 2)(MAP (SETCHAR C A)))
-- (CHARACTER C C (CHARWD D 4)(CHARHT D 4)(MAP (SETCHAR C B)))
--
--- we added the same checks as below to the luatex engine
+-- We added the same checks as below to the LuaTeX engine.
function tfm.setfeatures(tfmdata,features)
local okay = constructors.initializefeatures("tfm",tfmdata,features,trace_features,report_tfm)
diff --git a/tex/context/base/mkiv/font-trt.lua b/tex/context/base/mkiv/font-trt.lua
index abc92ba52..893534078 100644
--- a/tex/context/base/mkiv/font-trt.lua
+++ b/tex/context/base/mkiv/font-trt.lua
@@ -12,11 +12,9 @@ local cleanfilename = fonts.names.cleanfilename
local splitbase = file.splitbase
local lower = string.lower
---[[ldx--
-<p>We provide a simple treatment mechanism (mostly because I want to demonstrate
-something in a manual). It's one of the few places where an lfg file gets loaded
-outside the goodies manager.</p>
---ldx]]--
+-- We provide a simple treatment mechanism (mostly because I want to demonstrate
+-- something in a manual). It's one of the few places where an lfg file gets loaded
+-- outside the goodies manager.
local treatments = fonts.treatments or { }
fonts.treatments = treatments
diff --git a/tex/context/base/mkiv/font-vir.lua b/tex/context/base/mkiv/font-vir.lua
index c3071cac0..6142ddafd 100644
--- a/tex/context/base/mkiv/font-vir.lua
+++ b/tex/context/base/mkiv/font-vir.lua
@@ -6,9 +6,8 @@ if not modules then modules = { } end modules ['font-vir'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is very experimental code! Not yet adapted to recent changes. This will change.</p>
---ldx]]--
+-- This is very experimental code! Not yet adapted to recent changes. This will
+-- change. Actually we moved on.
-- present in the backend but unspecified:
--
@@ -25,10 +24,8 @@ local constructors = fonts.constructors
local vf = constructors.handlers.vf
vf.version = 1.000 -- same as tfm
---[[ldx--
-<p>We overload the <l n='vf'/> reader.</p>
---ldx]]--
-
+-- We overload the VF reader:
+--
-- general code / already frozen
--
-- function vf.find(name)
diff --git a/tex/context/base/mkiv/l-dir.lua b/tex/context/base/mkiv/l-dir.lua
index 3e24e4e2a..316406850 100644
--- a/tex/context/base/mkiv/l-dir.lua
+++ b/tex/context/base/mkiv/l-dir.lua
@@ -21,7 +21,8 @@ local dir = dir
local lfs = lfs
local attributes = lfs.attributes
-local walkdir = lfs.dir
+----- walkdir = lfs.dir
+local scandir = lfs.dir
local isdir = lfs.isdir -- not robust, will be overloaded anyway
local isfile = lfs.isfile -- not robust, will be overloaded anyway
local currentdir = lfs.currentdir
@@ -69,6 +70,20 @@ else
end
+-- safeguard
+
+local isreadable = file.isreadable
+
+local walkdir = function(p,...)
+ if isreadable(p.."/.") then
+ return scandir(p,...)
+ else
+ return function() end
+ end
+end
+
+lfs.walkdir = walkdir
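+
+-- A minimal usage sketch (the path is hypothetical): iterating over a directory
+-- that cannot be read now simply yields nothing instead of raising an error.
+--
+-- for name in walkdir("/some/protected/path") do
+--     print(name)
+-- end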
+
-- handy
function dir.current()
diff --git a/tex/context/base/mkiv/lang-url.lua b/tex/context/base/mkiv/lang-url.lua
index 7a8b7ca86..7cd666df5 100644
--- a/tex/context/base/mkiv/lang-url.lua
+++ b/tex/context/base/mkiv/lang-url.lua
@@ -21,12 +21,10 @@ local v_after = variables.after
local is_letter = characters.is_letter
---[[
-<p>Hyphenating <l n='url'/>'s is somewhat tricky and a matter of taste. I did
-consider using a dedicated hyphenation pattern or dealing with it by node
-parsing, but the following solution suits as well. After all, we're mostly
-dealing with <l n='ascii'/> characters.</p>
-]]--
+-- Hyphenating URL's is somewhat tricky and a matter of taste. I did consider using
+-- a dedicated hyphenation pattern or dealing with it by node parsing, but the
+-- following solution suits as well. After all, we're mostly dealing with ASCII
+-- characters.
local urls = { }
languages.urls = urls
diff --git a/tex/context/base/mkiv/luat-cbk.lua b/tex/context/base/mkiv/luat-cbk.lua
index 9fd55f3ec..9e35283c1 100644
--- a/tex/context/base/mkiv/luat-cbk.lua
+++ b/tex/context/base/mkiv/luat-cbk.lua
@@ -12,20 +12,16 @@ local collectgarbage, type, next = collectgarbage, type, next
local round = math.round
local sortedhash, sortedkeys, tohash = table.sortedhash, table.sortedkeys, table.tohash
---[[ldx--
-<p>Callbacks are the real asset of <l n='luatex'/>. They permit you to hook
-your own code into the <l n='tex'/> engine. Here we implement a few handy
-auxiliary functions.</p>
---ldx]]--
+-- Callbacks are the real asset of LuaTeX. They permit you to hook your own code
+-- into the TeX engine. Here we implement a few handy auxiliary functions. Watch
+-- out, there are differences between LuaTeX and LuaMetaTeX.
callbacks = callbacks or { }
local callbacks = callbacks
---[[ldx--
-<p>When you (temporarily) want to install a callback function, and after a
-while wants to revert to the original one, you can use the following two
-functions. This only works for non-frozen ones.</p>
---ldx]]--
+-- When you (temporarily) want to install a callback function, and after a while
+-- want to revert to the original one, you can use the following two functions.
+-- This only works for non-frozen ones.
local trace_callbacks = false trackers.register("system.callbacks", function(v) trace_callbacks = v end)
local trace_calls = false -- only used when analyzing performance and initializations
@@ -47,13 +43,12 @@ local list = callbacks.list
local permit_overloads = false
local block_overloads = false
---[[ldx--
-<p>By now most callbacks are frozen and most provide a way to plug in your own code. For instance
-all node list handlers provide before/after namespaces and the file handling code can be extended
-by adding schemes and if needed I can add more hooks. So there is no real need to overload a core
-callback function. It might be ok for quick and dirty testing but anyway you're on your own if
-you permanently overload callback functions.</p>
---ldx]]--
+-- By now most callbacks are frozen and most provide a way to plug in your own code.
+-- For instance all node list handlers provide before/after namespaces and the file
+-- handling code can be extended by adding schemes and if needed I can add more
+-- hooks. So there is no real need to overload a core callback function. It might be
+-- ok for quick and dirty testing but anyway you're on your own if you permanently
+-- overload callback functions.
-- This might become a configuration file only option when it gets abused too much.
@@ -279,65 +274,50 @@ end)
-- callbacks.freeze("read_.*_file","reading file")
-- callbacks.freeze("open_.*_file","opening file")
---[[ldx--
-<p>The simple case is to remove the callback:</p>
-
-<code>
-callbacks.push('linebreak_filter')
-... some actions ...
-callbacks.pop('linebreak_filter')
-</code>
-
-<p>Often, in such case, another callback or a macro call will pop
-the original.</p>
-
-<p>In practice one will install a new handler, like in:</p>
-
-<code>
-callbacks.push('linebreak_filter', function(...)
- return something_done(...)
-end)
-</code>
-
-<p>Even more interesting is:</p>
-
-<code>
-callbacks.push('linebreak_filter', function(...)
- callbacks.pop('linebreak_filter')
- return something_done(...)
-end)
-</code>
-
-<p>This does a one-shot.</p>
---ldx]]--
-
---[[ldx--
-<p>Callbacks may result in <l n='lua'/> doing some hard work
-which takes time and above all resourses. Sometimes it makes
-sense to disable or tune the garbage collector in order to
-keep the use of resources acceptable.</p>
-
-<p>At some point in the development we did some tests with counting
-nodes (in this case 121049).</p>
-
-<table>
-<tr><td>setstepmul</td><td>seconds</td><td>megabytes</td></tr>
-<tr><td>200</td><td>24.0</td><td>80.5</td></tr>
-<tr><td>175</td><td>21.0</td><td>78.2</td></tr>
-<tr><td>150</td><td>22.0</td><td>74.6</td></tr>
-<tr><td>160</td><td>22.0</td><td>74.6</td></tr>
-<tr><td>165</td><td>21.0</td><td>77.6</td></tr>
-<tr><td>125</td><td>21.5</td><td>89.2</td></tr>
-<tr><td>100</td><td>21.5</td><td>88.4</td></tr>
-</table>
-
-<p>The following code is kind of experimental. In the documents
-that describe the development of <l n='luatex'/> we report
-on speed tests. One observation is that it sometimes helps to
-restart the collector. Okay, experimental code has been removed,
-because messing aroudn with the gc is too unpredictable.</p>
---ldx]]--
-
+-- The simple case is to remove the callback:
+--
+-- callbacks.push('linebreak_filter')
+-- ... some actions ...
+-- callbacks.pop('linebreak_filter')
+--
+-- Often, in such case, another callback or a macro call will pop the original.
+--
+-- In practice one will install a new handler, like in:
+--
+-- callbacks.push('linebreak_filter', function(...)
+-- return something_done(...)
+-- end)
+--
+-- Even more interesting is:
+--
+-- callbacks.push('linebreak_filter', function(...)
+-- callbacks.pop('linebreak_filter')
+-- return something_done(...)
+-- end)
+--
+-- This does a one-shot.
+--
+-- Callbacks may result in Lua doing some hard work which takes time and above all
+-- resources. Sometimes it makes sense to disable or tune the garbage collector in
+-- order to keep the use of resources acceptable.
+--
+-- At some point in the development we did some tests with counting nodes (in this
+-- case 121049).
+--
+-- setstepmul seconds megabytes
+-- 200 24.0 80.5
+-- 175 21.0 78.2
+-- 150 22.0 74.6
+-- 160 22.0 74.6
+-- 165 21.0 77.6
+-- 125 21.5 89.2
+-- 100 21.5 88.4
+--
+-- The following code is kind of experimental. In the documents that describe the
+-- development of LuaTeX we report on speed tests. One observation is that it
+-- sometimes helps to restart the collector. Okay, experimental code has been
+-- removed, because messing around with the gc is too unpredictable.
+--
-- For the moment we keep this here and not in util-gbc.lua or so.
utilities = utilities or { }
diff --git a/tex/context/base/mkiv/luat-ini.lua b/tex/context/base/mkiv/luat-ini.lua
index dcca8cec7..83fe0713d 100644
--- a/tex/context/base/mkiv/luat-ini.lua
+++ b/tex/context/base/mkiv/luat-ini.lua
@@ -6,11 +6,9 @@ if not modules then modules = { } end modules ['luat-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We cannot load anything yet. However what we will do us reserve a few tables.
-These can be used for runtime user data or third party modules and will not be
-cluttered by macro package code.</p>
---ldx]]--
+-- We cannot load anything yet. However, what we will do is reserve a few tables.
+-- These can be used for runtime user data or third party modules and will not be
+-- cluttered by macro package code.
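+--
+-- A third party module might, for instance, start out with (the name is made up):
+--
+-- thirddata.mymodule = thirddata.mymodule or { }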
userdata = userdata or { } -- for users (e.g. functions etc)
thirddata = thirddata or { } -- only for third party modules
diff --git a/tex/context/base/mkiv/lxml-aux.lua b/tex/context/base/mkiv/lxml-aux.lua
index fc17371e5..217f81c13 100644
--- a/tex/context/base/mkiv/lxml-aux.lua
+++ b/tex/context/base/mkiv/lxml-aux.lua
@@ -110,11 +110,7 @@ function xml.processattributes(root,pattern,handle)
return collected
end
---[[ldx--
-<p>The following functions collect elements and texts.</p>
---ldx]]--
-
--- are these still needed -> lxml-cmp.lua
+-- The following functions collect elements and texts.
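+--
+-- A small usage sketch (the file name and pattern are just examples):
+--
+-- local titles = xml.collect(xml.load("text.xml"),"title")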
function xml.collect(root, pattern)
return xmlapplylpath(root,pattern)
@@ -153,9 +149,7 @@ function xml.collect_tags(root, pattern, nonamespace)
end
end
---[[ldx--
-<p>We've now arrived at the functions that manipulate the tree.</p>
---ldx]]--
+-- We've now arrived at the functions that manipulate the tree.
local no_root = { no_root = true }
@@ -780,9 +774,7 @@ function xml.remapname(root, pattern, newtg, newns, newrn)
end
end
---[[ldx--
-<p>Helper (for q2p).</p>
---ldx]]--
+-- Helper (for q2p).
function xml.cdatatotext(e)
local dt = e.dt
@@ -879,9 +871,7 @@ end
-- xml.addentitiesdoctype(x,"hexadecimal")
-- print(x)
---[[ldx--
-<p>Here are a few synonyms.</p>
---ldx]]--
+-- Here are a few synonyms:
xml.all = xml.each
xml.insert = xml.insertafter
diff --git a/tex/context/base/mkiv/lxml-ent.lua b/tex/context/base/mkiv/lxml-ent.lua
index df80a7985..1d6d058b6 100644
--- a/tex/context/base/mkiv/lxml-ent.lua
+++ b/tex/context/base/mkiv/lxml-ent.lua
@@ -10,14 +10,10 @@ local next = next
local byte, format = string.byte, string.format
local setmetatableindex = table.setmetatableindex
---[[ldx--
-<p>We provide (at least here) two entity handlers. The more extensive
-resolver consults a hash first, tries to convert to <l n='utf'/> next,
-and finaly calls a handler when defines. When this all fails, the
-original entity is returned.</p>
-
-<p>We do things different now but it's still somewhat experimental</p>
---ldx]]--
+-- We provide (at least here) two entity handlers. The more extensive resolver
+-- consults a hash first, tries to convert to UTF next, and finally calls a handler
+-- when defined. When this all fails, the original entity is returned. We do things
+-- differently now but it's still somewhat experimental.
local trace_entities = false trackers.register("xml.entities", function(v) trace_entities = v end)
diff --git a/tex/context/base/mkiv/lxml-lpt.lua b/tex/context/base/mkiv/lxml-lpt.lua
index 78a9fca2e..d242b07de 100644
--- a/tex/context/base/mkiv/lxml-lpt.lua
+++ b/tex/context/base/mkiv/lxml-lpt.lua
@@ -20,28 +20,21 @@ local formatters = string.formatters -- no need (yet) as paths are cached anyway
-- beware, this is not xpath ... e.g. position is different (currently) and
-- we have reverse-sibling as reversed preceding sibling
---[[ldx--
-<p>This module can be used stand alone but also inside <l n='mkiv'/> in
-which case it hooks into the tracker code. Therefore we provide a few
-functions that set the tracers. Here we overload a previously defined
-function.</p>
-<p>If I can get in the mood I will make a variant that is XSLT compliant
-but I wonder if it makes sense.</P>
---ldx]]--
-
---[[ldx--
-<p>Expecially the lpath code is experimental, we will support some of xpath, but
-only things that make sense for us; as compensation it is possible to hook in your
-own functions. Apart from preprocessing content for <l n='context'/> we also need
-this module for process management, like handling <l n='ctx'/> and <l n='rlx'/>
-files.</p>
-
-<typing>
-a/b/c /*/c
-a/b/c/first() a/b/c/last() a/b/c/index(n) a/b/c/index(-n)
-a/b/c/text() a/b/c/text(1) a/b/c/text(-1) a/b/c/text(n)
-</typing>
---ldx]]--
+-- This module can be used stand alone but also inside ConTeXt in which case it
+-- hooks into the tracker code. Therefore we provide a few functions that set the
+-- tracers. Here we overload a previously defined function.
+--
+-- If I can get in the mood I will make a variant that is XSLT compliant but I
+-- wonder if it makes sense.
+--
+-- Especially the lpath code is experimental; we will support some of xpath, but
+-- only things that make sense for us; as compensation it is possible to hook in
+-- your own functions. Apart from preprocessing content for ConTeXt we also need
+-- this module for process management, like handling CTX and RLX files.
+--
+-- a/b/c /*/c
+-- a/b/c/first() a/b/c/last() a/b/c/index(n) a/b/c/index(-n)
+-- a/b/c/text() a/b/c/text(1) a/b/c/text(-1) a/b/c/text(n)
local trace_lpath = false
local trace_lparse = false
@@ -62,11 +55,9 @@ if trackers then
end)
end
---[[ldx--
-<p>We've now arrived at an interesting part: accessing the tree using a subset
-of <l n='xpath'/> and since we're not compatible we call it <l n='lpath'/>. We
-will explain more about its usage in other documents.</p>
---ldx]]--
+-- We've now arrived at an interesting part: accessing the tree using a subset of
+-- XPATH and since we're not compatible we call it LPATH. We will explain more about
+-- its usage in other documents.
local xml = xml
@@ -1273,9 +1264,8 @@ do
end
local applylpath = xml.applylpath
---[[ldx--
-<p>This is the main filter function. It returns whatever is asked for.</p>
---ldx]]--
+
+-- This is the main filter function. It returns whatever is asked for.
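+--
+-- For instance (the tree and pattern are just examples):
+--
+-- local str = xml.filter(root,"a/b/c/text()")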
function xml.filter(root,pattern) -- no longer funny attribute handling here
return applylpath(root,pattern)
@@ -1525,21 +1515,16 @@ expressions.tag = function(e,n) -- only tg
end
end
---[[ldx--
-<p>Often using an iterators looks nicer in the code than passing handler
-functions. The <l n='lua'/> book describes how to use coroutines for that
-purpose (<url href='http://www.lua.org/pil/9.3.html'/>). This permits
-code like:</p>
-
-<typing>
-for r, d, k in xml.elements(xml.load('text.xml'),"title") do
- print(d[k]) -- old method
-end
-for e in xml.collected(xml.load('text.xml'),"title") do
- print(e) -- new one
-end
-</typing>
---ldx]]--
+-- Often using an iterator looks nicer in the code than passing handler functions.
+-- The Lua book describes how to use coroutines for that purpose
+-- (http://www.lua.org/pil/9.3.html). This permits code like:
+--
+-- for r, d, k in xml.elements(xml.load('text.xml'),"title") do
+-- print(d[k]) -- old method
+-- end
+-- for e in xml.collected(xml.load('text.xml'),"title") do
+-- print(e) -- new one
+-- end
-- local wrap, yield = coroutine.wrap, coroutine.yield
-- local dummy = function() end
diff --git a/tex/context/base/mkiv/lxml-mis.lua b/tex/context/base/mkiv/lxml-mis.lua
index 04ba7b35c..ea62550bb 100644
--- a/tex/context/base/mkiv/lxml-mis.lua
+++ b/tex/context/base/mkiv/lxml-mis.lua
@@ -17,13 +17,10 @@ local P, S, R, C, V, Cc, Cs = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V, lpeg.Cc, l
lpegpatterns.xml = lpegpatterns.xml or { }
local xmlpatterns = lpegpatterns.xml
---[[ldx--
-<p>The following helper functions best belong to the <t>lxml-ini</t>
-module. Some are here because we need then in the <t>mk</t>
-document and other manuals, others came up when playing with
-this module. Since this module is also used in <l n='mtxrun'/> we've
-put them here instead of loading mode modules there then needed.</p>
---ldx]]--
+-- The following helper functions best belong to the 'lxml-ini' module. Some are
+-- here because we need them in the 'mk' document and other manuals, others came up
+-- when playing with this module. Since this module is also used in 'mtxrun' we've
+-- put them here instead of loading more modules there when needed.
local function xmlgsub(t,old,new) -- will be replaced
local dt = t.dt
diff --git a/tex/context/base/mkiv/lxml-tab.lua b/tex/context/base/mkiv/lxml-tab.lua
index e18362bd8..a06b59065 100644
--- a/tex/context/base/mkiv/lxml-tab.lua
+++ b/tex/context/base/mkiv/lxml-tab.lua
@@ -18,13 +18,12 @@ local trace_entities = false trackers.register("xml.entities", function(v) trac
local report_xml = logs and logs.reporter("xml","core") or function(...) print(string.format(...)) end
---[[ldx--
-<p>The parser used here is inspired by the variant discussed in the lua book, but
-handles comment and processing instructions, has a different structure, provides
-parent access; a first version used different trickery but was less optimized to we
-went this route. First we had a find based parser, now we have an <l n='lpeg'/> based one.
-The find based parser can be found in l-xml-edu.lua along with other older code.</p>
---ldx]]--
+-- The parser used here is inspired by the variant discussed in the Lua book, but
+-- handles comments and processing instructions, has a different structure, provides
+-- parent access; a first version used different trickery but was less optimized so
+-- we went this route. First we had a find based parser, now we have an LPEG based
+-- one. The find based parser can be found in l-xml-edu.lua along with other older
+-- code.
if lpeg.setmaxstack then lpeg.setmaxstack(1000) end -- deeply nested xml files
@@ -42,26 +41,19 @@ local lpegmatch, lpegpatterns = lpeg.match, lpeg.patterns
local P, S, R, C, V, C, Cs = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V, lpeg.C, lpeg.Cs
local formatters = string.formatters
---[[ldx--
-<p>First a hack to enable namespace resolving. A namespace is characterized by
-a <l n='url'/>. The following function associates a namespace prefix with a
-pattern. We use <l n='lpeg'/>, which in this case is more than twice as fast as a
-find based solution where we loop over an array of patterns. Less code and
-much cleaner.</p>
---ldx]]--
+-- First a hack to enable namespace resolving. A namespace is characterized by a
+-- URL. The following function associates a namespace prefix with a pattern. We use
+-- LPEG, which in this case is more than twice as fast as a find based solution
+-- where we loop over an array of patterns. Less code and much cleaner.
do -- begin of namespace closure (we ran out of locals)
xml.xmlns = xml.xmlns or { }
---[[ldx--
-<p>The next function associates a namespace prefix with an <l n='url'/>. This
-normally happens independent of parsing.</p>
-
-<typing>
-xml.registerns("mml","mathml")
-</typing>
---ldx]]--
+-- The next function associates a namespace prefix with a URL. This normally
+-- happens independent of parsing.
+--
+-- xml.registerns("mml","mathml")
local check = P(false)
local parse = check
@@ -71,15 +63,11 @@ function xml.registerns(namespace, pattern) -- pattern can be an lpeg
parse = P { P(check) + 1 * V(1) }
end
---[[ldx--
-<p>The next function also registers a namespace, but this time we map a
-given namespace prefix onto a registered one, using the given
-<l n='url'/>. This used for attributes like <t>xmlns:m</t>.</p>
-
-<typing>
-xml.checkns("m","http://www.w3.org/mathml")
-</typing>
---ldx]]--
+-- The next function also registers a namespace, but this time we map a given
+-- namespace prefix onto a registered one, using the given URL. This is used for
+-- attributes like 'xmlns:m'.
+--
+-- xml.checkns("m","http://www.w3.org/mathml")
function xml.checkns(namespace,url)
local ns = lpegmatch(parse,lower(url))
@@ -88,68 +76,54 @@ function xml.checkns(namespace,url)
end
end
---[[ldx--
-<p>Next we provide a way to turn an <l n='url'/> into a registered
-namespace. This used for the <t>xmlns</t> attribute.</p>
-
-<typing>
-resolvedns = xml.resolvens("http://www.w3.org/mathml")
-</typing>
-
-This returns <t>mml</t>.
---ldx]]--
+-- Next we provide a way to turn a URL into a registered namespace. This is used for
+-- the 'xmlns' attribute.
+--
+-- resolvedns = xml.resolvens("http://www.w3.org/mathml")
+--
+-- This returns 'mml'.
function xml.resolvens(url)
return lpegmatch(parse,lower(url)) or ""
end
---[[ldx--
-<p>A namespace in an element can be remapped onto the registered
-one efficiently by using the <t>xml.xmlns</t> table.</p>
---ldx]]--
+-- A namespace in an element can be remapped onto the registered one efficiently by
+-- using the 'xml.xmlns' table.
end -- end of namespace closure
---[[ldx--
-<p>This version uses <l n='lpeg'/>. We follow the same approach as before, stack and top and
-such. This version is about twice as fast which is mostly due to the fact that
-we don't have to prepare the stream for cdata, doctype etc etc. This variant is
-is dedicated to Luigi Scarso, who challenged me with 40 megabyte <l n='xml'/> files that
-took 12.5 seconds to load (1.5 for file io and the rest for tree building). With
-the <l n='lpeg'/> implementation we got that down to less 7.3 seconds. Loading the 14
-<l n='context'/> interface definition files (2.6 meg) went down from 1.05 seconds to 0.55.</p>
-
-<p>Next comes the parser. The rather messy doctype definition comes in many
-disguises so it is no surprice that later on have to dedicate quite some
-<l n='lpeg'/> code to it.</p>
-
-<typing>
-<!DOCTYPE Something PUBLIC "... ..." "..." [ ... ] >
-<!DOCTYPE Something PUBLIC "... ..." "..." >
-<!DOCTYPE Something SYSTEM "... ..." [ ... ] >
-<!DOCTYPE Something SYSTEM "... ..." >
-<!DOCTYPE Something [ ... ] >
-<!DOCTYPE Something >
-</typing>
-
-<p>The code may look a bit complex but this is mostly due to the fact that we
-resolve namespaces and attach metatables. There is only one public function:</p>
-
-<typing>
-local x = xml.convert(somestring)
-</typing>
-
-<p>An optional second boolean argument tells this function not to create a root
-element.</p>
-
-<p>Valid entities are:</p>
-
-<typing>
-<!ENTITY xxxx SYSTEM "yyyy" NDATA zzzz>
-<!ENTITY xxxx PUBLIC "yyyy" >
-<!ENTITY xxxx "yyyy" >
-</typing>
---ldx]]--
+-- This version uses LPEG. We follow the same approach as before, stack and top and
+-- such. This version is about twice as fast which is mostly due to the fact that we
+-- don't have to prepare the stream for cdata, doctype etc etc. This variant is
+-- dedicated to Luigi Scarso, who challenged me with 40 megabyte XML files that took
+-- 12.5 seconds to load (1.5 for file io and the rest for tree building). With the
+-- LPEG implementation we got that down to less than 7.3 seconds. Loading the 14
+-- ConTeXt interface definition files (2.6 meg) went down from 1.05 seconds to 0.55.
+--
+-- Next comes the parser. The rather messy doctype definition comes in many
+-- disguises so it is no surprise that later on we have to dedicate quite some LPEG
+-- code to it.
+--
+-- <!DOCTYPE Something PUBLIC "... ..." "..." [ ... ] >
+-- <!DOCTYPE Something PUBLIC "... ..." "..." >
+-- <!DOCTYPE Something SYSTEM "... ..." [ ... ] >
+-- <!DOCTYPE Something SYSTEM "... ..." >
+-- <!DOCTYPE Something [ ... ] >
+-- <!DOCTYPE Something >
+--
+-- The code may look a bit complex but this is mostly due to the fact that we
+-- resolve namespaces and attach metatables. There is only one public function:
+--
+-- local x = xml.convert(somestring)
+--
+-- An optional second boolean argument tells this function not to create a root
+-- element.
+--
+-- Valid entities are:
+--
+-- <!ENTITY xxxx SYSTEM "yyyy" NDATA zzzz>
+-- <!ENTITY xxxx PUBLIC "yyyy" >
+-- <!ENTITY xxxx "yyyy" >
-- not just one big nested table capture (lpeg overflow)
@@ -1332,10 +1306,8 @@ function xml.inheritedconvert(data,xmldata,cleanup) -- xmldata is parent
return xc
end
---[[ldx--
-<p>Packaging data in an xml like table is done with the following
-function. Maybe it will go away (when not used).</p>
---ldx]]--
+-- Packaging data in an XML-like table is done with the following function. Maybe it
+-- will go away (when not used).
function xml.is_valid(root)
return root and root.dt and root.dt[1] and type(root.dt[1]) == "table" and not root.dt[1].er
@@ -1354,11 +1326,8 @@ end
xml.errorhandler = report_xml
---[[ldx--
-<p>We cannot load an <l n='lpeg'/> from a filehandle so we need to load
-the whole file first. The function accepts a string representing
-a filename or a file handle.</p>
---ldx]]--
+-- We cannot load an LPEG from a filehandle so we need to load the whole file first.
+-- The function accepts a string representing a filename or a file handle.
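+--
+-- Typical usage (the file name is just an example):
+--
+-- local root = xml.load("text.xml")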
function xml.load(filename,settings)
local data = ""
@@ -1382,10 +1351,8 @@ function xml.load(filename,settings)
end
end
---[[ldx--
-<p>When we inject new elements, we need to convert strings to
-valid trees, which is what the next function does.</p>
---ldx]]--
+-- When we inject new elements, we need to convert strings to valid trees, which is
+-- what the next function does.
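+--
+-- A rough sketch (the content is made up):
+--
+-- local e = xml.toxml("<foo>bar</foo>")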
local no_root = { no_root = true }
@@ -1398,11 +1365,9 @@ function xml.toxml(data)
end
end
---[[ldx--
-<p>For copying a tree we use a dedicated function instead of the
-generic table copier. Since we know what we're dealing with we
-can speed up things a bit. The second argument is not to be used!</p>
---ldx]]--
+-- For copying a tree we use a dedicated function instead of the generic table
+-- copier. Since we know what we're dealing with we can speed up things a bit. The
+-- second argument is not to be used!
-- local function copy(old)
-- if old then
@@ -1466,13 +1431,10 @@ end
xml.copy = copy
---[[ldx--
-<p>In <l n='context'/> serializing the tree or parts of the tree is a major
-actitivity which is why the following function is pretty optimized resulting
-in a few more lines of code than needed. The variant that uses the formatting
-function for all components is about 15% slower than the concatinating
-alternative.</p>
---ldx]]--
+-- In ConTeXt serializing the tree or parts of the tree is a major activity which
+-- is why the following function is pretty optimized resulting in a few more lines
+-- of code than needed. The variant that uses the formatting function for all
+-- components is about 15% slower than the concatenating alternative.
-- todo: add <?xml version='1.0' standalone='yes'?> when not present
@@ -1490,10 +1452,8 @@ function xml.checkbom(root) -- can be made faster
end
end
---[[ldx--
-<p>At the cost of some 25% runtime overhead you can first convert the tree to a string
-and then handle the lot.</p>
---ldx]]--
+-- At the cost of some 25% runtime overhead you can first convert the tree to a
+-- string and then handle the lot.
-- new experimental reorganized serialize
@@ -1711,21 +1671,18 @@ newhandlers {
}
}
---[[ldx--
-<p>How you deal with saving data depends on your preferences. For a 40 MB database
-file the timing on a 2.3 Core Duo are as follows (time in seconds):</p>
-
-<lines>
-1.3 : load data from file to string
-6.1 : convert string into tree
-5.3 : saving in file using xmlsave
-6.8 : converting to string using xml.tostring
-3.6 : saving converted string in file
-</lines>
-<p>Beware, these were timing with the old routine but measurements will not be that
-much different I guess.</p>
---ldx]]--
+-- How you deal with saving data depends on your preferences. For a 40 MB database
+-- file the timings on a 2.3 GHz Core Duo are as follows (time in seconds):
+--
+-- 1.3 : load data from file to string
+-- 6.1 : convert string into tree
+-- 5.3 : saving in file using xmlsave
+-- 6.8 : converting to string using xml.tostring
+-- 3.6 : saving converted string in file
+--
+-- Beware, these were timings with the old routine but measurements will not be that
+-- much different I guess.
-- maybe this will move to lxml-xml
@@ -1827,10 +1784,8 @@ xml.newhandlers = newhandlers
xml.serialize = serialize
xml.tostring = xmltostring
---[[ldx--
-<p>The next function operated on the content only and needs a handle function
-that accepts a string.</p>
---ldx]]--
+-- The next function operates on the content only and needs a handle function that
+-- accepts a string.
local function xmlstring(e,handle)
if not handle or (e.special and e.tg ~= "@rt@") then
@@ -1849,9 +1804,7 @@ end
xml.string = xmlstring
---[[ldx--
-<p>A few helpers:</p>
---ldx]]--
+-- A few helpers:
--~ xmlsetproperty(root,"settings",settings)
@@ -1899,11 +1852,9 @@ function xml.name(root)
end
end
---[[ldx--
-<p>The next helper erases an element but keeps the table as it is,
-and since empty strings are not serialized (effectively) it does
-not harm. Copying the table would take more time. Usage:</p>
---ldx]]--
+-- The next helper erases an element but keeps the table as it is, and since empty
+-- strings are not serialized (effectively) it does not harm. Copying the table
+-- would take more time.
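+--
+-- Usage:
+--
+-- xml.erase(dt,k)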
function xml.erase(dt,k)
if dt then
@@ -1915,13 +1866,9 @@ function xml.erase(dt,k)
end
end
---[[ldx--
-<p>The next helper assigns a tree (or string). Usage:</p>
-
-<typing>
-dt[k] = xml.assign(root) or xml.assign(dt,k,root)
-</typing>
---ldx]]--
+-- The next helper assigns a tree (or string). Usage:
+--
+-- dt[k] = xml.assign(root) or xml.assign(dt,k,root)
function xml.assign(dt,k,root)
if dt and k then
@@ -1932,15 +1879,10 @@ function xml.assign(dt,k,root)
end
end
--- the following helpers may move
-
---[[ldx--
-<p>The next helper assigns a tree (or string). Usage:</p>
-<typing>
-xml.tocdata(e)
-xml.tocdata(e,"error")
-</typing>
---ldx]]--
+-- The next helper converts the content of an element into a CDATA section. Usage:
+--
+-- xml.tocdata(e)
+-- xml.tocdata(e,"error")
function xml.tocdata(e,wrapper) -- a few more in the aux module
local whatever = type(e) == "table" and xmltostring(e.dt) or e or ""
diff --git a/tex/context/base/mkiv/math-map.lua b/tex/context/base/mkiv/math-map.lua
index 5f93b43fc..153dde852 100644
--- a/tex/context/base/mkiv/math-map.lua
+++ b/tex/context/base/mkiv/math-map.lua
@@ -7,31 +7,13 @@ if not modules then modules = { } end modules ['math-map'] = {
license = "see context related readme files"
}
--- todo: make sparse .. if self
-
---[[ldx--
-<p>Remapping mathematics alphabets.</p>
---ldx]]--
-
--- oldstyle: not really mathematics but happened to be part of
--- the mathematics fonts in cmr
---
--- persian: we will also provide mappers for other
--- scripts
-
--- todo: alphabets namespace
--- maybe: script/scriptscript dynamic,
-
--- superscripped primes get unscripted !
-
--- to be looked into once the fonts are ready (will become font
--- goodie):
---
--- (U+2202,U+1D715) : upright
--- (U+2202,U+1D715) : italic
--- (U+2202,U+1D715) : upright
---
--- plus add them to the regular vectors below so that they honor \it etc
+-- persian: we will also provide mappers for other scripts
+-- todo : alphabets namespace
+-- maybe : script/scriptscript dynamic,
+-- check : (U+2202,U+1D715) : upright
+-- (U+2202,U+1D715) : italic
+-- (U+2202,U+1D715) : upright
+-- add them to the regular vectors below so that they honor \it etc
local type, next = type, next
local merged, sortedhash = table.merged, table.sortedhash
diff --git a/tex/context/base/mkiv/meta-fun.lua b/tex/context/base/mkiv/meta-fun.lua
index ddbbd9a52..aa388b0ca 100644
--- a/tex/context/base/mkiv/meta-fun.lua
+++ b/tex/context/base/mkiv/meta-fun.lua
@@ -13,15 +13,18 @@ local format, load, type = string.format, load, type
local context = context
local metapost = metapost
-metapost.metafun = metapost.metafun or { }
-local metafun = metapost.metafun
+local metafun = metapost.metafun or { }
+metapost.metafun = metafun
function metafun.topath(t,connector)
context("(")
if #t > 0 then
+ if not connector then
+ connector = ".."
+ end
for i=1,#t do
if i > 1 then
- context(connector or "..")
+ context(connector)
end
local ti = t[i]
if type(ti) == "string" then
@@ -39,12 +42,15 @@ end
function metafun.interpolate(f,b,e,s,c)
local done = false
context("(")
- for i=b,e,(e-b)/s do
- local d = load(format("return function(x) return %s end",f))
- if d then
- d = d()
+ local d = load(format("return function(x) return %s end",f))
+ if d then
+ d = d()
+ if not c then
+ c = "..."
+ end
+ for i=b,e,(e-b)/s do
if done then
- context(c or "...")
+ context(c)
else
done = true
end
diff --git a/tex/context/base/mkiv/mlib-fio.lua b/tex/context/base/mkiv/mlib-fio.lua
index 51c88eb22..39a709505 100644
--- a/tex/context/base/mkiv/mlib-fio.lua
+++ b/tex/context/base/mkiv/mlib-fio.lua
@@ -54,8 +54,18 @@ local function validftype(ftype)
end
end
+local remapped = {
+ -- We don't yet have an interface for adding more here but when needed
+ -- there will be one.
+ ["hatching.mp"] = "mp-remapped-hatching.mp",
+ ["boxes.mp"] = "mp-remapped-boxes.mp",
+ ["hatching"] = "mp-remapped-hatching.mp",
+ ["boxes"] = "mp-remapped-boxes.mp",
+}
+
finders.file = function(specification,name,mode,ftype)
- return resolvers.findfile(name,validftype(ftype))
+ local usedname = remapped[name] or name
+ return resolvers.findfile(usedname,validftype(ftype))
end
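+
+-- So, assuming the remapped files are present in the distribution, a MetaPost
+-- "input boxes" (or "input hatching") now picks up mp-remapped-boxes.mp instead of
+-- the original file.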
local function i_finder(name,mode,ftype) -- fake message for mpost.map and metafun.mpvi
diff --git a/tex/context/base/mkiv/mlib-run.lua b/tex/context/base/mkiv/mlib-run.lua
index 602d6f36c..82426668f 100644
--- a/tex/context/base/mkiv/mlib-run.lua
+++ b/tex/context/base/mkiv/mlib-run.lua
@@ -6,28 +6,12 @@ if not modules then modules = { } end modules ['mlib-run'] = {
license = "see context related readme files",
}
--- cmyk -> done, native
--- spot -> done, but needs reworking (simpler)
--- multitone ->
--- shade -> partly done, todo: cm
--- figure -> done
--- hyperlink -> low priority, easy
-
--- new * run
--- or
--- new * execute^1 * finish
-
--- a*[b,c] == b + a * (c-b)
-
---[[ldx--
-<p>The directional helpers and pen analysis are more or less translated from the
-<l n='c'/> code. It really helps that Taco know that source so well. Taco and I spent
-quite some time on speeding up the <l n='lua'/> and <l n='c'/> code. There is not
-much to gain, especially if one keeps in mind that when integrated in <l n='tex'/>
-only a part of the time is spent in <l n='metapost'/>. Of course an integrated
-approach is way faster than an external <l n='metapost'/> and processing time
-nears zero.</p>
---ldx]]--
+-- The directional helpers and pen analysis are more or less translated from the C
+-- code. It really helps that Taco knows that source so well. Taco and I spent quite
+-- some time on speeding up the Lua and C code. There is not much to gain,
+-- especially if one keeps in mind that when integrated in TeX only a part of the
+-- time is spent in MetaPost. Of course an integrated approach is way faster than an
+-- external MetaPost and processing time nears zero.
local type, tostring, tonumber, next = type, tostring, tonumber, next
local find, striplines = string.find, utilities.strings.striplines
diff --git a/tex/context/base/mkiv/mult-mps.lua b/tex/context/base/mkiv/mult-mps.lua
index 008bcbb9f..cfa821517 100644
--- a/tex/context/base/mkiv/mult-mps.lua
+++ b/tex/context/base/mkiv/mult-mps.lua
@@ -127,7 +127,7 @@ return {
--
"red", "green", "blue", "cyan", "magenta", "yellow", "black", "white", "background",
--
- "mm", "pt", "dd", "bp", "cm", "pc", "cc", "in", "dk",
+ "mm", "pt", "dd", "bp", "cm", "pc", "cc", "in", "dk", "es", "ts",
--
"triplet", "quadruplet", "totransform", "bymatrix", "closedcurve", "closedlines",
--
diff --git a/tex/context/base/mkiv/node-ini.lua b/tex/context/base/mkiv/node-ini.lua
index ef7d4afed..ea726ff3d 100644
--- a/tex/context/base/mkiv/node-ini.lua
+++ b/tex/context/base/mkiv/node-ini.lua
@@ -6,50 +6,38 @@ if not modules then modules = { } end modules ['node-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Most of the code that had accumulated here is now separated in modules.</p>
---ldx]]--
-
--- I need to clean up this module as it's a bit of a mess now. The latest luatex
--- has most tables but we have a few more in luametatex. Also, some are different
--- between these engines. We started out with hardcoded tables, that then ended
--- up as comments and are now gone (as they differ per engine anyway).
+-- Most of the code that had accumulated here is now separated in modules.
local next, type, tostring = next, type, tostring
local gsub = string.gsub
local concat, remove = table.concat, table.remove
local sortedhash, sortedkeys, swapped = table.sortedhash, table.sortedkeys, table.swapped
---[[ldx--
-<p>Access to nodes is what gives <l n='luatex'/> its power. Here we implement a
-few helper functions. These functions are rather optimized.</p>
---ldx]]--
-
---[[ldx--
-<p>When manipulating node lists in <l n='context'/>, we will remove nodes and
-insert new ones. While node access was implemented, we did quite some experiments
-in order to find out if manipulating nodes in <l n='lua'/> was feasible from the
-perspective of performance.</p>
-
-<p>First of all, we noticed that the bottleneck is more with excessive callbacks
-(some gets called very often) and the conversion from and to <l n='tex'/>'s
-datastructures. However, at the <l n='lua'/> end, we found that inserting and
-deleting nodes in a table could become a bottleneck.</p>
-
-<p>This resulted in two special situations in passing nodes back to <l n='tex'/>:
-a table entry with value <type>false</type> is ignored, and when instead of a
-table <type>true</type> is returned, the original table is used.</p>
-
-<p>Insertion is handled (at least in <l n='context'/> as follows. When we need to
-insert a node at a certain position, we change the node at that position by a
-dummy node, tagged <type>inline</type> which itself has_attribute the original
-node and one or more new nodes. Before we pass back the list we collapse the
-list. Of course collapsing could be built into the <l n='tex'/> engine, but this
-is a not so natural extension.</p>
-
-<p>When we collapse (something that we only do when really needed), we also
-ignore the empty nodes. [This is obsolete!]</p>
---ldx]]--
+-- Access to nodes is what gives LuaTeX its power. Here we implement a few helper
+-- functions. These functions are rather optimized.
+--
+-- When manipulating node lists in ConTeXt, we will remove nodes and insert new
+-- ones. While node access was implemented, we did quite some experiments in order
+-- to find out if manipulating nodes in Lua was feasible from the perspective of
+-- performance.
+--
+-- First of all, we noticed that the bottleneck is more with excessive callbacks
+-- (some get called very often) and the conversion from and to TeX's
+-- datastructures. However, at the Lua end, we found that inserting and deleting
+-- nodes in a table could become a bottleneck.
+--
+-- This resulted in two special situations in passing nodes back to TeX: a table
+-- entry with value 'false' is ignored, and when instead of a table 'true' is
+-- returned, the original table is used.
+--
+-- Insertion is handled (at least in ConTeXt) as follows. When we need to insert a
+-- node at a certain position, we replace the node at that position by a dummy node,
+-- tagged 'inline', which itself has as attribute the original node and one or more new
+-- nodes. Before we pass back the list we collapse the list. Of course collapsing
+-- could be built into the TeX engine, but this is a not so natural extension.
+--
+-- When we collapse (something that we only do when really needed), we also ignore
+-- the empty nodes. [This is obsolete!]
-- local gf = node.direct.getfield
-- local n = table.setmetatableindex("number")
diff --git a/tex/context/base/mkiv/node-res.lua b/tex/context/base/mkiv/node-res.lua
index 5c669f9da..f2c6e97e9 100644
--- a/tex/context/base/mkiv/node-res.lua
+++ b/tex/context/base/mkiv/node-res.lua
@@ -9,11 +9,6 @@ if not modules then modules = { } end modules ['node-res'] = {
local type, next = type, next
local gmatch, format = string.gmatch, string.format
---[[ldx--
-<p>The next function is not that much needed but in <l n='context'/> we use
-for debugging <l n='luatex'/> node management.</p>
---ldx]]--
-
local nodes, node = nodes, node
local report_nodes = logs.reporter("nodes","housekeeping")
diff --git a/tex/context/base/mkiv/node-tra.lua b/tex/context/base/mkiv/node-tra.lua
index 67435f1c7..20e354392 100644
--- a/tex/context/base/mkiv/node-tra.lua
+++ b/tex/context/base/mkiv/node-tra.lua
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['node-tra'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is rather experimental. We need more control and some of this
-might become a runtime module instead. This module will be cleaned up!</p>
---ldx]]--
+-- Some of the code here might become a runtime module instead. This old module will
+-- be cleaned up anyway!
local next = next
local utfchar = utf.char
diff --git a/tex/context/base/mkiv/pack-obj.lua b/tex/context/base/mkiv/pack-obj.lua
index 445085776..dda828749 100644
--- a/tex/context/base/mkiv/pack-obj.lua
+++ b/tex/context/base/mkiv/pack-obj.lua
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['pack-obj'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We save object references in the main utility table. jobobjects are
-reusable components.</p>
---ldx]]--
+-- We save object references in the main utility table; job objects are reusable
+-- components.
local context = context
local codeinjections = backends.codeinjections
diff --git a/tex/context/base/mkiv/pack-rul.lua b/tex/context/base/mkiv/pack-rul.lua
index 98117867c..20db028ec 100644
--- a/tex/context/base/mkiv/pack-rul.lua
+++ b/tex/context/base/mkiv/pack-rul.lua
@@ -7,10 +7,6 @@ if not modules then modules = { } end modules ['pack-rul'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>An explanation is given in the history document <t>mk</t>.</p>
---ldx]]--
-
-- we need to be careful with display math as it uses shifts
-- \framed[align={lohi,middle}]{$x$}
diff --git a/tex/context/base/mkiv/publ-dat.lua b/tex/context/base/mkiv/publ-dat.lua
index 64aaaf460..2e5f07f05 100644
--- a/tex/context/base/mkiv/publ-dat.lua
+++ b/tex/context/base/mkiv/publ-dat.lua
@@ -11,12 +11,6 @@ if not modules then modules = { } end modules ['publ-dat'] = {
-- todo: dataset = datasets[dataset] => current = datasets[dataset]
-- todo: maybe split this file
---[[ldx--
-<p>This is a prelude to integrated bibliography support. This file just loads
-bibtex files and converts them to xml so that the we access the content
-in a convenient way. Actually handling the data takes place elsewhere.</p>
---ldx]]--
-
if not characters then
dofile(resolvers.findfile("char-utf.lua"))
dofile(resolvers.findfile("char-tex.lua"))
diff --git a/tex/context/base/mkiv/publ-ini.lua b/tex/context/base/mkiv/publ-ini.lua
index dac0ab441..aa96dd8bc 100644
--- a/tex/context/base/mkiv/publ-ini.lua
+++ b/tex/context/base/mkiv/publ-ini.lua
@@ -296,7 +296,8 @@ do
local checksum = nil
local username = file.addsuffix(file.robustname(formatters["%s-btx-%s"](prefix,name)),"lua")
if userdata and next(userdata) then
- if job.passes.first then
+ if environment.currentrun == 1 then
+ -- if job.passes.first then
local newdata = serialize(userdata)
checksum = md5.HEX(newdata)
io.savedata(username,newdata)
diff --git a/tex/context/base/mkiv/publ-ini.mkiv b/tex/context/base/mkiv/publ-ini.mkiv
index 6e34d3ab5..05d93ef85 100644
--- a/tex/context/base/mkiv/publ-ini.mkiv
+++ b/tex/context/base/mkiv/publ-ini.mkiv
@@ -342,7 +342,7 @@
\newtoks\t_btx_cmd
\newbox \b_btx_cmd
-\t_btx_cmd{\global\setbox\b_btx_cmd\hpack{\clf_btxcmdstring}}
+\t_btx_cmd{\global\setbox\b_btx_cmd\hbox{\clf_btxcmdstring}} % no \hpack, otherwise prerolling --- doesn't work
\let\btxcmd\btxcommand
diff --git a/tex/context/base/mkiv/regi-ini.lua b/tex/context/base/mkiv/regi-ini.lua
index 2a3b2caaf..460d97d5e 100644
--- a/tex/context/base/mkiv/regi-ini.lua
+++ b/tex/context/base/mkiv/regi-ini.lua
@@ -6,11 +6,8 @@ if not modules then modules = { } end modules ['regi-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Regimes take care of converting the input characters into
-<l n='utf'/> sequences. The conversion tables are loaded at
-runtime.</p>
---ldx]]--
+-- Regimes take care of converting the input characters into UTF sequences. The
+-- conversion tables are loaded at runtime.
-- Todo: use regi-imp*.lua instead
@@ -30,9 +27,7 @@ local sequencers = utilities.sequencers
local textlineactions = resolvers.openers.helpers.textlineactions
local setmetatableindex = table.setmetatableindex
---[[ldx--
-<p>We will hook regime handling code into the input methods.</p>
---ldx]]--
+-- We will hook regime handling code into the input methods.
local trace_translating = false trackers.register("regimes.translating", function(v) trace_translating = v end)
diff --git a/tex/context/base/mkiv/sort-ini.lua b/tex/context/base/mkiv/sort-ini.lua
index 98f516c22..a375d7057 100644
--- a/tex/context/base/mkiv/sort-ini.lua
+++ b/tex/context/base/mkiv/sort-ini.lua
@@ -6,49 +6,45 @@ if not modules then modules = { } end modules ['sort-ini'] = {
license = "see context related readme files"
}
--- It took a while to get there, but with Fleetwood Mac's "Don't Stop"
--- playing in the background we sort of got it done.
-
---[[<p>The code here evolved from the rather old mkii approach. There
-we concatinate the key and (raw) entry into a new string. Numbers and
-special characters get some treatment so that they sort ok. In
-addition some normalization (lowercasing, accent stripping) takes
-place and again data is appended ror prepended. Eventually these
-strings are sorted using a regular string sorter. The relative order
-of character is dealt with by weighting them. It took a while to
-figure this all out but eventually it worked ok for most languages,
-given that the right datatables were provided.</p>
-
-<p>Here we do follow a similar approach but this time we don't append
-the manipulated keys and entries but create tables for each of them
-with entries being tables themselves having different properties. In
-these tables characters are represented by numbers and sorting takes
-place using these numbers. Strings are simplified using lowercasing
-as well as shape codes. Numbers are filtered and after getting an offset
-they end up at the right end of the spectrum (more clever parser will
-be added some day). There are definitely more solutions to the problem
-and it is a nice puzzle to solve.</p>
-
-<p>In the future more methods can be added, as there is practically no
-limit to what goes into the tables. For that we will provide hooks.</p>
-
-<p>Todo: decomposition with specific order of accents, this is
-relatively easy to do.</p>
-
-<p>Todo: investigate what standards and conventions there are and see
-how they map onto this mechanism. I've learned that users can come up
-with any demand so nothing here is frozen.</p>
-
-<p>Todo: I ran into the Unicode Collation document and noticed that
-there are some similarities (like the weights) but using that method
-would still demand extra code for language specifics. One option is
-to use the allkeys.txt file for the uc vectors but then we would also
-use the collapsed key (sq, code is now commented). In fact, we could
-just hook those into the replacer code that we reun beforehand.</p>
-
-<p>In the future index entries will become more clever, i.e. they will
-have language etc properties that then can be used.</p>
-]]--
+-- It took a while to get there, but with Fleetwood Mac's "Don't Stop" playing in
+-- the background we sort of got it done.
+--
+-- The code here evolved from the rather old mkii approach. There we concatenate the
+-- key and (raw) entry into a new string. Numbers and special characters get some
+-- treatment so that they sort ok. In addition some normalization (lowercasing,
+-- accent stripping) takes place and again data is appended or prepended.
+-- Eventually these strings are sorted using a regular string sorter. The relative
+-- order of characters is dealt with by weighting them. It took a while to figure
+-- this all out but eventually it worked ok for most languages, given that the right
+-- datatables were provided.
+--
+-- Here we do follow a similar approach but this time we don't append the
+-- manipulated keys and entries but create tables for each of them with entries
+-- being tables themselves having different properties. In these tables characters
+-- are represented by numbers and sorting takes place using these numbers. Strings
+-- are simplified using lowercasing as well as shape codes. Numbers are filtered and
+-- after getting an offset they end up at the right end of the spectrum (a more clever
+-- parser will be added some day). There are definitely more solutions to the
+-- problem and it is a nice puzzle to solve.
+--
+-- In the future more methods can be added, as there is practically no limit to what
+-- goes into the tables. For that we will provide hooks.
+--
+-- Todo: decomposition with specific order of accents, this is relatively easy to
+-- do.
+--
+-- Todo: investigate what standards and conventions there are and see how they map
+-- onto this mechanism. I've learned that users can come up with any demand so
+-- nothing here is frozen.
+--
+-- Todo: I ran into the Unicode Collation document and noticed that there are some
+-- similarities (like the weights) but using that method would still demand extra
+-- code for language specifics. One option is to use the allkeys.txt file for the uc
+-- vectors but then we would also use the collapsed key (sq, code is now commented).
+-- In fact, we could just hook those into the replacer code that we run beforehand.
+--
+-- In the future index entries will become more clever, i.e. they will have language
+-- etc properties that then can be used.
local gsub, find, rep, sub, sort, concat, tohash, format = string.gsub, string.find, string.rep, string.sub, table.sort, table.concat, table.tohash, string.format
local utfbyte, utfchar, utfcharacters = utf.byte, utf.char, utf.characters
diff --git a/tex/context/base/mkiv/status-files.pdf b/tex/context/base/mkiv/status-files.pdf
index de994239b..476b1642f 100644
--- a/tex/context/base/mkiv/status-files.pdf
+++ b/tex/context/base/mkiv/status-files.pdf
Binary files differ
diff --git a/tex/context/base/mkiv/status-lua.pdf b/tex/context/base/mkiv/status-lua.pdf
index e6773acf4..734e7705c 100644
--- a/tex/context/base/mkiv/status-lua.pdf
+++ b/tex/context/base/mkiv/status-lua.pdf
Binary files differ
diff --git a/tex/context/base/mkiv/syst-con.lua b/tex/context/base/mkiv/syst-con.lua
index 6a11fa8d3..f0ea8546a 100644
--- a/tex/context/base/mkiv/syst-con.lua
+++ b/tex/context/base/mkiv/syst-con.lua
@@ -20,10 +20,9 @@ local implement = interfaces.implement
local formatters = string.formatters
---[[ldx--
-<p>For raw 8 bit characters, the offset is 0x110000 (bottom of plane 18) at
-the top of <l n='luatex'/>'s char range but outside the unicode range.</p>
---ldx]]--
+-- For raw 8 bit characters, the offset is 0x110000 (bottom of plane 18) at the top
+-- of LuaTeX's char range but outside the Unicode range. This is no longer the case
+-- in LuaMetaTeX.
function converters.hexstringtonumber(n) tonumber(n,16) end
function converters.octstringtonumber(n) tonumber(n, 8) end
diff --git a/tex/context/base/mkiv/syst-ini.mkiv b/tex/context/base/mkiv/syst-ini.mkiv
index ae1978eb6..5f226958b 100644
--- a/tex/context/base/mkiv/syst-ini.mkiv
+++ b/tex/context/base/mkiv/syst-ini.mkiv
@@ -253,6 +253,9 @@
\let\newfam\newfamily
+\let\newinteger \newcount % just in case
+\let\newdimension\newdimen % just in case
+
\firstvalidlanguage\plusone
% Watch out, for the moment we disable the check for already being defined
diff --git a/tex/context/base/mkiv/tabl-tbl.mkiv b/tex/context/base/mkiv/tabl-tbl.mkiv
index 2ed104adf..8b6afb956 100644
--- a/tex/context/base/mkiv/tabl-tbl.mkiv
+++ b/tex/context/base/mkiv/tabl-tbl.mkiv
@@ -1551,7 +1551,8 @@
\fi}
\def\tabl_tabulate_vrule_reset_indeed
- {\dofastloopcs\c_tabl_tabulate_max_vrulecolumn\tabl_tabulate_vrule_reset_step
+ {\gletcsname\??tabulatevrule0\endcsname\undefined
+ \dofastloopcs\c_tabl_tabulate_max_vrulecolumn\tabl_tabulate_vrule_reset_step
\global\c_tabl_tabulate_max_vrulecolumn\zerocount}
\def\tabl_tabulate_vrule_reset_step % undefined or relax
diff --git a/tex/context/base/mkiv/trac-lmx.lua b/tex/context/base/mkiv/trac-lmx.lua
index a531a76d6..56522e1e7 100644
--- a/tex/context/base/mkiv/trac-lmx.lua
+++ b/tex/context/base/mkiv/trac-lmx.lua
@@ -6,7 +6,8 @@ if not modules then modules = { } end modules ['trac-lmx'] = {
license = "see context related readme files"
}
--- this one will be adpated to the latest helpers
+-- This one will be adapted to the latest helpers. It might even become a
+-- module instead.
local type, tostring, rawget, loadstring, pcall = type, tostring, rawget, loadstring, pcall
local format, sub, gsub = string.format, string.sub, string.gsub
diff --git a/tex/context/base/mkiv/util-dim.lua b/tex/context/base/mkiv/util-dim.lua
index bb9eca966..6462f3e49 100644
--- a/tex/context/base/mkiv/util-dim.lua
+++ b/tex/context/base/mkiv/util-dim.lua
@@ -6,14 +6,10 @@ if not modules then modules = { } end modules ['util-dim'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Internally <l n='luatex'/> work with scaled point, which are
-represented by integers. However, in practice, at east at the
-<l n='tex'/> end we work with more generic units like points (pt). Going
-from scaled points (numbers) to one of those units can be
-done by using the conversion factors collected in the following
-table.</p>
---ldx]]--
+-- Internally LuaTeX works with scaled points, which are represented by integers.
+-- However, in practice, at least at the TeX end we work with more generic units like
+-- points (pt). Going from scaled points (numbers) to one of those units can be done
+-- by using the conversion factors collected in the following table.
local format, match, gsub, type, setmetatable = string.format, string.match, string.gsub, type, setmetatable
local P, S, R, Cc, C, lpegmatch = lpeg.P, lpeg.S, lpeg.R, lpeg.Cc, lpeg.C, lpeg.match
@@ -45,7 +41,9 @@ local dimenfactors = allocate {
["dd"] = ( 1157/ 1238)/65536,
["cc"] = ( 1157/14856)/65536,
-- ["nd"] = (20320/21681)/65536,
- -- ["nc"] = ( 5080/65043)/65536
+ -- ["nc"] = ( 5080/65043)/65536,
+ ["es"] = ( 9176/ 129)/65536,
+ ["ts"] = ( 4588/ 645)/65536,
}
-- print(table.serialize(dimenfactors))
@@ -86,10 +84,8 @@ local dimenfactors = allocate {
-- ["sp"]=1,
-- }
---[[ldx--
-<p>A conversion function that takes a number, unit (string) and optional
-format (string) is implemented using this table.</p>
---ldx]]--
+-- A conversion function that takes a number, unit (string) and optional format
+-- (string) is implemented using this table.
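+--
+-- A rough sketch of the effect (65536 scaled points make one point):
+--
+-- number.todimen(65536,"pt") -- something like "1pt" (exact formatting depends on 'fmt')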
local f_none = formatters["%s%s"]
local f_true = formatters["%0.5F%s"]
@@ -110,9 +106,7 @@ local function numbertodimen(n,unit,fmt) -- will be redefined later !
end
end
---[[ldx--
-<p>We collect a bunch of converters in the <type>number</type> namespace.</p>
---ldx]]--
+-- We collect a bunch of converters in the 'number' namespace.
number.maxdimen = 1073741823
number.todimen = numbertodimen
@@ -122,7 +116,7 @@ function number.topoints (n,fmt) return numbertodimen(n,"pt",fmt) end
function number.toinches (n,fmt) return numbertodimen(n,"in",fmt) end
function number.tocentimeters (n,fmt) return numbertodimen(n,"cm",fmt) end
function number.tomillimeters (n,fmt) return numbertodimen(n,"mm",fmt) end
-function number.toscaledpoints(n,fmt) return numbertodimen(n,"sp",fmt) end
+-------- number.toscaledpoints(n,fmt) return numbertodimen(n,"sp",fmt) end
function number.toscaledpoints(n) return n .. "sp" end
function number.tobasepoints (n,fmt) return numbertodimen(n,"bp",fmt) end
function number.topicas (n,fmt) return numbertodimen(n,"pc",fmt) end
@@ -130,14 +124,13 @@ function number.todidots (n,fmt) return numbertodimen(n,"dd",fmt) end
function number.tociceros (n,fmt) return numbertodimen(n,"cc",fmt) end
-------- number.tonewdidots (n,fmt) return numbertodimen(n,"nd",fmt) end
-------- number.tonewciceros (n,fmt) return numbertodimen(n,"nc",fmt) end
+function number.toediths (n,fmt) return numbertodimen(n,"es",fmt) end
+function number.totoves (n,fmt) return numbertodimen(n,"ts",fmt) end
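+
+-- As a hedged illustration of these converters (the exact rendering of the number
+-- depends on Lua's tostring and on the optional format argument):
+--
+-- number.topoints(65536)      -- something like "1.0pt"
+-- number.tocentimeters(65536) -- roughly "0.035cm"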
---[[ldx--
-<p>More interesting it to implement a (sort of) dimen datatype, one
-that permits calculations too. First we define a function that
-converts a string to scaledpoints. We use <l n='lpeg'/>. We capture
-a number and optionally a unit. When no unit is given a constant
-capture takes place.</p>
---ldx]]--
+-- More interesting is to implement a (sort of) dimen datatype, one that permits
+-- calculations too. First we define a function that converts a string to
+-- scaledpoints. We use LPEG. We capture a number and optionally a unit. When no
+-- unit is given a constant capture takes place.
local amount = (S("+-")^0 * R("09")^0 * P(".")^0 * R("09")^0) + Cc("0")
local unit = R("az")^1 + P("%")
@@ -152,21 +145,16 @@ function number.splitdimen(str)
return lpegmatch(splitter,str)
end
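+
+-- As a hedged illustration (the actual capture values depend on the splitter
+-- defined in this file, which is not visible in this hunk):
+--
+-- number.splitdimen("10pt") -- an amount and a unit, e.g. 10 and "pt"
+-- number.splitdimen("10")   -- the amount plus the constant (default) capture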
---[[ldx--
-<p>We use a metatable to intercept errors. When no key is found in
-the table with factors, the metatable will be consulted for an
-alternative index function.</p>
---ldx]]--
+-- We use a metatable to intercept errors. When no key is found in the table with
+-- factors, the metatable will be consulted for an alternative index function.
setmetatableindex(dimenfactors, function(t,s)
-- error("wrong dimension: " .. (s or "?")) -- better a message
return false
end)
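+
+-- So an unknown unit simply yields a false factor instead of raising an error:
+--
+-- print(dimenfactors["xx"]) -- false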
---[[ldx--
-<p>We redefine the following function later on, so we comment it
-here (which saves us bytecodes.</p>
---ldx]]--
+-- We redefine the following function later on, so we comment it here (which saves
+-- us bytecodes).
-- function string.todimen(str)
-- if type(str) == "number" then
@@ -182,44 +170,38 @@ here (which saves us bytecodes.</p>
local stringtodimen -- assigned later (commenting saves bytecode)
local amount = S("+-")^0 * R("09")^0 * S(".,")^0 * R("09")^0
-local unit = P("pt") + P("cm") + P("mm") + P("sp") + P("bp") + P("in") +
- P("pc") + P("dd") + P("cc") + P("nd") + P("nc")
+local unit = P("pt") + P("cm") + P("mm") + P("sp") + P("bp")
+ + P("es") + P("ts") + P("pc") + P("dd") + P("cc")
+ + P("in")
+ -- + P("nd") + P("nc")
local validdimen = amount * unit
lpeg.patterns.validdimen = validdimen
---[[ldx--
-<p>This converter accepts calls like:</p>
-
-<typing>
-string.todimen("10")
-string.todimen(".10")
-string.todimen("10.0")
-string.todimen("10.0pt")
-string.todimen("10pt")
-string.todimen("10.0pt")
-</typing>
-
-<p>With this in place, we can now implement a proper datatype for dimensions, one
-that permits us to do this:</p>
-
-<typing>
-s = dimen "10pt" + dimen "20pt" + dimen "200pt"
- - dimen "100sp" / 10 + "20pt" + "0pt"
-</typing>
-
-<p>We create a local metatable for this new type:</p>
---ldx]]--
+-- This converter accepts calls like:
+--
+-- string.todimen("10")
+-- string.todimen(".10")
+-- string.todimen("10.0")
+-- string.todimen("10.0pt")
+-- string.todimen("10pt")
+-- string.todimen("10.0pt")
+--
+-- With this in place, we can now implement a proper datatype for dimensions, one
+-- that permits us to do this:
+--
+-- s = dimen "10pt" + dimen "20pt" + dimen "200pt"
+-- - dimen "100sp" / 10 + "20pt" + "0pt"
+--
+-- We create a local metatable for this new type:
local dimensions = { }
---[[ldx--
-<p>The main (and globally) visible representation of a dimen is defined next: it is
-a one-element table. The unit that is returned from the match is normally a number
-(one of the previously defined factors) but we also accept functions. Later we will
-see why. This function is redefined later.</p>
---ldx]]--
+-- The main (and globally) visible representation of a dimen is defined next: it is
+-- a one-element table. The unit that is returned from the match is normally a
+-- number (one of the previously defined factors) but we also accept functions.
+-- Later we will see why. This function is redefined later.
-- function dimen(a)
-- if a then
@@ -241,11 +223,9 @@ see why. This function is redefined later.</p>
-- end
-- end
---[[ldx--
-<p>This function return a small hash with a metatable attached. It is
-through this metatable that we can do the calculations. We could have
-shared some of the code but for reasons of speed we don't.</p>
---ldx]]--
+-- This function returns a small hash with a metatable attached. It is through this
+-- metatable that we can do the calculations. We could have shared some of the code
+-- but for reasons of speed we don't.
function dimensions.__add(a, b)
local ta, tb = type(a), type(b)
@@ -281,20 +261,16 @@ function dimensions.__unm(a)
return setmetatable({ - a }, dimensions)
end
---[[ldx--
-<p>It makes no sense to implement the power and modulo function but
-the next two do make sense because they permits is code like:</p>
-
-<typing>
-local a, b = dimen "10pt", dimen "11pt"
-...
-if a > b then
- ...
-end
-</typing>
---ldx]]--
-
--- makes no sense: dimensions.__pow and dimensions.__mod
+-- It makes no sense to implement the power and modulo functions, but
+-- the next two do make sense because they permit code like:
+--
+-- local a, b = dimen "10pt", dimen "11pt"
+-- ...
+-- if a > b then
+-- ...
+-- end
+--
+-- So dimensions.__pow and dimensions.__mod are not defined.
function dimensions.__lt(a, b)
return a[1] < b[1]
@@ -304,24 +280,17 @@ function dimensions.__eq(a, b)
return a[1] == b[1]
end
---[[ldx--
-<p>We also need to provide a function for conversion to string (so that
-we can print dimensions). We print them as points, just like <l n='tex'/>.</p>
---ldx]]--
+-- We also need to provide a function for conversion to string (so that we can print
+-- dimensions). We print them as points, just like TeX.
function dimensions.__tostring(a)
return a[1]/65536 .. "pt" -- instead of todimen(a[1])
end
---[[ldx--
-<p>Since it does not take much code, we also provide a way to access
-a few accessors</p>
-
-<typing>
-print(dimen().pt)
-print(dimen().sp)
-</typing>
---ldx]]--
+-- Since it does not take much code, we also provide a few accessors:
+--
+-- print(dimen().pt)
+-- print(dimen().sp)
function dimensions.__index(tab,key)
local d = dimenfactors[key]
@@ -332,41 +301,34 @@ function dimensions.__index(tab,key)
return 1/d
end
---[[ldx--
-<p>In the converter from string to dimension we support functions as
-factors. This is because in <l n='tex'/> we have a few more units:
-<type>ex</type> and <type>em</type>. These are not constant factors but
-depend on the current font. They are not defined by default, but need
-an explicit function call. This is because at the moment that this code
-is loaded, the relevant tables that hold the functions needed may not
-yet be available.</p>
---ldx]]--
-
- dimenfactors["ex"] = 4 * 1/65536 -- 4pt
- dimenfactors["em"] = 10 * 1/65536 -- 10pt
--- dimenfactors["%"] = 4 * 1/65536 -- 400pt/100
-
---[[ldx--
-<p>The previous code is rather efficient (also thanks to <l n='lpeg'/>) but we
-can speed it up by caching converted dimensions. On my machine (2008) the following
-loop takes about 25.5 seconds.</p>
-
-<typing>
-for i=1,1000000 do
- local s = dimen "10pt" + dimen "20pt" + dimen "200pt"
- - dimen "100sp" / 10 + "20pt" + "0pt"
-end
-</typing>
-
-<p>When we cache converted strings this becomes 16.3 seconds. In order not
-to waste too much memory on it, we tag the values of the cache as being
-week which mean that the garbage collector will collect them in a next
-sweep. This means that in most cases the speed up is mostly affecting the
-current couple of calculations and as such the speed penalty is small.</p>
-
-<p>We redefine two previous defined functions that can benefit from
-this:</p>
---ldx]]--
+-- In the converter from string to dimension we support functions as factors. This
+-- is because in TeX we have a few more units: 'ex' and 'em'. These are not constant
+-- factors but depend on the current font. They are not defined by default, but need
+-- an explicit function call. This is because at the moment that this code is
+-- loaded, the relevant tables that hold the functions needed may not yet be
+-- available.
+
+ dimenfactors["ex"] = 4 /65536 -- 4pt
+ dimenfactors["em"] = 10 /65536 -- 10pt
+-- dimenfactors["%"] = 4 /65536 -- 400pt/100
+ dimenfactors["eu"] = (9176/129)/65536 -- 1es
+
+-- The previous code is rather efficient (also thanks to LPEG) but we can speed it
+-- up by caching converted dimensions. On my machine (2008) the following loop takes
+-- about 25.5 seconds.
+--
+-- for i=1,1000000 do
+-- local s = dimen "10pt" + dimen "20pt" + dimen "200pt"
+-- - dimen "100sp" / 10 + "20pt" + "0pt"
+-- end
+--
+-- When we cache converted strings this becomes 16.3 seconds. In order not to waste
+-- too much memory on it, we tag the values of the cache as being weak, which means
+-- that the garbage collector will collect them in a next sweep. This means that in
+-- most cases the speed up is mostly affecting the current couple of calculations
+-- and as such the speed penalty is small.
+--
+-- We redefine two previously defined functions that can benefit from this:
local known = { } setmetatable(known, { __mode = "v" })
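+
+-- A minimal sketch of the caching idea (a hedged illustration; the real
+-- redefinitions follow below and differ in detail, and they only work once
+-- stringtodimen has been assigned):
+--
+-- function dimen(a)
+--     local d = known[a]
+--     if not d then
+--         d = setmetatable({ stringtodimen(a) }, dimensions)
+--         known[a] = d
+--     end
+--     return d
+-- end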
@@ -436,14 +398,10 @@ function number.toscaled(d)
return format("%0.5f",d/0x10000) -- 2^16
end
---[[ldx--
-<p>In a similar fashion we can define a glue datatype. In that case we
-probably use a hash instead of a one-element table.</p>
---ldx]]--
-
---[[ldx--
-<p>Goodie:s</p>
---ldx]]--
+-- In a similar fashion we can define a glue datatype. In that case we probably use
+-- a hash instead of a one-element table.
+--
+-- A goodie:
function number.percent(n,d) -- will be cleaned up once luatex 0.30 is out
d = d or texget("hsize")
diff --git a/tex/context/base/mkiv/util-fmt.lua b/tex/context/base/mkiv/util-fmt.lua
index fe80c6420..4da4ef985 100644
--- a/tex/context/base/mkiv/util-fmt.lua
+++ b/tex/context/base/mkiv/util-fmt.lua
@@ -11,7 +11,7 @@ utilities.formatters = utilities.formatters or { }
local formatters = utilities.formatters
local concat, format = table.concat, string.format
-local tostring, type = tostring, type
+local tostring, type, unpack = tostring, type, unpack
local strip = string.strip
local lpegmatch = lpeg.match
@@ -21,12 +21,15 @@ function formatters.stripzeros(str)
return lpegmatch(stripper,str)
end
-function formatters.formatcolumns(result,between)
+function formatters.formatcolumns(result,between,header)
if result and #result > 0 then
- between = between or " "
- local widths, numbers = { }, { }
- local first = result[1]
- local n = #first
+ local widths = { }
+ local numbers = { }
+ local templates = { }
+ local first = result[1]
+ local n = #first
+ between = between or " "
+ --
for i=1,n do
widths[i] = 0
end
@@ -35,13 +38,6 @@ function formatters.formatcolumns(result,between)
for j=1,n do
local rj = r[j]
local tj = type(rj)
--- if tj == "number" then
--- numbers[j] = true
--- end
--- if tj ~= "string" then
--- rj = tostring(rj)
--- r[j] = rj
--- end
if tj == "number" then
numbers[j] = true
rj = tostring(rj)
@@ -55,29 +51,59 @@ function formatters.formatcolumns(result,between)
end
end
end
+ if header then
+ for i=1,#header do
+ local h = header[i]
+ for j=1,n do
+ local hj = tostring(h[j])
+ h[j] = hj
+ local w = #hj
+ if w > widths[j] then
+ widths[j] = w
+ end
+ end
+ end
+ end
for i=1,n do
local w = widths[i]
if numbers[i] then
if w > 80 then
- widths[i] = "%s" .. between
- else
- widths[i] = "%0" .. w .. "i" .. between
+ templates[i] = "%s" .. between
+ else
+ templates[i] = "% " .. w .. "i" .. between
end
else
if w > 80 then
- widths[i] = "%s" .. between
- elseif w > 0 then
- widths[i] = "%-" .. w .. "s" .. between
+ templates[i] = "%s" .. between
+ elseif w > 0 then
+ templates[i] = "%-" .. w .. "s" .. between
else
- widths[i] = "%s"
+ templates[i] = "%s"
end
end
end
- local template = strip(concat(widths))
+ local template = strip(concat(templates))
for i=1,#result do
local str = format(template,unpack(result[i]))
result[i] = strip(str)
end
+ if header then
+ for i=1,n do
+ local w = widths[i]
+ if w > 80 then
+ templates[i] = "%s" .. between
+ elseif w > 0 then
+ templates[i] = "%-" .. w .. "s" .. between
+ else
+ templates[i] = "%s"
+ end
+ end
+ local template = strip(concat(templates))
+ for i=1,#header do
+ local str = format(template,unpack(header[i]))
+ header[i] = strip(str)
+ end
+ end
end
- return result
+ return result, header
end
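+
+-- A hedged usage sketch of the new header argument (the values are illustrative;
+-- the exact spacing depends on the between string and on strip):
+--
+-- local rows, head = utilities.formatters.formatcolumns (
+--     { { 1, "first" }, { 22, "second" } },
+--     "  ",
+--     { { "nr", "name" } }
+-- )
+--
+-- -- rows and head now hold lines formatted with the same column template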
diff --git a/tex/context/base/mkiv/util-seq.lua b/tex/context/base/mkiv/util-seq.lua
index 35839f230..49952dd98 100644
--- a/tex/context/base/mkiv/util-seq.lua
+++ b/tex/context/base/mkiv/util-seq.lua
@@ -6,15 +6,13 @@ if not modules then modules = { } end modules ['util-seq'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Here we implement a mechanism for chaining the special functions
-that we use in <l n="context"> to deal with mode list processing. We
-assume that namespaces for the functions are used, but for speed we
-use locals to refer to them when compiling the chain.</p>
---ldx]]--
-
+-- Here we implement a mechanism for chaining the special functions that we use in
+-- ConTeXt to deal with mode list processing. We assume that namespaces for the
+-- functions are used, but for speed we use locals to refer to them when compiling
+-- the chain.
+--
-- todo: delayed: i.e. we register them in the right order already but delay usage
-
+--
-- todo: protect groups (as in tasks)
local gsub, gmatch = string.gsub, string.gmatch
diff --git a/tex/context/base/mkxl/attr-ini.lmt b/tex/context/base/mkxl/attr-ini.lmt
index 8b2ec8911..32fc36cdd 100644
--- a/tex/context/base/mkxl/attr-ini.lmt
+++ b/tex/context/base/mkxl/attr-ini.lmt
@@ -10,10 +10,8 @@ local next, type = next, type
local osexit = os.exit
local sortedhash = table.sortedhash
---[[ldx--
-<p>We start with a registration system for atributes so that we can use the
-symbolic names later on.</p>
---ldx]]--
+-- We start with a registration system for attributes so that we can use the symbolic
+-- names later on.
local nodes = nodes
local context = context
@@ -71,17 +69,13 @@ trackers.register("attributes.values", function(v) trace_values = v end)
-- end
-- end
---[[ldx--
-<p>We reserve this one as we really want it to be always set (faster).</p>
---ldx]]--
+-- We reserve this one as we really want it to be always set (faster).
names[0], numbers["fontdynamic"] = "fontdynamic", 0
---[[ldx--
-<p>private attributes are used by the system and public ones are for users. We use dedicated
-ranges of numbers for them. Of course a the <l n='context'/> end a private attribute can be
-accessible too, so a private attribute can have a public appearance.</p>
---ldx]]--
+-- Private attributes are used by the system and public ones are for users. We use
+-- dedicated ranges of numbers for them. Of course at the TeX end a private attribute
+-- can be accessible too, so a private attribute can have a public appearance.
sharedstorage.attributes_last_private = sharedstorage.attributes_last_private or 15 -- very private
sharedstorage.attributes_last_public = sharedstorage.attributes_last_public or 1024 -- less private
diff --git a/tex/context/base/mkxl/char-tex.lmt b/tex/context/base/mkxl/char-tex.lmt
index 31023136d..0ac297d59 100644
--- a/tex/context/base/mkxl/char-tex.lmt
+++ b/tex/context/base/mkxl/char-tex.lmt
@@ -46,17 +46,14 @@ local trace_defining = false trackers.register("characters.defining", fu
local report_defining = logs.reporter("characters")
---[[ldx--
-<p>In order to deal with 8-bit output, we need to find a way to go from <l n='utf'/> to
-8-bit. This is handled in the <l n='luatex'/> engine itself.</p>
-
-<p>This leaves us problems with characters that are specific to <l n='tex'/> like
-<type>{}</type>, <type>$</type> and alike. We can remap some chars that tex input files
-are sensitive for to a private area (while writing to a utility file) and revert then
-to their original slot when we read in such a file. Instead of reverting, we can (when
-we resolve characters to glyphs) map them to their right glyph there. For this purpose
-we can use the private planes 0x0F0000 and 0x100000.</p>
---ldx]]--
+-- In order to deal with 8-bit output, we need to find a way to go from UTF to
+-- 8-bit. This is handled in the 32 bit engine itself. This leaves us problems with
+-- characters that are specific to TeX, like curly braces and dollars. We can remap
+-- some chars that tex input files are sensitive for to a private area (while
+-- writing to a utility file) and revert them to their original slot when we read in
+-- such a file. Instead of reverting, we can (when we resolve characters to glyphs)
+-- map them to their right glyph there. For this purpose we can use the private
+-- planes 0x0F0000 and 0x100000.
local low = allocate()
local high = allocate()
@@ -106,21 +103,6 @@ private.escape = utf.remapper(escapes) -- maybe: ,"dynamic"
private.replace = utf.remapper(low) -- maybe: ,"dynamic"
private.revert = utf.remapper(high) -- maybe: ,"dynamic"
---[[ldx--
-<p>We get a more efficient variant of this when we integrate
-replacements in collapser. This more or less renders the previous
-private code redundant. The following code is equivalent but the
-first snippet uses the relocated dollars.</p>
-
-<typing>
-[󰀤x󰀤] [$x$]
-</typing>
---ldx]]--
-
--- using the tree-lpeg-mapper would be nice but we also need to deal with end-of-string
--- cases: "\"\i" and don't want "\relax" to be seen as \r e lax" (for which we need to mess
--- with spaces
-
local accentmapping = allocate {
['"'] = { [""] = "¨",
A = "Ä", a = "ä",
@@ -288,12 +270,12 @@ local commandmapping = allocate {
texcharacters.commandmapping = commandmapping
-local ligaturemapping = allocate {
- ["''"] = "”",
- ["``"] = "“",
- ["--"] = "–",
- ["---"] = "—",
-}
+-- local ligaturemapping = allocate {
+-- ["''"] = "”",
+-- ["``"] = "“",
+-- ["--"] = "–",
+-- ["---"] = "—",
+-- }
-- Older accent handling code can be found in char-def.lua but in the meantime
-- we moved on. First the one with commands:
@@ -321,9 +303,9 @@ local function toutfpattern()
hash["{\\"..k.."}"] = v
hash["{\\"..k.." }"] = v
end
- for k, v in next, ligaturemapping do
- hash[k] = v
- end
+ -- for k, v in next, ligaturemapping do
+ -- hash[k] = v
+ -- end
untex = utfchartabletopattern(hash) / hash
end
return untex
@@ -376,9 +358,9 @@ local function toutfpattern()
for k, v in next, commandmapping do
hash[k] = v
end
- for k, v in next, ligaturemapping do
- hash[k] = v
- end
+ -- for k, v in next, ligaturemapping do
+ -- hash[k] = v
+ -- end
untex = utfchartabletopattern(hash) / hash
end
return untex
@@ -580,10 +562,8 @@ implement { -- a waste of scanner but consistent
actions = texcharacters.defineaccents
}
---[[ldx--
-<p>Instead of using a <l n='tex'/> file to define the named glyphs, we
-use the table. After all, we have this information available anyway.</p>
---ldx]]--
+-- Instead of using a TeX file to define the named glyphs, we use the table. After
+-- all, we have this information available anyway.
local function to_number(s)
local n = tonumber(s)
@@ -878,10 +858,6 @@ function characters.setactivecatcodes(cct)
tex.catcodetable = saved
end
---[[ldx--
-<p>Setting the lccodes is also done in a loop over the data table.</p>
---ldx]]--
-
implement {
name = "chardescription",
arguments = "integer",
diff --git a/tex/context/base/mkxl/cont-new.mkxl b/tex/context/base/mkxl/cont-new.mkxl
index 9a6fc93da..53ccef0b6 100644
--- a/tex/context/base/mkxl/cont-new.mkxl
+++ b/tex/context/base/mkxl/cont-new.mkxl
@@ -13,7 +13,7 @@
% \normalend % uncomment this to get the real base runtime
-\newcontextversion{2023.03.20 15:42}
+\newcontextversion{2023.04.01 09:28}
%D This file is loaded at runtime, thereby providing an excellent place for hacks,
%D patches, extensions and new features. There can be local overloads in cont-loc
diff --git a/tex/context/base/mkxl/context.mkxl b/tex/context/base/mkxl/context.mkxl
index 1a07772eb..6f4b7d052 100644
--- a/tex/context/base/mkxl/context.mkxl
+++ b/tex/context/base/mkxl/context.mkxl
@@ -29,7 +29,7 @@
%D {YYYY.MM.DD HH:MM} format.
\immutable\edef\contextformat {\jobname}
-\immutable\edef\contextversion{2023.03.20 15:42}
+\immutable\edef\contextversion{2023.04.01 09:28}
%overloadmode 1 % check frozen / warning
%overloadmode 2 % check frozen / error
@@ -215,8 +215,9 @@
\loadmkxlfile{unic-ini}
-\loadmkxlfile{core-two}
+%loadmkxlfile{core-two} % retired, not in testsuite, not on garden, not in styles
\loadmkxlfile{core-dat}
+\loadmkxlfile{core-pag}
\loadmkxlfile{colo-ini}
\loadmkxlfile{colo-nod}
@@ -647,26 +648,26 @@
% we will definitely freeze mkiv and then use lmt files for futher development
% of lmtx. We also no longer use the macro feature to replace 5.3 compatible
% function calls by native 5.4 features as lmt files assume 5.4 anyway. This
-% makes format generation a little faster (not that it's that slow). It might \
+% makes format generation a little faster (not that it's that slow). It might
% take a while before we dealt with all of them because I'll also clean them
-% up a bit when doing.
+% up a bit when doing so. Some will probably always be shared, like char-def.lua.
%
% % luat-bas.mkxl l-macro-imp-optimize % this is no longer used
-% c:/data/develop/context/sources/buff-imp-default.lua
-% c:/data/develop/context/sources/buff-imp-escaped.lua
-% c:/data/develop/context/sources/buff-imp-lua.lua
-% c:/data/develop/context/sources/buff-imp-mp.lua
-% c:/data/develop/context/sources/buff-imp-nested.lua
-% c:/data/develop/context/sources/buff-imp-parsed-xml.lua
-% c:/data/develop/context/sources/buff-imp-tex.lua
-% c:/data/develop/context/sources/buff-imp-xml.lua
-
% c:/data/develop/context/sources/buff-par.lua
% c:/data/develop/context/sources/buff-ver.lua
+%
+% c:/data/develop/context/sources/buff-imp-default.lua % shared
+% c:/data/develop/context/sources/buff-imp-escaped.lua % shared
+% c:/data/develop/context/sources/buff-imp-lua.lua % shared
+% c:/data/develop/context/sources/buff-imp-mp.lua % shared
+% c:/data/develop/context/sources/buff-imp-nested.lua % shared
+% c:/data/develop/context/sources/buff-imp-parsed-xml.lua % shared
+% c:/data/develop/context/sources/buff-imp-tex.lua % shared
+% c:/data/develop/context/sources/buff-imp-xml.lua % shared
% c:/data/develop/context/sources/char-cjk.lua
-% c:/data/develop/context/sources/char-def.lua
+% c:/data/develop/context/sources/char-def.lua % shared data file, a real big one
% c:/data/develop/context/sources/char-enc.lua
% c:/data/develop/context/sources/char-ent.lua
% c:/data/develop/context/sources/char-fio.lua
@@ -680,7 +681,7 @@
% c:/data/develop/context/sources/cldf-com.lua
% c:/data/develop/context/sources/cldf-ini.lua
-% c:/data/develop/context/sources/cldf-prs.lua % use in chemistry
+% c:/data/develop/context/sources/cldf-prs.lua % used in chemistry
% c:/data/develop/context/sources/cldf-scn.lua
% c:/data/develop/context/sources/cldf-stp.lua
% c:/data/develop/context/sources/cldf-ver.lua
@@ -690,8 +691,6 @@
% c:/data/develop/context/sources/core-con.lua
% c:/data/develop/context/sources/core-ctx.lua
-% c:/data/develop/context/sources/core-dat.lua
-% c:/data/develop/context/sources/core-two.lua
% data...
@@ -700,7 +699,7 @@
% c:/data/develop/context/sources/file-res.lua
% c:/data/develop/context/sources/font-afk.lua
-% c:/data/develop/context/sources/font-agl.lua
+% c:/data/develop/context/sources/font-agl.lua % shared data file
% c:/data/develop/context/sources/font-aux.lua
% c:/data/develop/context/sources/font-cid.lua
% c:/data/develop/context/sources/font-enc.lua
@@ -724,16 +723,16 @@
% c:/data/develop/context/sources/font-trt.lua
% c:/data/develop/context/sources/font-web.lua % proof of concept, never used
-% c:/data/develop/context/sources/font-imp-combining.lua % shared, like typescript
-% c:/data/develop/context/sources/font-imp-dimensions.lua % idem
-% c:/data/develop/context/sources/font-imp-italics.lua % idem
-% c:/data/develop/context/sources/font-imp-notused.lua % idem
-% c:/data/develop/context/sources/font-imp-properties.lua % idem
-% c:/data/develop/context/sources/font-imp-reorder.lua % idem
-% c:/data/develop/context/sources/font-imp-spacekerns.lua % idem
-% c:/data/develop/context/sources/font-imp-tex.lua % idem
-% c:/data/develop/context/sources/font-imp-tweaks.lua % idem
-% c:/data/develop/context/sources/font-imp-unicode.lua % idem
+% c:/data/develop/context/sources/font-imp-combining.lua % shared
+% c:/data/develop/context/sources/font-imp-dimensions.lua % shared
+% c:/data/develop/context/sources/font-imp-italics.lua % shared
+% c:/data/develop/context/sources/font-imp-notused.lua % shared
+% c:/data/develop/context/sources/font-imp-properties.lua % shared
+% c:/data/develop/context/sources/font-imp-reorder.lua % shared
+% c:/data/develop/context/sources/font-imp-spacekerns.lua % shared
+% c:/data/develop/context/sources/font-imp-tex.lua % shared
+% c:/data/develop/context/sources/font-imp-tweaks.lua % shared
+% c:/data/develop/context/sources/font-imp-unicode.lua % shared
% c:/data/develop/context/sources/good-ctx.lua
% c:/data/develop/context/sources/good-ini.lua
@@ -749,26 +748,26 @@
% c:/data/develop/context/sources/java-ini.lua
-% c:/data/develop/context/sources/lang-cnt.lua
-% c:/data/develop/context/sources/lang-def.lua % these are data files
-% c:/data/develop/context/sources/lang-txt.lua % these are data files
+% c:/data/develop/context/sources/lang-cnt.lua % shared data file
+% c:/data/develop/context/sources/lang-def.lua % shared data file
+% c:/data/develop/context/sources/lang-txt.lua % shared data file
% c:/data/develop/context/sources/lang-wrd.lua
% c:/data/develop/context/sources/luat-exe.lua
% c:/data/develop/context/sources/luat-iop.lua
% c:/data/develop/context/sources/luat-mac.lua % will become lmt
-% c:/data/develop/context/sources/lxml-aux.lua
-% c:/data/develop/context/sources/lxml-css.lua
-% c:/data/develop/context/sources/lxml-dir.lua
-% c:/data/develop/context/sources/lxml-ent.lua
-% c:/data/develop/context/sources/lxml-ini.lua
-% c:/data/develop/context/sources/lxml-lpt.lua
-% c:/data/develop/context/sources/lxml-mis.lua
-% c:/data/develop/context/sources/lxml-sor.lua
-% c:/data/develop/context/sources/lxml-tab.lua
-% c:/data/develop/context/sources/lxml-tex.lua
-% c:/data/develop/context/sources/lxml-xml.lua
+% c:/data/develop/context/sources/lxml-aux.lua % the xml interface is rather stable
+% c:/data/develop/context/sources/lxml-css.lua % and is also provided/used in lua so
+% c:/data/develop/context/sources/lxml-dir.lua % might as well share these because they
+% c:/data/develop/context/sources/lxml-ent.lua % are unlikely to change
+% c:/data/develop/context/sources/lxml-ini.lua %
+% c:/data/develop/context/sources/lxml-lpt.lua %
+% c:/data/develop/context/sources/lxml-mis.lua %
+% c:/data/develop/context/sources/lxml-sor.lua %
+% c:/data/develop/context/sources/lxml-tab.lua %
+% c:/data/develop/context/sources/lxml-tex.lua %
+% c:/data/develop/context/sources/lxml-xml.lua %
% c:/data/develop/context/sources/meta-blb.lua
% c:/data/develop/context/sources/meta-fun.lua
@@ -788,16 +787,16 @@
% c:/data/develop/context/sources/page-pst.lua
% c:/data/develop/context/sources/publ-aut.lua % shared
-% c:/data/develop/context/sources/publ-dat.lua
-% c:/data/develop/context/sources/publ-fnd.lua
-% c:/data/develop/context/sources/publ-inc.lua
-% c:/data/develop/context/sources/publ-ini.lua
-% c:/data/develop/context/sources/publ-jrn.lua
-% c:/data/develop/context/sources/publ-oth.lua
-% c:/data/develop/context/sources/publ-reg.lua
-% c:/data/develop/context/sources/publ-sor.lua
-% c:/data/develop/context/sources/publ-tra.lua
-% c:/data/develop/context/sources/publ-usr.lua
+% c:/data/develop/context/sources/publ-dat.lua % shared
+% c:/data/develop/context/sources/publ-fnd.lua % shared
+% c:/data/develop/context/sources/publ-inc.lua % shared
+% c:/data/develop/context/sources/publ-ini.lua % shared
+% c:/data/develop/context/sources/publ-jrn.lua % shared
+% c:/data/develop/context/sources/publ-oth.lua % shared
+% c:/data/develop/context/sources/publ-reg.lua % shared
+% c:/data/develop/context/sources/publ-sor.lua % shared
+% c:/data/develop/context/sources/publ-tra.lua % shared
+% c:/data/develop/context/sources/publ-usr.lua % shared
% c:/data/develop/context/sources/scrn-but.lua
% c:/data/develop/context/sources/scrn-fld.lua
@@ -828,6 +827,3 @@
% c:/data/develop/context/sources/trac-lmx.lua
% c:/data/develop/context/sources/trac-par.lua
% c:/data/develop/context/sources/trac-tex.lua
-
-% c:/data/develop/context/sources/typo-cln.lua -- wrong name for what it does
-% c:/data/develop/context/sources/typo-dha.lua
diff --git a/tex/context/base/mkxl/core-dat.lmt b/tex/context/base/mkxl/core-dat.lmt
new file mode 100644
index 000000000..fd8aa0fb6
--- /dev/null
+++ b/tex/context/base/mkxl/core-dat.lmt
@@ -0,0 +1,225 @@
+if not modules then modules = { } end modules ['core-dat'] = {
+ version = 1.001,
+ comment = "companion to core-dat.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- This module provides a (multipass) container for arbitrary data. It replaces the
+-- twopass data mechanism.
+
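+-- A hedged usage sketch at the Lua end (the names and keys are made up for
+-- illustration):
+--
+-- job.datasets.setdata {
+--     name = "mydata",
+--     tag  = "intro",
+--     data = { title = "Introduction" },
+-- }
+--
+-- -- and in a following run:
+--
+-- local title = job.datasets.getdata("mydata","intro","title","no title")
+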
+local tonumber, tostring, type = tonumber, tostring, type
+
+local context = context
+
+local trace_datasets = false trackers.register("job.datasets" , function(v) trace_datasets = v end)
+
+local report_dataset = logs.reporter("dataset")
+
+local allocate = utilities.storage.allocate
+local settings_to_hash = utilities.parsers.settings_to_hash
+
+local texgetcount = tex.getcount
+local texsetcount = tex.setcount
+
+local v_yes = interfaces.variables.yes
+
+local new_latelua = nodes.pool.latelua
+
+local implement = interfaces.implement
+
+local c_realpageno = tex.iscount("realpageno")
+
+local collected = allocate()
+local tobesaved = allocate()
+
+local datasets = {
+ collected = collected,
+ tobesaved = tobesaved,
+}
+
+job.datasets = datasets
+
+local function initializer()
+ collected = datasets.collected
+ tobesaved = datasets.tobesaved
+end
+
+job.register('job.datasets.collected', tobesaved, initializer, nil)
+
+local sets = { }
+
+table.setmetatableindex(tobesaved, function(t,k)
+ local v = { }
+ t[k] = v
+ return v
+end)
+
+table.setmetatableindex(sets, function(t,k)
+ local v = {
+ index = 0,
+ order = 0,
+ }
+ t[k] = v
+ return v
+end)
+
+local function setdata(settings)
+ local name = settings.name
+ local tag = settings.tag
+ local data = settings.data
+ local list = tobesaved[name]
+ if settings.convert and type(data) == "string" then
+ data = settings_to_hash(data)
+ end
+ if type(data) ~= "table" then
+ data = { data = data }
+ end
+ if not tag then
+ tag = #list + 1
+ else
+ tag = tonumber(tag) or tag -- autonumber saves keys
+ end
+ list[tag] = data
+ if settings.delay == v_yes then
+ local set = sets[name]
+ local index = set.index + 1
+ set.index = index
+ data.index = index
+ data.order = index
+ data.realpage = texgetcount(c_realpageno)
+ if trace_datasets then
+ report_dataset("action %a, name %a, tag %a, index %a","assign delayed",name,tag,index)
+ end
+ elseif trace_datasets then
+ report_dataset("action %a, name %a, tag %a","assign immediate",name,tag)
+ end
+ return name, tag, data
+end
+
+datasets.setdata = setdata
+
+function datasets.extend(name,tag)
+ if type(name) == "table" then
+ name, tag = name.name, name.tag
+ end
+ local set = sets[name]
+ local order = set.order + 1
+ local realpage = texgetcount(c_realpageno)
+ set.order = order
+ local t = tobesaved[name][tag]
+ t.realpage = realpage
+ t.order = order
+ if trace_datasets then
+            report_dataset("action %a, name %a, tag %a, index %a, order %a, page %a","flush by order",name,tag,t.index or 0,order,realpage)
+ end
+end
+
+function datasets.getdata(name,tag,key,default)
+ local t = collected[name]
+ if t == nil then
+ if trace_datasets then
+ report_dataset("error: unknown dataset, name %a",name)
+ end
+ elseif type(t) ~= "table" then
+ return t
+ else
+ t = t[tag] or t[tonumber(tag)]
+ if not t then
+ if trace_datasets then
+ report_dataset("error: unknown dataset, name %a, tag %a",name,tag)
+ end
+ elseif key then
+ return t[key] or default
+ else
+ return t
+ end
+ end
+ return default
+end
+
+local function setdataset(settings)
+ settings.convert = true
+ local name, tag = setdata(settings)
+ if settings.delay ~= v_yes then
+ --
+ else
+ context(new_latelua { action = job.datasets.extend, name = name, tag = tag })
+ end
+end
+
+local cache = table.setmetatableindex(function(t,k)
+ local v = table.load(k..".tuc")
+ if v then
+ v = v.job
+ if v then
+ v = v.datasets
+ if v then
+ v = v.collected
+ end
+ end
+ end
+ if not v then
+ v = { }
+ if trace_datasets then
+ report_dataset("error: unknown dataset job %a",k)
+ end
+ end
+ t[k] = v
+ return v
+end)
+
+local function datasetvariable(name,tag,key,cache)
+ local t = (cache or collected)[name]
+ if t == nil then
+ if trace_datasets then
+ report_dataset("error: unknown dataset, name %a, tag %a, not passed to tex",name) -- no tag
+ end
+ elseif type(t) ~= "table" then
+ context(tostring(t))
+ else
+ t = t and (t[tag] or t[tonumber(tag)])
+ if not t then
+ if trace_datasets then
+ report_dataset("error: unknown dataset, name %a, tag %a, not passed to tex",name,tag)
+ end
+ elseif type(t) == "table" then
+ local s = t[key]
+ if type(s) ~= "table" then
+ context(tostring(s))
+ elseif trace_datasets then
+ report_dataset("error: unknown dataset, name %a, tag %a, not passed to tex",name,tag)
+ end
+ end
+ end
+end
+
+local function datasetvariablefromjob(jobname,name,tag,key)
+    datasetvariable(name,tag,key,cache[jobname])
+end
+
+implement {
+ name = "setdataset",
+ actions = setdataset,
+ arguments = {
+ {
+ { "name" },
+ { "tag" },
+ { "delay" },
+ { "data" },
+ }
+ }
+}
+
+implement {
+ name = "datasetvariable",
+ actions = datasetvariable,
+ arguments = "3 strings",
+}
+
+implement {
+ name = "datasetvariablefromjob",
+ arguments = { "string", "string", "string", "string" },
+ actions = datasetvariablefromjob
+}
diff --git a/tex/context/base/mkxl/core-dat.mkxl b/tex/context/base/mkxl/core-dat.mkxl
index ab40d874c..6d7d1bd14 100644
--- a/tex/context/base/mkxl/core-dat.mkxl
+++ b/tex/context/base/mkxl/core-dat.mkxl
@@ -1,6 +1,6 @@
%D \module
%D [ file=core-dat,
-%D version=20122.04.17, % replaces core-two from 1997.03.31,
+%D version=2021.04.17, % replaces core-two from 1997.03.31,
%D title=\CONTEXT\ Core Macros,
%D subtitle=Multipass Datasets,
%D author=Hans Hagen,
@@ -42,7 +42,7 @@
\unprotect
-\registerctxluafile{core-dat}{}
+\registerctxluafile{core-dat}{autosuffix}
\installcorenamespace{dataset}
@@ -78,50 +78,4 @@
\expandafter\clf_datasetvariable
\fi}
-\installcorenamespace{pagestate}
-\installcorenamespace{pagestatecounter}
-
-\installcommandhandler \??pagestate {pagestate} \??pagestate
-
-\def\syst_pagestates_allocate
- {\expandafter\newinteger\csname\??pagestatecounter\currentpagestate\endcsname}
-
-\appendtoks
- \syst_pagestates_allocate
-\to \everydefinepagestate
-
-\setuppagestate
- [\c!delay=\v!yes]
-
-\permanent\tolerant\protected\def\setpagestate[#1]#*[#2]%
- {\begingroup
- \edef\currentpagestate{#1}%
- \ifcsname\??pagestatecounter\currentpagestate\endcsname
- \scratchcounter\lastnamedcs
- \advanceby\scratchcounter\plusone
- \else
- \scratchcounter\plusone
- \syst_pagestates_allocate
- \fi
- \global\csname\??pagestatecounter\currentpagestate\endcsname\scratchcounter
- \clf_setpagestate
- name {\currentpagestate}%
- tag {\ifparameter#2\or#2\else\number\scratchcounter\fi}%
- delay {\pagestateparameter\c!delay}%
- \relax
- \endgroup}
-
-\permanent\protected\def\autosetpagestate#1%
- {\setpagestate[#1]\relax}
-
-\permanent\def\autopagestatenumber#1{\begincsname\??pagestatecounter#1\endcsname}
-
-\permanent\def\pagestaterealpage #1#2{\clf_pagestaterealpage {#1}{#2}}
-\permanent\def\setpagestaterealpageno#1#2{\clf_setpagestaterealpageno{#1}{#2}}
-\permanent\def\pagestaterealpageorder#1#2{\clf_pagestaterealpageorder{#1}#2\relax}
-
-\permanent\def\autopagestaterealpage #1{\clf_pagestaterealpage {#1}{\number\autopagestatenumber{#1}}}
-\permanent\def\setautopagestaterealpageno#1{\clf_setpagestaterealpageno{#1}{\number\autopagestatenumber{#1}}}
-\permanent\def\autopagestaterealpageorder#1{\clf_pagestaterealpageorder{#1}\numexpr\autopagestatenumber{#1}\relax}
-
\protect
diff --git a/tex/context/base/mkxl/core-pag.lmt b/tex/context/base/mkxl/core-pag.lmt
new file mode 100644
index 000000000..219171d42
--- /dev/null
+++ b/tex/context/base/mkxl/core-pag.lmt
@@ -0,0 +1,160 @@
+if not modules then modules = { } end modules ['core-pag'] = {
+ version = 1.001,
+    comment   = "companion to core-pag.mkxl",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- This module provides the (multipass) page state tracking that was split off from
+-- core-dat: it records on which real page an item ends up and in what order.
+
+local tonumber = tonumber
+
+local context = context
+local ctx_latelua = context.latelua
+
+local trace_pagestates = false trackers.register("job.pagestates", function(v) trace_pagestates = v end)
+
+local report_pagestate = logs.reporter("pagestate")
+
+local allocate = utilities.storage.allocate
+
+local texgetcount = tex.getcount
+local texsetcount = tex.setcount
+
+local new_latelua = nodes.pool.latelua
+
+local implement = interfaces.implement
+local getnamespace = interfaces.getnamespace
+
+local c_realpageno = tex.iscount("realpageno")
+local c_realpagestateno = tex.iscount("realpagestateno")
+
+local collected = allocate()
+local tobesaved = allocate()
+
+local pagestates = {
+ collected = collected,
+ tobesaved = tobesaved,
+}
+
+job.pagestates = pagestates
+
+local function initializer()
+ collected = pagestates.collected
+ tobesaved = pagestates.tobesaved
+end
+
+job.register("job.pagestates.collected", tobesaved, initializer, nil)
+
+table.setmetatableindex(tobesaved, "table")
+
+local function setstate(settings)
+ local name = settings.name
+ local tag = settings.tag
+ local list = tobesaved[name]
+ if not tag then
+ tag = #list + 1
+ else
+ tag = tonumber(tag) or tag -- autonumber saves keys
+ end
+ local realpage = texgetcount(c_realpageno)
+ local data = realpage
+ list[tag] = data
+ if trace_pagestates then
+ report_pagestate("action %a, name %a, tag %a, preset %a","set",name,tag,realpage)
+ end
+ return name, tag, data
+end
+
+local function extend(name,tag)
+ local realpage = texgetcount(c_realpageno)
+ if trace_pagestates then
+ report_pagestate("action %a, name %a, tag %a, preset %a","synchronize",name,tag,realpage)
+ end
+ tobesaved[name][tag] = realpage
+end
+
+local function realpage(name,tag,default)
+ local t = collected[name]
+ if t then
+ t = t[tag] or t[tonumber(tag)]
+ if t then
+ return tonumber(t or default)
+ elseif trace_pagestates then
+ report_pagestate("error: unknown dataset, name %a, tag %a",name,tag)
+ end
+ elseif trace_pagestates then
+ report_pagestate("error: unknown dataset, name %a, tag %a",name) -- nil
+ end
+ return default
+end
+
+local function realpageorder(name,tag)
+ local t = collected[name]
+ if t then
+ local p = t[tag]
+ if p then
+ local n = 1
+ for i=tag-1,1,-1 do
+ if t[i] == p then
+ n = n +1
+ end
+ end
+ return n
+ end
+ end
+ return 0
+end
+
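+-- For example, if numeric tags 1 upto 4 were recorded on real pages 3, 4, 4 and 4,
+-- then realpageorder(name,3) is 2 and realpageorder(name,4) is 3: the result is the
+-- position of the given tag among the tags that ended up on the same real page.
+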
+pagestates.setstate = setstate
+pagestates.extend = extend
+pagestates.realpage = realpage
+pagestates.realpageorder = realpageorder
+
+function pagestates.countervalue(name)
+ return name and texgetcount(getnamespace("pagestatecounter") .. name) or 0
+end
+
+local function setpagestate(settings)
+ local name, tag = setstate(settings)
+ -- context(new_latelua(function() extend(name,tag) end))
+ ctx_latelua(function() extend(name,tag) end)
+end
+
+local function setpagestaterealpageno(name,tag)
+ local t = collected[name]
+ t = t and (t[tag] or t[tonumber(tag)])
+ texsetcount("realpagestateno",t or texgetcount(c_realpageno))
+end
+
+implement {
+ name = "setpagestate",
+ actions = setpagestate,
+ arguments = {
+ {
+ { "name" },
+ { "tag" },
+ { "delay" },
+ }
+ }
+}
+
+implement {
+ name = "pagestaterealpage",
+ actions = { realpage, context },
+ arguments = "2 strings",
+}
+
+implement {
+ name = "setpagestaterealpageno",
+ actions = setpagestaterealpageno,
+ arguments = "2 strings",
+}
+
+implement {
+ name = "pagestaterealpageorder",
+ actions = { realpageorder, context },
+ arguments = { "string", "integer" }
+}
diff --git a/tex/context/base/mkxl/core-pag.mkxl b/tex/context/base/mkxl/core-pag.mkxl
new file mode 100644
index 000000000..43b398b16
--- /dev/null
+++ b/tex/context/base/mkxl/core-pag.mkxl
@@ -0,0 +1,68 @@
+%D \module
+%D [ file=core-pag,
+%D version=2023.03.23, % moved from core-dat
+%D title=\CONTEXT\ Core Macros,
+%D subtitle=Multipass Pagestate,
+%D author=Hans Hagen,
+%D date=\currentdate,
+%D copyright={PRAGMA ADE \& \CONTEXT\ Development Team}]
+%C
+%C This module is part of the \CONTEXT\ macro||package and is
+%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
+%C details.
+
+\writestatus{loading}{ConTeXt Core Macros / Multipass Pagestate}
+
+\unprotect
+
+\newinteger\realpagestateno
+
+\registerctxluafile{core-pag}{autosuffix}
+
+\installcorenamespace{pagestate}
+\installcorenamespace{pagestatecounter}
+
+\installcommandhandler \??pagestate {pagestate} \??pagestate
+
+\def\syst_pagestates_allocate
+ {\expandafter\newinteger\csname\??pagestatecounter\currentpagestate\endcsname}
+
+\appendtoks
+ \syst_pagestates_allocate
+\to \everydefinepagestate
+
+\setuppagestate
+ [\c!delay=\v!yes]
+
+\permanent\tolerant\protected\def\setpagestate[#1]#*[#2]%
+ {\begingroup
+ \edef\currentpagestate{#1}%
+ \ifcsname\??pagestatecounter\currentpagestate\endcsname
+ \scratchcounter\lastnamedcs
+ \advanceby\scratchcounter\plusone
+ \else
+ \scratchcounter\plusone
+ \syst_pagestates_allocate
+ \fi
+ \global\csname\??pagestatecounter\currentpagestate\endcsname\scratchcounter
+ \clf_setpagestate
+ name {\currentpagestate}%
+ tag {\ifparameter#2\or#2\else\number\scratchcounter\fi}%
+ delay {\pagestateparameter\c!delay}%
+ \relax
+ \endgroup}
+
+\permanent\protected\def\autosetpagestate#1%
+ {\setpagestate[#1]\relax}
+
+\permanent\def\autopagestatenumber#1{\begincsname\??pagestatecounter#1\endcsname}
+
+\permanent\def\pagestaterealpage #1#2{\clf_pagestaterealpage {#1}{#2}}
+\permanent\def\setpagestaterealpageno#1#2{\clf_setpagestaterealpageno{#1}{#2}}
+\permanent\def\pagestaterealpageorder#1#2{\clf_pagestaterealpageorder{#1}#2\relax}
+
+\permanent\def\autopagestaterealpage #1{\clf_pagestaterealpage {#1}{\number\autopagestatenumber{#1}}}
+\permanent\def\setautopagestaterealpageno#1{\clf_setpagestaterealpageno{#1}{\number\autopagestatenumber{#1}}}
+\permanent\def\autopagestaterealpageorder#1{\clf_pagestaterealpageorder{#1}\numexpr\autopagestatenumber{#1}\relax}
+
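+%D A minimal usage sketch (the state name is made up; \definepagestate and
+%D \setuppagestate come from the command handler installed above):
+%D
+%D \definepagestate[somestate]
+%D \autosetpagestate{somestate}
+%D ... \autopagestaterealpage{somestate} ...
+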
+\protect
diff --git a/tex/context/base/mkxl/core-two.lmt b/tex/context/base/mkxl/core-two.lmt
new file mode 100644
index 000000000..7ea42374e
--- /dev/null
+++ b/tex/context/base/mkxl/core-two.lmt
@@ -0,0 +1,210 @@
+if not modules then modules = { } end modules ['core-two'] = {
+ version = 1.001,
+ comment = "companion to core-two.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- This is actually one of the oldest MkIV files and basically a port of MkII but
+-- the old usage has long been phased out. Also, the public part is now handled by
+-- datasets which makes this a more private store.
+
+-- local next = next
+-- local remove, concat = table.remove, table.concat
+
+local allocate = utilities.storage.allocate
+
+local collected = allocate()
+local tobesaved = allocate()
+
+local jobpasses = {
+ collected = collected,
+ tobesaved = tobesaved,
+}
+
+job.passes = jobpasses
+
+local function initializer()
+ collected = jobpasses.collected
+ tobesaved = jobpasses.tobesaved
+end
+
+job.register('job.passes.collected', tobesaved, initializer, nil)
+
+function jobpasses.getcollected(id)
+ return collected[id] or { }
+end
+
+function jobpasses.gettobesaved(id)
+ local t = tobesaved[id]
+ if not t then
+ t = { }
+ tobesaved[id] = t
+ end
+ return t
+end
+
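+-- A minimal sketch of how this (now private) store is used from Lua:
+--
+-- local t = job.passes.gettobesaved("mylist")
+-- t[#t+1] = "some value"                      -- saved in the utility file
+-- local c = job.passes.getcollected("mylist") -- available in the next run
+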
+-- local function define(id)
+-- local p = tobesaved[id]
+-- if not p then
+-- p = { }
+-- tobesaved[id] = p
+-- end
+-- return p
+-- end
+--
+-- local function save(id,str,index)
+-- local jti = define(id)
+-- if index then
+-- jti[index] = str
+-- else
+-- jti[#jti+1] = str
+-- end
+-- end
+--
+-- local function savetagged(id,tag,str)
+-- local jti = define(id)
+-- jti[tag] = str
+-- end
+--
+-- local function getdata(id,index,default)
+-- local jti = collected[id]
+-- local value = jti and jti[index]
+-- return value ~= "" and value or default or ""
+-- end
+--
+-- local function getfield(id,index,tag,default)
+-- local jti = collected[id]
+-- jti = jti and jti[index]
+-- local value = jti and jti[tag]
+-- return value ~= "" and value or default or ""
+-- end
+--
+-- local function getcollected(id)
+-- return collected[id] or { }
+-- end
+--
+-- local function gettobesaved(id)
+-- return define(id)
+-- end
+--
+-- local function get(id)
+-- local jti = collected[id]
+-- if jti and #jti > 0 then
+-- return remove(jti,1)
+-- end
+-- end
+--
+-- local function first(id)
+-- local jti = collected[id]
+-- return jti and jti[1]
+-- end
+--
+-- local function last(id)
+-- local jti = collected[id]
+-- return jti and jti[#jti]
+-- end
+--
+-- local function find(id,n)
+-- local jti = collected[id]
+-- return jti and jti[n] or nil
+-- end
+--
+-- local function count(id)
+-- local jti = collected[id]
+-- return jti and #jti or 0
+-- end
+--
+-- local function list(id)
+-- local jti = collected[id]
+-- if jti then
+-- return concat(jti,',')
+-- end
+-- end
+--
+-- local function inlist(id,str)
+-- local jti = collected[id]
+-- if jti then
+-- for _, v in next, jti do
+-- if v == str then
+-- return true
+-- end
+-- end
+-- end
+-- return false
+-- end
+--
+-- local check = first
+--
+-- jobpasses.define = define
+-- jobpasses.save = save
+-- jobpasses.savetagged = savetagged
+-- jobpasses.getdata = getdata
+-- jobpasses.getfield = getfield
+-- jobpasses.getcollected = getcollected
+-- jobpasses.gettobesaved = gettobesaved
+-- jobpasses.get = get
+-- jobpasses.first = first
+-- jobpasses.last = last
+-- jobpasses.find = find
+-- jobpasses.list = list
+-- jobpasses.count = count
+-- jobpasses.check = check
+-- jobpasses.inlist = inlist
+--
+-- -- interface
+--
+-- local implement = interfaces.implement
+--
+-- implement { name = "gettwopassdata", actions = { get, context }, arguments = "string" }
+-- implement { name = "getfirsttwopassdata",actions = { first, context }, arguments = "string" }
+-- implement { name = "getlasttwopassdata", actions = { last, context }, arguments = "string" }
+-- implement { name = "findtwopassdata", actions = { find, context }, arguments = "2 strings" }
+-- implement { name = "gettwopassdatalist", actions = { list, context }, arguments = "string" }
+-- implement { name = "counttwopassdata", actions = { count, context }, arguments = "string" }
+-- implement { name = "checktwopassdata", actions = { check, context }, arguments = "string" }
+--
+-- implement {
+-- name = "definetwopasslist",
+-- actions = define,
+-- arguments = "string"
+-- }
+--
+-- implement {
+-- name = "savetwopassdata",
+-- actions = save,
+-- arguments = "2 strings",
+-- }
+--
+-- implement {
+-- name = "savetaggedtwopassdata",
+-- actions = savetagged,
+-- arguments = "3 strings",
+-- }
+--
+-- implement {
+-- name = "doifelseintwopassdata",
+-- actions = { inlist, commands.doifelse },
+-- arguments = "2 strings",
+-- }
+--
+-- -- local ctx_latelua = context.latelua
+--
+-- -- implement {
+-- -- name = "lazysavetwopassdata",
+-- -- arguments = "3 strings",
+-- -- public = true,
+-- -- actions = function(a,b,c)
+-- -- ctx_latelua(function() save(a,c) end)
+-- -- end,
+-- -- }
+--
+-- -- implement {
+-- -- name = "lazysavetaggedtwopassdata",
+-- -- arguments = "3 strings",
+-- -- public = true,
+-- -- actions = function(a,b,c)
+-- -- ctx_latelua(function() savetagged(a,b,c) end)
+-- -- end,
+-- -- }
diff --git a/tex/context/base/mkxl/core-two.mkxl b/tex/context/base/mkxl/core-two.mkxl
index 38f03c7c4..10a7eec9e 100644
--- a/tex/context/base/mkxl/core-two.mkxl
+++ b/tex/context/base/mkxl/core-two.mkxl
@@ -1,6 +1,6 @@
%D \module
%D [ file=core-two, % moved from core-uti
-%D version=1997.03.31,
+%D version=1997.03.31, % stripped down 2023-03-21
%D title=\CONTEXT\ Core Macros,
%D subtitle=Two Pass Data,
%D author=Hans Hagen,
@@ -11,102 +11,110 @@
%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
%C details.
-\writestatus{loading}{ConTeXt Core Macros / Two Pass Data}
+%D The public interface is replaced by datasets and two pass data is now private
+%D to the engine. For the moment we keep some commands commented. The unused
+%D (second) argument is an inheritance from \MKII. If needed we can bring back
+%D a compatible interface.
-%D This is a rather old mechanism which has not changed much over time, apart from
-%D adding a few more selectors. This code used to be part of \type {core-uti}. The
-%D following examples demonstrate the interface.
-%D
-%D \startbuffer
-%D \definetwopasslist{test-1}
-%D
-%D \gettwopassdatalist{test-1} [\twopassdatalist=]
-%D \checktwopassdata {test-1} [\twopassdata=]
-%D \checktwopassdata {test-1} [\twopassdata=]
-%D \gettwopassdata {test-1} [\twopassdata=]
-%D \gettwopassdata {test-1} [\twopassdata=]
-%D
-%D \definetwopasslist{test-2}
-%D
-%D \lazysavetwopassdata{test-2}{1}{x}
-%D \lazysavetwopassdata{test-2}{2}{y}
-%D \lazysavetwopassdata{test-2}{3}{z}
-%D
-%D \gettwopassdatalist{test-2} [\twopassdatalist=x,y,z]
-%D \checktwopassdata {test-2} [\twopassdata=x]
-%D \checktwopassdata {test-2} [\twopassdata=x]
-%D \gettwopassdata {test-2} [\twopassdata=x]
-%D \gettwopassdata {test-2} [\twopassdata=y]
-%D \gettwopassdata {test-2} [\twopassdata=z]
-%D \gettwopassdata {test-2} [\twopassdata=]
-%D
-%D \definetwopasslist{test-3}
-%D
-%D \lazysavetaggedtwopassdata{test-3}{1}{x}{a}
-%D \lazysavetaggedtwopassdata{test-3}{2}{y}{b}
-%D \lazysavetaggedtwopassdata{test-3}{3}{z}{c}
-%D
-%D \findtwopassdata{test-3}{x} [\twopassdata=a]
-%D \findtwopassdata{test-3}{y} [\twopassdata=b]
-%D \findtwopassdata{test-3}{z} [\twopassdata=c]
-%D \findtwopassdata{test-3}{w} [\twopassdata=]
-%D
-%D \definetwopasslist{test-4}
-%D
-%D \lazysavetwopassdata{test-4}{1}{A}
-%D \lazysavetwopassdata{test-4}{2}{B}
-%D \lazysavetwopassdata{test-4}{3}{C}
-%D
-%D \getfirsttwopassdata{test-4} [\twopassdata=A]
-%D \getlasttwopassdata {test-4} [\twopassdata=C]
-%D \getfirsttwopassdata{test-4} [\twopassdata=A]
-%D \getlasttwopassdata {test-4} [\twopassdata=C]
-%D \getfromtwopassdata {test-4}{1} [\twopassdata=A]
-%D \getfromtwopassdata {test-4}{3} [\twopassdata=C]
-%D \getfromtwopassdata {test-4}{2} [\twopassdata=B]
-%D \stopbuffer
-%D
-%D \getbuffer \typebuffer
+\writestatus{loading}{ConTeXt Core Macros / Two Pass Data}
\unprotect
-\registerctxluafile{core-two}{}
-
-\permanent\def\immediatesavetwopassdata #1#2#3{\normalexpanded{\noexpand\clf_savetwopassdata{#1}{#3}}}
-\permanent\def \lazysavetwopassdata #1#2#3{\normalexpanded{\noexpand\ctxlatecommand{savetwopassdata("#1","#3")}}}
-\permanent\let \savetwopassdata \lazysavetwopassdata
-\permanent\def \savetaggedtwopassdata#1#2#3#4{\normalexpanded{\noexpand\clf_savetaggedtwopassdata{#1}{#3}{#4}}}
-\permanent\def\lazysavetaggedtwopassdata#1#2#3#4{\normalexpanded{\noexpand\ctxlatecommand{savetaggedtwopassdata("#1",'#3',"#4")}}}
-
-% temp hack: needs a proper \starteverytimeluacode
-
-\setfalse\twopassdatafound
-
-\mutable\lettonothing\twopassdata
-\mutable\lettonothing\twopassdatalist
-
-\mutable\let\noftwopassitems\!!zeropoint
-
-\def\syst_twopass_check % can be delegated to lua once obsolete is gone
- {\ifempty\twopassdata
- \setfalse\twopassdatafound
- \else
- \settrue\twopassdatafound
- \fi}
-
-\permanent\protected\def\definetwopasslist #1{\clf_definetwopasslist{#1}}
-\permanent\protected\def\gettwopassdata #1{\edef\twopassdata {\clf_gettwopassdata {#1}}\syst_twopass_check}
-\permanent\protected\def\checktwopassdata #1{\edef\twopassdata {\clf_checktwopassdata {#1}}\syst_twopass_check}
-\permanent\protected\def\findtwopassdata #1#2{\edef\twopassdata {\clf_findtwopassdata {#1}{#2}}\syst_twopass_check}
-\permanent\protected\def\getfirsttwopassdata #1{\edef\twopassdata {\clf_getfirsttwopassdata {#1}}\syst_twopass_check}
-\permanent\protected\def\getlasttwopassdata #1{\edef\twopassdata {\clf_getlasttwopassdata {#1}}%
- \edef\noftwopassitems{\clf_counttwopassdata {#1}}\syst_twopass_check}
-\permanent\protected\def\getnamedtwopassdatalist#1#2{\edef #1{\clf_gettwopassdatalist {#2}}}
-\permanent\protected\def\gettwopassdatalist #1{\edef\twopassdatalist{\clf_gettwopassdatalist {#1}}}
-
-\permanent\protected\def\doifelseintwopassdata #1#2{\clf_doifelseintwopassdata{#1}{#2}}
+\registerctxluafile{core-two}{autosuffix}
-\aliased\let\doifintwopassdataelse\doifelseintwopassdata
-\aliased\let\getfromtwopassdata \findtwopassdata
+% %D This is a rather old mechanism which has not changed much over time, apart from
+% %D adding a few more selectors. This code used to be part of \type {core-uti}. The
+% %D following examples demonstrate the interface.
+% %D
+% %D \startbuffer
+% %D \definetwopasslist{test-1}
+% %D
+% %D \gettwopassdatalist{test-1} [\twopassdatalist=]
+% %D \checktwopassdata {test-1} [\twopassdata=]
+% %D \checktwopassdata {test-1} [\twopassdata=]
+% %D \gettwopassdata {test-1} [\twopassdata=]
+% %D \gettwopassdata {test-1} [\twopassdata=]
+% %D
+% %D \definetwopasslist{test-2}
+% %D
+% %D \lazysavetwopassdata{test-2}{1}{x}
+% %D \lazysavetwopassdata{test-2}{2}{y}
+% %D \lazysavetwopassdata{test-2}{3}{z}
+% %D
+% %D \gettwopassdatalist{test-2} [\twopassdatalist=x,y,z]
+% %D \checktwopassdata {test-2} [\twopassdata=x]
+% %D \checktwopassdata {test-2} [\twopassdata=x]
+% %D \gettwopassdata {test-2} [\twopassdata=x]
+% %D \gettwopassdata {test-2} [\twopassdata=y]
+% %D \gettwopassdata {test-2} [\twopassdata=z]
+% %D \gettwopassdata {test-2} [\twopassdata=]
+% %D
+% %D \definetwopasslist{test-3}
+% %D
+% %D \lazysavetaggedtwopassdata{test-3}{1}{x}{a}
+% %D \lazysavetaggedtwopassdata{test-3}{2}{y}{b}
+% %D \lazysavetaggedtwopassdata{test-3}{3}{z}{c}
+% %D
+% %D \findtwopassdata{test-3}{x} [\twopassdata=a]
+% %D \findtwopassdata{test-3}{y} [\twopassdata=b]
+% %D \findtwopassdata{test-3}{z} [\twopassdata=c]
+% %D \findtwopassdata{test-3}{w} [\twopassdata=]
+% %D
+% %D \definetwopasslist{test-4}
+% %D
+% %D \lazysavetwopassdata{test-4}{1}{A}
+% %D \lazysavetwopassdata{test-4}{2}{B}
+% %D \lazysavetwopassdata{test-4}{3}{C}
+% %D
+% %D \getfirsttwopassdata{test-4} [\twopassdata=A]
+% %D \getlasttwopassdata {test-4} [\twopassdata=C]
+% %D \getfirsttwopassdata{test-4} [\twopassdata=A]
+% %D \getlasttwopassdata {test-4} [\twopassdata=C]
+% %D \getfromtwopassdata {test-4}{1} [\twopassdata=A]
+% %D \getfromtwopassdata {test-4}{3} [\twopassdata=C]
+% %D \getfromtwopassdata {test-4}{2} [\twopassdata=B]
+% %D \stopbuffer
+% %D
+% %D \getbuffer \typebuffer
+%
+% %D The next code can be simplified (read: defined at the \LUA\ end) but we never use this
+% %D mechanism, which has been replaced by datasets, so it's not worth the effort.
+%
+% \permanent\def\immediatesavetwopassdata #1#2#3{\normalexpanded{\noexpand\clf_savetwopassdata{#1}{#3}}}
+% \permanent\def \lazysavetwopassdata #1#2#3{\normalexpanded{\noexpand\ctxlatecommand{savetwopassdata("#1","#3")}}}
+% \permanent\let \savetwopassdata \lazysavetwopassdata
+% \permanent\def \savetaggedtwopassdata#1#2#3#4{\normalexpanded{\noexpand\clf_savetaggedtwopassdata{#1}{#3}{#4}}}
+% \permanent\def\lazysavetaggedtwopassdata#1#2#3#4{\normalexpanded{\noexpand\ctxlatecommand{savetaggedtwopassdata("#1","#3","#4")}}}
+%
+% % temp hack: needs a proper \starteverytimeluacode
+%
+% \setfalse\twopassdatafound
+%
+% \mutable\lettonothing\twopassdata
+% \mutable\lettonothing\twopassdatalist
+%
+% \mutable\let\noftwopassitems\!!zeropoint
+%
+% \def\syst_twopass_check % can be delegated to lua once obsolete is gone
+% {\ifempty\twopassdata
+% \setfalse\twopassdatafound
+% \else
+% \settrue\twopassdatafound
+% \fi}
+%
+% \permanent\protected\def\definetwopasslist #1{\clf_definetwopasslist{#1}}
+% \permanent\protected\def\gettwopassdata #1{\edef\twopassdata {\clf_gettwopassdata {#1}}\syst_twopass_check}
+% \permanent\protected\def\checktwopassdata #1{\edef\twopassdata {\clf_checktwopassdata {#1}}\syst_twopass_check}
+% \permanent\protected\def\findtwopassdata #1#2{\edef\twopassdata {\clf_findtwopassdata {#1}{#2}}\syst_twopass_check}
+% \permanent\protected\def\getfirsttwopassdata #1{\edef\twopassdata {\clf_getfirsttwopassdata {#1}}\syst_twopass_check}
+% \permanent\protected\def\getlasttwopassdata #1{\edef\twopassdata {\clf_getlasttwopassdata {#1}}%
+% \edef\noftwopassitems{\clf_counttwopassdata {#1}}\syst_twopass_check}
+% \permanent\protected\def\getnamedtwopassdatalist#1#2{\edef #1{\clf_gettwopassdatalist {#2}}}
+% \permanent\protected\def\gettwopassdatalist #1{\edef\twopassdatalist{\clf_gettwopassdatalist {#1}}}
+%
+% \permanent\protected\def\doifelseintwopassdata #1#2{\clf_doifelseintwopassdata{#1}{#2}}
+%
+% \aliased\let\doifintwopassdataelse\doifelseintwopassdata
+% \aliased\let\getfromtwopassdata \findtwopassdata
\protect \endinput
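As a rough companion to the interface demonstrated above, the following is a minimal Lua sketch, with invented names, of how such a two pass store can be modelled; the actual implementation in core-two differs in detail.

-- invented names: a toy two pass store, saved in one run, fetched in the next
local lists = { }

local function definetwopasslist(id)
    lists[id] = lists[id] or { data = { }, order = { }, index = 0 }
end

local function savetwopassdata(id,value,tag)
    local l = lists[id]
    l.order[#l.order+1] = value
    if tag then
        l.data[tag] = value
    end
end

local function gettwopassdata(id) -- stepwise access, as with \gettwopassdata
    local l = lists[id]
    l.index = l.index + 1
    return l.order[l.index] or ""
end

local function findtwopassdata(id,tag) -- tagged access, as with \findtwopassdata
    return lists[id].data[tag] or ""
end

local function gettwopassdatalist(id) -- the whole list, as with \gettwopassdatalist
    return table.concat(lists[id].order,",")
end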
diff --git a/tex/context/base/mkxl/core-uti.lmt b/tex/context/base/mkxl/core-uti.lmt
index 966428b36..e4b6606e3 100644
--- a/tex/context/base/mkxl/core-uti.lmt
+++ b/tex/context/base/mkxl/core-uti.lmt
@@ -6,16 +6,13 @@ if not modules then modules = { } end modules ['core-uti'] = {
license = "see context related readme files"
}
--- todo: keep track of changes here (hm, track access, and only true when
--- accessed and changed)
-
---[[ldx--
-<p>A utility file has always been part of <l n='context'/> and with
-the move to <l n='luatex'/> we also moved a lot of multi-pass info
-to a <l n='lua'/> table. Instead of loading a <l n='tex'/> based
-utility file under different setups, we now load a table once. This
-saves much runtime but at the cost of more memory usage.</p>
---ldx]]--
+-- A utility file has always been part of ConTeXt and with the move to LuaTeX we
+-- also moved a lot of multi-pass info to a Lua table. Instead of loading a TeX
+-- based utility file under different setups, we now load a table once. This saves
+-- much runtime but at the cost of more memory usage.
+--
+-- In the meantime the overhead is a bit more due to the amount of data being saved
+-- and more aggressive compacting.
local math = math
local next, type, tostring, tonumber, setmetatable, load = next, type, tostring, tonumber, setmetatable, load
@@ -46,14 +43,9 @@ local job = job
job.version = 1.33
job.packversion = 1.02
--- some day we will implement loading of other jobs and then we need
--- job.jobs
-
---[[ldx--
-<p>Variables are saved using in the previously defined table and passed
-onto <l n='tex'/> using the following method. Of course one can also
-directly access the variable using a <l n='lua'/> call.</p>
---ldx]]--
+-- Variables are saved in the previously defined table and passed on to TeX
+-- using the following method. Of course one can also directly access the variable
+-- using a Lua call.
local savelist, comment = { }, { }
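For illustration, a minimal sketch of the usual two table pattern behind this; the wrapper names are invented, only the idea matters: values written in the current run end up in a tobesaved table, values from the previous run show up in a collected table once the utility file has been loaded.

-- wrapper names are invented; only the two table idea matters here
local tobesaved = { }
local collected = { }

local function setvariable(name,value)
    tobesaved[name] = value   -- written to the utility (tuc) file at the end of the run
end

local function getvariable(name,default)
    local v = collected[name] -- filled when the previous run's file is loaded
    if v == nil then
        v = default
    end
    return v
end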
@@ -382,6 +374,12 @@ function job.load(filename)
end
function job.loadother(filename)
+ local jobname = environment.jobname
+ if filename == jobname then
+ return
+ else
+ report_passes("integrating list %a into %a",filename,jobname)
+ end
statistics.starttiming(loadedfiles)
filename = file.addsuffix(filename,"tuc")
local unpacked = othercache[filename]
diff --git a/tex/context/base/mkxl/file-mod.lmt b/tex/context/base/mkxl/file-mod.lmt
index d10abf533..567387a3a 100644
--- a/tex/context/base/mkxl/file-mod.lmt
+++ b/tex/context/base/mkxl/file-mod.lmt
@@ -6,17 +6,11 @@ if not modules then modules = { } end modules ['file-mod'] = {
license = "see context related readme files"
}
--- This module will be redone! For instance, the prefixes will move to data-*
--- as they arr sort of generic along with home:// etc/.
-
--- context is not defined yet! todo! (we need to load tupp-fil after cld)
--- todo: move startreadingfile to lua and push regime there
-
---[[ldx--
-<p>It's more convenient to manipulate filenames (paths) in
-<l n='lua'/> than in <l n='tex'/>. These methods have counterparts
-at the <l n='tex'/> side.</p>
---ldx]]--
+-- This module will be redone! For instance, the prefixes will move to data-* as
+-- they are sort of generic, along with home:// etc.
+--
+-- It is more convenient to manipulate filenames (paths) in Lua than in TeX. The
+-- methods below have counterparts at the TeX end.
local format, find, concat, tonumber = string.format, string.find, table.concat, tonumber
local sortedhash = table.sortedhash
diff --git a/tex/context/base/mkxl/font-con.lmt b/tex/context/base/mkxl/font-con.lmt
index 073af7d2e..5a887d61d 100644
--- a/tex/context/base/mkxl/font-con.lmt
+++ b/tex/context/base/mkxl/font-con.lmt
@@ -22,11 +22,9 @@ local trace_scaling = false trackers.register("fonts.scaling", function(v)
local report_defining = logs.reporter("fonts","defining")
--- watch out: no negative depths and negative eights permitted in regular fonts
-
---[[ldx--
-<p>Here we only implement a few helper functions.</p>
---ldx]]--
+-- Watch out: no negative depths and negative heights are permitted in regular
+-- fonts. Also, the code in LMTX is a bit different. Here we only implement a
+-- few helper functions.
local fonts = fonts
local constructors = fonts.constructors or { }
@@ -53,11 +51,9 @@ constructors.loadedfonts = loadedfonts
----- scalecommands = fonts.helpers.scalecommands
---[[ldx--
-<p>We need to normalize the scale factor (in scaled points). This has to
-do with the fact that <l n='tex'/> uses a negative multiple of 1000 as
-a signal for a font scaled based on the design size.</p>
---ldx]]--
+-- We need to normalize the scale factor (in scaled points). This has to do with the
+-- fact that TeX uses a negative multiple of 1000 as a signal for a font scaled
+-- based on the design size.
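A minimal sketch of that convention (not the actual calculatescale code): a negative value is taken as a multiple of 1000 of the design size, a positive value is already in scaled points.

-- sketch only: negative means "scaled n", i.e. n/1000 of the design size
local function normalizescale(scaledpoints,designsize)
    if scaledpoints < 0 then
        scaledpoints = (- scaledpoints/1000) * (designsize or 10*65536)
    end
    return scaledpoints
end

-- normalizescale(-1200,10*65536) --> 786432, i.e. 12pt in scaled points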
local factors = {
pt = 65536.0,
@@ -112,33 +108,29 @@ function constructors.getmathparameter(tfmdata,name)
end
end
---[[ldx--
-<p>Beware, the boundingbox is passed as reference so we may not overwrite it
-in the process; numbers are of course copies. Here 65536 equals 1pt. (Due to
-excessive memory usage in CJK fonts, we no longer pass the boundingbox.)</p>
---ldx]]--
-
--- The scaler is only used for otf and afm and virtual fonts. If a virtual font has italic
--- correction make sure to set the hasitalics flag. Some more flags will be added in the
--- future.
-
---[[ldx--
-<p>The reason why the scaler was originally split, is that for a while we experimented
-with a helper function. However, in practice the <l n='api'/> calls are too slow to
-make this profitable and the <l n='lua'/> based variant was just faster. A days
-wasted day but an experience richer.</p>
---ldx]]--
+-- Beware, the boundingbox is passed as reference so we may not overwrite it in the
+-- process; numbers are of course copies. Here 65536 equals 1pt. (Due to excessive
+-- memory usage in CJK fonts, we no longer pass the boundingbox.)
+--
+-- The scaler is only used for OTF and AFM and virtual fonts. If a virtual font has
+-- italic correction make sure to set the hasitalics flag. Some more flags will be
+-- added in the future.
+--
+-- The reason why the scaler was originally split is that for a while we
+-- experimented with a helper function. However, in practice the API calls are too
+-- slow to make this profitable and the Lua based variant was just faster. A day
+-- wasted but an experience richer.
-- experimental, sharing kerns (unscaled and scaled) saves memory
-- local sharedkerns, basekerns = constructors.check_base_kerns(tfmdata)
-- loop over descriptions (afm and otf have descriptions, tfm not)
-- there is no need (yet) to assign a value to chr.tonunicode
-
+--
-- constructors.prepare_base_kerns(tfmdata) -- optimalization
-
--- we have target.name=metricfile and target.fullname=RealName and target.filename=diskfilename
--- when collapsing fonts, luatex looks as both target.name and target.fullname as ttc files
--- can have multiple subfonts
+--
+-- We have target.name = metricfile and target.fullname = RealName and
+-- target.filename = diskfilename when collapsing fonts. LuaTeX looks at both
+-- target.name and target.fullname because TTC files can have multiple subfonts.
function constructors.calculatescale(tfmdata,scaledpoints)
-- implemented in font-ctx.lmt
@@ -1008,9 +1000,7 @@ function constructors.finalize(tfmdata)
return tfmdata
end
---[[ldx--
-<p>A unique hash value is generated by:</p>
---ldx]]--
+-- A unique hash value is generated by:
local hashmethods = { }
constructors.hashmethods = hashmethods
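For illustration, a simplified sketch of how such a hash string can be composed from a sorted feature list; this is not one of the registered hash methods, just the general idea.

-- sketch: serialize a feature list into a stable string; table.sortedhash is
-- the sorted iterator used all over ConTeXt
local function simplehash(list)
    local t, n = { }, 0
    for k, v in table.sortedhash(list) do
        n = n + 1
        t[n] = k .. "=" .. tostring(v)
    end
    return table.concat(t," & ")
end

-- simplehash { script = "latn", liga = "yes" } --> "liga=yes & script=latn"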
@@ -1069,13 +1059,11 @@ hashmethods.normal = function(list)
end
end
---[[ldx--
-<p>In principle we can share tfm tables when we are in need for a font, but then
-we need to define a font switch as an id/attr switch which is no fun, so in that
-case users can best use dynamic features ... so, we will not use that speedup. Okay,
-when we get rid of base mode we can optimize even further by sharing, but then we
-loose our testcases for <l n='luatex'/>.</p>
---ldx]]--
+-- In principle we can share tfm tables when we are in need of a font, but then we
+-- need to define a font switch as an id/attr switch which is no fun, so in that
+-- case users can best use dynamic features ... so, we will not use that speedup.
+-- Okay, when we get rid of base mode we can optimize even further by sharing, but
+-- then we lose our test cases for LuaTeX.
function constructors.hashinstance(specification,force)
-- implemented in font-ctx.lmt
@@ -1407,10 +1395,7 @@ do
end
---[[ldx--
-<p>We need to check for default features. For this we provide
-a helper function.</p>
---ldx]]--
+-- We need to check for default features. For this we provide a helper function.
function constructors.checkedfeatures(what,features)
local defaults = handlers[what].features.defaults
diff --git a/tex/context/base/mkxl/font-ctx.lmt b/tex/context/base/mkxl/font-ctx.lmt
index 77953d64a..1d59ad728 100644
--- a/tex/context/base/mkxl/font-ctx.lmt
+++ b/tex/context/base/mkxl/font-ctx.lmt
@@ -529,19 +529,13 @@ do
end
---[[ldx--
-<p>So far we haven't really dealt with features (or whatever we want
-to pass along with the font definition. We distinguish the following
-situations:</p>
-situations:</p>
-
-<code>
-name:xetex like specs
-name@virtual font spec
-name*context specification
-</code>
---ldx]]--
-
+-- So far we haven't really dealt with features (or whatever we want to pass along
+-- with the font definition). We distinguish the following situations:
+--
+-- name:xetex like specs
+-- name@virtual font spec
+-- name*context specification
+--
-- Currently fonts are scaled while constructing the font, so we have to do scaling
-- of commands in the vf at that point using e.g. "local scale = g.parameters.factor
-- or 1" after all, we need to work with copies anyway and scaling needs to be done
@@ -2269,10 +2263,8 @@ dimenfactors.em = nil
dimenfactors["%"] = nil
dimenfactors.pct = nil
---[[ldx--
-<p>Before a font is passed to <l n='tex'/> we scale it. Here we also need
-to scale virtual characters.</p>
---ldx]]--
+-- Before a font is passed to TeX we scale it. Here we also need to scale virtual
+-- characters.
do
diff --git a/tex/context/base/mkxl/font-def.lmt b/tex/context/base/mkxl/font-def.lmt
index 6afeeb474..ea6b2d0c0 100644
--- a/tex/context/base/mkxl/font-def.lmt
+++ b/tex/context/base/mkxl/font-def.lmt
@@ -24,10 +24,9 @@ trackers.register("fonts.loading", "fonts.defining", "otf.loading", "afm.loading
local report_defining = logs.reporter("fonts","defining")
---[[ldx--
-<p>Here we deal with defining fonts. We do so by intercepting the
-default loader that only handles <l n='tfm'/>.</p>
---ldx]]--
+-- Here we deal with defining fonts. We do so by intercepting the default loader
+-- that only handles TFM files. We started out that way, but in the meantime we
+-- can hardly speak of TFM any more.
local nextfont = font.nextid
@@ -55,25 +54,18 @@ local designsizes = constructors.designsizes
local resolvefile = fontgoodies and fontgoodies.filenames and fontgoodies.filenames.resolve or function(s) return s end
---[[ldx--
-<p>We hardly gain anything when we cache the final (pre scaled)
-<l n='tfm'/> table. But it can be handy for debugging, so we no
-longer carry this code along. Also, we now have quite some reference
-to other tables so we would end up with lots of catches.</p>
---ldx]]--
-
---[[ldx--
-<p>We can prefix a font specification by <type>name:</type> or
-<type>file:</type>. The first case will result in a lookup in the
-synonym table.</p>
-
-<typing>
-[ name: | file: ] identifier [ separator [ specification ] ]
-</typing>
-
-<p>The following function split the font specification into components
-and prepares a table that will move along as we proceed.</p>
---ldx]]--
+-- We hardly gain anything when we cache the final (pre scaled) TFM table. But it
+-- can be handy for debugging, so we no longer carry this code along. Also, we now
+-- have quite some references to other tables so we would end up with lots of
+-- catches.
+--
+-- We can prefix a font specification by "name:" or "file:". The first case will
+-- result in a lookup in the synonym table.
+--
+-- [ name: | file: ] identifier [ separator [ specification ] ]
+--
+-- The following function splits the font specification into components and prepares
+-- a table that will move along as we proceed.
-- beware, we discard additional specs
--
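By way of illustration, a hypothetical splitter for such a specification; the real code in this module handles more cases, but the shape of the result is similar.

-- hypothetical splitter, good enough for "file:texgyrepagella-regular*default"
local function splitspecification(str)
    local lookup, rest = string.match(str,"^(%a+):(.*)$")
    if not lookup then
        lookup, rest = "name", str                  -- no prefix: assume a name lookup
    end
    local name, method, detail = string.match(rest,"^([^:%*@]+)([:%*@]?)(.*)$")
    return {
        lookup = lookup,                            -- "name" or "file"
        name   = name,
        method = method ~= "" and method or nil,    -- ":", "*" or "@"
        detail = detail ~= "" and detail or nil,    -- e.g. a feature set name
    }
end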
@@ -166,9 +158,7 @@ do
end
---[[ldx--
-<p>We can resolve the filename using the next function:</p>
---ldx]]--
+-- We can resolve the filename using the next function:
definers.resolvers = definers.resolvers or { }
local resolvers = definers.resolvers
@@ -261,23 +251,17 @@ function definers.resolve(specification)
return specification
end
---[[ldx--
-<p>The main read function either uses a forced reader (as determined by
-a lookup) or tries to resolve the name using the list of readers.</p>
-
-<p>We need to cache when possible. We do cache raw tfm data (from <l
-n='tfm'/>, <l n='afm'/> or <l n='otf'/>). After that we can cache based
-on specificstion (name) and size, that is, <l n='tex'/> only needs a number
-for an already loaded fonts. However, it may make sense to cache fonts
-before they're scaled as well (store <l n='tfm'/>'s with applied methods
-and features). However, there may be a relation between the size and
-features (esp in virtual fonts) so let's not do that now.</p>
-
-<p>Watch out, here we do load a font, but we don't prepare the
-specification yet.</p>
---ldx]]--
-
--- very experimental:
+-- The main read function either uses a forced reader (as determined by a lookup) or
+-- tries to resolve the name using the list of readers.
+--
+-- We need to cache when possible. We do cache raw tfm data (from TFM, AFM or OTF).
+-- After that we can cache based on specification (name) and size, that is, TeX only
+-- needs a number for an already loaded font. However, it may make sense to cache
+-- fonts before they're scaled as well (store TFM's with applied methods and
+-- features). That said, there may be a relation between the size and features (esp in
+-- virtual fonts) so let's not do that now.
+--
+-- Watch out, here we do load a font, but we don't prepare the specification yet.
function definers.applypostprocessors(tfmdata)
local postprocessors = tfmdata.postprocessors
@@ -431,17 +415,13 @@ function constructors.readanddefine(name,size) -- no id -- maybe a dummy first
return fontdata[id], id
end
---[[ldx--
-<p>So far the specifiers. Now comes the real definer. Here we cache
-based on id's. Here we also intercept the virtual font handler. Since
-it evolved stepwise I may rewrite this bit (combine code).</p>
-
-In the previously defined reader (the one resulting in a <l n='tfm'/>
-table) we cached the (scaled) instances. Here we cache them again, but
-this time based on id. We could combine this in one cache but this does
-not gain much. By the way, passing id's back to in the callback was
-introduced later in the development.</p>
---ldx]]--
+-- So far the specifiers. Now comes the real definer. Here we cache based on id's.
+-- Here we also intercept the virtual font handler.
+--
+-- In the previously defined reader (the one resulting in a TFM table) we cached the
+-- (scaled) instances. Here we cache them again, but this time based on id. We could
+-- combine this in one cache but this does not gain much. By the way, passing id's
+-- back in the callback was introduced later in the development.
function definers.registered(hash)
local id = internalized[hash]
diff --git a/tex/context/base/mkxl/font-fbk.lmt b/tex/context/base/mkxl/font-fbk.lmt
index bdc5265ae..09f20b42c 100644
--- a/tex/context/base/mkxl/font-fbk.lmt
+++ b/tex/context/base/mkxl/font-fbk.lmt
@@ -10,10 +10,6 @@ local cos, tan, rad, format = math.cos, math.tan, math.rad, string.format
local utfbyte, utfchar = utf.byte, utf.char
local next = next
---[[ldx--
-<p>This is very experimental code!</p>
---ldx]]--
-
local trace_visualize = false trackers.register("fonts.composing.visualize", function(v) trace_visualize = v end)
local trace_define = false trackers.register("fonts.composing.define", function(v) trace_define = v end)
diff --git a/tex/context/base/mkxl/font-fil.mklx b/tex/context/base/mkxl/font-fil.mklx
index 79535ea11..73348645d 100644
--- a/tex/context/base/mkxl/font-fil.mklx
+++ b/tex/context/base/mkxl/font-fil.mklx
@@ -294,7 +294,7 @@
% pre-expansion.
\def\font_helpers_update_font_class_parameters
- {\edef\m_font_class_direction {\begincsname\??fontclass\fontclass\fontstyle\s!direction \endcsname}%
+ {%edef\m_font_class_direction {\begincsname\??fontclass\fontclass\fontstyle\s!direction \endcsname}%
\edef\m_font_class_features {\begincsname\??fontclass\fontclass\fontstyle\s!features \endcsname}%
\edef\m_font_class_fallbacks {\begincsname\??fontclass\fontclass\fontstyle\s!fallbacks \endcsname}%
\edef\m_font_class_goodies {\begincsname\??fontclass\fontclass\fontstyle\s!goodies \endcsname}%
diff --git a/tex/context/base/mkxl/font-ini.lmt b/tex/context/base/mkxl/font-ini.lmt
index bc68fa83d..dcec8594e 100644
--- a/tex/context/base/mkxl/font-ini.lmt
+++ b/tex/context/base/mkxl/font-ini.lmt
@@ -6,10 +6,6 @@ if not modules then modules = { } end modules ['font-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Not much is happening here.</p>
---ldx]]--
-
local sortedhash, setmetatableindex = table.sortedhash, table.setmetatableindex
local allocate = utilities.storage.allocate
diff --git a/tex/context/base/mkxl/font-ini.mklx b/tex/context/base/mkxl/font-ini.mklx
index 6efae2ae1..ea727bde4 100644
--- a/tex/context/base/mkxl/font-ini.mklx
+++ b/tex/context/base/mkxl/font-ini.mklx
@@ -755,6 +755,16 @@
\immutable\dimensiondef\d_font_default_size 10pt
+%lettonothing\m_font_class_direction % no longer used
+\lettonothing\m_font_class_features
+\lettonothing\m_font_class_fallbacks
+\lettonothing\m_font_class_goodies
+
+\lettonothing\m_font_direction
+\lettonothing\m_font_features
+\lettonothing\m_font_fallbacks
+\lettonothing\m_font_goodies
+
\protected\def\font_helpers_low_level_define
{\ifconditional\c_font_compact
\expandafter\font_helpers_low_level_define_compact
diff --git a/tex/context/base/mkxl/font-mat.mklx b/tex/context/base/mkxl/font-mat.mklx
index 76f6f87b9..54473a347 100644
--- a/tex/context/base/mkxl/font-mat.mklx
+++ b/tex/context/base/mkxl/font-mat.mklx
@@ -337,15 +337,17 @@
%D 0 while in rl mode 0 is a copy of 1. There is no real overhead involved in this.
%D This also permits different font definitions for normal and mixed.
-\lettonothing\m_font_class_direction
-\lettonothing\m_font_class_features
-\lettonothing\m_font_class_fallbacks
-\lettonothing\m_font_class_goodies
-
-\lettonothing\m_font_direction
-\lettonothing\m_font_features
-\lettonothing\m_font_fallbacks
-\lettonothing\m_font_goodies
+% moved to ini
+%
+% \lettonothing\m_font_class_direction
+% \lettonothing\m_font_class_features
+% \lettonothing\m_font_class_fallbacks
+% \lettonothing\m_font_class_goodies
+%
+% \lettonothing\m_font_direction
+% \lettonothing\m_font_features
+% \lettonothing\m_font_fallbacks
+% \lettonothing\m_font_goodies
\appendtoks
\font_helpers_set_math_family\c_font_fam_mr\s!mr
diff --git a/tex/context/base/mkxl/font-one.lmt b/tex/context/base/mkxl/font-one.lmt
index 453f61192..71694dcca 100644
--- a/tex/context/base/mkxl/font-one.lmt
+++ b/tex/context/base/mkxl/font-one.lmt
@@ -7,18 +7,16 @@ if not modules then modules = { } end modules ['font-one'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Some code may look a bit obscure but this has to do with the fact that we also use
-this code for testing and much code evolved in the transition from <l n='tfm'/> to
-<l n='afm'/> to <l n='otf'/>.</p>
-
-<p>The following code still has traces of intermediate font support where we handles
-font encodings. Eventually font encoding went away but we kept some code around in
-other modules.</p>
-
-<p>This version implements a node mode approach so that users can also more easily
-add features.</p>
---ldx]]--
+-- Some code may look a bit obscure but this has to do with the fact that we also
+-- use this code for testing and much code evolved in the transition from TFM to AFM
+-- to OTF.
+--
+-- The following code still has traces of intermediate font support where we handled
+-- font encodings. Eventually font encoding went away but we kept some code around
+-- in other modules.
+--
+-- This version implements a node mode approach so that users can also more easily
+-- add features.
local fonts, logs, trackers, containers, resolvers = fonts, logs, trackers, containers, resolvers
@@ -71,15 +69,13 @@ local overloads = fonts.mappings.overloads
local applyruntimefixes = fonts.treatments and fonts.treatments.applyfixes
---[[ldx--
-<p>We cache files. Caching is taken care of in the loader. We cheat a bit by adding
-ligatures and kern information to the afm derived data. That way we can set them faster
-when defining a font.</p>
-
-<p>We still keep the loading two phased: first we load the data in a traditional
-fashion and later we transform it to sequences. Then we apply some methods also
-used in opentype fonts (like <t>tlig</t>).</p>
---ldx]]--
+-- We cache files. Caching is taken care of in the loader. We cheat a bit by adding
+-- ligatures and kern information to the afm derived data. That way we can set them
+-- faster when defining a font.
+--
+-- We still keep the loading two phased: first we load the data in a traditional
+-- fashion and later we transform it to sequences. Then we apply some methods also
+-- used in opentype fonts (like tlig).
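A condensed sketch of that two phase flow with stubbed helpers; every name here is a placeholder rather than one of the loader's real functions.

-- everything here is a placeholder; it only shows the split into two phases
local rawcache = { }

local function phaseone(filename,parse) -- parse: traditional afm parsing plus extras
    local data = rawcache[filename]
    if not data then
        data = parse(filename)
        rawcache[filename] = data       -- in practice this is a disk cache, not a table
    end
    return data
end

local function phasetwo(data,tosequences) -- tosequences: derive otf-like sequences
    data.sequences = data.sequences or tosequences(data)
    return data
end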
function afm.load(filename)
filename = resolvers.findfile(filename,'afm') or ""
@@ -312,10 +308,8 @@ local function enhance_fix_names(data)
end
end
---[[ldx--
-<p>These helpers extend the basic table with extra ligatures, texligatures
-and extra kerns. This saves quite some lookups later.</p>
---ldx]]--
+-- These helpers extend the basic table with extra ligatures, texligatures and extra
+-- kerns. This saves quite some lookups later.
local addthem = function(rawdata,ligatures)
if ligatures then
@@ -349,17 +343,14 @@ local function enhance_add_ligatures(rawdata)
addthem(rawdata,afm.helpdata.ligatures)
end
---[[ldx--
-<p>We keep the extra kerns in separate kerning tables so that we can use
-them selectively.</p>
---ldx]]--
-
--- This is rather old code (from the beginning when we had only tfm). If
--- we unify the afm data (now we have names all over the place) then
--- we can use shcodes but there will be many more looping then. But we
--- could get rid of the tables in char-cmp then. Als, in the generic version
--- we don't use the character database. (Ok, we can have a context specific
--- variant).
+-- We keep the extra kerns in separate kerning tables so that we can use them
+-- selectively.
+--
+-- This is rather old code (from the beginning when we had only tfm). If we unify
+-- the afm data (now we have names all over the place) then we can use shcodes but
+-- there will be much more looping then. But we could get rid of the tables in
+-- char-cmp then. Also, in the generic version we don't use the character database.
+-- (Ok, we can have a context specific variant).
local function enhance_add_extra_kerns(rawdata) -- using shcodes is not robust here
local descriptions = rawdata.descriptions
@@ -440,9 +431,7 @@ local function enhance_add_extra_kerns(rawdata) -- using shcodes is not robust h
do_it_copy(afm.helpdata.rightkerned)
end
---[[ldx--
-<p>The copying routine looks messy (and is indeed a bit messy).</p>
---ldx]]--
+-- The copying routine looks messy (and is indeed a bit messy).
local function adddimensions(data) -- we need to normalize afm to otf i.e. indexed table instead of name
if data then
@@ -619,11 +608,9 @@ end
return nil
end
---[[ldx--
-<p>Originally we had features kind of hard coded for <l n='afm'/> files but since I
-expect to support more font formats, I decided to treat this fontformat like any
-other and handle features in a more configurable way.</p>
---ldx]]--
+-- Originally we had features kind of hard coded for AFM files but since I expect to
+-- support more font formats, I decided to treat this fontformat like any other and
+-- handle features in a more configurable way.
function afm.setfeatures(tfmdata,features)
local okay = constructors.initializefeatures("afm",tfmdata,features,trace_features,report_afm)
@@ -715,13 +702,10 @@ local function afmtotfm(specification)
end
end
---[[ldx--
-<p>As soon as we could intercept the <l n='tfm'/> reader, I implemented an
-<l n='afm'/> reader. Since traditional <l n='pdftex'/> could use <l n='opentype'/>
-fonts with <l n='afm'/> companions, the following method also could handle
-those cases, but now that we can handle <l n='opentype'/> directly we no longer
-need this features.</p>
---ldx]]--
+-- As soon as we could intercept the TFM reader, I implemented an AFM reader. Since
+-- traditional pdfTeX could use OpenType fonts with AFM companions, the following
+-- method also could handle those cases, but now that we can handle OpenType
+-- directly we no longer need this feature.
local function read_from_afm(specification)
local tfmdata = afmtotfm(specification)
@@ -736,9 +720,7 @@ local function read_from_afm(specification)
return tfmdata
end
---[[ldx--
-<p>We have the usual two modes and related features initializers and processors.</p>
---ldx]]--
+-- We have the usual two modes and related features initializers and processors.
registerafmfeature {
name = "mode",
diff --git a/tex/context/base/mkxl/font-onr.lmt b/tex/context/base/mkxl/font-onr.lmt
index d28c247df..04f9d3bb2 100644
--- a/tex/context/base/mkxl/font-onr.lmt
+++ b/tex/context/base/mkxl/font-onr.lmt
@@ -7,18 +7,16 @@ if not modules then modules = { } end modules ['font-onr'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Some code may look a bit obscure but this has to do with the fact that we also use
-this code for testing and much code evolved in the transition from <l n='tfm'/> to
-<l n='afm'/> to <l n='otf'/>.</p>
-
-<p>The following code still has traces of intermediate font support where we handles
-font encodings. Eventually font encoding went away but we kept some code around in
-other modules.</p>
-
-<p>This version implements a node mode approach so that users can also more easily
-add features.</p>
---ldx]]--
+-- Some code may look a bit obscure but this has to do with the fact that we also
+-- use this code for testing and much code evolved in the transition from TFM to AFM
+-- to OTF.
+--
+-- The following code still has traces of intermediate font support where we handled
+-- font encodings. Eventually font encoding went away but we kept some code around
+-- in other modules.
+--
+-- This version implements a node mode approach so that users can also more easily
+-- add features.
local fonts, logs, trackers, resolvers = fonts, logs, trackers, resolvers
@@ -49,12 +47,9 @@ pfb.version = 1.002
local readers = afm.readers or { }
afm.readers = readers
---[[ldx--
-<p>We start with the basic reader which we give a name similar to the built in <l n='tfm'/>
-and <l n='otf'/> reader.</p>
-<p>We use a new (unfinished) pfb loader but I see no differences between the old
-and new vectors (we actually had one bad vector with the old loader).</p>
---ldx]]--
+-- We start with the basic reader which we give a name similar to the built in TFM
+-- and OTF reader. We use a PFB loader but I see no differences between the old and
+-- new vectors (we actually had one bad vector with the old loader).
local get_indexes, get_shapes
@@ -71,7 +66,7 @@ do
-- local plain = bxor(cipher,rshift(r,8))
local plain = (cipher ~ ((r >> 8) & 0xFFFFFFFF))
-- r = ((cipher + r) * c1 + c2) % 65536
- r = ((cipher + r) * c1 + c2) % 0x10000
+ r = ((cipher + r) * c1 + c2) % 0x10000
return char(plain)
end
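For context: the snippet above sits in the classic Type1 eexec decryption loop. A compact stand-alone sketch of that scheme, with the constants taken from the Type1 specification rather than the loader's actual setup, could look like this:

-- stand-alone sketch of Type1 eexec decryption (r starts at 55665 for the
-- eexec stream and 4330 for charstrings; c1 and c2 come from the specification)
local c1, c2 = 52845, 22719

local function decrypt(binary,r,skip)
    local result = { }
    for i=1,#binary do
        local cipher = string.byte(binary,i)
        result[i] = string.char(cipher ~ ((r >> 8) & 0xFF))
        r = ((cipher + r) * c1 + c2) % 0x10000
    end
    return table.concat(result,"",(skip or 4) + 1) -- the first bytes are random padding
end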
@@ -366,11 +361,10 @@ do
end
---[[ldx--
-<p>We start with the basic reader which we give a name similar to the built in <l n='tfm'/>
-and <l n='otf'/> reader. We only need data that is relevant for our use. We don't support
-more complex arrangements like multiple master (obsolete), direction specific kerning, etc.</p>
---ldx]]--
+-- We start with the basic reader which we give a name similar to the built in TFM
+-- and OTF reader. We only need data that is relevant for our use. We don't support
+-- more complex arrangements like multiple master (obsolete), direction specific
+-- kerning, etc.
local spacer = patterns.spacer
local whitespace = patterns.whitespace
diff --git a/tex/context/base/mkxl/font-ota.lmt b/tex/context/base/mkxl/font-ota.lmt
index 157270ef1..6e8130741 100644
--- a/tex/context/base/mkxl/font-ota.lmt
+++ b/tex/context/base/mkxl/font-ota.lmt
@@ -56,10 +56,8 @@ local chardata = characters and characters.data
local otffeatures = fonts.constructors.features.otf
local registerotffeature = otffeatures.register
---[[ldx--
-<p>Analyzers run per script and/or language and are needed in order to
-process features right.</p>
---ldx]]--
+-- Analyzers run per script and/or language and are needed in order to process
+-- features right.
local setstate = nuts.setstate
local getstate = nuts.getstate
diff --git a/tex/context/base/mkxl/font-ots.lmt b/tex/context/base/mkxl/font-ots.lmt
index e7fcfc576..0e99de6d1 100644
--- a/tex/context/base/mkxl/font-ots.lmt
+++ b/tex/context/base/mkxl/font-ots.lmt
@@ -7,92 +7,90 @@ if not modules then modules = { } end modules ['font-ots'] = { -- sequences
license = "see context related readme files",
}
---[[ldx--
-<p>I need to check the description at the microsoft site ... it has been improved
-so maybe there are some interesting details there. Most below is based on old and
-incomplete documentation and involved quite a bit of guesswork (checking with the
-abstract uniscribe of those days. But changing things is tricky!</p>
-
-<p>This module is a bit more split up that I'd like but since we also want to test
-with plain <l n='tex'/> it has to be so. This module is part of <l n='context'/>
-and discussion about improvements and functionality mostly happens on the
-<l n='context'/> mailing list.</p>
-
-<p>The specification of OpenType is (or at least decades ago was) kind of vague.
-Apart from a lack of a proper free specifications there's also the problem that
-Microsoft and Adobe may have their own interpretation of how and in what order to
-apply features. In general the Microsoft website has more detailed specifications
-and is a better reference. There is also some information in the FontForge help
-files. In the end we rely most on the Microsoft specification.</p>
-
-<p>Because there is so much possible, fonts might contain bugs and/or be made to
-work with certain rederers. These may evolve over time which may have the side
-effect that suddenly fonts behave differently. We don't want to catch all font
-issues.</p>
-
-<p>After a lot of experiments (mostly by Taco, me and Idris) the first implementation
-was already quite useful. When it did most of what we wanted, a more optimized version
-evolved. Of course all errors are mine and of course the code can be improved. There
-are quite some optimizations going on here and processing speed is currently quite
-acceptable and has been improved over time. Many complex scripts are not yet supported
-yet, but I will look into them as soon as <l n='context'/> users ask for it.</p>
-
-<p>The specification leaves room for interpretation. In case of doubt the Microsoft
-implementation is the reference as it is the most complete one. As they deal with
-lots of scripts and fonts, Kai and Ivo did a lot of testing of the generic code and
-their suggestions help improve the code. I'm aware that not all border cases can be
-taken care of, unless we accept excessive runtime, and even then the interference
-with other mechanisms (like hyphenation) are not trivial.</p>
-
-<p>Especially discretionary handling has been improved much by Kai Eigner who uses complex
-(latin) fonts. The current implementation is a compromis between his patches and my code
-and in the meantime performance is quite ok. We cannot check all border cases without
-compromising speed but so far we're okay. Given good test cases we can probably improve
-it here and there. Especially chain lookups are non trivial with discretionaries but
-things got much better over time thanks to Kai.</p>
-
-<p>Glyphs are indexed not by unicode but in their own way. This is because there is no
-relationship with unicode at all, apart from the fact that a font might cover certain
-ranges of characters. One character can have multiple shapes. However, at the
-<l n='tex'/> end we use unicode so and all extra glyphs are mapped into a private
-space. This is needed because we need to access them and <l n='tex'/> has to include
-then in the output eventually.</p>
-
-<p>The initial data table is rather close to the open type specification and also not
-that different from the one produced by <l n='fontforge'/> but we uses hashes instead.
-In <l n='context'/> that table is packed (similar tables are shared) and cached on disk
-so that successive runs can use the optimized table (after loading the table is
-unpacked).</p>
-
-<p>This module is sparsely documented because it is has been a moving target. The
-table format of the reader changed a bit over time and we experiment a lot with
-different methods for supporting features. By now the structures are quite stable</p>
-
-<p>Incrementing the version number will force a re-cache. We jump the number by one
-when there's a fix in the reader or processing code that can result in different
-results.</p>
-
-<p>This code is also used outside context but in context it has to work with other
-mechanisms. Both put some constraints on the code here.</p>
-
---ldx]]--
-
--- Remark: We assume that cursives don't cross discretionaries which is okay because it
--- is only used in semitic scripts.
+-- I need to check the description at the Microsoft site ... it has been improved so
+-- maybe there are some interesting details there. Most below is based on old and
+-- incomplete documentation and involved quite a bit of guesswork (checking with the
+-- abstract uniscribe of those days). But changing things is tricky!
+--
+-- This module is a bit more split up than I'd like but since we also want to test
+-- with plain TeX it has to be so. This module is part of ConTeXt and discussion
+-- about improvements and functionality mostly happens on the ConTeXt mailing list.
+--
+-- The specification of OpenType is (or at least decades ago was) kind of vague.
+-- Apart from a lack of a proper free specifications there's also the problem that
+-- Microsoft and Adobe may have their own interpretation of how and in what order to
+-- apply features. In general the Microsoft website has more detailed specifications
+-- and is a better reference. There is also some information in the FontForge help
+-- files. In the end we rely most on the Microsoft specification.
+--
+-- Because so much is possible, fonts might contain bugs and/or be made to
+-- work with certain renderers. These may evolve over time which may have the side
+-- effect that suddenly fonts behave differently. We don't want to catch all font
+-- issues.
+--
+-- After a lot of experiments (mostly by Taco, me and Idris) the first
+-- implementation was already quite useful. When it did most of what we wanted, a
+-- more optimized version evolved. Of course all errors are mine and of course the
+-- code can be improved. There are quite some optimizations going on here and
+-- processing speed is currently quite acceptable and has been improved over time.
+-- Many complex scripts are not supported yet, but I will look into them as soon
+-- as ConTeXt users ask for it.
+--
+-- The specification leaves room for interpretation. In case of doubt the Microsoft
+-- implementation is the reference as it is the most complete one. As they deal with
+-- lots of scripts and fonts, Kai and Ivo did a lot of testing of the generic code
+-- and their suggestions help improve the code. I'm aware that not all border cases
+-- can be taken care of, unless we accept excessive runtime, and even then the
+-- interference with other mechanisms (like hyphenation) is not trivial.
+--
+-- Especially discretionary handling has been improved much by Kai Eigner who uses
+-- complex (latin) fonts. The current implementation is a compromise between his
+-- patches and my code and in the meantime performance is quite ok. We cannot check
+-- all border cases without compromising speed but so far we're okay. Given good
+-- test cases we can probably improve it here and there. Especially chain lookups
+-- are non trivial with discretionaries but things got much better over time thanks
+-- to Kai.
+--
+-- Glyphs are indexed not by unicode but in their own way. This is because there is
+-- no relationship with unicode at all, apart from the fact that a font might cover
+-- certain ranges of characters. One character can have multiple shapes. However, at
+-- the TeX end we use unicode, so all extra glyphs are mapped into a private
+-- space. This is needed because we need to access them and TeX has to include them
+-- in the output eventually.
+--
+-- The initial data table is rather close to the OpenType specification and also
+-- not that different from the one produced by FontForge but we use hashes instead.
+-- In ConTeXt that table is packed (similar tables are shared) and cached on disk so
+-- that successive runs can use the optimized table (after loading the table is
+-- unpacked).
+--
+-- This module is sparsely documented because it has been a moving target. The
+-- table format of the reader changed a bit over time and we experiment a lot with
+-- different methods for supporting features. By now the structures are quite stable.
+--
+-- Incrementing the version number will force a re-cache. We jump the number by one
+-- when there's a fix in the reader or processing code that can result in different
+-- results.
+--
+-- This code is also used outside ConTeXt but in ConTeXt it has to work with other
+-- mechanisms. Both put some constraints on the code here.
+--
+-- Remark: We assume that cursives don't cross discretionaries which is okay because
+-- it is only used in semitic scripts.
--
-- Remark: We assume that marks precede base characters.
--
--- Remark: When complex ligatures extend into discs nodes we can get side effects. Normally
--- this doesn't happen; ff\d{l}{l}{l} in lm works but ff\d{f}{f}{f}.
+-- Remark: When complex ligatures extend into discs nodes we can get side effects.
+-- Normally this doesn't happen; ff\d{l}{l}{l} in lm works but ff\d{f}{f}{f}.
--
-- Todo: check if we copy attributes to disc nodes if needed.
--
--- Todo: it would be nice if we could get rid of components. In other places we can use
--- the unicode properties. We can just keep a lua table.
+-- Todo: it would be nice if we could get rid of components. In other places we can
+-- use the unicode properties. We can just keep a lua table.
--
--- Remark: We do some disc juggling where we need to keep in mind that the pre, post and
--- replace fields can have prev pointers to a nesting node ... I wonder if that is still
--- needed.
+-- Remark: We do some disc juggling where we need to keep in mind that the pre, post
+-- and replace fields can have prev pointers to a nesting node ... I wonder if that
+-- is still needed.
--
-- Remark: This is not possible:
--
@@ -1092,10 +1090,8 @@ function handlers.gpos_pair(head,start,dataset,sequence,kerns,rlmode,skiphash,st
end
end
---[[ldx--
-<p>We get hits on a mark, but we're not sure if the it has to be applied so
-we need to explicitly test for basechar, baselig and basemark entries.</p>
---ldx]]--
+-- We get hits on a mark, but we're not sure if it has to be applied so we need
+-- to explicitly test for basechar, baselig and basemark entries.
function handlers.gpos_mark2base(head,start,dataset,sequence,markanchors,rlmode,skiphash)
local markchar = getchar(start)
@@ -1292,10 +1288,8 @@ function handlers.gpos_cursive(head,start,dataset,sequence,exitanchors,rlmode,sk
return head, start, false
end
---[[ldx--
-<p>I will implement multiple chain replacements once I run into a font that uses
-it. It's not that complex to handle.</p>
---ldx]]--
+-- I will implement multiple chain replacements once I run into a font that uses it.
+-- It's not that complex to handle.
local chainprocs = { }
@@ -1348,29 +1342,22 @@ end
chainprocs.reversesub = reversesub
---[[ldx--
-<p>This chain stuff is somewhat tricky since we can have a sequence of actions to be
-applied: single, alternate, multiple or ligature where ligature can be an invalid
-one in the sense that it will replace multiple by one but not neccessary one that
-looks like the combination (i.e. it is the counterpart of multiple then). For
-example, the following is valid:</p>
-
-<typing>
-<line>xxxabcdexxx [single a->A][multiple b->BCD][ligature cde->E] xxxABCDExxx</line>
-</typing>
-
-<p>Therefore we we don't really do the replacement here already unless we have the
-single lookup case. The efficiency of the replacements can be improved by deleting
-as less as needed but that would also make the code even more messy.</p>
---ldx]]--
-
---[[ldx--
-<p>Here we replace start by a single variant.</p>
---ldx]]--
-
--- To be done (example needed): what if > 1 steps
-
--- this is messy: do we need this disc checking also in alternates?
+-- This chain stuff is somewhat tricky since we can have a sequence of actions to be
+-- applied: single, alternate, multiple or ligature where ligature can be an invalid
+-- one in the sense that it will replace multiple by one but not necessarily one that
+-- looks like the combination (i.e. it is the counterpart of multiple then). For
+-- example, the following is valid:
+--
+-- xxxabcdexxx [single a->A][multiple b->BCD][ligature cde->E] xxxABCDExxx
+--
+-- Therefore we don't really do the replacement here already unless we have the
+-- single lookup case. The efficiency of the replacements can be improved by
+-- deleting as little as needed but that would also make the code even more messy.
+--
+-- Here we replace start by a single variant.
+--
+-- To be done : what if > 1 steps (example needed)
+-- This is messy: do we need this disc checking also in alternates?
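The example above can be mimicked with plain strings; a toy sketch, unrelated to the actual node list processing, just to show the order in which the actions apply:

-- toy string version of the example, applied in the documented order
local s = "xxxabcdexxx"
s = s:gsub("a","A",1)     -- single   : a   -> A
s = s:gsub("b","BCD",1)   -- multiple : b   -> BCD
s = s:gsub("cde","E",1)   -- ligature : cde -> E
print(s)                  -- xxxABCDExxx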
local function reportzerosteps(dataset,sequence)
logwarning("%s: no steps",cref(dataset,sequence))
@@ -1446,9 +1433,7 @@ function chainprocs.gsub_single(head,start,stop,dataset,sequence,currentlookup,r
return head, start, false
end
---[[ldx--
-<p>Here we replace start by new glyph. First we delete the rest of the match.</p>
---ldx]]--
+-- Here we replace start by new glyph. First we delete the rest of the match.
-- char_1 mark_1 -> char_x mark_1 (ignore marks)
-- char_1 mark_1 -> char_x
@@ -1500,9 +1485,7 @@ function chainprocs.gsub_alternate(head,start,stop,dataset,sequence,currentlooku
return head, start, false
end
---[[ldx--
-<p>Here we replace start by a sequence of new glyphs.</p>
---ldx]]--
+-- Here we replace start by a sequence of new glyphs.
function chainprocs.gsub_multiple(head,start,stop,dataset,sequence,currentlookup,rlmode,skiphash,chainindex)
local mapping = currentlookup.mapping
@@ -1526,11 +1509,9 @@ function chainprocs.gsub_multiple(head,start,stop,dataset,sequence,currentlookup
return head, start, false
end
---[[ldx--
-<p>When we replace ligatures we use a helper that handles the marks. I might change
-this function (move code inline and handle the marks by a separate function). We
-assume rather stupid ligatures (no complex disc nodes).</p>
---ldx]]--
+-- When we replace ligatures we use a helper that handles the marks. I might change
+-- this function (move code inline and handle the marks by a separate function). We
+-- assume rather stupid ligatures (no complex disc nodes).
-- compare to handlers.gsub_ligature which is more complex ... why
diff --git a/tex/context/base/mkxl/font-tfm.lmt b/tex/context/base/mkxl/font-tfm.lmt
index 9fce8fc5f..d6857b39e 100644
--- a/tex/context/base/mkxl/font-tfm.lmt
+++ b/tex/context/base/mkxl/font-tfm.lmt
@@ -50,21 +50,18 @@ constructors.resolvevirtualtoo = false -- wil be set in font-ctx.lua
fonts.formats.tfm = "type1" -- we need to have at least a value here
fonts.formats.ofm = "type1" -- we need to have at least a value here
---[[ldx--
-<p>The next function encapsulates the standard <l n='tfm'/> loader as
-supplied by <l n='luatex'/>.</p>
---ldx]]--
-
--- this might change: not scaling and then apply features and do scaling in the
--- usual way with dummy descriptions but on the other hand .. we no longer use
--- tfm so why bother
-
--- ofm directive blocks local path search unless set; btw, in context we
--- don't support ofm files anyway as this format is obsolete
-
--- we need to deal with nested virtual fonts, but because we load in the
--- frontend we also need to make sure we don't nest too deep (esp when sizes
--- get large)
+-- The next function encapsulates the standard TFM loader as supplied by LuaTeX.
+--
+-- This might change: not scaling here, then applying features and doing the
+-- scaling in the usual way with dummy descriptions. On the other hand, we no
+-- longer use TFM (except for the JMN math fonts) so why bother.
+--
+-- The ofm directive blocks a local path search unless set. Actually, in ConTeXt we
+-- never had to deal with OFM files anyway as this format is obsolete (there are
+-- hardly any fonts in that format that are of use).
+--
+-- We need to deal with nested virtual fonts, but because we load in the frontend we
+-- also need to make sure we don't nest too deep (esp when sizes get large)
--
-- (VTITLE Example of a recursion)
-- (MAPFONT D 0 (FONTNAME recurse)(FONTAT D 2))
@@ -72,7 +69,8 @@ supplied by <l n='luatex'/>.</p>
-- (CHARACTER C B (CHARWD D 2)(CHARHT D 2)(MAP (SETCHAR C A)))
-- (CHARACTER C C (CHARWD D 4)(CHARHT D 4)(MAP (SETCHAR C B)))
--
--- we added the same checks as below to the luatex engine
+-- The virtual fonts are handled in the backend and therefore LMTX provides more
+-- features than in the original specification. LuaTeX already had a few more.
function tfm.setfeatures(tfmdata,features)
local okay = constructors.initializefeatures("tfm",tfmdata,features,trace_features,report_tfm)
diff --git a/tex/context/base/mkxl/lang-url.lmt b/tex/context/base/mkxl/lang-url.lmt
index b918464d0..7607d7d84 100644
--- a/tex/context/base/mkxl/lang-url.lmt
+++ b/tex/context/base/mkxl/lang-url.lmt
@@ -23,12 +23,10 @@ local v_after = variables.after
local is_letter = characters.is_letter
---[[
-<p>Hyphenating <l n='url'/>'s is somewhat tricky and a matter of taste. I did
-consider using a dedicated hyphenation pattern or dealing with it by node
-parsing, but the following solution suits as well. After all, we're mostly
-dealing with <l n='ascii'/> characters.</p>
-]]--
+-- Hyphenating URLs is somewhat tricky and a matter of taste. I did consider using
+-- a dedicated hyphenation pattern or dealing with it by node parsing, but the
+-- following solution suits as well. After all, we're mostly dealing with ASCII
+-- characters.
local urls = { }
languages.urls = urls
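A much simplified sketch of the idea (the real mechanism works on node lists and honours before/after preferences): collect the positions after which a break is acceptable.

-- toy version: return the string positions after which a break is acceptable
local breakable = { ["/"] = true, ["."] = true, ["-"] = true, ["?"] = true, ["&"] = true }

local function breakpoints(url)
    local positions = { }
    for i=1,#url do
        if breakable[string.sub(url,i,i)] then
            positions[#positions+1] = i
        end
    end
    return positions
end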
diff --git a/tex/context/base/mkxl/lpdf-ano.lmt b/tex/context/base/mkxl/lpdf-ano.lmt
index 55b145730..2e19ffd5e 100644
--- a/tex/context/base/mkxl/lpdf-ano.lmt
+++ b/tex/context/base/mkxl/lpdf-ano.lmt
@@ -725,6 +725,7 @@ lpdf.action = pdfaction
function codeinjections.prerollreference(actions) -- share can become option
if actions then
+-- inspect(actions)
local main, n = pdfaction(actions)
if main then
local bs, bc = pdfborder()
diff --git a/tex/context/base/mkxl/lpdf-pde.lmt b/tex/context/base/mkxl/lpdf-pde.lmt
index 68712d58d..4e5d73e04 100644
--- a/tex/context/base/mkxl/lpdf-pde.lmt
+++ b/tex/context/base/mkxl/lpdf-pde.lmt
@@ -67,7 +67,6 @@ local lpdf = lpdf
local lpdf_epdf = { }
lpdf.epdf = lpdf_epdf
-local pdfopen = pdfe.open
local pdfopenfile = pdfe.openfile
local pdfnew = pdfe.new
local pdfclose = pdfe.close
@@ -540,10 +539,9 @@ function lpdf_epdf.load(filename,userpassword,ownerpassword,fromstring)
local __file__
if fromstring then
__data__ = pdfnew(filename,#filename)
- elseif pdfopenfile then
- __data__ = pdfopenfile(ioopen(filename,"rb"))
else
- __data__ = pdfopen(filename)
+ local f = ioopen(filename,"rb")
+ __data__ = f and pdfopenfile(f)
end
if __data__ then
if userpassword and getstatus(__data__) < 0 then
diff --git a/tex/context/base/mkxl/luat-cbk.lmt b/tex/context/base/mkxl/luat-cbk.lmt
index 744d12e27..2a3a58b04 100644
--- a/tex/context/base/mkxl/luat-cbk.lmt
+++ b/tex/context/base/mkxl/luat-cbk.lmt
@@ -12,20 +12,16 @@ local collectgarbage, type, next = collectgarbage, type, next
local round = math.round
local sortedhash, sortedkeys, tohash = table.sortedhash, table.sortedkeys, table.tohash
---[[ldx--
-<p>Callbacks are the real asset of <l n='luatex'/>. They permit you to hook
-your own code into the <l n='tex'/> engine. Here we implement a few handy
-auxiliary functions.</p>
---ldx]]--
+-- Callbacks are the real asset of LuaTeX. They permit you to hook your own code
+-- into the TeX engine. Here we implement a few handy auxiliary functions. Watch
+-- out, there are differences between LuaTeX and LuaMetaTeX.
callbacks = callbacks or { }
local callbacks = callbacks
---[[ldx--
-<p>When you (temporarily) want to install a callback function, and after a
-while wants to revert to the original one, you can use the following two
-functions. This only works for non-frozen ones.</p>
---ldx]]--
+-- When you (temporarily) want to install a callback function, and after a while
+-- want to revert to the original one, you can use the following two functions.
+-- This only works for non-frozen ones.
local trace_callbacks = false trackers.register("system.callbacks", function(v) trace_callbacks = v end)
local trace_calls = false -- only used when analyzing performance and initializations
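A generic sketch of that save and restore pattern, assuming the stock LuaTeX callback library (callback.find and callback.register); the helpers in this module have different names and also deal with frozen callbacks.

-- sketch: remember the currently registered function and put it back later
local saved = { }

local function pushcallback(name,func)
    saved[name] = callback.find(name)
    callback.register(name,func)
end

local function popcallback(name)
    callback.register(name,saved[name])
    saved[name] = nil
end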
@@ -47,13 +43,12 @@ local list = callbacks.list
local permit_overloads = false
local block_overloads = false
---[[ldx--
-<p>By now most callbacks are frozen and most provide a way to plug in your own code. For instance
-all node list handlers provide before/after namespaces and the file handling code can be extended
-by adding schemes and if needed I can add more hooks. So there is no real need to overload a core
-callback function. It might be ok for quick and dirty testing but anyway you're on your own if
-you permanently overload callback functions.</p>
---ldx]]--
+-- By now most callbacks are frozen and most provide a way to plug in your own code.
+-- For instance all node list handlers provide before/after namespaces and the file
+-- handling code can be extended by adding schemes and if needed I can add more
+-- hooks. So there is no real need to overload a core callback function. It might be
+-- ok for quick and dirty testing but anyway you're on your own if you permanently
+-- overload callback functions.
-- This might become a configuration file only option when it gets abused too much.
diff --git a/tex/context/base/mkxl/luat-cod.mkxl b/tex/context/base/mkxl/luat-cod.mkxl
index ed4a13981..322076aa1 100644
--- a/tex/context/base/mkxl/luat-cod.mkxl
+++ b/tex/context/base/mkxl/luat-cod.mkxl
@@ -42,7 +42,7 @@
\toksapp \everydump {%
\permanent\let\ctxlatelua \latelua
\permanent\def\ctxlatecommand#1{\latelua{commands.#1}}%
- \aliased\let\lateluacode \ctxlatelua
+ \aliased\let\lateluacode \ctxlatelua
} % no \appendtoks yet
\protect \endinput
diff --git a/tex/context/base/mkxl/luat-ini.lmt b/tex/context/base/mkxl/luat-ini.lmt
index 3202ea42b..56e3bd1c1 100644
--- a/tex/context/base/mkxl/luat-ini.lmt
+++ b/tex/context/base/mkxl/luat-ini.lmt
@@ -6,11 +6,9 @@ if not modules then modules = { } end modules ['luat-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We cannot load anything yet. However what we will do us reserve a few tables.
-These can be used for runtime user data or third party modules and will not be
-cluttered by macro package code.</p>
---ldx]]--
+-- We cannot load anything yet. However, what we will do is reserve a few tables.
+-- These can be used for runtime user data or third party modules and will not be
+-- cluttered by macro package code.
userdata = userdata or { } -- for users (e.g. functions etc)
thirddata = thirddata or { } -- only for third party modules
diff --git a/tex/context/base/mkxl/math-act.lmt b/tex/context/base/mkxl/math-act.lmt
index 0c75147f6..4a46baff9 100644
--- a/tex/context/base/mkxl/math-act.lmt
+++ b/tex/context/base/mkxl/math-act.lmt
@@ -533,7 +533,7 @@ do
k = mathgaps[k] or k
local character = targetcharacters[k]
if character then
--- if not character.tweaked then -- todo: add a force
+ -- if not character.tweaked then -- todo: add a force
local t = type(v)
if t == "number" then
v = list[v]
@@ -666,7 +666,7 @@ do
else
report_mathtweak("invalid dimension entry %U",k)
end
--- character.tweaked = true
+ -- character.tweaked = true
if v.all then
local nxt = character.next
if nxt then
@@ -680,7 +680,7 @@ do
end
end
end
--- end
+ -- end
else
report_tweak("no character %U",target,original,k)
end
@@ -1938,63 +1938,178 @@ do
-- vfmath.builders.extension(target)
local rbe = newprivateslot("radical bar extender")
+ local fbe = newprivateslot("fraction bar extender")
+
+ local frp = {
+ newprivateslot("flat rule left piece"),
+ newprivateslot("flat rule middle piece"),
+ newprivateslot("flat rule right piece"),
+ }
+
+ local rrp = {
+ newprivateslot("radical rule middle piece"),
+ newprivateslot("radical rule right piece"),
+ }
+
+ local mrp = {
+ newprivateslot("minus rule left piece"),
+ newprivateslot("minus rule middle piece"),
+ newprivateslot("minus rule right piece"),
+ }
- local function useminus(unicode,characters,parameters)
+ local function useminus(target,unicode,characters,parameters,skipfirst,what)
local minus = characters[0x2212]
- local xoffset = parameters.xoffset or .075
- local yoffset = parameters.yoffset or .9
- local xscale = parameters.xscale or 1
- local yscale = parameters.yscale or 1
- local xwidth = parameters.width or (1 - 2*xoffset)
- local xheight = parameters.height or (1 - yoffset)
- local mheight = minus.height
- local mwidth = minus.width
- local height = xheight*mheight
- local xshift = xoffset * mwidth
- local yshift = yoffset * mheight
- local advance = xwidth * mwidth
- local step = mwidth / 2
- characters[unicode] = {
- height = height,
- depth = height,
- width = advance,
- commands = {
- push,
- leftcommand[xshift],
- downcommand[yshift],
- -- slotcommand[0][0x2212],
- { "slot", 0, 0x2212, xscale, yscale },
- pop,
- },
- unicode = unicode,
- -- parts = {
- -- { extender = 0, glyph = first, ["end"] = fw/2, start = 0, advance = fw },
- -- { extender = 1, glyph = middle, ["end"] = mw/2, start = mw/2, advance = mw },
- -- { extender = 0, glyph = last, ["end"] = 0, start = lw/2, advance = lw },
- -- },
- parts = {
- { extender = 0, glyph = unicode, ["end"] = step, start = 0, advance = advance },
- { extender = 1, glyph = unicode, ["end"] = step, start = step, advance = advance },
- },
- partsorientation = "horizontal",
- }
+ local parts = minus.parts
+ if parameters == true then
+ parameters = { }
+ end
+ if parts then
+ parts = copytable(parts)
+ local xscale = parameters.xscale or 1
+ local yscale = parameters.yscale or 1
+ local mheight = minus.height
+ local height = (parameters.height or 1) * mheight
+ local yshift = (parameters.yoffset or 0) * mheight
+ if skipfirst then
+ table.remove(parts,1)
+ end
+ height = height / 2
+ yshift = yshift + height
+ for i=1,#parts do
+ local part = parts[i]
+ local glyph = part.glyph
+ local gdata = characters[glyph]
+ local width = gdata.width
+ local xshift = 0
+ if i == 1 and parameters.leftoffset then
+ xshift = (parameters.leftoffset) * width
+ width = width - xshift
+ elseif i == #parts and parameters.rightoffset then
+ width = (1 + parameters.rightoffset) * width
+ end
+ characters[what[i]] = {
+ height = height,
+ depth = height,
+ width = width,
+ commands = {
+ leftcommand[xshift],
+ downcommand[yshift],
+-- slotcommand[0][glyph],
+ { "slot", 0, glyph, xscale, yscale },
+ },
+ }
+ part.glyph = what[i]
+ part.advance = width
+ end
+ characters[unicode] = {
+ height = height,
+ depth = height,
+ width = minus.width,
+ commands = {
+ downcommand[yshift],
+-- slotcommand[0][0x2212],
+ { "slot", 0, 0x2212, xscale, yscale },
+ },
+ unicode = unicode,
+ parts = parts,
+ partsorientation = "horizontal",
+ }
+ end
+ end
+
+ -- add minus parts if not there and create a clipped clone
+
+ local function checkminus(target,unicode,characters,parameters,skipfirst,what)
+ local minus = characters[unicode]
+ local parts = minus.parts
+ if parameters == true then
+ parameters = { }
+ end
+ local p_normal = 0
+ local p_flat = 0
+ local mwidth = minus.width
+ local height = minus.height
+ local depth = minus.depth
+ local loffset = parameters.leftoffset or 0
+ local roffset = parameters.rightoffset or 0
+ local lshift = mwidth * loffset
+ local rshift = mwidth * roffset
+ local width = mwidth - lshift - rshift
+ if parts then
+ -- print("minus has parts")
+ if lshift ~= 0 or width ~= mwidth then
+ parts = copytable(parts)
+ for i=1,#parts do
+ local part = parts[i]
+ local glyph = part.glyph
+ local gdata = characters[glyph]
+ local width = gdata.width
+ local advance = part.advance
+ local lshift = 0
+ if i == 1 and loffset ~= 0 then
+ lshift = loffset * width
+ width = width - lshift
+ advance = advance - lshift
+ elseif i == #parts and roffset ~= 0 then
+ width = width - rshift
+ advance = advance - rshift
+ end
+ characters[what[i]] = {
+ height = height,
+ depth = depth,
+ width = width,
+ commands = {
+ leftcommand[lshift],
+ slotcommand[0][glyph],
+ },
+ }
+ part.glyph = what[i]
+ part.advance = advance
+ end
+ minus.parts = parts
+ minus.partsorientation = "horizontal"
+
+ end
+ else
+ local f_normal = formatters["M-NORMAL-%H"](unicode)
+ -- local p_normal = hasprivate(main,f_normal)
+ p_normal = addprivate(target,f_normal,{
+ height = height,
+ width = width,
+ commands = {
+ push,
+ leftcommand[lshift],
+ slotcommand[0][unicode],
+ pop,
+ },
+ })
+ local step = width/2
+ minus.parts = {
+ { extender = 0, glyph = p_normal, ["end"] = step, start = 0, advance = width },
+ { extender = 1, glyph = p_normal, ["end"] = step, start = step, advance = width },
+ { extender = 0, glyph = p_normal, ["end"] = 0, start = step, advance = width },
+ }
+ minus.partsorientation = "horizontal"
+ end
end
function mathtweaks.replacerules(target,original,parameters)
local characters = target.characters
+ local minus = parameters.minus
local fraction = parameters.fraction
local radical = parameters.radical
+ local stacker = parameters.stacker
+ if minus then
+ checkminus(target,0x2212,characters,minus,false,mrp)
+ end
if fraction then
- local template = fraction.template
- if template == 0x2212 or template == "minus" then
- useminus(0x203E,characters,fraction)
- end
+ useminus(target,fbe,characters,fraction,false,frp)
end
if radical then
- local template = radical.template
- if template == 0x2212 or template == "minus" then
- useminus(rbe,characters,radical)
- end
+ useminus(target,rbe,characters,radical,true,rrp)
+ end
+ if stacker then
+ useminus(target,0x203E,characters,stacker,false,frp)
end
end
@@ -2110,6 +2225,7 @@ do
return {
--
[0x002D] = { { left = slack, right = slack, glyph = 0x2212 }, single }, -- rel
+-- [0x2212] = { { left = slack, right = slack, glyph = 0x2212 }, single }, -- rel
--
[0x2190] = leftsingle, -- leftarrow
[0x219E] = leftsingle, -- twoheadleftarrow
@@ -3091,59 +3207,6 @@ do
local double <const> = 0x2016
local triple <const> = 0x2980
- -- local nps = fonts.helpers.newprivateslot
- --
- -- local function variantlist(characters,unicode,chardata,what,total,used)
- -- local parenthesis = characters[0x28].next
- -- local width = chardata.width
- -- local height = chardata.height
- -- local depth = chardata.depth
- -- local total = height + depth
- -- local count = 1
- -- while parenthesis do
- -- local private = nps(what .. " size " .. count)
- -- local pardata = characters[parenthesis]
- -- local parheight = pardata.height
- -- local pardepth = pardata.depth
- -- local scale = (parheight+pardepth)/total
- -- local offset = - pardepth + scale * depth
- -- chardata.next = private
- -- chardata = {
- -- unicode = unicode,
- -- width = width,
- -- height = parheight,
- -- depth = pardepth,
- -- commands = {
- -- { "offset", 0, offset, unicode, 1, scale }
- -- },
- -- }
- -- characters[private] = chardata
- -- parenthesis = pardata.next
- -- if paranthesis then
- -- pardata = characters[parenthesis]
- -- end
- -- count = count + 1
- -- end
- -- chardata.parts = {
- -- {
- -- advance = total,
- -- ["end"] = used,
- -- glyph = unicode,
- -- start = 0,
- -- -- start = used/5,
- -- },
- -- {
- -- advance = total,
- -- -- ["end"] = 0,
- -- ["end"] = used/5, -- prevents small gap with inward curved endpoints
- -- extender = 1,
- -- glyph = unicode,
- -- start = used,
- -- },
- -- }
- -- chardata.partsorientation = "vertical"
- -- end
-
local function variantlist(unicode,chardata,total,used)
chardata.varianttemplate = 0x0028
chardata.parts = {
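For reference, a standalone Lua sketch of the three piece horizontal parts table that useminus and checkminus assemble in the math-act.lmt hunks above. The field names (glyph, extender, start, end, advance) mirror those hunks; the helper name and the private slots 0xE001..0xE003 are made up, and nothing here touches the real font loader:

-- build a left piece, an extensible middle piece and a right piece; the
-- middle one repeats as often as needed to reach the target size
local function horizontalparts(left, middle, right, width)
    local step = width / 2
    return {
        { extender = 0, glyph = left,   start = 0,    ["end"] = step, advance = width },
        { extender = 1, glyph = middle, start = step, ["end"] = step, advance = width },
        { extender = 0, glyph = right,  start = step, ["end"] = 0,    advance = width },
    }
end

for i, part in ipairs(horizontalparts(0xE001, 0xE002, 0xE003, 10 * 65536)) do
    print(i, part.glyph, part.extender, part.advance)
end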
diff --git a/tex/context/base/mkxl/math-ali.mkxl b/tex/context/base/mkxl/math-ali.mkxl
index b37887332..b90bad174 100644
--- a/tex/context/base/mkxl/math-ali.mkxl
+++ b/tex/context/base/mkxl/math-ali.mkxl
@@ -1403,9 +1403,41 @@
\c!toffset=.25\exheight,
\c!boffset=\mathmatrixparameter\c!toffset]
-\noaligned\permanent\tolerant\protected\def\math_matrix_HL[#1]#*%
+% \noaligned\permanent\tolerant\protected\def\math_matrix_HL[#1]#*%
+% {\noalign\bgroup
+% \math_matrix_check_rule[#1]%
+% \divideby\scratchdimen\plustwo
+% \ifdim\scratchdimen>\zeropoint
+% % \autorule\s!height\scratchdimen\s!depth\scratchdimen\relax
+% \scratchdistance\mathmatrixparameter\c!toffset\relax
+% \ifdim\scratchdistance>\zeropoint
+% \nohrule
+% \s!attr \mathalignmentvruleattribute\plustwo
+% \s!height\scratchdistance
+% \s!depth \zeropoint
+% \relax
+% \fi
+% \hrule
+% \s!attr \mathalignmentvruleattribute\plusthree
+% \s!height\scratchdimen
+% \s!depth \scratchdimen
+% \relax
+% \scratchdistance\mathmatrixparameter\c!boffset\relax
+% \ifdim\scratchdistance>\zeropoint
+% \nohrule
+% \s!attr \mathalignmentvruleattribute\plusfour
+% \s!height\zeropoint
+% \s!depth \scratchdistance
+% \relax
+% \fi
+% \else
+% % zero dimensions disable the rule
+% \fi
+% \egroup}
+
+\def\math_matrix_HL_indeed#1#2%
{\noalign\bgroup
- \math_matrix_check_rule[#1]%
+ \math_matrix_check_rule[#2]%
\divideby\scratchdimen\plustwo
\ifdim\scratchdimen>\zeropoint
% \autorule\s!height\scratchdimen\s!depth\scratchdimen\relax
@@ -1422,6 +1454,17 @@
\s!height\scratchdimen
\s!depth \scratchdimen
\relax
+ \ifnum#1>\plusone
+ \localcontrolledloop\plustwo#1\plusone
+ {\kern.125\d_math_eqalign_distance % hskip
+ \hrule
+ \s!attr \mathalignmentvruleattribute\plusthree
+ \s!height\scratchdimen
+ \s!depth \scratchdimen
+ \relax}%
+ \kern-2\scratchdimen
+ \kern-.125\d_math_eqalign_distance % hskip
+ \fi
\scratchdistance\mathmatrixparameter\c!boffset\relax
\ifdim\scratchdistance>\zeropoint
\nohrule
@@ -1435,6 +1478,9 @@
\fi
\egroup}
+\permanent\tolerant\noaligned\protected\def\math_matrix_HL [#1]#*{\math_matrix_HL_indeed\plusone{#1}}
+\permanent\tolerant\noaligned\protected\def\math_matrix_HLHL[#1]#*{\math_matrix_HL_indeed\plustwo{#1}}
+
\protected\def\math_matrix_vertical_rule_indeed#1#2%
{\math_matrix_check_rule[#2]%
\enablematrixrules
@@ -1514,19 +1560,38 @@
%boundary\c_math_matrix_sl_boundary
\enforced\let\NR\math_matrix_NL_NR}
-\permanent\tolerant\protected\def\math_matrix_VL[#1]#*%
+% \permanent\tolerant\protected\def\math_matrix_VL[#1]#*%
+% {\span\omit
+% \ifconditional\c_math_matrix_first\else
+% \kern.5\d_math_eqalign_distance % hskip
+% \fi
+% \math_matrix_vertical_rule_yes{#1}%
+% \kern.5\d_math_eqalign_distance % hskip
+% \global\setfalse\c_math_matrix_first
+% \aligntab
+% \boundary\c_math_matrix_vl_boundary
+% \enforced\let\NR\math_matrix_NL_NR
+% }
+
+\def\math_matrix_VL_indeed#1#2%%
{\span\omit
\ifconditional\c_math_matrix_first\else
\kern.5\d_math_eqalign_distance % hskip
\fi
- \math_matrix_vertical_rule_yes{#1}%
- \kern.5\d_math_eqalign_distance % hskip
+ \math_matrix_vertical_rule_yes{#2}%
+ \localcontrolledloop\plustwo#1\plusone
+ {\kern.125\d_math_eqalign_distance % hskip
+ \math_matrix_vertical_rule_yes{#2}}%
+ \kern.5\d_math_eqalign_distance
\global\setfalse\c_math_matrix_first
\aligntab
\boundary\c_math_matrix_vl_boundary
\enforced\let\NR\math_matrix_NL_NR
}
+\permanent\tolerant\protected\def\math_matrix_VL [#1]#*{\math_matrix_VL_indeed\plusone{#1}}
+\permanent\tolerant\protected\def\math_matrix_VLVL[#1]#*{\math_matrix_VL_indeed\plustwo{#1}}
+
\permanent\tolerant\protected\def\math_matrix_NL[#1]#*%
{\span\omit
\ifconditional\c_math_matrix_first\else
@@ -1585,6 +1650,9 @@
\enforced\let\VC\math_matrix_VC % bonus, extra column
\enforced\let\VT\math_matrix_VT % bonus, idem but tight
\enforced\let\TB\math_common_TB
+ % just because it's easy:
+ \enforced\let\VLVL\math_matrix_VLVL
+ \enforced\let\HLHL\math_matrix_HLHL
\to \everymathmatrix
\definesystemattribute[mathmatrixornament][public]
diff --git a/tex/context/base/mkxl/math-fnt.lmt b/tex/context/base/mkxl/math-fnt.lmt
index 911e0adb5..7e2c0c75c 100644
--- a/tex/context/base/mkxl/math-fnt.lmt
+++ b/tex/context/base/mkxl/math-fnt.lmt
@@ -63,9 +63,11 @@ local function register_extensible(font,char,style,box)
return nil
else
local bx = tonut(box)
- updaters.apply("tagging.state.disable") -- fast enough
- nodes.handlers.finalizelist(bx)
- updaters.apply("tagging.state.enable")
+ -- actually we don't want colors and such so if we do finalize we
+ -- should be more selective:
+-- updaters.apply("tagging.state.disable")
+-- nodes.handlers.finalizelist(bx)
+-- updaters.apply("tagging.state.enable")
local id = getid(bx)
local al = getattrlst(bx)
local wd, ht, dp = getwhd(bx)
diff --git a/tex/context/base/mkxl/math-frc.mkxl b/tex/context/base/mkxl/math-frc.mkxl
index 47edc52c4..5c1eab8dd 100644
--- a/tex/context/base/mkxl/math-frc.mkxl
+++ b/tex/context/base/mkxl/math-frc.mkxl
@@ -104,6 +104,14 @@
\c!vfactor=\plusthousand,
\c!rule=\v!auto]
+%D We now default to nice bars:
+
+\integerdef\fractionbarextenderuc \privatecharactercode{fraction bar extender}
+
+\setupmathfractions
+ [\c!rule=\v!symbol,
+ \c!middle=\fractionbarextenderuc]
+
\appendtoks
\instance\frozen\protected\edefcsname\currentmathfraction\endcsname{\math_frac{\currentmathfraction}}%
\to \everydefinemathfraction
diff --git a/tex/context/base/mkxl/math-ini.mkxl b/tex/context/base/mkxl/math-ini.mkxl
index 8c0615eb6..6f2dfc1c2 100644
--- a/tex/context/base/mkxl/math-ini.mkxl
+++ b/tex/context/base/mkxl/math-ini.mkxl
@@ -1399,6 +1399,10 @@
% \im{1\unit{hour} 20 \unit{minute} 56 \unit{second}}
%
\inherited\setmathspacing \mathdimensioncode \mathdigitcode \allmathstyles \thickmuskip
+ \inherited\setmathspacing \mathdimensioncode \mathbinarycode \allsplitstyles \medmuskip
+ \inherited\setmathspacing \mathdimensioncode \mathbinarycode \allunsplitstyles \pettymuskip
+ \inherited\setmathspacing \mathdimensioncode \mathrelationcode \allsplitstyles \thickmuskip
+ \inherited\setmathspacing \mathdimensioncode \mathrelationcode \allunsplitstyles \pettymuskip
%
\inherited\setmathspacing \mathfakecode \mathallcode \allmathstyles \tinymuskip
\inherited\setmathspacing \mathallcode \mathfakecode \allmathstyles \tinymuskip
@@ -2814,50 +2818,83 @@
\installcorenamespace {mathautopunctuation}
-\bgroup
-
- % This can and will be replaced by classes:
-
- \catcode\commaasciicode \activecatcode
- \catcode\periodasciicode \activecatcode
- \catcode\semicolonasciicode\activecatcode
-
- \gdefcsname\??mathautopunctuation\v!no\endcsname
- {\let,\math_punctuation_nop_comma
- \let.\math_punctuation_nop_period
- \let;\math_punctuation_nop_semicolon}
-
- \gdefcsname\??mathautopunctuation\v!yes\endcsname
- {\let,\math_punctuation_yes_comma
- \let.\math_punctuation_yes_period
- \let;\math_punctuation_nop_semicolon}
-
- \gdefcsname\??mathautopunctuation\v!all\endcsname
- {\let,\math_punctuation_all_comma
- \let.\math_punctuation_all_period
- \let;\math_punctuation_nop_semicolon}
-
- \gdefcsname\??mathautopunctuation comma\endcsname
- {\let,\math_punctuation_yes_comma
- \let.\math_punctuation_yes_period
- \let;\math_punctuation_nop_semicolon}
-
- \gdefcsname\??mathautopunctuation\v!yes\string,semicolon\endcsname
- {\let,\math_punctuation_yes_comma
- \let.\math_punctuation_yes_period
- \let;\math_punctuation_yes_semicolon}
-
- \gdefcsname\??mathautopunctuation comma\string,semicolon\endcsname
- {\let,\math_punctuation_yes_comma
- \let.\math_punctuation_yes_period
- \let;\math_punctuation_yes_semicolon}
-
- \gdefcsname\??mathautopunctuation\v!all\string,semicolon\endcsname
- {\let,\math_punctuation_all_comma
- \let.\math_punctuation_all_period
- \let;\math_punctuation_all_semicolon}
+% \bgroup
+%
+% \catcode\commaasciicode \activecatcode
+% \catcode\periodasciicode \activecatcode
+% \catcode\semicolonasciicode\activecatcode
+%
+% \gdefcsname\??mathautopunctuation\v!no\endcsname
+% {\let,\math_punctuation_nop_comma
+% \let.\math_punctuation_nop_period
+% \let;\math_punctuation_nop_semicolon}
+%
+% \gdefcsname\??mathautopunctuation\v!yes\endcsname
+% {\let,\math_punctuation_yes_comma
+% \let.\math_punctuation_yes_period
+% \let;\math_punctuation_nop_semicolon}
+%
+% \gdefcsname\??mathautopunctuation\v!all\endcsname
+% {\let,\math_punctuation_all_comma
+% \let.\math_punctuation_all_period
+% \let;\math_punctuation_nop_semicolon}
+%
+% \gdefcsname\??mathautopunctuation comma\endcsname
+% {\let,\math_punctuation_yes_comma
+% \let.\math_punctuation_yes_period
+% \let;\math_punctuation_nop_semicolon}
+%
+% \gdefcsname\??mathautopunctuation\v!yes\string,semicolon\endcsname
+% {\let,\math_punctuation_yes_comma
+% \let.\math_punctuation_yes_period
+% \let;\math_punctuation_yes_semicolon}
+%
+% \gdefcsname\??mathautopunctuation comma\string,semicolon\endcsname
+% {\let,\math_punctuation_yes_comma
+% \let.\math_punctuation_yes_period
+% \let;\math_punctuation_yes_semicolon}
+%
+% \gdefcsname\??mathautopunctuation\v!all\string,semicolon\endcsname
+% {\let,\math_punctuation_all_comma
+% \let.\math_punctuation_all_period
+% \let;\math_punctuation_all_semicolon}
+%
+% \egroup
-\egroup
+\defcsname\??mathautopunctuation\v!no\endcsname
+ {\letcharcode\commaasciicode \math_punctuation_nop_comma
+ \letcharcode\periodasciicode \math_punctuation_nop_period
+ \letcharcode\semicolonasciicode\math_punctuation_nop_semicolon}
+
+\defcsname\??mathautopunctuation\v!yes\endcsname
+ {\letcharcode\commaasciicode \math_punctuation_yes_comma
+ \letcharcode\periodasciicode \math_punctuation_yes_period
+ \letcharcode\semicolonasciicode\math_punctuation_nop_semicolon}
+
+\defcsname\??mathautopunctuation\v!all\endcsname
+ {\letcharcode\commaasciicode \math_punctuation_all_comma
+ \letcharcode\periodasciicode \math_punctuation_all_period
+ \letcharcode\semicolonasciicode\math_punctuation_nop_semicolon}
+
+\defcsname\??mathautopunctuation comma\endcsname
+ {\letcharcode\commaasciicode \math_punctuation_yes_comma
+ \letcharcode\periodasciicode \math_punctuation_yes_period
+ \letcharcode\semicolonasciicode\math_punctuation_nop_semicolon}
+
+\defcsname\??mathautopunctuation\v!yes\string,semicolon\endcsname
+ {\letcharcode\commaasciicode \math_punctuation_yes_comma
+ \letcharcode\periodasciicode \math_punctuation_yes_period
+ \letcharcode\semicolonasciicode\math_punctuation_yes_semicolon}
+
+\defcsname\??mathautopunctuation comma\string,semicolon\endcsname
+ {\letcharcode\commaasciicode \math_punctuation_yes_comma
+ \letcharcode\periodasciicode \math_punctuation_yes_period
+ \letcharcode\semicolonasciicode\math_punctuation_yes_semicolon}
+
+\defcsname\??mathautopunctuation\v!all\string,semicolon\endcsname
+ {\letcharcode\commaasciicode \math_punctuation_all_comma
+ \letcharcode\periodasciicode \math_punctuation_all_period
+ \letcharcode\semicolonasciicode\math_punctuation_all_semicolon}
% \appendtoks
% \global\mathcode\commaasciicode \c_math_special
diff --git a/tex/context/base/mkxl/math-map.lmt b/tex/context/base/mkxl/math-map.lmt
index 98cc59c89..0bd75d748 100644
--- a/tex/context/base/mkxl/math-map.lmt
+++ b/tex/context/base/mkxl/math-map.lmt
@@ -7,31 +7,13 @@ if not modules then modules = { } end modules ['math-map'] = {
license = "see context related readme files"
}
--- todo: make sparse .. if self
-
---[[ldx--
-<p>Remapping mathematics alphabets.</p>
---ldx]]--
-
--- oldstyle: not really mathematics but happened to be part of
--- the mathematics fonts in cmr
---
--- persian: we will also provide mappers for other
--- scripts
-
--- todo: alphabets namespace
--- maybe: script/scriptscript dynamic,
-
--- superscripped primes get unscripted !
-
--- to be looked into once the fonts are ready (will become font
--- goodie):
---
--- (U+2202,U+1D715) : upright
--- (U+2202,U+1D715) : italic
--- (U+2202,U+1D715) : upright
---
--- plus add them to the regular vectors below so that they honor \it etc
+-- persian: we will also provide mappers for other scripts
+-- todo : alphabets namespace
+-- maybe : script/scriptscript dynamic,
+-- check : (U+2202,U+1D715) : upright
+-- (U+2202,U+1D715) : italic
+-- (U+2202,U+1D715) : upright
+-- add them to the regular vectors below so that they honor \it etc
local type, next = type, next
local merged, sortedhash = table.merged, table.sortedhash
diff --git a/tex/context/base/mkxl/math-noa.lmt b/tex/context/base/mkxl/math-noa.lmt
index 4a0cb5744..f64783ed9 100644
--- a/tex/context/base/mkxl/math-noa.lmt
+++ b/tex/context/base/mkxl/math-noa.lmt
@@ -890,39 +890,43 @@ do
local data = fontdata[font]
local characters = data.characters
local olddata = characters[oldchar]
--- local oldheight = olddata.height or 0
--- local olddepth = olddata.depth or 0
- local template = olddata.varianttemplate
- local newchar = mathematics.big(data,template or oldchar,size,method)
- local newdata = characters[newchar]
- local newheight = newdata.height or 0
- local newdepth = newdata.depth or 0
- if template then
--- local ratio = (newheight + newdepth) / (oldheight + olddepth)
--- setheight(pointer,ratio * oldheight)
--- setdepth(pointer,ratio * olddepth)
- setheight(pointer,newheight)
- setdepth(pointer,newdepth)
- if not olddata.extensible then
- -- check this on bonum and antykwa
- setoptions(pointer,0)
- end
- if trace_fences then
--- report_fences("replacing %C using method %a, size %a, template %C and ratio %.3f",newchar,method,size,template,ratio)
- report_fences("replacing %C using method %a, size %a and template %C",newchar,method,size,template)
- end
- else
- -- 1 scaled point is a signal, for now
- if ht == 1 then
+ if olddata then
+-- local oldheight = olddata.height or 0
+-- local olddepth = olddata.depth or 0
+ local template = olddata.varianttemplate
+ local newchar = mathematics.big(data,template or oldchar,size,method)
+ local newdata = characters[newchar]
+ local newheight = newdata.height or 0
+ local newdepth = newdata.depth or 0
+ if template then
+-- local ratio = (newheight + newdepth) / (oldheight + olddepth)
+-- setheight(pointer,ratio * oldheight)
+-- setdepth(pointer,ratio * olddepth)
setheight(pointer,newheight)
- end
- if dp == 1 then
setdepth(pointer,newdepth)
+ if not olddata.extensible then
+ -- check this on bonum and antykwa
+ setoptions(pointer,0)
+ end
+ if trace_fences then
+-- report_fences("replacing %C using method %a, size %a, template %C and ratio %.3f",newchar,method,size,template,ratio)
+ report_fences("replacing %C using method %a, size %a and template %C",newchar,method,size,template)
+ end
+ else
+ -- 1 scaled point is a signal, for now
+ if ht == 1 then
+ setheight(pointer,newheight)
+ end
+ if dp == 1 then
+ setdepth(pointer,newdepth)
+ end
+ setchar(delimiter,newchar)
+ if trace_fences then
+ report_fences("replacing %C by %C using method %a and size %a",oldchar,char,method,size)
+ end
end
- setchar(delimiter,newchar)
- if trace_fences then
- report_fences("replacing %C by %C using method %a and size %a",oldchar,char,method,size)
- end
+ elseif trace_fences then
+ report_fences("not replacing %C using method %a and size %a",oldchar,method,size)
end
end
end
diff --git a/tex/context/base/mkxl/math-rad.mklx b/tex/context/base/mkxl/math-rad.mklx
index 863bb2128..ee91243e0 100644
--- a/tex/context/base/mkxl/math-rad.mklx
+++ b/tex/context/base/mkxl/math-rad.mklx
@@ -378,6 +378,12 @@
\integerdef\delimitedrightanutityuc \privatecharactercode{delimited right annuity}
\integerdef\radicalbarextenderuc \privatecharactercode{radical bar extender}
+%D We now default to nice bars:
+
+\setupmathradical
+ [\c!rule=\v!symbol,
+ \c!top=\radicalbarextenderuc]
+
\definemathradical
[rannuity]
[\c!left=\zerocount,
diff --git a/tex/context/base/mkxl/math-spa.lmt b/tex/context/base/mkxl/math-spa.lmt
index d2927ff58..a575b1714 100644
--- a/tex/context/base/mkxl/math-spa.lmt
+++ b/tex/context/base/mkxl/math-spa.lmt
@@ -41,6 +41,7 @@ local getnormalizedline = node.direct.getnormalizedline
local getbox = nuts.getbox
local setoffsets = nuts.setoffsets
local addxoffset = nuts.addxoffset
+local setattrlist = nuts.setattrlist
local nextglue = nuts.traversers.glue
local nextlist = nuts.traversers.list
@@ -48,7 +49,9 @@ local nextboundary = nuts.traversers.boundary
local nextnode = nuts.traversers.node
local insertafter = nuts.insertafter
+local insertbefore = nuts.insertbefore
local newkern = nuts.pool.kern
+local newstrutrule = nuts.pool.strutrule
local texsetdimen = tex.setdimen
local texgetdimen = tex.getdimen
@@ -68,6 +71,10 @@ local d_strc_math_first_height = texisdimen("d_strc_math_first_height")
local d_strc_math_last_depth = texisdimen("d_strc_math_last_depth")
local d_strc_math_indent = texisdimen("d_strc_math_indent")
+local report = logs.reporter("mathalign")
+
+local trace = false trackers.register("mathalign",function(v) trace = v end )
+
local function moveon(s)
for n, id, subtype in nextnode, getnext(s) do
s = n
@@ -138,15 +145,20 @@ stages[1] = function(specification,stage)
p = getprev(p)
end
end
- -- we use a hangindent so we need to treat the first one
- local f = found[1]
- local delta = f[2] - max
- if delta ~= 0 then
- insertafter(head,moveon(head),newkern(-delta))
- end
- for i=2,#found do
+ for i=1,#found do
local f = found[i]
- insertafter(head,moveon(f[3]),newkern(-f[2])) -- check head
+ local w = f[2]
+ local d = i == 1 and (max-w) or -w
+ local k = newkern(d)
+ local r = newstrutrule(0,2*65536,2*65536)
+ local s = moveon(f[3])
+ if trace then
+ report("row %i, width %p, delta %p",i,w,d)
+ end
+ setattrlist(r,head)
+ setattrlist(k,head)
+ insertbefore(head,s,r)
+ insertafter(head,r,k)
end
end
texsetdimen("global",d_strc_math_indent,max)
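The stages[1] change above boils down to a per row shift: the first row is kerned up to the maximum width, later rows are kerned back by their own width. A tiny runnable sketch of just that arithmetic, detached from all node and attribute handling; the widths are made up sp values:

-- compute the kern applied in front of each aligned row
local function rowdeltas(widths)
    local max = 0
    for i = 1, #widths do
        if widths[i] > max then
            max = widths[i]
        end
    end
    local deltas = { }
    for i = 1, #widths do
        deltas[i] = i == 1 and (max - widths[i]) or -widths[i]
    end
    return deltas
end

for i, d in ipairs(rowdeltas { 120 * 65536, 80 * 65536, 200 * 65536 }) do
    print(i, d)
end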
diff --git a/tex/context/base/mkxl/math-stc.mklx b/tex/context/base/mkxl/math-stc.mklx
index fdad71978..5a701426a 100644
--- a/tex/context/base/mkxl/math-stc.mklx
+++ b/tex/context/base/mkxl/math-stc.mklx
@@ -1043,7 +1043,7 @@
\definemathstackers [\v!medium] [\v!mathematics] [\c!hoffset=1.5\mathemwidth]
\definemathstackers [\v!big] [\v!mathematics] [\c!hoffset=2\mathemwidth]
-\definemathextensible [\v!reverse] [xrel] ["002D]
+\definemathextensible [\v!reverse] [xrel] ["2212] % ["002D]
\definemathextensible [\v!reverse] [xequal] ["003D]
\definemathextensible [\v!reverse] [xleftarrow] ["2190] % ["27F5]
\definemathextensible [\v!reverse] [xrightarrow] ["2192] % ["27F6]
@@ -1066,7 +1066,7 @@
\definemathextensible [\v!reverse] [xrightleftharpoons] ["21CC]
\definemathextensible [\v!reverse] [xtriplerel] ["2261]
-\definemathextensible [\v!mathematics] [mrel] ["002D]
+\definemathextensible [\v!mathematics] [mrel] ["2212] % ["002D]
\definemathextensible [\v!mathematics] [mequal] ["003D]
\definemathextensible [\v!mathematics] [mleftarrow] ["2190] % ["27F5]
\definemathextensible [\v!mathematics] [mrightarrow] ["2192] % ["27F6]
@@ -1089,7 +1089,7 @@
\definemathextensible [\v!mathematics] [mrightleftharpoons] ["21CC]
\definemathextensible [\v!mathematics] [mtriplerel] ["2261]
-\definemathextensible [\v!text] [trel] ["002D]
+\definemathextensible [\v!text] [trel] ["2212] % ["002D]
\definemathextensible [\v!text] [tequal] ["003D]
\definemathextensible [\v!text] [tmapsto] ["21A6]
\definemathextensible [\v!text] [tleftarrow] ["2190] % ["27F5]
@@ -1168,9 +1168,9 @@
%D in the backend (okay, we still need to deal with some cut and paste issues but at
%D least we now know what we deal with.
-\definemathoverextensible [\v!vfenced] [overbar] ["203E]
-\definemathunderextensible [\v!vfenced] [underbar] ["203E] % ["0332]
-\definemathdoubleextensible [\v!vfenced] [doublebar] ["203E] ["203E] % ["0332]
+\definemathoverextensible [\v!vfenced] [overbar] ["203E] % todo: private
+\definemathunderextensible [\v!vfenced] [underbar] ["203E] % todo: private
+\definemathdoubleextensible [\v!vfenced] [doublebar] ["203E] ["203E] % todo: private
\definemathoverextensible [\v!vfenced] [overbrace] ["23DE]
\definemathunderextensible [\v!vfenced] [underbrace] ["23DF]
@@ -1186,13 +1186,13 @@
%D For mathml:
-\definemathdoubleextensible [\v!both] [overbarunderbar] ["203E] ["203E]
+\definemathdoubleextensible [\v!both] [overbarunderbar] ["203E] ["203E] % todo: private
\definemathdoubleextensible [\v!both] [overbraceunderbrace] ["23DE] ["23DF]
\definemathdoubleextensible [\v!both] [overparentunderparent] ["23DC] ["23DD]
\definemathdoubleextensible [\v!both] [overbracketunderbracket] ["23B4] ["23B5]
-\definemathovertextextensible [\v!bothtext] [overbartext] ["203E]
-\definemathundertextextensible [\v!bothtext] [underbartext] ["203E]
+\definemathovertextextensible [\v!bothtext] [overbartext] ["203E] % todo: private
+\definemathundertextextensible [\v!bothtext] [underbartext] ["203E] % todo: private
\definemathovertextextensible [\v!bothtext] [overbracetext] ["23DE]
\definemathundertextextensible [\v!bothtext] [underbracetext] ["23DF]
\definemathovertextextensible [\v!bothtext] [overparenttext] ["23DC]
@@ -1285,8 +1285,8 @@
\permanent\tolerant\protected\def\defineextensiblefiller[#1]#*[#2]%
{\frozen\instance\edefcsname#1\endcsname{\mathfiller{\number#2}}}
-%defineextensiblefiller [barfill] ["203E] % yet undefined
-\defineextensiblefiller [relfill] ["002D]
+%defineextensiblefiller [barfill] ["203E] % % todo: private
+\defineextensiblefiller [relfill] ["2212] % ["002D]
\defineextensiblefiller [equalfill] ["003D]
\defineextensiblefiller [leftarrowfill] ["2190]
\defineextensiblefiller [rightarrowfill] ["2192]
diff --git a/tex/context/base/mkxl/math-twk.mkxl b/tex/context/base/mkxl/math-twk.mkxl
index 6ffb36818..6e015d3de 100644
--- a/tex/context/base/mkxl/math-twk.mkxl
+++ b/tex/context/base/mkxl/math-twk.mkxl
@@ -95,5 +95,12 @@
\permanent\protected\def\minute{\iffontchar\font\textminute\textminute\else\mathminute\fi}
\permanent\protected\def\second{\iffontchar\font\textsecond\textsecond\else\mathsecond\fi}
+% \startsetups[math:rules]
+% \letmathfractionparameter\c!rule\v!symbol
+% \setmathfractionparameter\c!middle{"203E}%
+% \letmathradicalparameter \c!rule\v!symbol
+% \setmathradicalparameter \c!top{\radicalbarextenderuc}%
+% \setmathfenceparameter \c!alternative{1}%
+% \stopsetups
\protect
diff --git a/tex/context/base/mkxl/math-vfu.lmt b/tex/context/base/mkxl/math-vfu.lmt
index 0a2b440a1..1639517b5 100644
--- a/tex/context/base/mkxl/math-vfu.lmt
+++ b/tex/context/base/mkxl/math-vfu.lmt
@@ -83,27 +83,37 @@ nps("flat double rule left piece")
nps("flat double rule middle piece")
nps("flat double rule right piece")
+nps("minus rule left piece")
+nps("minus rule middle piece")
+nps("minus rule right piece")
+
do
- local function horibar(main,unicode,rule,left,right,normal)
+ -- this overlaps with math-act
+
+ local function horibar(main,unicode,rule,left,right,normal,force,m,l,r)
local characters = main.characters
- if not characters[unicode] then
+ local data = characters[unicode]
+ if force or not data then
local height = main.mathparameters.defaultrulethickness or 4*65536/10
- local f_rule = rule and formatters["M-HORIBAR-RULE-%H"](rule)
- local p_rule = rule and hasprivate(main,f_rule)
+ local f_rule = rule and formatters["M-HORIBAR-M-%H"](rule)
+ local p_rule = rule and hasprivate(main,f_rule)
+ local ndata = normal and characters[normal]
if rule and left and right and normal then
- local ldata = characters[left]
- local mdata = characters[rule]
- local rdata = characters[right]
- local ndata = characters[normal]
+ local ldata = characters[l or left]
+ local mdata = characters[m or rule]
+ local rdata = characters[r or right]
local lwidth = ldata.width or 0
local mwidth = mdata.width or 0
local rwidth = rdata.width or 0
local nwidth = ndata.width or 0
local down = (mdata.height / 2) - height
- --
- local f_left = right and formatters["M-HORIBAR-LEFT-%H"](right)
- local f_right = right and formatters["M-HORIBAR-RIGHT-%H"](right)
+if unicode == normal then
+ height = ndata.height
+ down = 0
+end --
+ local f_left = left and formatters["M-HORIBAR-L-%H"](left)
+ local f_right = right and formatters["M-HORIBAR-R-%H"](right)
local p_left = left and hasprivate(main,f_left)
local p_right = right and hasprivate(main,f_right)
--
@@ -116,7 +126,7 @@ do
push,
leftcommand[.025*mwidth],
downcommand[down],
- slotcommand[0][rule],
+ slotcommand[0][m or rule],
pop,
},
})
@@ -130,7 +140,7 @@ do
push,
leftcommand[.025*lwidth],
downcommand[down],
- slotcommand[0][left],
+ slotcommand[0][l or left],
pop,
},
})
@@ -144,48 +154,72 @@ do
push,
leftcommand[.025*rwidth],
downcommand[down],
- slotcommand[0][right],
+ slotcommand[0][r or right],
pop,
},
})
end
- characters[unicode] = {
- keepvirtual = true,
- partsorientation = "horizontal",
- height = height,
- width = nwidth,
--- keepvirtual = true,
- commands = {
+if unicode ~= normal then
+ data = {
+ unicode = unicode,
+ height = height,
+ width = nwidth,
+ commands = {
downcommand[down],
slotcommand[0][normal]
},
- parts = {
- { glyph = p_left, ["end"] = 0.4*lwidth },
- { glyph = p_rule, extender = 1, ["start"] = mwidth, ["end"] = mwidth },
- { glyph = p_right, ["start"] = 0.6*rwidth },
- }
+ }
+ characters[unicode] = data
+end
+ data.parts = {
+ { glyph = p_left, ["end"] = 0.4*lwidth },
+ { glyph = p_rule, extender = 1, ["start"] = mwidth, ["end"] = mwidth },
+ { glyph = p_right, ["start"] = 0.6*rwidth },
}
else
- local width = main.parameters.quad/4 or 4*65536
+ local width = main.parameters.quad/2 or 4*65536 -- 3
if not characters[p_rule] then
- p_rule = addprivate(main,f_rule,{
- height = height,
- width = width,
--- keepvirtual = true,
- commands = { push, { "rule", height, width }, pop },
- })
+ if unicode == normal then
+ p_rule = addprivate(main,f_rule,{
+ height = ndata.height,
+ width = width,
+ commands = {
+ push,
+ upcommand[(ndata.height - height)/2],
+ { "rule", height, width },
+ pop
+ },
+ })
+ else
+ p_rule = addprivate(main,f_rule,{
+ height = height,
+ width = width,
+ commands = {
+ push,
+ { "rule", height, width },
+ pop
+ },
+ })
+ end
end
- characters[unicode] = {
- height = height,
- width = nwidth,
--- keepvirtual = true,
- partsorientation = "horizontal",
- parts = {
- { glyph = p_rule },
- { glyph = p_rule, extender = 1, ["start"] = width/2, ["end"] = width/2 },
+if unicode ~= normal then
+ data = {
+ unicode = unicode,
+ height = height,
+ width = width,
+ commands = {
+ slotcommand[0][p_rule]
}
}
+ characters[unicode] = data
+end
+ data.parts = {
+ { glyph = p_rule, ["start"] = width/2, ["end"] = width/2 },
+ { glyph = p_rule, extender = 1, ["start"] = width/2, ["end"] = width/2 },
+ }
end
+ data.keepvirtual = true -- i need to figure this out
+ data.partsorientation = "horizontal"
end
end
@@ -205,8 +239,8 @@ do
local nwidth = ndata.width or 0
local down = (mdata.height / 2) - height
--
- local f_rule = rule and formatters["M-ROOTBAR-RULE-%H"](rule)
- local f_right = right and formatters["M-ROOTBAR-RIGHT-%H"](right)
+ local f_rule = rule and formatters["M-ROOTBAR-M-%H"](rule)
+ local f_right = right and formatters["M-ROOTBAR-R-%H"](right)
local p_rule = rule and hasprivate(main,f_rule)
local p_right = right and hasprivate(main,f_right)
--
diff --git a/tex/context/base/mkxl/meta-imp-newmath.mkxl b/tex/context/base/mkxl/meta-imp-newmath.mkxl
new file mode 100644
index 000000000..af49f82ac
--- /dev/null
+++ b/tex/context/base/mkxl/meta-imp-newmath.mkxl
@@ -0,0 +1,76 @@
+%D \module
+%D [ file=meta-imp-newmath,
+%D version=2023.04.01,
+%D title=\METAPOST\ Graphics,
+%D subtitle=New Math Symbols,
+%D author=Mikael Sundqvist & Hans Hagen,
+%D date=\currentdate,
+%D copyright={PRAGMA ADE \& \CONTEXT\ Development Team}]
+%C
+%C This module is part of the \CONTEXT\ macro||package and is
+%C therefore copyrighted by \PRAGMA. See mreadme.pdf for
+%C details.
+
+%D In this file we will collect solutions for special math symbols. When such symbols
+%D are used in publications the CMS will contact the Unicode Consortium to suggest that
+%D they get a slot, because then we have proof of usage. We also consider old obsolete
+%D symbols because they can be treated like some ancient out|-|of|-|use script and fit
+%D into the \type {ancient math script}.
+
+\startMPextensions
+ vardef math_ornament_hat(expr w,h,d,o,l) =
+ image ( path p ; p :=
+ (w/2,h + 10l) --
+ (o + w,h + o) --
+ (w/2,h + 7l) --
+ (-o,h + o) --
+ cycle ;
+ fill p randomized o ;
+ setbounds currentpicture to (-o,0) -- (w+o,0) -- (w+o,h+2o) -- (-o,h+2o) -- cycle ;
+ )
+ enddef ;
+\stopMPextensions
+
+\startuniqueMPgraphic{math:ornament:hat}
+ draw
+ math_ornament_hat(
+ OverlayWidth,
+ OverlayHeight,
+ OverlayDepth,
+ OverlayOffset,
+ OverlayLineWidth
+ )
+ withpen
+ pencircle
+ xscaled (2OverlayLineWidth)
+ yscaled (3OverlayLineWidth/4)
+ rotated 30
+ withcolor
+ OverlayLineColor ;
+% draw boundingbox currentpicture;
+\stopuniqueMPgraphic
+
+\definemathornament [widerandomhat] [mp=math:ornament:hat]
+
+\continueifinputfile{meta-imp-newmath.mkxl}
+
+\starttext
+
+This symbol was designed for one of Mikael's students working on a thesis on
+probability. This student needed to typeset the characteristic function of a
+random variable \im {X} with density function \im {f_{X}}, and it was insisted
+that a notation other than the (wide) hat be used, as the hat was already taken
+for something else. For this reason the \tex {widerandomhat} was introduced,
+
+\startformula
+ E[\ee^{\ii tX}] = \widerandomhat{f_{X}}(t)\mtp{,}
+ E[\ee^{\ii t(X_1+X_2)}] = \widerandomhat{f_{X_1} \ast f_{X_2}}(t)\mtp{.}
+\stopformula
+
+Naturally, it is automatically scaled, just like the ordinary wide hat:
+
+\startformula
+ \widehat{a+b+c+d+e+f} \neq \widerandomhat{a+b+c+d+e+f}
+\stopformula
+
+\stoptext
diff --git a/tex/context/base/mkxl/mlib-run.lmt b/tex/context/base/mkxl/mlib-run.lmt
index 0e955818e..de5ceb1db 100644
--- a/tex/context/base/mkxl/mlib-run.lmt
+++ b/tex/context/base/mkxl/mlib-run.lmt
@@ -6,28 +6,16 @@ if not modules then modules = { } end modules ['mlib-run'] = {
license = "see context related readme files",
}
--- cmyk -> done, native
--- spot -> done, but needs reworking (simpler)
--- multitone ->
--- shade -> partly done, todo: cm
--- figure -> done
--- hyperlink -> low priority, easy
-
--- new * run
--- or
--- new * execute^1 * finish
-
--- a*[b,c] == b + a * (c-b)
-
---[[ldx--
-<p>The directional helpers and pen analysis are more or less translated from the
-<l n='c'/> code. It really helps that Taco know that source so well. Taco and I spent
-quite some time on speeding up the <l n='lua'/> and <l n='c'/> code. There is not
-much to gain, especially if one keeps in mind that when integrated in <l n='tex'/>
-only a part of the time is spent in <l n='metapost'/>. Of course an integrated
-approach is way faster than an external <l n='metapost'/> and processing time
-nears zero.</p>
---ldx]]--
+-- The directional helpers and pen analysis are more or less translated from the C
+-- code. In LuaTeX we spent quite some time on speeding up the Lua interface as well
+-- as the C code. There is not much to gain, especially if one keeps in mind that
+-- when integrated in TeX only a part of the time is spent in MetaPost. Of course an
+-- integrated approach is way faster than an external MetaPost and processing time
+-- nears zero.
+--
+-- In LuaMetaTeX the MetaPost core has been cleaned up a bit and as a result
+-- processing in double mode is now faster than in scaled mode. There are also extra
+-- features and interfaces, so the MkIV and MkXL (LMTX) implementation differ!
local type, tostring, tonumber, next = type, tostring, tonumber, next
local find, striplines = string.find, utilities.strings.striplines
diff --git a/tex/context/base/mkxl/node-ini.lmt b/tex/context/base/mkxl/node-ini.lmt
index f1b9bb452..38f55c160 100644
--- a/tex/context/base/mkxl/node-ini.lmt
+++ b/tex/context/base/mkxl/node-ini.lmt
@@ -6,19 +6,13 @@ if not modules then modules = { } end modules ['node-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Most of the code that had accumulated here is now separated in modules.</p>
---ldx]]--
-
local next, type, tostring = next, type, tostring
local gsub = string.gsub
local concat, remove = table.concat, table.remove
local sortedhash, sortedkeys, swapped = table.sortedhash, table.sortedkeys, table.swapped
---[[ldx--
-<p>Access to nodes is what gives <l n='luatex'/> its power. Here we implement a
-few helper functions. These functions are rather optimized.</p>
---ldx]]--
+-- Access to nodes is what gives LuaTeX its power. Here we implement a few helper
+-- functions. These functions are rather optimized.
nodes = nodes or { }
local nodes = nodes
diff --git a/tex/context/base/mkxl/node-res.lmt b/tex/context/base/mkxl/node-res.lmt
index 6fed08b63..2d2c31965 100644
--- a/tex/context/base/mkxl/node-res.lmt
+++ b/tex/context/base/mkxl/node-res.lmt
@@ -10,11 +10,6 @@ local type, next, rawset = type, next, rawset
local gmatch, format = string.gmatch, string.format
local round = math.round
---[[ldx--
-<p>The next function is not that much needed but in <l n='context'/> we use
-for debugging <l n='luatex'/> node management.</p>
---ldx]]--
-
local nodes, node = nodes, node
local report_nodes = logs.reporter("nodes","housekeeping")
diff --git a/tex/context/base/mkxl/node-tra.lmt b/tex/context/base/mkxl/node-tra.lmt
index 1ef1bb8ad..fe212f787 100644
--- a/tex/context/base/mkxl/node-tra.lmt
+++ b/tex/context/base/mkxl/node-tra.lmt
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['node-tra'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>This is rather experimental. We need more control and some of this
-might become a runtime module instead. This module will be cleaned up!</p>
---ldx]]--
+-- Some of the code here might become a runtime module instead. This old module will
+-- be cleaned up anyway!
local next = next
local utfchar = utf.char
diff --git a/tex/context/base/mkxl/pack-obj.lmt b/tex/context/base/mkxl/pack-obj.lmt
index 1e22515b9..a18f5e7e7 100644
--- a/tex/context/base/mkxl/pack-obj.lmt
+++ b/tex/context/base/mkxl/pack-obj.lmt
@@ -6,10 +6,8 @@ if not modules then modules = { } end modules ['pack-obj'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>We save object references in the main utility table. jobobjects are
-reusable components.</p>
---ldx]]--
+-- We save object references in the main utility table; job objects are reusable
+-- components.
local context = context
local codeinjections = backends.codeinjections
diff --git a/tex/context/base/mkxl/pack-rul.lmt b/tex/context/base/mkxl/pack-rul.lmt
index 12d131c88..62a904901 100644
--- a/tex/context/base/mkxl/pack-rul.lmt
+++ b/tex/context/base/mkxl/pack-rul.lmt
@@ -7,10 +7,6 @@ if not modules then modules = { } end modules ['pack-rul'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>An explanation is given in the history document <t>mk</t>.</p>
---ldx]]--
-
-- we need to be careful with display math as it uses shifts
-- \framed[align={lohi,middle}]{$x$}
diff --git a/tex/context/base/mkxl/publ-ini.mkxl b/tex/context/base/mkxl/publ-ini.mkxl
index b75a933ad..802768a8c 100644
--- a/tex/context/base/mkxl/publ-ini.mkxl
+++ b/tex/context/base/mkxl/publ-ini.mkxl
@@ -342,7 +342,7 @@
\newtoks\t_btx_cmd
\newbox \b_btx_cmd
-\t_btx_cmd{\global\setbox\b_btx_cmd\hpack{\clf_btxcmdstring}}
+\t_btx_cmd{\global\setbox\b_btx_cmd\hbox{\clf_btxcmdstring}} % no \hpack, otherwise prerolling doesn't work
\aliased\let\btxcmd\btxcommand
diff --git a/tex/context/base/mkxl/regi-ini.lmt b/tex/context/base/mkxl/regi-ini.lmt
index c0cd4f1c8..efacd5128 100644
--- a/tex/context/base/mkxl/regi-ini.lmt
+++ b/tex/context/base/mkxl/regi-ini.lmt
@@ -6,11 +6,8 @@ if not modules then modules = { } end modules ['regi-ini'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Regimes take care of converting the input characters into
-<l n='utf'/> sequences. The conversion tables are loaded at
-runtime.</p>
---ldx]]--
+-- Regimes take care of converting the input characters into UTF sequences. The
+-- conversion tables are loaded at runtime.
local tostring = tostring
local utfchar = utf.char
diff --git a/tex/context/base/mkxl/scrn-wid.lmt b/tex/context/base/mkxl/scrn-wid.lmt
index caa09adbd..f2112aa11 100644
--- a/tex/context/base/mkxl/scrn-wid.lmt
+++ b/tex/context/base/mkxl/scrn-wid.lmt
@@ -42,8 +42,6 @@ interactions.linkedlists = linkedlists
local texsetbox = tex.setbox
-local jobpasses = job.passes
-
local texgetcount = tex.getcount
local codeinjections = backends.codeinjections
@@ -277,7 +275,24 @@ implement {
}
}
--- Linkedlists (only a context interface)
+-- Linkedlists (only a context interface) .. untested, just adapted from old code.
+
+local collected = allocate()
+local tobesaved = allocate()
+
+local linkedlists = {
+ collected = collected,
+ tobesaved = tobesaved,
+}
+
+job.linkedlists = linkedlists
+
+local function initializer()
+ collected = linkedlists.collected
+ tobesaved = linkedlists.tobesaved
+end
+
+job.register("job.linkedlists.collected", tobesaved, initializer, nil)
implement {
name = "definelinkedlist",
@@ -291,10 +306,12 @@ implement {
name = "enhancelinkedlist",
arguments = { "string", "integer" },
actions = function(tag,n)
- local ll = jobpasses.gettobesaved(tag)
- if ll then
- ll[n] = texgetcount("realpageno")
+ local linkedlist = tobesaved[tag]
+ if not linkedlist then
+ linkedlist = { }
+ tobesaved[tag] = linkedlist
end
+ linkedlist[n] = texgetcount("realpageno")
end
}
@@ -302,15 +319,18 @@ implement {
name = "addlinklistelement",
arguments = "string",
actions = function(tag)
- local tobesaved = jobpasses.gettobesaved(tag)
- local collected = jobpasses.getcollected(tag) or { }
+ local tobesaved = tobesaved[tag] or { }
+ local collected = collected[tag] or { }
local currentlink = #tobesaved + 1
local noflinks = #collected
- tobesaved[currentlink] = 0
+ --
+ tobesaved[currentlink] = 0 -- needs checking
+ --
local f = collected[1] or 0
local l = collected[noflinks] or 0
local p = collected[currentlink-1] or f
local n = collected[currentlink+1] or l
+ --
context.setlinkedlistproperties(currentlink,noflinks,f,p,n,l)
-- context.ctxlatelua(function() commands.enhancelinkedlist(tag,currentlink) end)
end
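The linkedlists rework above follows the usual collected/tobesaved idiom. Below is a plain Lua stand-in; the register function is a mock with a simplified signature, not the real job.register. It shows why the initializer has to rebind the local upvalues once the loader has dropped the previous run's data into the module table:

local registry = { }

local function register(name, module, initializer)
    -- mock: the real registry also saves module.tobesaved at the end of a run
    registry[name] = { module = module, initializer = initializer }
end

local collected = { } -- data from the previous run
local tobesaved = { } -- data gathered in this run

local linkedlists = { collected = collected, tobesaved = tobesaved }

local function initializer()
    collected = linkedlists.collected -- rebind: the loader replaced this table
    tobesaved = linkedlists.tobesaved
end

register("job.linkedlists.collected", linkedlists, initializer)

-- simulate loading the previous run's data and running the initializer
registry["job.linkedlists.collected"].module.collected = { mylist = { 11, 12, 13 } }
registry["job.linkedlists.collected"].initializer()

print(#collected.mylist) -- 3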
diff --git a/tex/context/base/mkxl/spac-pag.mkxl b/tex/context/base/mkxl/spac-pag.mkxl
index d61ddcbe6..2e3e1bc00 100644
--- a/tex/context/base/mkxl/spac-pag.mkxl
+++ b/tex/context/base/mkxl/spac-pag.mkxl
@@ -16,7 +16,6 @@
\unprotect
\newif \ifpagestatemismatch
-\newinteger \realpagestateno
\newconstant\frozenpagestate
\permanent\protected\def\dotrackpagestate#1#2%
diff --git a/tex/context/base/mkxl/strc-itm.lmt b/tex/context/base/mkxl/strc-itm.lmt
index f9153c98e..4ee084ca3 100644
--- a/tex/context/base/mkxl/strc-itm.lmt
+++ b/tex/context/base/mkxl/strc-itm.lmt
@@ -6,20 +6,28 @@ if not modules then modules = { } end modules ['strc-itm'] = {
license = "see context related readme files"
}
-local structures = structures
-local itemgroups = structures.itemgroups
-local jobpasses = job.passes
-
+local allocate = utilities.storage.allocate
local implement = interfaces.implement
-local setvariable = jobpasses.save
-local getvariable = jobpasses.getfield
-
local texsetcount = tex.setcount
local texsetdimen = tex.setdimen
-local f_stamp = string.formatters["itemgroup:%s:%s"]
-local counts = table.setmetatableindex("number")
+local itemgroups = structures.itemgroups
+
+local collected = allocate()
+local tobesaved = allocate()
+
+itemgroups.collected = collected
+itemgroups.tobesaved = tobesaved
+
+local function initializer()
+ collected = itemgroups.collected
+ tobesaved = itemgroups.tobesaved
+end
+
+if job then
+ job.register("structures.itemgroups.collected", tobesaved, initializer)
+end
local c_strc_itemgroups_max_items = tex.iscount("c_strc_itemgroups_max_items")
local d_strc_itemgroups_max_width = tex.isdimen("d_strc_itemgroups_max_width")
@@ -28,6 +36,8 @@ local d_strc_itemgroups_max_width = tex.isdimen("d_strc_itemgroups_max_width")
-- an itemgroup which in turn makes for less passes when one itemgroup
-- entry is added or removed.
+local counts = table.setmetatableindex("number")
+
local trialtypesetting = context.trialtypesetting
local function analyzeitemgroup(name,level)
@@ -36,16 +46,37 @@ local function analyzeitemgroup(name,level)
n = n + 1
counts[name] = n
end
- local stamp = f_stamp(name,n)
- texsetcount(c_strc_itemgroups_max_items,getvariable(stamp,level,1,0))
- texsetdimen(d_strc_itemgroups_max_width,getvariable(stamp,level,2,0))
+ local items = 0
+ local width = 0
+ local itemgroup = collected[name]
+ if itemgroup then
+ local entry = itemgroup[n]
+ if entry then
+ local l = entry[level]
+ if l then
+ items = l[1] or 0
+ width = l[2] or 0
+ end
+ end
+ end
+ texsetcount(c_strc_itemgroups_max_items,items)
+ texsetdimen(d_strc_itemgroups_max_width,width)
end
local function registeritemgroup(name,level,nofitems,maxwidth)
local n = counts[name]
if not trialtypesetting() then
- -- no trialtypsetting
- setvariable(f_stamp(name,n), { nofitems, maxwidth }, level)
+ local itemgroup = tobesaved[name]
+ if not itemgroup then
+ itemgroup = { }
+ tobesaved[name] = itemgroup
+ end
+ local entry = itemgroup[n]
+ if not entry then
+ entry = { }
+ itemgroup[n] = entry
+ end
+ entry[level] = { nofitems, maxwidth }
elseif level == 1 then
counts[name] = n - 1
end
diff --git a/tex/context/base/mkxl/strc-lst.lmt b/tex/context/base/mkxl/strc-lst.lmt
index b60b75208..d54129f29 100644
--- a/tex/context/base/mkxl/strc-lst.lmt
+++ b/tex/context/base/mkxl/strc-lst.lmt
@@ -1571,7 +1571,7 @@ end
function lists.integrate(utilitydata)
local filename = utilitydata.comment.file
- if filename then
+ if filename and filename ~= environment.jobname then
local structures = utilitydata.structures
if structures then
local lists = structures.lists.collected or { }
diff --git a/tex/context/base/mkxl/strc-ref.lmt b/tex/context/base/mkxl/strc-ref.lmt
index 26b189475..945364b18 100644
--- a/tex/context/base/mkxl/strc-ref.lmt
+++ b/tex/context/base/mkxl/strc-ref.lmt
@@ -561,7 +561,7 @@ end
function references.integrate(utilitydata)
local filename = utilitydata.comment.file
- if filename then
+ if filename and filename ~= environment.jobname then
-- lists are already internalized
local structures = utilitydata.structures
if structures then
diff --git a/tex/context/base/mkxl/strc-reg.lmt b/tex/context/base/mkxl/strc-reg.lmt
index b66b22921..27d7e2586 100644
--- a/tex/context/base/mkxl/strc-reg.lmt
+++ b/tex/context/base/mkxl/strc-reg.lmt
@@ -1045,6 +1045,7 @@ function registers.use(tag,filename,class,prefix)
filename = filename,
data = job.loadother(filename),
prefix = prefix or class,
+ label = prefix or class,
}
end
@@ -1054,13 +1055,43 @@ implement {
actions = registers.use,
}
+-- function registers.use(tag,specification)
+-- local class = specification.class
+-- local filename = specification.filename
+-- local prefix = specification.prefix or class
+-- local label = specification.label or prefix
+-- if class and filename then
+-- used[tag] = {
+-- class = class,
+-- filename = filename,
+-- data = job.loadother(filename),
+-- prefix = prefix,
+-- label = label,
+-- }
+-- end
+-- end
+
+-- implement {
+-- name = "useregister",
+-- actions = registers.use,
+-- arguments = {
+-- "string",
+-- {
+-- { "filename" },
+-- { "class" },
+-- { "prefix" },
+-- { "label" },
+-- },
+-- }
+-- }
+
implement {
- name = "registerprefix",
+ name = "registerlabel",
arguments = "string",
actions = function(tag)
local u = used[tag]
if u then
- context(u.prefix)
+ context(u.label)
end
end
}
@@ -1075,7 +1106,13 @@ local function analyzeregister(class,options)
local list = utilities.parsers.settings_to_array(class)
local entries = { }
local nofentries = 0
local metadata = false
+ local multiple = false
+ for i=1,#list do
+ if used[list[i]] then
+ multiple = true
+ break
+ end
+ end
for i=1,#list do
local l = list[i]
local u = used[l]
@@ -1089,9 +1126,14 @@ local function analyzeregister(class,options)
end
if d then
local e = d.entries
- local u = u and { u.prefix } or nil
+-- local u = u and { u.prefix } or nil
+local u = multiple and { string.formatters["%03i"](i) } or nil -- maybe prefix but then how about main
for i=1,#e do
local ei = e[i]
+if multiple and ei.metadata.kind == "see" then
+ -- skip see, can become an option
+else
+
nofentries = nofentries + 1
entries[nofentries] = ei
if u then
@@ -1099,6 +1141,7 @@ local function analyzeregister(class,options)
eil[#eil+1] = u
ei.external = l -- this is the (current) abstract tag, used for prefix
end
+end
end
if not metadata then
metadata = d.metadata
@@ -1107,9 +1150,11 @@ local function analyzeregister(class,options)
end
data = {
metadata = metadata or { },
+ multiple = multiple,
entries = entries,
}
collected[class] = data
+ options.multiple = multiple
end
if data and data.entries then
options = options or { }
@@ -1322,7 +1367,9 @@ function registers.flush(data,options,prefixspec,pagespec)
-- report_registers("invalid see entry in register %a, reference %a",entry.metadata.name,list[1][1])
end
end
- if entry.external then
+-- move up ?
+-- if entry.external then
+ if options.multiple or entry.external then
local list = entry.list
list[#list] = nil
end
@@ -1741,7 +1788,7 @@ interfaces.implement {
function registers.integrate(utilitydata)
local filename = utilitydata.comment.file
- if filename then
+ if filename and filename ~= environment.jobname then
local structures = utilitydata.structures
if structures then
local registers = structures.registers.collected or { }
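The analyzeregister changes above merge entries from several used registers. Here is a standalone sketch of that merge step: when more than one source is involved each entry gets a numeric source tag and see entries are dropped. The function name, the sourcetag field and the sample data are made up; the real code appends the tag to the entry's list and checks metadata.kind instead:

local function mergeentries(sources)
    local multiple = #sources > 1
    local entries  = { }
    for i = 1, #sources do
        -- only tag entries when registers really get merged
        local tag = multiple and string.format("%03d", i) or nil
        for _, entry in ipairs(sources[i]) do
            if not (multiple and entry.kind == "see") then
                if tag then
                    entry.sourcetag = tag
                end
                entries[#entries+1] = entry
            end
        end
    end
    return entries, multiple
end

local merged = mergeentries {
    { { word = "alpha" }, { word = "beta", kind = "see" } },
    { { word = "gamma" } },
}

for i = 1, #merged do
    print(merged[i].word, merged[i].sourcetag)
end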
diff --git a/tex/context/base/mkxl/strc-reg.mkxl b/tex/context/base/mkxl/strc-reg.mkxl
index afe3d27a0..464ac4eb1 100644
--- a/tex/context/base/mkxl/strc-reg.mkxl
+++ b/tex/context/base/mkxl/strc-reg.mkxl
@@ -792,6 +792,25 @@
{\doifelsefiledefined{#1}{}{\usefile[#1][#2]}%
\clf_useregister{#1}{#2}{#3}{#4}}
+% \permanent\protected\tolerant\def\useregister[#1]#*[#2]#*[#3]#*[#4]% tag file class prefix
+% {\begingroup
+% \doifelsefiledefined{#1}{}{\usefile[#1][#2]}%
+% \ifhastok={#4}%
+% \getdummyparameters[\c!prefix=#1,\c!label=#1,#4]%
+% \else
+% \getdummyparameters[\c!prefix=#4,\c!label=#4]%
+% \fi
+% \clf_useregister
+% {#1}
+% {
+% filename {#2}
+% class {#3}
+% prefix {\dummyparameter\c!prefix}
+% label {\dummyparameter\c!label}
+% }
+% \relax
+% \endgroup}
+
%D Character rendering (sections):
\installcorenamespace{registerindicator}
@@ -1123,15 +1142,16 @@
% todo: adapt \strc_references_goto_internal to take an extra argument, the ref
\permanent\protected\def\withregisterpagecommand#1#2#3#4% #1:processor #2:internal #3:realpage #4:page
- {\ifcase#3\relax
- {\tt [entry\space not\space flushed]}%
+ {\begingroup
+ \ifcase#3\relax
+ \tt [entry\space not\space flushed]%
\else
\def\currentregisterpageindex{#2}%
\def\currentregisterrealpage{#3}%
\ifchknum\currentregisterpageindex\or
\lettonothing\currentregisterpageprefix
\else
- \def\currentregisterpageprefix{\clf_registerprefix{\currentregisterpageindex}}%
+ \def\currentregisterpageprefix{\clf_registerlabel{\currentregisterpageindex}}%
\fi
\iflocation
\ifempty\currentregisterpageprefix
@@ -1144,7 +1164,8 @@
\setlocationattributes
\fi
\applyprocessor{#1}{\currentregisterpageprefix\registerparameter\c!pagecommand{#4}}%
- \fi}
+ \fi
+ \endgroup}
\lettonothing\m_current_register
@@ -1281,7 +1302,7 @@
\ifchknum\currentregisterseeindex\or
\lettonothing\currentregisterpageprefix
\else
- \def\currentregisterpageprefix{\clf_registerprefix{\currentregisterseeindex}}%
+ \def\currentregisterpageprefix{\clf_registerlabel{\currentregisterseeindex}}%
\fi
\iflocation
\ifempty\currentregisterpageprefix
diff --git a/tex/context/base/mkxl/tabl-ntb.mkxl b/tex/context/base/mkxl/tabl-ntb.mkxl
index 6e95512cd..b82dcb585 100644
--- a/tex/context/base/mkxl/tabl-ntb.mkxl
+++ b/tex/context/base/mkxl/tabl-ntb.mkxl
@@ -1634,13 +1634,22 @@
% enabled per 2018-02-22
-\def\tabl_ntb_table_get_max_width_step
- {\advanceby\scratchdimen\tabl_ntb_get_wid\fastloopindex
- \advanceby\scratchdimen\tabl_ntb_get_dis\fastloopindex}
+% \def\tabl_ntb_table_get_max_width_step
+% {\advanceby\scratchdimen\tabl_ntb_get_wid\fastloopindex
+% \advanceby\scratchdimen\tabl_ntb_get_dis\fastloopindex}
+%
+% \def\tabl_ntb_table_get_max_width
+% {\scratchdimen\zeropoint
+% \dofastloopcs\c_tabl_ntb_maximum_col\tabl_ntb_table_get_max_width_step
+% \ifdim\scratchdimen<\wd\scratchbox\relax
+% \scratchdimen\wd\scratchbox\relax
+% \fi}
\def\tabl_ntb_table_get_max_width
{\scratchdimen\zeropoint
- \dofastloopcs\c_tabl_ntb_maximum_col\tabl_ntb_table_get_max_width_step
+ \localcontrolledloop\zerocount\c_tabl_ntb_maximum_col\plusone
+ {\advanceby\scratchdimen\tabl_ntb_get_wid\currentloopiterator
+ \advanceby\scratchdimen\tabl_ntb_get_dis\currentloopiterator}%
\ifdim\scratchdimen<\wd\scratchbox\relax
\scratchdimen\wd\scratchbox\relax
\fi}
diff --git a/tex/context/base/mkxl/tabl-tbl.mkxl b/tex/context/base/mkxl/tabl-tbl.mkxl
index d353074d5..6b5e38f3a 100644
--- a/tex/context/base/mkxl/tabl-tbl.mkxl
+++ b/tex/context/base/mkxl/tabl-tbl.mkxl
@@ -1608,13 +1608,19 @@
\tabl_tabulate_vrule_reset_indeed
\fi}
+% \def\tabl_tabulate_vrule_reset_indeed
+% {\gletcsname\??tabulatevrule0\endcsname\undefined
+% \dofastloopcs\c_tabl_tabulate_max_vrulecolumn\tabl_tabulate_vrule_reset_step
+% \global\c_tabl_tabulate_max_vrulecolumn\zerocount}
+%
+% \def\tabl_tabulate_vrule_reset_step % undefined or relax
+% {\gletcsname\??tabulatevrule\the\fastloopindex\endcsname\undefined}
+
\def\tabl_tabulate_vrule_reset_indeed
- {\dofastloopcs\c_tabl_tabulate_max_vrulecolumn\tabl_tabulate_vrule_reset_step
+ {\localcontrolledloop\zerocount\c_tabl_tabulate_max_vrulecolumn\plusone % start at 0
+ {\gletcsname\??tabulatevrule\the\currentloopiterator\endcsname\undefined}%
\global\c_tabl_tabulate_max_vrulecolumn\zerocount}
-\def\tabl_tabulate_vrule_reset_step % undefined or relax
- {\gletcsname\??tabulatevrule\the\fastloopindex\endcsname\undefined}
-
\appendtoks
\tabl_tabulate_vrule_reset
\to \t_tabl_tabulate_every_after_row
@@ -1798,11 +1804,16 @@
\tabl_tabulate_color_reset_indeed
\fi}
-\def\tabl_tabulate_color_reset_indeed
- {\dofastloopcs\c_tabl_tabulate_max_colorcolumn\tabl_tabulate_color_reset_step}
+% \def\tabl_tabulate_color_reset_indeed
+% {\dofastloopcs\c_tabl_tabulate_max_colorcolumn\tabl_tabulate_color_reset_step}
+%
+% \def\tabl_tabulate_color_reset_step % undefined or empty?
+% {\gletcsname\??tabulatecolor\number\fastloopindex\endcsname\undefined}
-\def\tabl_tabulate_color_reset_step % undefined or empty?
- {\gletcsname\??tabulatecolor\number\fastloopindex\endcsname\undefined}
+\def\tabl_tabulate_color_reset_indeed
+ {\localcontrolledloop\zerocount\c_tabl_tabulate_max_colorcolumn\plusone % start at 1
+ {\gletcsname\??tabulatecolor\the\currentloopiterator\endcsname\undefined}%
+ \global\c_tabl_tabulate_max_colorcolumn\zerocount} % why not like vrule?
\appendtoks
\tabl_tabulate_color_reset
@@ -2201,34 +2212,38 @@
% {\glettonothing\tabl_tabulate_flush_collected_indeed
% \global\c_tabl_tabulate_column\zerocount
% \tabl_tabulate_pbreak_check
+% \global\setfalse\c_tabl_tabulate_split_done % new 27/12/2022
% \dofastloopcs\c_tabl_tabulate_columns\tabl_tabulate_flush_second_step
+% \ifconditional\c_tabl_tabulate_split_done\else
+% \glet\tabl_tabulate_tm\s!reset % new 27/12/2022
+% \fi
% \global\settrue\c_tabl_tabulate_firstflushed}
-%
+
% \protected\def\tabl_tabulate_flush_second_step
-% {\ifvoid\b_tabl_tabulate_current\fastloopindex\else
+% {\ifvoid\b_tabl_tabulate_current\fastloopindex
+% \else
% \gdef\tabl_tabulate_flush_collected_indeed{\the\t_tabl_tabulate_dummy}%
+% \ifvoid\b_tabl_tabulate_current\fastloopindex \else
+% \global\settrue\c_tabl_tabulate_split_done % new 27/12/2022
+% \fi
% \fi}
-%
-% \def\tabl_tabulate_flush_second
-% {\noalign{\tabl_tabulate_flush_second_indeed}%
-% \tabl_tabulate_flush_collected_indeed}
\protected\def\tabl_tabulate_flush_second_indeed
{\glettonothing\tabl_tabulate_flush_collected_indeed
\global\c_tabl_tabulate_column\zerocount
\tabl_tabulate_pbreak_check
\global\setfalse\c_tabl_tabulate_split_done % new 27/12/2022
- \dofastloopcs\c_tabl_tabulate_columns\tabl_tabulate_flush_second_step
+ \localcontrolledloop\plusone\c_tabl_tabulate_columns\plusone{\tabl_tabulate_flush_second_step}%
\ifconditional\c_tabl_tabulate_split_done\else
\glet\tabl_tabulate_tm\s!reset % new 27/12/2022
\fi
\global\settrue\c_tabl_tabulate_firstflushed}
\protected\def\tabl_tabulate_flush_second_step
- {\ifvoid\b_tabl_tabulate_current\fastloopindex
+ {\ifvoid\b_tabl_tabulate_current\currentloopiterator
\else
\gdef\tabl_tabulate_flush_collected_indeed{\the\t_tabl_tabulate_dummy}%
- \ifvoid\b_tabl_tabulate_current\fastloopindex \else
+ \ifvoid\b_tabl_tabulate_current\currentloopiterator \else
\global\settrue\c_tabl_tabulate_split_done % new 27/12/2022
\fi
\fi}
@@ -3262,7 +3277,7 @@
%\letcsname\??tabulatespana r\endcsname\relax
\noaligned\tolerant\def\tabl_tabulate_NS[#1]#*[#2]%
- {\NC\loopcs{#1}\tabl_tabulate_span
+ {\NC\loopcs{#1}\tabl_tabulate_span % use localloop and quit
\gdef\tabl_tabulate_kooh
{\begincsname\??tabulatespana#2\endcsname
\glet\tabl_tabulate_kooh\relax}%
diff --git a/tex/context/base/mkxl/trac-vis.lmt b/tex/context/base/mkxl/trac-vis.lmt
index dddb4799d..c9b68b407 100644
--- a/tex/context/base/mkxl/trac-vis.lmt
+++ b/tex/context/base/mkxl/trac-vis.lmt
@@ -1946,7 +1946,7 @@ do
head, current = ruledkern(head,current,vertical)
end
end
- goto next;
+ goto next
::list::
if id == hlist_code then
local content = getlist(current)
diff --git a/tex/context/base/mkxl/typo-cln.lmt b/tex/context/base/mkxl/typo-cln.lmt
new file mode 100644
index 000000000..469859162
--- /dev/null
+++ b/tex/context/base/mkxl/typo-cln.lmt
@@ -0,0 +1,109 @@
+if not modules then modules = { } end modules ['typo-cln'] = {
+ version = 1.001,
+ comment = "companion to typo-cln.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- This quick and dirty hack took less time than listening to a CD (in
+-- this case Dream Theater's Octavarium). Of course extensions will take
+-- more time.
+
+-- This feature is probably never used so we can get rid of it.
+
+local tonumber = tonumber
+local utfbyte = utf.byte
+
+local trace_cleaners = false trackers.register("typesetters.cleaners", function(v) trace_cleaners = v end)
+local trace_autocase = false trackers.register("typesetters.cleaners.autocase",function(v) trace_autocase = v end)
+
+local report_cleaners = logs.reporter("nodes","cleaners")
+local report_autocase = logs.reporter("nodes","autocase")
+
+typesetters.cleaners = typesetters.cleaners or { }
+local cleaners = typesetters.cleaners
+
+local variables = interfaces.variables
+
+local nodecodes = nodes.nodecodes
+
+local enableaction = nodes.tasks.enableaction
+
+local texsetattribute = tex.setattribute
+
+local nuts = nodes.nuts
+
+local getattr = nuts.getattr
+local setattr = nuts.setattr
+
+local setchar = nuts.setchar
+
+local nextglyph = nuts.traversers.glyph
+
+local unsetvalue = attributes.unsetvalue
+
+local glyph_code = nodecodes.glyph
+local uccodes = characters.uccodes
+
+local a_cleaner = attributes.private("cleaner")
+
+local resetter = { -- this will become an entry in char-def
+ [utfbyte(".")] = true
+}
+
+-- Contrary to the casing code we need to keep track of a state.
+-- We could extend the casing code with a status tracker but on
+-- the other hand we might want to apply casing afterwards. So,
+-- cleaning comes first.
+
+function cleaners.handler(head)
+ local inline = false
+ for n, char, font in nextglyph, head do
+ if resetter[char] then
+ inline = false
+ elseif not inline then
+ local a = getattr(n,a_cleaner)
+ if a == 1 then -- currently only one cleaner so no need to be fancy
+ local upper = uccodes[char]
+ if type(upper) == "table" then
+ -- some day, not much chance that \SS ends up here
+ else
+ setchar(n,upper)
+ if trace_autocase then
+ report_autocase("")
+ end
+ end
+ end
+ inline = true
+ end
+ end
+ return head
+end
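
The handler above only tracks one bit of state: whether we are already inside a sentence. A period (the only entry in the resetter table so far) clears that state, and the first glyph seen afterwards gets uppercased via uccodes when the cleaner attribute asks for it. A minimal plain-Lua sketch of that state machine, detached from the node list, attribute and font machinery (the uccodes table below is a tiny stand-in, not the real character data):

    -- Stand-ins for the real tables; only what this sketch needs.
    local resetter = { ["."] = true }                    -- sentence enders reset the state
    local uccodes  = { a = "A", b = "B", i = "I", t = "T" }

    -- Uppercase the first letter after a resetter, mimicking the inline/reset
    -- bookkeeping in cleaners.handler (the attribute check is left out here).
    local function autocase(str)
        local inline = false
        local result = { }
        for char in str:gmatch(".") do
            if resetter[char] then
                inline = false
            elseif char ~= " " and not inline then
                char   = uccodes[char] or char
                inline = true
            end
            result[#result+1] = char
        end
        return table.concat(result)
    end

    print(autocase("this is a test. indeed it is."))     -- This is a test. Indeed it is.
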
+
+-- see typo-cap for a more advanced settings handler .. not needed now
+
+local enabled = false
+
+function cleaners.set(n)
+ if n == variables.reset or not tonumber(n) or n == 0 then
+ texsetattribute(a_cleaner,unsetvalue)
+ else
+ if not enabled then
+ enableaction("processors","typesetters.cleaners.handler")
+ if trace_cleaners then
+ report_cleaners("enabling cleaners")
+ end
+ enabled = true
+ end
+ texsetattribute(a_cleaner,tonumber(n))
+ end
+end
+
+-- interface
+
+interfaces.implement {
+ name = "setcharactercleaning",
+ actions = cleaners.set,
+ arguments = "string"
+}
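
cleaners.set follows the usual lazy pattern: the node pass is registered with enableaction only the first time a real value comes in; later calls just (re)set the attribute, and reset or zero unsets it. A stripped-down sketch of that enable-once logic, with a dummy stand-in for nodes.tasks.enableaction and the attribute:

    -- Dummy stand-in for nodes.tasks.enableaction; it just records what was enabled.
    local enabledactions = { }
    local function enableaction(category,action)
        enabledactions[#enabledactions+1] = category .. ":" .. action
    end

    local enabled = false
    local cleaner = false           -- stand-in for the cleaner attribute value

    local function set(n)
        n = tonumber(n)
        if not n or n == 0 then
            cleaner = false         -- reset: unset the attribute
        else
            if not enabled then     -- register the handler only once
                enableaction("processors","typesetters.cleaners.handler")
                enabled = true
            end
            cleaner = n             -- remember which cleaner was asked for
        end
    end

    set("1") set("1") set("reset")
    print(#enabledactions,cleaner)  -- 1   false
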
diff --git a/tex/context/base/mkxl/typo-cln.mkxl b/tex/context/base/mkxl/typo-cln.mkxl
index 84fc1d235..fba9d4ab8 100644
--- a/tex/context/base/mkxl/typo-cln.mkxl
+++ b/tex/context/base/mkxl/typo-cln.mkxl
@@ -15,7 +15,7 @@
\unprotect
-\registerctxluafile{typo-cln}{}
+\registerctxluafile{typo-cln}{autosuffix}
\definesystemattribute[cleaner][public]
diff --git a/tex/context/base/mkxl/typo-dha.lmt b/tex/context/base/mkxl/typo-dha.lmt
new file mode 100644
index 000000000..e1a6662c4
--- /dev/null
+++ b/tex/context/base/mkxl/typo-dha.lmt
@@ -0,0 +1,481 @@
+if not modules then modules = { } end modules ['typo-dha'] = {
+ version = 1.001,
+ comment = "companion to typo-dir.mkiv",
+ author = "Hans Hagen, PRAGMA-ADE, Hasselt NL",
+ copyright = "PRAGMA ADE / ConTeXt Development Team",
+ license = "see context related readme files"
+}
+
+-- Some analysis by Idris:
+--
+-- 1. Assuming the reading- vs word-order distinction (bidi-char types) is governing;
+-- 3. Assuming that 'BARA' represents the correct RL word order;
+-- 3. Assuming that 'BARA' represent the correct RL word order;
+--
+-- Then we have, with input: LATIN ARAB
+--
+-- \textdirection 1 LATIN ARAB => LATIN BARA
+-- \textdirection 1 LATIN ARAB => LATIN BARA
+-- \textdirection 1 LRO LATIN ARAB => LATIN ARAB
+-- \textdirection 1 LRO LATIN ARAB => LATIN ARAB
+-- \textdirection 1 RLO LATIN ARAB => NITAL ARAB
+-- \textdirection 1 RLO LATIN ARAB => NITAL ARAB
+
+-- elseif d == "es" then -- European Number Separator
+-- elseif d == "et" then -- European Number Terminator
+-- elseif d == "cs" then -- Common Number Separator
+-- elseif d == "nsm" then -- Non-Spacing Mark
+-- elseif d == "bn" then -- Boundary Neutral
+-- elseif d == "b" then -- Paragraph Separator
+-- elseif d == "s" then -- Segment Separator
+-- elseif d == "ws" then -- Whitespace
+-- elseif d == "on" then -- Other Neutrals
+
+-- todo : use new dir functions
+-- todo : make faster
+-- todo : move dir info into nodes
+-- todo : swappable tables and floats i.e. start-end overloads (probably loop in builders)
+
+-- I removed the original tracing code and now use the colorful one. If I ever want to change
+-- something I will just inject prints for tracing.
+
+local nodes, node = nodes, node
+
+local trace_directions = false trackers.register("typesetters.directions", function(v) trace_directions = v end)
+
+local report_directions = logs.reporter("typesetting","text directions")
+
+local nuts = nodes.nuts
+
+local getnext = nuts.getnext
+local getprev = nuts.getprev
+local getchar = nuts.getchar
+local getid = nuts.getid
+local getsubtype = nuts.getsubtype
+local getlist = nuts.getlist
+local getattr = nuts.getattr
+local getprop = nuts.getprop
+local getdirection = nuts.getdirection
+local isglyph = nuts.isglyph -- or ischar
+
+local setprop = nuts.setprop
+local setstate = nuts.setstate
+local setchar = nuts.setchar
+
+local insertnodebefore = nuts.insertbefore
+local insertnodeafter = nuts.insertafter
+local remove_node = nuts.remove
+local endofmath = nuts.endofmath
+
+local startofpar = nuts.startofpar
+
+local nodepool = nuts.pool
+
+local nodecodes = nodes.nodecodes
+local gluecodes = nodes.gluecodes
+
+local glyph_code = nodecodes.glyph
+local math_code = nodecodes.math
+local kern_code = nodecodes.kern
+local glue_code = nodecodes.glue
+local dir_code = nodecodes.dir
+local par_code = nodecodes.par
+
+local dirvalues = nodes.dirvalues
+local lefttoright_code = dirvalues.lefttoright
+local righttoleft_code = dirvalues.righttoleft
+
+local parfillskip_code = gluecodes.parfillskip
+
+local new_direction = nodepool.direction
+
+local insert = table.insert
+
+local fonthashes = fonts.hashes
+local fontchar = fonthashes.characters
+
+local chardirections = characters.directions
+local charmirrors = characters.mirrors
+local charclasses = characters.textclasses
+
+local directions = typesetters.directions
+local setcolor = directions.setcolor
+local getglobal = directions.getglobal
+
+local a_directions = attributes.private('directions')
+
+local strip = false
+
+local s_isol = fonts.analyzers.states.isol
+
+local function stopdir(finish) -- we could use finish directly
+ local n = new_direction(finish == righttoleft_code and righttoleft_code or lefttoright_code,true)
+ setprop(n,"direction",true)
+ return n
+end
+
+local function startdir(finish) -- we could use finish directly
+ local n = new_direction(finish == righttoleft_code and righttoleft_code or lefttoright_code)
+ setprop(n,"direction",true)
+ return n
+end
+
+local function nextisright(current)
+ current = getnext(current)
+ local character, id = isglyph(current)
+ if character then
+ local direction = chardirections[character]
+ return direction == "r" or direction == "al" or direction == "an"
+ end
+end
+
+local function previsright(current)
+ current = getprev(current)
+ local character, id = isglyph(current)
+ if character then
+ local direction = chardirections[character]
+ return direction == "r" or direction == "al" or direction == "an"
+ end
+end
+
+local function process(start)
+
+ local head = start
+ local current = head
+ local autodir = 0
+ local embedded = 0
+ local override = 0
+ local pardir = 0
+ local textdir = 0
+ local done = false
+ local stack = { }
+ local top = 0
+ local obsolete = { }
+ local rlo = false
+ local lro = false
+ local prevattr = false
+ local fences = { }
+
+ while current do
+ -- no isglyph here as we test for skips first
+ local id = getid(current)
+ local next = getnext(current)
+ if id == math_code then
+ current = getnext(endofmath(next))
+ elseif getprop(current,"direction") then
+ -- this handles unhbox etc
+ current = next
+ else
+ local attr = getattr(current,a_directions)
+ if attr and attr > 0 then
+ if attr ~= prevattr then
+ if not getglobal(attr) then
+ lro = false
+ rlo = false
+ end
+ prevattr = attr
+ end
+ end
+ local prop = true
+ if id == glyph_code then
+ if attr and attr > 0 then
+ local character, font = isglyph(current)
+ if character == 0 then
+ -- skip signals
+ -- setprop(current,"direction",true)
+ else
+ local direction = chardirections[character]
+ local reversed = false
+ if rlo or override > 0 then
+ if direction == "l" then
+ direction = "r"
+ reversed = true
+ end
+ elseif lro or override < 0 then
+ if direction == "r" or direction == "al" then
+ setstate(current,s_isol) -- hm
+ direction = "l"
+ reversed = true
+ end
+ end
+ if direction == "on" then
+ local mirror = charmirrors[character]
+ if mirror and fontchar[font][mirror] then
+ local class = charclasses[character]
+ if class == "open" then
+ if nextisright(current) then
+ setchar(current,mirror)
+ -- setprop(current,"direction","r")
+ prop = "r"
+ elseif autodir < 0 then
+ setchar(current,mirror)
+ -- setprop(current,"direction","r")
+ prop = "r"
+ else
+ mirror = false
+ -- setprop(current,"direction","l")
+ prop = "l"
+ end
+ local fencedir = autodir == 0 and textdir or autodir
+ fences[#fences+1] = fencedir
+ elseif class == "close" and #fences > 0 then
+ local fencedir = fences[#fences]
+ fences[#fences] = nil
+ if fencedir < 0 then
+ setchar(current,mirror)
+ -- setprop(current,"direction","r")
+ prop = "r"
+ else
+ -- setprop(current,"direction","l")
+ prop = "l"
+ mirror = false
+ end
+ elseif autodir < 0 then
+ setchar(current,mirror)
+ -- setprop(current,"direction","r")
+ prop = "r"
+ else
+ -- setprop(current,"direction","l")
+ prop = "l"
+ mirror = false
+ end
+ else
+ -- setprop(current,"direction",true)
+ end
+ if trace_directions then
+ setcolor(current,direction,false,mirror)
+ end
+ elseif direction == "l" then
+ if trace_directions then
+ setcolor(current,"l",reversed)
+ end
+ -- setprop(current,"direction","l")
+ prop = "l"
+ elseif direction == "r" then
+ if trace_directions then
+ setcolor(current,"r",reversed)
+ end
+ -- setprop(current,"direction","r")
+ prop = "r"
+ elseif direction == "en" then -- european number
+ if trace_directions then
+ setcolor(current,"l")
+ end
+ -- setprop(current,"direction","l")
+ prop = "l"
+ elseif direction == "al" then -- arabic letter
+ if trace_directions then
+ setcolor(current,"r")
+ end
+ -- setprop(current,"direction","r")
+ prop = "r"
+ elseif direction == "an" then -- arabic number
+ -- needs a better scanner as it can be a float
+ if trace_directions then
+ setcolor(current,"l") -- was r
+ end
+ -- setprop(current,"direction","n") -- was r
+ prop = "n"
+ elseif direction == "lro" then -- Left-to-Right Override -> right becomes left
+ top = top + 1
+ stack[top] = { override, embedded }
+ override = -1
+ obsolete[#obsolete+1] = current
+ goto obsolete
+ elseif direction == "rlo" then -- Right-to-Left Override -> left becomes right
+ top = top + 1
+ stack[top] = { override, embedded }
+ override = 1
+ obsolete[#obsolete+1] = current
+ goto obsolete
+ elseif direction == "lre" then -- Left-to-Right Embedding -> lefttoright_code
+ top = top + 1
+ stack[top] = { override, embedded }
+ embedded = 1
+ obsolete[#obsolete+1] = current
+ goto obsolete
+ elseif direction == "rle" then -- Right-to-Left Embedding -> righttoleft_code
+ top = top + 1
+ stack[top] = { override, embedded }
+ embedded = -1
+ obsolete[#obsolete+1] = current
+ goto obsolete
+ elseif direction == "pdf" then -- Pop Directional Format
+ if top > 0 then
+ local s = stack[top]
+ override = s[1]
+ embedded = s[2]
+ top = top - 1
+ else
+ override = 0
+ embedded = 0
+ end
+ obsolete[#obsolete+1] = current
+ goto obsolete
+ elseif trace_directions then
+ setcolor(current)
+ -- setprop(current,"direction",true)
+ else
+ -- setprop(current,"direction",true)
+ end
+ end
+ else
+ -- setprop(current,"direction",true)
+ end
+ elseif id == glue_code then
+ if getsubtype(current) == parfillskip_code then
+ -- setprop(current,"direction","!")
+ prop = "!"
+ else
+ -- setprop(current,"direction","g")
+ prop = "g"
+ end
+ elseif id == kern_code then
+ -- setprop(current,"direction","k")
+ prop = "k"
+ elseif id == dir_code then
+ local direction, pop = getdirection(current)
+ if direction == righttoleft_code then
+ if not pop then
+ autodir = -1
+ elseif embedded and embedded~= 0 then
+ autodir = embedded
+ else
+ autodir = 0
+ end
+ elseif direction == lefttoright_code then
+ if not pop then
+ autodir = 1
+ elseif embedded and embedded~= 0 then
+ autodir = embedded
+ else
+ autodir = 0
+ end
+ end
+ textdir = autodir
+ -- setprop(current,"direction",true)
+ elseif id == par_code and startofpar(current) then
+ local direction = getdirection(current)
+ if direction == righttoleft_code then
+ autodir = -1
+ elseif direction == lefttoright_code then
+ autodir = 1
+ end
+ pardir = autodir
+ textdir = pardir
+ -- setprop(current,"direction",true)
+ else
+ -- setprop(current,"direction",true)
+ end
+ setprop(current,"direction",prop)
+ ::obsolete::
+ current = next
+ end
+ end
+
+ -- todo: track if really needed
+ -- todo: maybe we need to set the property (as it can be a copied list)
+
+ if done and strip then
+ local n = #obsolete
+ if n > 0 then
+ for i=1,n do
+ remove_node(head,obsolete[i],true)
+ end
+ if trace_directions then
+ report_directions("%s character nodes removed",n)
+ end
+ end
+ end
+
+ local state = false
+ local last = false
+ local collapse = true
+ current = head
+
+ -- todo: textdir
+ -- todo: inject before parfillskip
+
+ while current do
+ local id = getid(current)
+ if id == math_code then
+ -- todo: this might be tricky nesting
+ current = getnext(endofmath(getnext(current)))
+ else
+ local cp = getprop(current,"direction")
+ if cp == "n" then
+ local swap = state == "r"
+ if swap then
+ head = insertnodebefore(head,current,startdir(lefttoright_code))
+ end
+ setprop(current,"direction",true)
+ while true do
+ local n = getnext(current)
+ if n and getprop(n,"direction") == "n" then
+ current = n
+ setprop(current,"direction",true)
+ else
+ break
+ end
+ end
+ if swap then
+ head, current = insertnodeafter(head,current,stopdir(lefttoright_code))
+ end
+ elseif cp == "l" then
+ if state ~= "l" then
+ if state == "r" then
+ head = insertnodebefore(head,last or current,stopdir(righttoleft_code))
+ end
+ head = insertnodebefore(head,current,startdir(lefttoright_code))
+ state = "l"
+ done = true
+ end
+ last = false
+ elseif cp == "r" then
+ if state ~= "r" then
+ if state == "l" then
+ head = insertnodebefore(head,last or current,stopdir(lefttoright_code))
+ end
+ head = insertnodebefore(head,current,startdir(righttoleft_code))
+ state = "r"
+ done = true
+ end
+ last = false
+ elseif collapse then
+ if cp == "k" or cp == "g" then
+ last = last or current
+ else
+ last = false
+ end
+ else
+ if state == "r" then
+ head = insertnodebefore(head,current,stopdir(righttoleft_code))
+ elseif state == "l" then
+ head = insertnodebefore(head,current,stopdir(lefttoright_code))
+ end
+ state = false
+ last = false
+ end
+ setprop(current,"direction",true)
+ end
+ local next = getnext(current)
+ if next then
+ current = next
+ else
+ local sd = (state == "r" and stopdir(righttoleft_code)) or (state == "l" and stopdir(lefttoright_code))
+ if sd then
+ if id == glue_code and getsubtype(current) == parfillskip_code then
+ head = insertnodebefore(head,current,sd)
+ else
+ head = insertnodeafter(head,current,sd)
+ end
+ end
+ break
+ end
+ end
+
+ return head
+
+end
+
+directions.installhandler(interfaces.variables.default,process)
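
After the classification pass, the second loop in process only has the per-node "direction" properties to work with: it wraps maximal runs of "l" and "r" material in start/stop direction nodes, and the 'last' bookkeeping keeps a trailing batch of glue and kerns outside the run it follows. The following is a self-contained sketch of that run-collapsing idea over a plain array of property strings; the bracketed markers stand in for the real direction nodes, and the special "n" and parfillskip cases are left out:

    -- Collapse a sequence of direction properties into explicit runs. "l" and "r"
    -- open or continue a run, glue ("g") and kerns ("k") are buffered so that a
    -- trailing batch of them can stay outside the run, everything else closes it.
    local function collapse(props)
        local out     = { }
        local state   = false       -- currently open run: "l", "r" or false
        local pending = { }         -- glue/kern seen since the last l/r item

        local function flushpending()
            for i=1,#pending do
                out[#out+1] = pending[i]
            end
            pending = { }
        end

        local function stoprun()
            if state then
                out[#out+1] = "[stop " .. state .. "]"
                state = false
            end
        end

        for i=1,#props do
            local p = props[i]
            if p == "l" or p == "r" then
                if state ~= p then
                    stoprun()               -- close the other direction first ...
                    flushpending()          -- ... so trailing glue stays outside it
                    out[#out+1] = "[start " .. p .. "]"
                    state = p
                else
                    flushpending()          -- same run: the glue belongs inside
                end
                out[#out+1] = p
            elseif p == "g" or p == "k" then
                pending[#pending+1] = p     -- decide later where these belong
            else
                flushpending()              -- other material keeps its glue ...
                stoprun()                   -- ... and ends the current run
                out[#out+1] = p
            end
        end
        flushpending()
        stoprun()
        return table.concat(out," ")
    end

    print(collapse { "l", "l", "g", "r", "r", "g", "k", "l" })
    -- [start l] l l [stop l] g [start r] r r [stop r] g k [start l] l [stop l]
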
diff --git a/tex/context/base/mkxl/typo-dir.mkxl b/tex/context/base/mkxl/typo-dir.mkxl
index a5a4bc568..d9937ce73 100644
--- a/tex/context/base/mkxl/typo-dir.mkxl
+++ b/tex/context/base/mkxl/typo-dir.mkxl
@@ -19,9 +19,7 @@
\unprotect
\registerctxluafile{typo-dir}{autosuffix}
-\registerctxluafile{typo-dha}{}
-%registerctxluafile{typo-dua}{}
-%registerctxluafile{typo-dub}{}
+\registerctxluafile{typo-dha}{autosuffix}
\registerctxluafile{typo-duc}{autosuffix}
\definesystemattribute[directions][public,pickup]
diff --git a/tex/context/base/mkxl/typo-prc.mklx b/tex/context/base/mkxl/typo-prc.mklx
index f2df32986..f9a8f8e5e 100644
--- a/tex/context/base/mkxl/typo-prc.mklx
+++ b/tex/context/base/mkxl/typo-prc.mklx
@@ -54,6 +54,8 @@
\installcommandhandler \??processor {processor} \??processor
+\mutable\let\currentprocessor\empty % weird that this is needed
+
\appendtoks
\letcsname\??processorcheck\currentprocessor\endcsname\relax
\clf_registerstructureprocessor{\currentprocessor}% global, but it permits using processors that are not yet defined
diff --git a/tex/context/fonts/mkiv/bonum-math.lfg b/tex/context/fonts/mkiv/bonum-math.lfg
index 56262eb32..aa35c7b5a 100644
--- a/tex/context/fonts/mkiv/bonum-math.lfg
+++ b/tex/context/fonts/mkiv/bonum-math.lfg
@@ -233,6 +233,13 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ -- minus = true, -- we have an extensible
+ fraction = { height = .244, yoffset = .757 },
+ radical = { height = .244, yoffset = .754 },
+ stacker = { height = .244, yoffset = .757 },
+ },
+ {
-- This tweak is only needed for the funny arrows and these now get properly
-- centered. (Could actually be done in the engine).
tweak = "addarrows",
diff --git a/tex/context/fonts/mkiv/cambria-math.lfg b/tex/context/fonts/mkiv/cambria-math.lfg
index a855a2513..9ad2c3afc 100644
--- a/tex/context/fonts/mkiv/cambria-math.lfg
+++ b/tex/context/fonts/mkiv/cambria-math.lfg
@@ -131,6 +131,26 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ minus = {
+ leftoffset = .0925,
+ rightoffset = .0925,
+ },
+ fraction = {
+ height = .204,
+ yoffset = .796,
+ },
+ radical = {
+ height = .204,
+ yoffset = .796,
+ -- leftoffset = .075,
+ },
+ stacker = {
+ height = .204,
+ yoffset = .796,
+ },
+ },
+ {
tweak = "wipecues",
},
{
diff --git a/tex/context/fonts/mkiv/common-math-jmn.lfg b/tex/context/fonts/mkiv/common-math-jmn.lfg
index bcbe77962..1ebb7db39 100644
--- a/tex/context/fonts/mkiv/common-math-jmn.lfg
+++ b/tex/context/fonts/mkiv/common-math-jmn.lfg
@@ -55,6 +55,10 @@ return {
local sfm = ps("flat rule middle piece")
local sfr = ps("flat rule right piece")
+ local mrl = ps("minus rule left piece")
+ local mrm = ps("minus rule middle piece")
+ local mrr = ps("minus rule right piece")
+
local dfl = ps("flat double rule left piece")
local dfm = ps("flat double rule middle piece")
local dfr = ps("flat double rule right piece")
@@ -66,6 +70,7 @@ return {
local dar = ps("double arrow right piece")
local rad = ps("radical bar extender")
+ local frc = ps("fraction bar extender")
local antykwa = characters[srm]
@@ -104,11 +109,13 @@ return {
builders.jointwo(main,0x27FA,dal,joinrelfactor,dar)
if antykwa then
- builders.horibar(main,0x203E,srm,srl,srr,0x02212) -- overbar underbar fraction (we take 90/91/92 too!)
- -- builders.horibar(main,0x203E,srm,srl,srr,0x0002D) -- overbar underbar fraction (we take 90/91/92 too!)
- builders.rootbar(main,rad,srm,srr,0x02212) -- radical
+ builders.horibar(main,0x2212,mrm,mrl,mrr,0x2212,true,srm,srl,srr) -- minus
+ builders.horibar(main,0x203E,srm,srl,srr,0x2212) -- overbar underbar fraction (we take 90/91/92 too!)
+ builders.horibar(main,frc,srm,srl,srr,0x2212) -- fraction
+ builders.rootbar(main,rad,srm,srr,0x2212) -- radical
else
- builders.horibar(main,0x203E,0xFE073) -- overbar underbar
+ builders.horibar(main,0x2212,false,false,false,0x2212,true) -- minus
+ builders.horibar(main,0x203E,false,false,false,0x02212)
end
local ffactor = antykwa and 1 or 1
diff --git a/tex/context/fonts/mkiv/concrete-math.lfg b/tex/context/fonts/mkiv/concrete-math.lfg
index b69ee0103..53972ab05 100644
--- a/tex/context/fonts/mkiv/concrete-math.lfg
+++ b/tex/context/fonts/mkiv/concrete-math.lfg
@@ -87,6 +87,13 @@ return {
advance = 0.6,
},
{
+ tweak = "replacerules",
+ minus = true,
+ fraction = { height = .2, yoffset = .8 },
+ radical = { height = .2, yoffset = .8, leftoffset = .2 },
+ stacker = { height = .2, yoffset = .8 },
+ },
+ {
tweak = "addactuarian",
},
{
diff --git a/tex/context/fonts/mkiv/dejavu-math.lfg b/tex/context/fonts/mkiv/dejavu-math.lfg
index 03f869288..f3e1011ad 100644
--- a/tex/context/fonts/mkiv/dejavu-math.lfg
+++ b/tex/context/fonts/mkiv/dejavu-math.lfg
@@ -145,6 +145,13 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ -- minus = true, -- we have an extensible
+ fraction = { height = .254, yoffset = .746 },
+ radical = { height = .254, yoffset = .746, yscale = .912 },
+ stacker = { height = .254, yoffset = .746 },
+ },
+ {
tweak = "wipecues",
},
{
diff --git a/tex/context/fonts/mkiv/ebgaramond-math.lfg b/tex/context/fonts/mkiv/ebgaramond-math.lfg
index f6f552dfe..bf9a2e027 100644
--- a/tex/context/fonts/mkiv/ebgaramond-math.lfg
+++ b/tex/context/fonts/mkiv/ebgaramond-math.lfg
@@ -174,20 +174,23 @@ return {
},
{
tweak = "replacerules",
+ minus = {
+ leftoffset = .075,
+ rightoffset = .075,
+ },
fraction = {
- template = "minus", -- 0x2212,
- xoffset = 0.075,
- yoffset = 0.9,
- -- width = 0.85,
- -- height = 0.1,
+ height = .2,
+ yoffset = .8,
},
radical = {
- template = "minus", -- 0x2212,
- xoffset = 0.075,
- yoffset = 0.9,
- yscale = 0.975,
- -- width = 0.85,
- -- height = 0.1,
+ height = .2,
+ yoffset = .8,
+ leftoffset = .075,
+ yscale = .9775,
+ },
+ stacker = {
+ height = .2,
+ yoffset = .8,
},
},
{
diff --git a/tex/context/fonts/mkiv/erewhon-math.lfg b/tex/context/fonts/mkiv/erewhon-math.lfg
index 54cc687fa..68a088d3f 100644
--- a/tex/context/fonts/mkiv/erewhon-math.lfg
+++ b/tex/context/fonts/mkiv/erewhon-math.lfg
@@ -90,6 +90,28 @@ return {
{
tweak = "addrules",
},
+{
+ tweak = "replacerules",
+ -- minus = {
+ -- height = 0.188,
+ -- yoffset = 0.812,
+ -- leftoffset = 0.2,
+ -- rightoffset = 0.2,
+ -- },
+ fraction = {
+ height = .188,
+ yoffset = .812,
+ },
+ radical = {
+ height = .188,
+ yoffset = .812,
+ leftoffset = 0.075,
+ },
+ stacker = {
+ height = .188,
+ yoffset = .812,
+ },
+},
{
tweak = "addactuarian",
},
diff --git a/tex/context/fonts/mkiv/kpfonts-math.lfg b/tex/context/fonts/mkiv/kpfonts-math.lfg
index 67ad3841f..5896323b4 100644
--- a/tex/context/fonts/mkiv/kpfonts-math.lfg
+++ b/tex/context/fonts/mkiv/kpfonts-math.lfg
@@ -112,6 +112,28 @@ return {
{
tweak = "addrules",
},
+{
+ tweak = "replacerules",
+ -- minus = {
+ -- height = 0.1818,
+ -- yoffset = 0.818,
+ -- leftoffset = 0.2,
+ -- rightoffset = 0.2,
+ -- },
+ fraction = {
+ height = .1818,
+ yoffset = .818,
+ },
+ radical = {
+ height = .1818,
+ yoffset = .818,
+ leftoffset = 0.075,
+ },
+ stacker = {
+ height = .1818,
+ yoffset = .818,
+ },
+},
{
-- This will be fixed. Check if new version comes out!
tweak = "addbars",
diff --git a/tex/context/fonts/mkiv/libertinus-math.lfg b/tex/context/fonts/mkiv/libertinus-math.lfg
index 6f707ed2b..ac15d6674 100644
--- a/tex/context/fonts/mkiv/libertinus-math.lfg
+++ b/tex/context/fonts/mkiv/libertinus-math.lfg
@@ -139,6 +139,29 @@ return {
advance = 0.5,
},
{
+ tweak = "replacerules",
+ minus = {
+ height = .176,
+ yoffset = .825,
+ leftoffset = .065,
+ rightoffset = .065,
+ },
+ fraction = {
+ height = .176,
+ yoffset = .825,
+ },
+ radical = {
+ height = .140,
+ yoffset = .800,
+ leftoffset = .075,
+ yscale = .950,
+ },
+ stacker = {
+ height = .176,
+ yoffset = .825,
+ },
+ },
+ {
tweak = "addactuarian",
},
{
diff --git a/tex/context/fonts/mkiv/lucida-math.lfg b/tex/context/fonts/mkiv/lucida-math.lfg
index 28510ac2d..50dce6907 100644
--- a/tex/context/fonts/mkiv/lucida-math.lfg
+++ b/tex/context/fonts/mkiv/lucida-math.lfg
@@ -64,14 +64,14 @@ return {
keep = true,
list = {
{ source = "latinsupplement" },
- -- { source = "latinextendeda" },
- -- { source = "latinextendedadditional" },
- -- { source = "latinextendedb" },
- -- { source = "latinextendedc" },
- -- { source = "latinextendedd" },
- -- { source = "latinextendede" },
- -- { source = "latinextendedf" },
- -- { source = "latinextendedg" },
+ -- { source = "latinextendeda" },
+ -- { source = "latinextendedadditional" },
+ -- { source = "latinextendedb" },
+ -- { source = "latinextendedc" },
+ -- { source = "latinextendedd" },
+ -- { source = "latinextendede" },
+ -- { source = "latinextendedf" },
+ -- { source = "latinextendedg" },
},
},
{
@@ -189,6 +189,27 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ minus = {
+ leftoffset = .2,
+ rightoffset = .2,
+ },
+ fraction = {
+ height = .2,
+ yoffset = .825,
+ },
+ radical = {
+ height = .2,
+ yoffset = .825,
+ leftoffset = .075,
+ yscale = .940,
+ },
+ stacker = {
+ height = .2,
+ yoffset = .825,
+ },
+ },
+ {
tweak = "addactuarian",
},
{
diff --git a/tex/context/fonts/mkiv/modern-math.lfg b/tex/context/fonts/mkiv/modern-math.lfg
index 4af740789..e2560f0f1 100644
--- a/tex/context/fonts/mkiv/modern-math.lfg
+++ b/tex/context/fonts/mkiv/modern-math.lfg
@@ -324,6 +324,13 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ -- minus = true, -- we have an extensible
+ fraction = { height = .15, yoffset = .85 },
+ radical = { height = .15, yoffset = .85 },
+ stacker = { height = .15, yoffset = .85 },
+ },
+ {
tweak = "addbars",
advance = 0.52,
},
@@ -346,6 +353,8 @@ return {
feature = "emulatelmtx",
comment = "this is for mkiv",
},
+-- { tweak = "inspect", slot = 0x2212 },
+-- { tweak = "inspect", slot = 0x003D },
},
},
bigslots = {
diff --git a/tex/context/fonts/mkiv/newcomputermodern-math.lfg b/tex/context/fonts/mkiv/newcomputermodern-math.lfg
index cae69aecc..bb881eda0 100644
--- a/tex/context/fonts/mkiv/newcomputermodern-math.lfg
+++ b/tex/context/fonts/mkiv/newcomputermodern-math.lfg
@@ -113,7 +113,7 @@ return {
["0x27EB.variants.*"] = { topright = -0.3, bottomright = -0.3 },
},
- },
+ },
{
tweak = "checkspacing",
},
@@ -130,6 +130,13 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ -- minus = true, -- we have an extensible
+ fraction = { height = .15, yoffset = .85 },
+ radical = { height = .15, yoffset = .85 },
+ stacker = { height = .15, yoffset = .85 },
+ },
+ {
tweak = "addfourier",
variant = 1,
},
diff --git a/tex/context/fonts/mkiv/pagella-math.lfg b/tex/context/fonts/mkiv/pagella-math.lfg
index c1d0c7dd5..230f81c9c 100644
--- a/tex/context/fonts/mkiv/pagella-math.lfg
+++ b/tex/context/fonts/mkiv/pagella-math.lfg
@@ -154,7 +154,7 @@ return {
[0x27EB] = { topright = -0.2, bottomright = -0.2 },
["0x27EB.variants.*"] = { topright = -0.3, bottomright = -0.3 },
--
- [0x00393] = { bottomright = -0.20, }, -- upright Gamma
+ [0x0393] = { bottomright = -0.20, }, -- upright Gamma
--
["0x222B.parts.bottom"] = { bottomright = -0.20 }, -- int
["0x222C.parts.bottom"] = { bottomright = -0.15 }, -- iint
@@ -185,6 +185,13 @@ return {
advance = 0.2,
},
{
+ tweak = "replacerules",
+ minus = { rightoffset = .045 },
+ fraction = { height = .2, yoffset = .8, rightoffset = .04 },
+ radical = { height = .2, yoffset = .8, rightoffset = .04 },
+ stacker = { height = .2, yoffset = .8, rightoffset = .04 },
+ },
+ {
tweak = "addactuarian",
},
{
diff --git a/tex/context/fonts/mkiv/schola-math.lfg b/tex/context/fonts/mkiv/schola-math.lfg
index e4a3ad397..2e36f0825 100644
--- a/tex/context/fonts/mkiv/schola-math.lfg
+++ b/tex/context/fonts/mkiv/schola-math.lfg
@@ -117,6 +117,13 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ -- minus = true, -- we have an extensible
+ fraction = { height = .25, yoffset = .75 },
+ radical = { height = .25, yoffset = .75 },
+ stacker = { height = .25, yoffset = .75 },
+ },
+ {
tweak = "addbars",
advance = 0.27,
},
diff --git a/tex/context/fonts/mkiv/stixtwo-math.lfg b/tex/context/fonts/mkiv/stixtwo-math.lfg
index b6b1757b6..c0c97862f 100644
--- a/tex/context/fonts/mkiv/stixtwo-math.lfg
+++ b/tex/context/fonts/mkiv/stixtwo-math.lfg
@@ -168,6 +168,29 @@ return {
advance = 0.4,
},
{
+ tweak = "replacerules",
+ minus = {
+ height = .233,
+ yoffset = .768,
+ -- leftoffset = .2,
+ -- rightoffset = .2,
+ },
+ fraction = {
+ height = .233,
+ yoffset = .768,
+ },
+ radical = {
+ height = .233,
+ yoffset = .768,
+ leftoffset = .05,
+ rightoffset = .05,
+ },
+ stacker = {
+ height = .233,
+ yoffset = .768,
+ },
+ },
+ {
tweak = "addactuarian",
},
{
diff --git a/tex/context/fonts/mkiv/termes-math.lfg b/tex/context/fonts/mkiv/termes-math.lfg
index d9c53ee28..364ea8369 100644
--- a/tex/context/fonts/mkiv/termes-math.lfg
+++ b/tex/context/fonts/mkiv/termes-math.lfg
@@ -123,6 +123,13 @@ return {
tweak = "addrules",
},
{
+ tweak = "replacerules",
+ -- minus = true, -- we have an extensible
+ fraction = { height = .2, yoffset = .8 },
+ radical = { height = .2, yoffset = .8 },
+ stacker = { height = .2, yoffset = .8 },
+ },
+ {
tweak = "addbars",
advance = 0.3,
},
diff --git a/tex/context/fonts/mkiv/type-imp-antykwa.mkiv b/tex/context/fonts/mkiv/type-imp-antykwa.mkiv
index f5e3158f4..f2fd100d0 100644
--- a/tex/context/fonts/mkiv/type-imp-antykwa.mkiv
+++ b/tex/context/fonts/mkiv/type-imp-antykwa.mkiv
@@ -14,14 +14,7 @@
\starttypescriptcollection[antykwa-torunska]
\startsetups[antykwa]
- % \setupmathfraction[\c!rule=\v!symbol,\c!middle="0203E]%
- % \setupmathradical [\c!rule=\v!symbol,\c!top ="FE010]%
- % \setupmathfence [\c!alternative=1]%
- \letmathfractionparameter\c!rule\v!symbol
- \setmathfractionparameter\c!middle{"203E}%
- \letmathradicalparameter \c!rule\v!symbol
- \setmathradicalparameter \c!top{\radicalbarextenderuc}%
- \setmathfenceparameter \c!alternative{1}%
+ \setmathfenceparameter\c!alternative{1}%
\stopsetups
% cond => -cont as in iwona
diff --git a/tex/context/fonts/mkiv/type-imp-concrete.mkiv b/tex/context/fonts/mkiv/type-imp-concrete.mkiv
index abf9b2cb1..c383a27fe 100644
--- a/tex/context/fonts/mkiv/type-imp-concrete.mkiv
+++ b/tex/context/fonts/mkiv/type-imp-concrete.mkiv
@@ -19,7 +19,7 @@
%\definefontfeature[none-slanted-concrete] [none] [slant=.2]
\doifunknownfontfeature {concrete-math-bold} {\definefontfeature[concrete-math-bold][boldened]}
- \doifunknownfontfeature {concrete-text-bold} {\definefontfeature[concrete-text-bold][boldened-15]}
+ \doifunknownfontfeature {concrete-text-bold} {\definefontfeature[concrete-text-bold][boldened-10]}
\starttypescript [\s!serif] [concrete]
\definefontsynonym [\s!Serif] [LMTypewriterVarWd-Regular] [\s!features={\s!default,concrete-text-bold}]
diff --git a/tex/context/fonts/mkiv/type-imp-ebgaramond.mkiv b/tex/context/fonts/mkiv/type-imp-ebgaramond.mkiv
index 966e50ba8..42575a61c 100644
--- a/tex/context/fonts/mkiv/type-imp-ebgaramond.mkiv
+++ b/tex/context/fonts/mkiv/type-imp-ebgaramond.mkiv
@@ -65,13 +65,9 @@
\starttypescriptcollection[ebgaramond]
- \startsetups[ebgaramond]
- \letmathfractionparameter\c!rule\v!symbol
- \setmathfractionparameter\c!middle{"203E}%
- \letmathradicalparameter \c!rule\v!symbol
- \setmathradicalparameter \c!top{\radicalbarextenderuc}%
- % \setmathfenceparameter \c!alternative{1}%
- \stopsetups
+% \startsetups[ebgaramond]
+% % \setmathfenceparameter \c!alternative{1}%
+% \stopsetups
\doifunknownfontfeature {ebgaramond-math-bold} {\definefontfeature[ebgaramond-math-bold][boldened]}
diff --git a/tex/context/fonts/mkiv/type-imp-iwona.mkiv b/tex/context/fonts/mkiv/type-imp-iwona.mkiv
index 528cb3208..01d859071 100644
--- a/tex/context/fonts/mkiv/type-imp-iwona.mkiv
+++ b/tex/context/fonts/mkiv/type-imp-iwona.mkiv
@@ -14,13 +14,7 @@
\starttypescriptcollection[iwona]
\startsetups[iwona]
- % \setupmathfence [\c!alternative=1]%
- \setmathfenceparameter \c!alternative{1}%
-% \letmathfractionparameter\c!rule\v!symbol
-% \setmathfractionparameter\c!middle{"203E}%
-% \letmathradicalparameter \c!rule\v!symbol
-% \setmathradicalparameter \c!top{\radicalbarextenderuc}%
-% \setmathfenceparameter \c!alternative{1}%
+ \setmathfenceparameter\c!alternative{1}%
\stopsetups
\startsetups[iwona-light] \directsetup{antykwa}\stopsetups
diff --git a/tex/context/fonts/mkiv/type-imp-kurier.mkiv b/tex/context/fonts/mkiv/type-imp-kurier.mkiv
index af1e2a28d..0ff7852fc 100644
--- a/tex/context/fonts/mkiv/type-imp-kurier.mkiv
+++ b/tex/context/fonts/mkiv/type-imp-kurier.mkiv
@@ -14,13 +14,7 @@
\starttypescriptcollection [kurier]
\startsetups[kurier]
- % \setupmathfence [\c!alternative=1]%
- \setmathfenceparameter \c!alternative{1}%
-% \letmathfractionparameter\c!rule\v!symbol
-% \setmathfractionparameter\c!middle{"203E}%
-% \letmathradicalparameter \c!rule\v!symbol
-% \setmathradicalparameter \c!top{\radicalbarextenderuc}%
-% \setmathfenceparameter \c!alternative{1}%
+ \setmathfenceparameter\c!alternative{1}%
\stopsetups
\startsetups[kurier-light] \directsetup{antykwa}\stopsetups
diff --git a/tex/context/fonts/mkiv/xcharter-math.lfg b/tex/context/fonts/mkiv/xcharter-math.lfg
index 193c0fd1b..3c349ee88 100644
--- a/tex/context/fonts/mkiv/xcharter-math.lfg
+++ b/tex/context/fonts/mkiv/xcharter-math.lfg
@@ -77,6 +77,28 @@ return {
{
tweak = "addrules",
},
+{
+ tweak = "replacerules",
+ -- minus = {
+ -- height = 0.188,
+ -- yoffset = 0.812,
+ -- leftoffset = 0.2,
+ -- rightoffset = 0.2,
+ -- },
+ fraction = {
+ height = .188,
+ yoffset = .812,
+ },
+ radical = {
+ height = .188,
+ yoffset = .812,
+ leftoffset = 0.2,-- no effect?
+ },
+ stacker = {
+ height = .188,
+ yoffset = .812,
+ },
+},
{
tweak = "addactuarian",
},
diff --git a/tex/context/modules/mkiv/m-tikz.mkiv b/tex/context/modules/mkiv/m-tikz.mkiv
index 221c074ad..ef1b6b7e3 100644
--- a/tex/context/modules/mkiv/m-tikz.mkiv
+++ b/tex/context/modules/mkiv/m-tikz.mkiv
@@ -42,6 +42,7 @@
\catcode`\@=11
\catcode`\|=12
\catcode`\!=12
+ \catcode`\~=12
\relax}
\permanent\protected\def\stoptikzinput
diff --git a/tex/context/modules/mkiv/s-abbreviations-logos.tex b/tex/context/modules/mkiv/s-abbreviations-logos.tex
index ab2b98a56..d04706ca6 100644
--- a/tex/context/modules/mkiv/s-abbreviations-logos.tex
+++ b/tex/context/modules/mkiv/s-abbreviations-logos.tex
@@ -177,10 +177,16 @@
\logo [LMX] {lmx}
\logo [LPEG] {lpeg}
\logo [LUA] {Lua}
-\logo [LUAJIT] {Lua\wordboundary JIT}
-\logo [LUAJITTEX] {Lua\wordboundary jit\TeXsuffix}
-\logo [LUAMETATEX] {\Lua\wordboundary Meta\wordboundary\TeXsuffix}
-\logo [LUATEX] {Lua\wordboundary\TeXsuffix}
+% \logo [LUAJIT] {Lua\wordboundary JIT}
+% \logo [LUAJITTEX] {Lua\wordboundary jit\TeXsuffix}
+% \logo [LUAMETATEX] {\Lua\wordboundary Meta\wordboundary\TeXsuffix}
+% \logo [LUATEX] {Lua\wordboundary\TeXsuffix}
+% \logo [LUAMETAFUN] {\Lua\wordboundary\MetaFun}
+\logo [LUAJIT] {Lua\-JIT}
+\logo [LUAJITTEX] {Lua\-jit\-\TeXsuffix}
+\logo [LUAMETATEX] {\Lua\-Meta\-\TeXsuffix}
+\logo [LUATEX] {Lua\-\TeXsuffix}
+\logo [LUAMETAFUN] {\Lua\-\MetaFun}
\logo [LUATOOLS] {luatools}
\logo [MACOSX] {MacOSX}
%logo [MACROTEX] {Macro\TeXsuffix}
@@ -189,7 +195,6 @@
\logo [MAPS] {Maps}
\logo [MATHML] {MathML}
\logo [METAFONT] {\MetaFont}
-\logo [LUAMETAFUN] {\Lua\wordboundary\MetaFun}
\logo [METAFUN] {\MetaFun}
\logo [METAPOST] {\MetaPost}
\logo [METATEX] {Meta\TeXsuffix}
diff --git a/tex/context/modules/mkiv/x-asciimath.lua b/tex/context/modules/mkiv/x-asciimath.lua
index fdcab141c..f158065aa 100644
--- a/tex/context/modules/mkiv/x-asciimath.lua
+++ b/tex/context/modules/mkiv/x-asciimath.lua
@@ -6,15 +6,14 @@ if not modules then modules = { } end modules ['x-asciimath'] = {
license = "see context related readme files"
}
---[[ldx--
-<p>Some backgrounds are discussed in <t>x-asciimath.mkiv</t>. This is a third version. I first
-tried a to make a proper expression parser but it's not that easy. First we have to avoid left
-recursion, which is not that trivial (maybe a future version of lpeg will provide that), and
-second there is not really a syntax but a mix of expressions and sequences with some fuzzy logic
-applied. Most problematic are fractions and we also need to handle incomplete expressions. So,
-instead we (sort of) tokenize the string and then do some passes over the result. Yes, it's real
-ugly and unsatisfying code mess down here. Don't take this as an example.</p>
---ldx]]--
+-- Some backgrounds are discussed in 'x-asciimath.mkiv'. This is a third version. I
+-- first tried to make a proper expression parser but it's not that easy. First we
+-- have to avoid left recursion, which is not that trivial (maybe a future version
+-- of lpeg will provide that), and second there is not really a syntax but a mix of
+-- expressions and sequences with some fuzzy logic applied. Most problematic are
+-- fractions and we also need to handle incomplete expressions. So, instead we (sort
+-- of) tokenize the string and then do some passes over the result. Yes, it's a
+-- really ugly and unsatisfying mess of code down here. Don't take this as an example.
-- todo: spaces around all elements in cleanup?
-- todo: filter from files listed in tuc file
diff --git a/tex/context/modules/mkxl/m-tikz.mkxl b/tex/context/modules/mkxl/m-tikz.mkxl
index 21544d14e..6b173227c 100644
--- a/tex/context/modules/mkxl/m-tikz.mkxl
+++ b/tex/context/modules/mkxl/m-tikz.mkxl
@@ -45,6 +45,7 @@
\catcode`\@=11
\catcode`\|=12
\catcode`\!=12
+ \catcode`\~=13
\autoparagraphmode\zerocount}
\permanent\protected\def\stoptikzinput
diff --git a/tex/generic/context/luatex/luatex-fonts-merged.lua b/tex/generic/context/luatex/luatex-fonts-merged.lua
index e885a56ed..7d9befa51 100644
--- a/tex/generic/context/luatex/luatex-fonts-merged.lua
+++ b/tex/generic/context/luatex/luatex-fonts-merged.lua
@@ -1,6 +1,6 @@
-- merged file : c:/data/develop/context/sources/luatex-fonts-merged.lua
-- parent file : c:/data/develop/context/sources/luatex-fonts.lua
--- merge date : 2023-03-20 15:42
+-- merge date : 2023-04-01 09:28
do -- begin closure to overcome local limits and interference
@@ -37807,7 +37807,7 @@ local fonts=fonts
local otf=fonts.handlers.otf
local registerotffeature=otf.features.register
local addotffeature=otf.addfeature
-local specification={
+local tlig={
type="ligature",
order={ "tlig" },
prepend=true,
@@ -37816,12 +37816,17 @@ local specification={
[0x2014]={ 0x002D,0x002D,0x002D },
},
}
-addotffeature("tlig",specification)
-registerotffeature {
- name="tlig",
- description="tex ligatures",
+local tquo={
+ type="ligature",
+ order={ "tquo" },
+ prepend=true,
+ data={
+ [0x201C]={ 0x0060,0x0060 },
+ [0x201D]={ 0x0027,0x0027 },
+ [0x201E]={ 0x002C,0x002C },
+ },
}
-local specification={
+local trep={
type="substitution",
order={ "trep" },
prepend=true,
@@ -37829,11 +37834,12 @@ local specification={
[0x0027]=0x2019,
},
}
-addotffeature("trep",specification)
-registerotffeature {
- name="trep",
- description="tex replacements",
-}
+addotffeature("trep",trep)
+addotffeature("tlig",tlig)
+addotffeature("tquo",tquo)
+registerotffeature { name="tlig",description="tex ligatures" }
+registerotffeature { name="tquo",description="tex quotes" }
+registerotffeature { name="trep",description="tex replacements" }
local anum_arabic={
[0x0030]=0x0660,
[0x0031]=0x0661,
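
The rewritten block above defines each specification in its own local (tlig, tquo, trep) and then registers them in one go with addotffeature and registerotffeature. A hypothetical extra feature would follow the same shape; the name "texdash" and its mapping below are made up for illustration and assume the surrounding locals of this file are in scope:

    -- Hypothetical extra feature in the tlig/tquo/trep style; illustrative only.
    local texdash = {
        type    = "substitution",
        order   = { "texdash" },
        prepend = true,
        data    = {
            [0x2010] = 0x002D,      -- example: map HYPHEN to HYPHEN-MINUS
        },
    }
    addotffeature("texdash",texdash)
    registerotffeature { name = "texdash", description = "tex dash replacements" }
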
diff --git a/tex/generic/context/luatex/luatex-mplib.lua b/tex/generic/context/luatex/luatex-mplib.lua
index 1839c44ee..99a23b03c 100644
--- a/tex/generic/context/luatex/luatex-mplib.lua
+++ b/tex/generic/context/luatex/luatex-mplib.lua
@@ -6,19 +6,14 @@ if not modules then modules = { } end modules ['luatex-mplib'] = {
license = "public domain",
}
---[[ldx--
-<p>This module is a stripped down version of libraries that are used
-by <l n='context'/>. It can be used in other macro packages and/or
-serve as an example. Embedding in a macro package is upto others and
-normally boils down to inputting <t>supp-mpl.tex</t>.</p>
---ldx]]--
+-- This module is a stripped down version of libraries that are used by ConTeXt. It
+-- can be used in other macro packages and/or serve as an example. Embedding in a
+-- macro package is up to others and normally boils down to inputting 'supp-mpl.tex'.
if metapost and metapost.version then
- --[[ldx--
- <p>Let's silently quit and make sure that no one loads it
- manually in <l n='context'/>.</p>
- --ldx]]--
+ -- Let's silently quit and make sure that no one loads it manually in
+ -- ConTeXt.
else
@@ -29,27 +24,25 @@ else
local mplib = require ('mplib')
local kpse = require ('kpse')
- --[[ldx--
- <p>We create a namespace and some variables to it. If a namespace is
- already defined it wil not be initialized. This permits hooking
- in code beforehand.</p>
+ -- We create a namespace and add some variables to it. If a namespace is already
+ -- defined it will not be initialized. This permits hooking in code beforehand.
- <p>We don't make a format automatically. After all, distributions
- might have their own preferences and normally a format (mem) file will
- have some special place in the <l n='tex'/> tree. Also, there can already
- be format files, different memort settings and other nasty pitfalls that
- we don't want to interfere with. If you want, you can define a function
- <t>metapost.make(name,mem_name) that does the job.</t></p>
- --ldx]]--
+ -- We don't make a format automatically. After all, distributions might have
+ -- their own preferences and normally a format (mem) file will have some
+ -- special place in the TeX tree. Also, there can already be format files,
+ -- different memory settings and other nasty pitfalls that we don't want to
+ -- interfere with. If you want, you can define a function
+ --
+ -- metapost.make (name,mem_name)
+ --
+ -- that does the job.
metapost = metapost or { }
metapost.version = 1.00
metapost.showlog = metapost.showlog or false
metapost.lastlog = ""
- --[[ldx--
- <p>A few helpers, taken from <t>l-file.lua</t>.</p>
- --ldx]]--
+ -- A few helpers, taken from 'l-file.lua'.
local file = file or { }
@@ -61,10 +54,7 @@ else
return (string.gsub(filename,"%.[%a%d]+$",""))
end
- --[[ldx--
- <p>We use the <l n='kpse'/> library unless a finder is already
- defined.</p>
- --ldx]]--
+ -- We use the KPSE library unless a finder is already defined.
local mpkpse = kpse.new("luatex","mpost")
@@ -76,10 +66,9 @@ else
end
end
- --[[ldx--
- <p>You can use your own reported if needed, as long as it handles multiple
- arguments and formatted strings.</p>
- --ldx]]--
+ -- You can use your own reporter if needed, as long as it handles multiple
+ -- arguments and formatted strings.
+
metapost.report = metapost.report or function(...)
if logs.report then
@@ -89,11 +78,9 @@ else
end
end
- --[[ldx--
- <p>The rest of this module is not documented. More info can be found in the
- <l n='luatex'/> manual, articles in user group journals and the files that
- ship with <l n='context'/>.</p>
- --ldx]]--
+ -- The rest of this module is not documented. More info can be found in the
+ -- LuaTeX manual, articles in user group journals and the files that ship
+ -- with ConTeXt.
function metapost.resetlastlog()
metapost.lastlog = ""
@@ -329,9 +316,8 @@ else
return true -- done
end
- --[[ldx--
- <p>We removed some message and tracing code. We might even remove the flusher</p>
- --ldx]]--
+ -- We removed some message and tracing code. We might even remove the
+ -- flusher.
local function pdf_startfigure(n,llx,lly,urx,ury)
tex.sprint(format("\\startMPLIBtoPDF{%s}{%s}{%s}{%s}",llx,lly,urx,ury))
@@ -443,9 +429,7 @@ else
return t
end
- --[[ldx--
- <p>Support for specials has been removed.</p>
- --ldx]]--
+ -- Support for specials has been removed.
function metapost.flush(result,flusher)
if result then
diff --git a/tex/generic/context/luatex/luatex-preprocessor.lua b/tex/generic/context/luatex/luatex-preprocessor.lua
index 8faa0b47e..b1debcd5c 100644
--- a/tex/generic/context/luatex/luatex-preprocessor.lua
+++ b/tex/generic/context/luatex/luatex-preprocessor.lua
@@ -6,11 +6,9 @@ if not modules then modules = { } end modules ['luatex-preprocessor'] = {
license = "see context related readme files"
}
---[[ldx
-<p>This is a stripped down version of the preprocessor. In
-<l n='context'/> we have a bit more, use a different logger, and
-use a few optimizations. A few examples are shown at the end.</p>
---ldx]]
+-- This is a stripped down version of the preprocessor. In ConTeXt we have a bit
+-- more, use a different logger, and use a few optimizations. A few examples are
+-- shown at the end.
local rep, sub, gmatch = string.rep, string.sub, string.gmatch
local insert, remove = table.insert, table.remove
@@ -99,10 +97,6 @@ local parser = lpeg.Cs { "converter",
converter = (lpeg.V("definition") + anything)^1,
}
---[[ldx
-<p>We provide a few commands.</p>
---ldx]]
-
-- local texkpse
local function find_file(...)
diff --git a/tex/latex/context/ppchtex/m-ch-de.sty b/tex/latex/context/ppchtex/m-ch-de.sty
deleted file mode 100644
index d35f8cf2d..000000000
--- a/tex/latex/context/ppchtex/m-ch-de.sty
+++ /dev/null
@@ -1,19 +0,0 @@
-\ProvidesPackage{m-ch-de}[2004/07/30 package wrapper for m-ch-de.tex]
-
-\newif\ifPPCH@PSTRICKS
-
-\DeclareOption{pstricks}{\PPCH@PSTRICKStrue}
-\DeclareOption{pictex}{\PPCH@PSTRICKSfalse}
-
-\ExecuteOptions{pictex}
-\ProcessOptions\relax
-
-\ifPPCH@PSTRICKS
- \RequirePackage{pstricks,pst-plot}
-\else
- \RequirePackage{m-pictex}
-\fi
-
-\input{m-ch-de.tex}
-
-\endinput \ No newline at end of file
diff --git a/tex/latex/context/ppchtex/m-ch-en.sty b/tex/latex/context/ppchtex/m-ch-en.sty
deleted file mode 100644
index e93a49867..000000000
--- a/tex/latex/context/ppchtex/m-ch-en.sty
+++ /dev/null
@@ -1,19 +0,0 @@
-\ProvidesPackage{m-ch-en}[2004/07/30 package wrapper for m-ch-en.tex]
-
-\newif\ifPPCH@PSTRICKS
-
-\DeclareOption{pstricks}{\PPCH@PSTRICKStrue}
-\DeclareOption{pictex}{\PPCH@PSTRICKSfalse}
-
-\ExecuteOptions{pictex}
-\ProcessOptions\relax
-
-\ifPPCH@PSTRICKS
- \RequirePackage{pstricks,pst-plot}
-\else
- \RequirePackage{m-pictex}
-\fi
-
-\input{m-ch-en.tex}
-
-\endinput \ No newline at end of file
diff --git a/tex/latex/context/ppchtex/m-ch-nl.sty b/tex/latex/context/ppchtex/m-ch-nl.sty
deleted file mode 100644
index 6e2b8d43d..000000000
--- a/tex/latex/context/ppchtex/m-ch-nl.sty
+++ /dev/null
@@ -1,19 +0,0 @@
-\ProvidesPackage{m-ch-nl}[2004/07/30 package wrapper for m-ch-nl.tex]
-
-\newif\ifPPCH@PSTRICKS
-
-\DeclareOption{pstricks}{\PPCH@PSTRICKStrue}
-\DeclareOption{pictex}{\PPCH@PSTRICKSfalse}
-
-\ExecuteOptions{pictex}
-\ProcessOptions\relax
-
-\ifPPCH@PSTRICKS
- \RequirePackage{pstricks,pst-plot}
-\else
- \RequirePackage{m-pictex}
-\fi
-
-\input{m-ch-nl.tex}
-
-\endinput \ No newline at end of file
diff --git a/tex/latex/context/ppchtex/m-pictex.sty b/tex/latex/context/ppchtex/m-pictex.sty
deleted file mode 100644
index a967b362d..000000000
--- a/tex/latex/context/ppchtex/m-pictex.sty
+++ /dev/null
@@ -1,5 +0,0 @@
-\ProvidesPackage{m-pictex}[2004/07/30 package wrapper for m-pictex.tex]
-
-\input{m-pictex.mkii}
-
-\endinput