From 831cb34465d19bd4e508d80c8a25f7848ac53778 Mon Sep 17 00:00:00 2001 From: Skef Iterum Date: Thu, 5 Dec 2024 15:18:20 -0800 Subject: [PATCH 1/6] VF-first feature writers Adapt tests for new code --- kernFeatureWriter.py | 886 ++++++++++++++++++++++++-------- markFeatureWriter.py | 408 ++++++++++++--- tests/kern_mock_rtl.fea | 2 +- tests/kern_ss4_exceptions.fea | 2 +- tests/test_kernFeatureWriter.py | 72 ++- tests/test_markFeatureWriter.py | 2 +- 6 files changed, 1056 insertions(+), 316 deletions(-) diff --git a/kernFeatureWriter.py b/kernFeatureWriter.py index b044e58..e35c89f 100755 --- a/kernFeatureWriter.py +++ b/kernFeatureWriter.py @@ -27,9 +27,12 @@ #### Usage: ```zsh - # write a basic kern feature file + # write a basic kern feature file for a static font python kernFeatureWriter.py font.ufo + # write a basic kern feature file for a variable font + python kernFeatureWriter.py font.designspace + # write a kern feature file with minimum absolute kerning value of 5 python kernFeatureWriter.py -min 5 font.ufo @@ -43,16 +46,25 @@ ''' import argparse -import defcon import itertools import time - +from abc import abstractmethod +from collections import defaultdict +from graphlib import TopologicalSorter, CycleError from pathlib import Path +from math import copysign +import defcon +from fontTools.designspaceLib import ( + DesignSpaceDocument, + DesignSpaceDocumentError, +) # constants RTL_GROUP = 'RTL_KERNING' RTL_TAGS = ['_ARA', '_HEB', '_RTL'] +SHORTINSTNAMEKEY = 'com.adobe.shortInstanceName' +GROUPSPLITSUFFIX = '_split' # helpers @@ -94,6 +106,9 @@ def __init__(self): # The default output filename self.output_name = 'kern.fea' + # The default name for the locations file + self.locations_name = 'locations.fea' + # Default mimimum kerning value. This value is _inclusive_, which # means that pairs that equal this absolute value will not be # ignored/trimmed. Pairs in range of +/- value will be trimmed. 
@@ -108,12 +123,19 @@ def __init__(self): # Write trimmed pairs to the output file (as comments). self.write_trimmed_pairs = False + # If variable, output user location values + # (default is design values) + self.user_values = False + # Write subtables? self.write_subtables = False # Write time stamp in .fea file header? self.write_timestamp = False + # Do not write the locations file? + self.no_locations = False + # Write single-element groups as glyphs? # (This has no influence on the output kerning data, but helps with # balancing subtables, and potentially makes the number of kerning @@ -124,6 +146,430 @@ def __init__(self): self.ignore_suffix = None +class KernAdapter(object): + ''' + Interface layer between underlying font source and the KerningSanitizer + ''' + + def has_data(self): + return self._has_data + + def has_locations(self): + ''' + Returns true when the source is variable and false otherwise + ''' + return False + + def get_locations(self, userUnits = False): + ''' + Returns a dictionary of location name to axis coordinates + ''' + assert False + return {} + + @abstractmethod + def all_glyphs(self): + ''' + Returns a set of the names of all glyphs in the sources + ''' + pass + + @abstractmethod + def glyph_order(self): + ''' + Returns a dictionary of all the glyphs in the source font where + the key is the name and the value is the order of the glyph in + the font. + ''' + pass + + @abstractmethod + def groups(self): + ''' + Returns a dict of all groups in the sources, with the name as a + key and a list of glyphs in the group as the value + ''' + pass + + @abstractmethod + def kerning(self): + ''' + Returns a dict of all kerning pairs in the sources, with the + key being a tuple of (left, right) and the value being the kerning + value. 
The elements of the tuple can be glyph names or group names + ''' + pass + + @abstractmethod + def postscript_font_name(self): + ''' + Returns the postscriptFontName stored in the sources, or None if + there is no name + ''' + pass + + @abstractmethod + def path(self): + ''' + Returns path to the top of the source as a Path() object + ''' + pass + + @abstractmethod + def merge_values(self, v_fallback, v_exception): + ''' + When v_exception is the value associated with a more specific + rule, and v_fallback is associated with a less specific but + matching rule, returns a combined value for the exception + filled in with parts of the fallback as needed. + ''' + pass + + + @abstractmethod + def value_string(self, value, rtl=False): + ''' + Returns the value as a string that can be used in a feature file. + When rtl is True the value will be for right-to-left use + ''' + pass + + @abstractmethod + def below_minimum(self, value, minimum): + ''' + Returns True if the value is considered below the minimum and + False otherwise. The value parameter must be from the kerning() + dictionary. The minimum parameter is an integer. + ''' + pass + + @abstractmethod + def value_is_zero(self, value): + ''' + Returns True if the value is considered equal to zero and + False otherwise. The value parameter must be from the + kerning() dictionary.
+ ''' + pass + + +class UFOKernAdapter(KernAdapter): + ''' + Adapter for a single UFO + ''' + + def __init__(self, f): + self._has_data = True + + if f: + if not f.kerning: + print('ERROR: The font has no kerning!') + self._has_data = False + return + if set(f.kerning.values()) == {0}: + print('ERROR: All kerning values are zero!') + self._has_data = False + return + self.f = f + + def all_glyphs(self): + return set(self.f.keys()) + + def glyph_order(self): + return {gn: i for (i, gn) in enumerate(self.f.keys())} + + def groups(self): + return self.f.groups + + def kerning(self): + return self.f.kerning + + def postscript_font_name(self): + try: + return self.f.info.postscriptFontName + except: + pass + return None + + def path(self): + return Path(self.f.path) + + def merge_values(self, v_fallback, v_exception): + return v_exception + + def value_string(self, value, rtl=False): + # adding 0 makes a -0.0 into a 0.0 + if rtl: + return '<{0:g} 0 {0:g} 0>'.format(value + 0) + else: + return '{0:g}'.format(value + 0) + + def below_minimum(self, value, minimum): + return abs(value) < minimum + + def value_is_zero(self, value): + return value == 0 + +class DesignspaceKernAdapter(KernAdapter): + ''' + Adapter for a UFO-based variable font with a designspace file + ''' + + def __init__(self, dsDoc): + self._has_data = True + + try: + self.fonts = dsDoc.loadSourceFonts(defcon.Font) + except DesignSpaceDocumentError as err: + print(err) + self._has_data = False + + for i, f in enumerate(self.fonts): + f.sourceIndex = i + + defaultSource = dsDoc.findDefault() + if defaultSource is not None: + self.defaultIndex = dsDoc.sources.index(defaultSource) + defaultFont = self.fonts.pop(self.defaultIndex) + self.fonts.insert(0, defaultFont) + else: + print('ERROR: did not find source at default location') + self._has_data = False + + default_location = dsDoc.sources[self.defaultIndex].location + self.defaultInstanceIndex = None + for i, inst in enumerate(dsDoc.instances): + if 
inst.designLocation == default_location: + self.defaultInstanceIndex = i + break + + if self.defaultInstanceIndex is None: + print('could not find named instance for default location') + + self.shortNames = [None] + for f in self.fonts[1:]: + if SHORTINSTNAMEKEY in f.lib: + self.shortNames.append(f.lib[SHORTINSTNAMEKEY]) + else: + self.shortNames.append(self.make_short_name(dsDoc, + f.sourceIndex)) + + self.dsDoc = dsDoc + + def has_locations(self): + return True + + def get_locations(self, userUnits = False): + tagDict = {} + for axisName in self.dsDoc.getAxisOrder(): + tagDict[axisName] = self.dsDoc.getAxis(axisName).tag + locDict = {} + for i, f in enumerate(self.fonts): + if i == 0: + continue + axisLocs = self.dsDoc.sources[f.sourceIndex].designLocation + if userUnits: + axisLocs = self.dsDoc.map_backward(axisLocs) + axisLocsByTag = {} + for axisName, axisTag in tagDict.items(): + axisLocsByTag[axisTag] = axisLocs[axisName] + locDict[self.shortNames[i]] = axisLocsByTag + return locDict + + def make_short_name(self, dsDoc, sourceIndex): + source = dsDoc.sources[sourceIndex] + location = source.location + anames = [] + for an in dsDoc.getAxisOrder(): + avstr = "%c%g" % (an[0], location[an]) + avstr = avstr.replace('.', 'p') + avstr = avstr.replace('-', 'n') + anames.append(avstr) + return '_'.join(anames) + + + def calc_glyph_data(self): + default_glyph_list = self.fonts[0].keys() + default_glyph_set = set(default_glyph_list) + + all_extra_glyphs = set() + self.glyph_sets = [default_glyph_set] + for i, f in enumerate(self.fonts): + if i == 0: + continue + current_glyph_set = set(f.keys()) + self.glyph_sets.append(current_glyph_set) + extra_glyphs = current_glyph_set - default_glyph_set + if extra_glyphs: + source_name = self.dsDoc.sources[f.sourceIndex].styleName + print(f'source {source_name} has these extra glyphs ' + f'not in default: [{", ".join(extra_glyphs)}]') + all_extra_glyphs |= extra_glyphs + + self.glyph_set = default_glyph_set | all_extra_glyphs + + 
self._glyph_order = {gn: i for (i, gn) in enumerate(default_glyph_list)} + if all_extra_glyphs: + extras_order = {gn: i for (i, gn) in + enumerate(all_extra_glyphs, + start=len(default_glyph_set))} + self._glyph_order.update(extras_order) + + def all_glyphs(self): + if not hasattr(self, 'glyph_set'): + self.calc_glyph_data() + return self.glyph_set + + def glyph_order(self): + if not hasattr(self, '_glyph_order'): + self.calc_glyph_data() + return self._glyph_order + + def groups(self): + if hasattr(self, '_groups'): + return self._groups + # Calculate partial orderings for groups across all fonts + group_orderings = defaultdict(lambda: defaultdict(set)) + for f in self.fonts: + for g, gl in f.groups.items(): + ordering = group_orderings[g] + for j, gn in enumerate(gl): + ordering[gn] |= set(gl[j+1:]) + + # Use the partial orderings to calculate a total ordering, + # or failing that use the order in which the glyphs were + # encountered + self._groups = {} + for g, ordering in group_orderings.items(): + try: + ts = TopologicalSorter(ordering) + l = list(ts.static_order()) + except CycleError as err: + print(f'glyphs in group {g} have different orderings across ' + 'different sources, ordering cannot be preserved') + l = ordering.keys() + self._groups[g] = l + + return self._groups + + def kerning(self): + if hasattr(self, '_kerning'): + return self._kerning + if not hasattr(self, 'glyph_sets'): + self.calc_glyph_data() + + # Collect full set of kerning pairs across sources + used_kerning_groups = set() + all_pairs = set() + for f in self.fonts: + for l, r in f.kerning.keys(): + all_pairs.add((l, r)) + if is_kerning_group(l): + used_kerning_groups.add(l) + if is_kerning_group(r): + used_kerning_groups.add(r) + + # Find and split groups with mixed sparseness + groups = self.groups() + group_remap = {} + for g in used_kerning_groups: + sparse_patterns = defaultdict(list) + for gl in groups[g]: + pattern = (i for i, glyphs in enumerate(self.glyph_sets) + if gl in
glyphs) + assert pattern + sparse_patterns[frozenset(pattern)].append(gl) + if len(sparse_patterns) == 1: + # Nothing sparse or all glyphs sparse in the same way + continue + remap_list = [] + for i, group_list in enumerate(sparse_patterns.values()): + new_group_name = g + GROUPSPLITSUFFIX + str(i) + groups[new_group_name] = group_list + remap_list.append(new_group_name) + del groups[g] + group_remap[g] = remap_list + + # Build up variable kerning values using remapped groups + self._kerning = {} + for l, r in all_pairs: + pair = (l, r) + left_list = group_remap.get(l, [l]) + right_list = group_remap.get(r, [r]) + for lelem in left_list: + lglyph = groups.get(lelem, [lelem])[0] + for relem in right_list: + value = [] + rglyph = groups.get(relem, [relem])[0] + for i, f in enumerate(self.fonts): + # Glyph wasn't present in font, set value to None + if (lglyph not in self.glyph_sets[i] or + rglyph not in self.glyph_sets[i]): + value.append(None) + continue + if pair in f.kerning: + value.append(f.kerning[pair]) + else: + # Use -0 to differentiate implicit 0 from + # (potential) explicit 0 value in file + value.append(-0.0) + self._kerning[(lelem, relem)] = value + + return self._kerning + + def postscript_font_name(self): + # Try the designspace document first + if self.defaultInstanceIndex is not None: + di = self.dsDoc.instances[self.defaultInstanceIndex] + if hasattr(di, 'postScriptFontName'): + return di.postScriptFontName + # Then the UFO via defcon + try: + return self.fonts[0].info.postscriptFontName + except: + pass + return None + + def path(self): + return Path(self.dsDoc.path) + + def merge_values(self, v_fallback, v_exception): + r = [] + for f, e in zip(v_fallback, v_exception): + if e is not None and not (e == 0 and copysign(1, e) == -1): + r.append(e) + else: + r.append(f) + return r + + def value_string(self, value, rtl=False): + # adding 0 makes a -0.0 into a 0.0 + assert len(value) == len(self.fonts) + format_str = '<{0:g} 0 {0:g} 0>' if rtl else 
'{0:g}' + def_value = value[0] + 0 + if all(v is None or v == def_value for v in value): + return format_str.format(def_value) + else: + value_strs = [] + for i, v in enumerate(value): + if v is None: + continue + vstr = format_str.format(v + 0) + if i == 0: + value_strs.append(vstr) + else: + value_strs.append('@' + self.shortNames[i] + ':' + vstr) + return '(' + ' '.join(value_strs) + ')' + + def below_minimum(self, value, minimum): + assert len(value) == len(self.fonts) + return all((v is None or abs(v) < minimum for v in value)) + + def value_is_zero(self, value): + assert len(value) == len(self.fonts) + return all((v is None or v == 0 for v in value)) + + class KerningSanitizer(object): ''' Sanitize UFO kerning and groups: @@ -135,53 +581,84 @@ class KerningSanitizer(object): ''' - def __init__(self, f): - self.f = f + def __init__(self, a): + self.a = a self.kerning = {} self.groups = {} self.reference_groups = {} + self.source_glyphs = self.a.all_glyphs() + self.source_glyph_order = self.a.glyph_order() + self.source_groups = self.a.groups() + self.source_kerning = self.a.kerning() + self.left_glyph_to_group = {} + self.left_conflict_groups = {} + self.right_glyph_to_group = {} + self.right_conflict_groups = {} + # empty groups self.empty_groups = [ - g for (g, gl) in self.f.groups.items() if not gl] + g for (g, gl) in self.source_groups.items() if not gl + ] # groups containing glyphs not in the UFO self.invalid_groups = [ - g for (g, gl) in self.f.groups.items() if not - set(gl) <= set(self.f.keys())] - # remaining groups - self.valid_groups = [ - g for g in self.f.groups.keys() if - g not in [set(self.invalid_groups) | set(self.empty_groups)] and - is_kerning_group(g) + g for (g, gl) in self.source_groups.items() if not + set(gl) <= self.source_glyphs ] - - self.valid_items = set(self.f.keys()) | set(self.valid_groups) + bad_group_set = set(self.invalid_groups) | set(self.empty_groups) + # remaining groups + self.valid_groups = { + g for g in 
self.source_groups.keys() + if g not in bad_group_set and is_kerning_group(g) + } + # Build glyph_to_group maps for each side, testing for and + # eliminating conflicts by marking groups as conflicting + left_group_set = { l for l, r in self.source_kerning.keys() + if l in self.valid_groups } + right_group_set = { r for l, r in self.source_kerning.keys() + if r in self.valid_groups } + for gs, g2g, cg in ((left_group_set, self.left_glyph_to_group, + self.left_conflict_groups), + (right_group_set, self.right_glyph_to_group, + self.right_conflict_groups)): + for g in gs: + for gl in self.source_groups[g]: + if gl in g2g: + cg[g] = gl + else: + g2g[gl] = g + self.valid_groups -= set(self.left_conflict_groups.keys()) + self.valid_groups -= set(self.right_conflict_groups.keys()) + self.valid_items = self.source_glyphs | self.valid_groups # pairs containing an invalid glyph or group self.invalid_pairs = [ - pair for pair in self.f.kerning.keys() if not - set(pair) <= set(self.valid_items)] - + pair for pair in self.source_kerning.keys() if not + set(pair) <= self.valid_items + ] + invalid_pair_set = set(self.invalid_pairs) self.kerning = { - pair: value for pair, value in self.f.kerning.items() if - pair not in self.invalid_pairs + pair: value for pair, value in self.source_kerning.items() if + pair not in invalid_pair_set } self.groups = { - gn: self.f.groups.get(gn) for gn in self.get_used_group_names()} - + gn: self.source_groups.get(gn) + for gn in self.get_used_group_names(self.source_groups) + } self.reference_groups = { - gn: g_list for gn, g_list in self.f.groups.items() if not - is_kerning_group(gn)} + gn: g_set for gn, g_set in self.source_groups.items() if not + is_kerning_group(gn) + } - def get_used_group_names(self): + def get_used_group_names(self, groups): ''' Return all groups which are actually used in kerning, by iterating through valid kerning pairs. 
''' - groups = list(self.f.groups.keys()) + group_order = {gn: i for (i, gn) in enumerate(groups.keys())} used_groups = [] for pair in self.kerning.keys(): used_groups.extend([item for item in pair if is_group(item)]) - return sorted(set(used_groups), key=groups.index) + return sorted(set(used_groups), key=lambda item: group_order[item]) def report(self): ''' @@ -190,33 +667,42 @@ def report(self): for group in self.empty_groups: print(f'group {group} is empty') for group in self.invalid_groups: - glyph_list = self.f.groups[group] - extraneous_glyphs = sorted( - set(glyph_list) - set(self.f.keys()), key=glyph_list.index) + glyph_set = set(self.source_groups[group]) + extraneous_glyphs = sorted(glyph_set - self.source_glyphs, + key=lambda item: self.source_glyph_order.get(item, item)) print( f'group {group} contains extraneous glyph(s): ' f'[{", ".join(extraneous_glyphs)}]') + for cg, g2g, desc in ((self.left_conflict_groups, + self.left_glyph_to_group,'left'), + (self.right_conflict_groups, + self.right_glyph_to_group, 'right')): + for group, gl in cg.items(): + print( + f'group {group} ignored because it contains glyph {gl} ' + f'double-mapped on {desc} side (other group is {g2g[gl]})') for pair in self.invalid_pairs: invalid_items = sorted( set(pair) - self.valid_items, key=pair.index) for item in invalid_items: if is_group(item): - item_type = 'group' + item_type = 'invalid group' else: - item_type = 'glyph' + item_type = 'non-existent glyph' print( - f'pair ({pair[0]} {pair[1]}) references non-existent ' + f'pair ({pair[0]} {pair[1]}) references ' f'{item_type} {item}' ) class KernProcessor(object): def __init__( - self, groups=None, kerning=None, reference_groups=None, + self, adapter, groups=None, kerning=None, reference_groups=None, + left_glyph_to_group={}, right_glyph_to_group={}, option_dissolve=False, ignore_suffix=None ): - + self.a = adapter # kerning dicts containing pair-value combinations self.glyph_glyph = {} self.glyph_glyph_exceptions = {} @@ -239,6
+725,12 @@ def __init__( self.groups = groups self.kerning = kerning self.reference_groups = reference_groups + self.left_glyph_to_group = { + gl: self._remap_name(g) for gl, g in left_glyph_to_group.items() + } + self.right_glyph_to_group = { + gl: self._remap_name(g) for gl, g in right_glyph_to_group.items() + } self.ignore_suffix = ignore_suffix @@ -251,8 +743,6 @@ def __init__( self.group_order = sorted(self.groups.keys()) self.kerning = self._remap_kerning(kerning) - self.grouped_left = self._get_grouped_glyphs(left=True) - self.grouped_right = self._get_grouped_glyphs(left=False) self.rtl_glyphs = self._get_rtl_glyphs(self.groups) self._find_exceptions() @@ -327,25 +817,6 @@ def _get_rtl_glyphs(self, groups): groups.get(rtl_group) for rtl_group in rtl_groups)) return rtl_glyphs - def _get_grouped_glyphs(self, left=False): - ''' - Return lists of glyphs used in groups on left or right side. - This is used to calculate the subtable size for a given list - of groups (groupFilterList) used within that subtable. - ''' - grouped = [] - - if left: - for left, right in self.kerning.keys(): - if is_group(left): - grouped.extend(self.groups.get(left)) - else: - for left, right in self.kerning.keys(): - if is_group(right): - grouped.extend(self.groups.get(right)) - - return sorted(set(grouped)) - def _dissolve_singleton_groups(self, groups, kerning): ''' Find any (non-RTL) group with a single-item glyph list. 
@@ -401,6 +872,46 @@ def _explode(self, glyph_list_a, glyph_list_b): return list(itertools.product(glyph_list_a, glyph_list_b)) + def _get_class_maps(self, pair): + left, right = pair + is_rtl = self._is_rtl(pair) + left_is_group = is_group(left) + right_is_group = is_group(right) + if left_is_group and right_is_group: + if is_rtl: + return (self.rtl_group_group, None) + else: + return (self.group_group, None) + elif left_is_group and not right_is_group: + if is_rtl: + return (self.rtl_group_group, self.rtl_group_glyph_exceptions) + else: + return (self.group_group, self.group_glyph_exceptions) + elif not left_is_group and right_is_group: + if is_rtl: + return (self.rtl_glyph_group, self.rtl_glyph_group_exceptions) + else: + return (self.glyph_group, self.glyph_group_exceptions) + else: + if is_rtl: + return (self.rtl_glyph_glyph, self.rtl_glyph_glyph_exceptions) + else: + return (self.glyph_glyph, self.glyph_glyph_exceptions) + + def _get_fallbacks(self, pair): + left, right = pair + left_group = None + if not is_group(left) and left in self.left_glyph_to_group: + left_group = self.left_glyph_to_group[left] + right_group = None + if not is_group(right) and right in self.right_glyph_to_group: + right_group = self.right_glyph_to_group[right] + if left_group is None and right_group is None: + return [] + candidate_pairs = [(left_group, right), (left, right_group), + (left_group, right_group)] + return [ c for c in candidate_pairs if c in self.kerning ] + def _find_exceptions(self): ''' Process kerning to find which pairs are exceptions, @@ -416,144 +927,20 @@ def _find_exceptions(self): del self.kerning[pair] continue - glyph_2_glyph = sorted( - [pair for pair in self.kerning.keys() if( - not is_group(pair[0]) and - not is_group(pair[1]))] - ) - glyph_2_group = sorted( - [pair for pair in self.kerning.keys() if( - not is_group(pair[0]) and - is_group(pair[1]))] - ) - group_2_item = sorted( - [pair for pair in self.kerning.keys() if( - is_group(pair[0]))] - ) - - # 
glyph to group pairs: - # --------------------- - for (glyph, group) in glyph_2_group: - pair = glyph, group - value = self.kerning[pair] - is_rtl_pair = self._is_rtl(pair) - if glyph in self.grouped_left: - # it is a glyph_to_group exception! - if is_rtl_pair: - self.rtl_glyph_group_exceptions[pair] = value - else: - self.glyph_group_exceptions[pair] = value + for pair, value in self.kerning.items(): + std_map, exp_map = self._get_class_maps(pair) + fallbacks = self._get_fallbacks(pair) + if len(fallbacks) > 0: + assert exp_map is not None + for f in fallbacks: + value = self.a.merge_values(self.kerning[f], value) + exp_map[pair] = value self.pairs_processed.append(pair) - else: - for grouped_glyph in self.groups[group]: - gr_pair = (glyph, grouped_glyph) - if gr_pair in glyph_2_glyph: - gr_value = self.kerning[gr_pair] - # that pair is a glyph_to_glyph exception! - if is_rtl_pair: - self.rtl_glyph_glyph_exceptions[gr_pair] = gr_value - else: - self.glyph_glyph_exceptions[gr_pair] = gr_value - - # skip the pair if the value is zero - if value == 0: + if self.a.value_is_zero(value): self.pairs_unprocessed.append(pair) - continue - - if is_rtl_pair: - self.rtl_glyph_group[pair] = value - else: - self.glyph_group[pair] = value - self.pairs_processed.append(pair) - - # group to group/glyph pairs: - # --------------------------- - exploded_pair_list = [] - exploded_pair_list_rtl = [] - - for (group_l, item_r) in group_2_item: - # the right item of the pair may be a group or a glyph - pair = (group_l, item_r) - value = self.kerning[pair] - is_rtl_pair = self._is_rtl(pair) - l_group_glyphs = self.groups[group_l] - - if is_group(item_r): - r_group_glyphs = self.groups[item_r] - else: - # not a group, therefore a glyph - if item_r in self.grouped_right: - # it is a group_to_glyph exception! 
- if is_rtl_pair: - self.rtl_group_glyph_exceptions[pair] = value - else: - self.group_glyph_exceptions[pair] = value - self.pairs_processed.append(pair) - continue # It is an exception, so move on to the next pair - else: - r_group_glyphs = [item_r] - - # skip the pair if the value is zero - if value == 0: - self.pairs_unprocessed.append(pair) - continue - - if is_rtl_pair: - self.rtl_group_group[pair] = value - exploded_pair_list_rtl.extend( - self._explode(l_group_glyphs, r_group_glyphs)) - else: - self.group_group[pair] = value - exploded_pair_list.extend( - self._explode(l_group_glyphs, r_group_glyphs)) - # list of all possible pair combinations for the - # @class @class kerning pairs of the font. - self.pairs_processed.append(pair) - - # Find the intersection of the exploded pairs with the glyph_2_glyph - # pairs collected above. Those must be exceptions, as they occur twice - # (once in class-kerning, once as a single pair). - self.exception_pairs = set(exploded_pair_list) & set(glyph_2_glyph) - self.exception_pairs_rtl = set(exploded_pair_list_rtl) & set(glyph_2_glyph) - - for pair in self.exception_pairs: - self.glyph_glyph_exceptions[pair] = self.kerning[pair] - - for pair in self.exception_pairs_rtl: - self.rtl_glyph_glyph_exceptions[pair] = self.kerning[pair] - - # finally, collect normal glyph to glyph pairs: - # --------------------------------------------- - # NB: RTL glyph-to-glyph pairs can only be identified if its - # glyphs are in the @RTL_KERNING group. - - for glyph_1, glyph_2 in glyph_2_glyph: - pair = glyph_1, glyph_2 - value = self.kerning[pair] - is_rtl_pair = self._is_rtl(pair) - if any( - [glyph_1 in self.grouped_left, glyph_2 in self.grouped_right] - ): - # it is an exception! 
- # exceptions expressed as glyph-to-glyph pairs -- these cannot - # be filtered and need to be added to the kern feature - # --------------------------------------------- - if is_rtl_pair: - self.rtl_glyph_glyph_exceptions[pair] = value - else: - self.glyph_glyph_exceptions[pair] = value - self.pairs_processed.append(pair) - else: - if ( - pair not in self.glyph_glyph_exceptions and - pair not in self.rtl_glyph_glyph_exceptions - ): - if self._is_rtl(pair): - self.rtl_glyph_glyph[pair] = self.kerning[pair] - else: - self.glyph_glyph[pair] = self.kerning[pair] + std_map[pair] = value self.pairs_processed.append(pair) @@ -658,12 +1045,15 @@ def _getNumberOfKernedGlyphs(self, kerning, groups): class run(object): - def __init__(self, font, args=None): + def __init__(self, adapter, args=None): if not args: args = Defaults() - self.f = font + if not (adapter and adapter.has_data()): + return + + self.a = adapter self.minKern = args.min_value self.write_subtables = args.write_subtables self.subtable_size = args.subtable_size @@ -672,31 +1062,26 @@ def __init__(self, font, args=None): self.ignore_suffix = args.ignore_suffix self.trimmedPairs = 0 - if self.f: - if not self.f.kerning: - print('ERROR: The font has no kerning!') - return - if set(self.f.kerning.values()) == {0}: - print('ERROR: All kerning values are zero!') - return - - ks = KerningSanitizer(self.f) - ks.report() - kp = KernProcessor( - ks.groups, ks.kerning, ks.reference_groups, - self.dissolve_single, self.ignore_suffix) - - fea_data = self._make_fea_data(kp) - self.header = self.make_header(args) - output_path = Path(self.f.path).parent / args.output_name - self.write_fea_data(fea_data, output_path) + ks = KerningSanitizer(self.a) + ks.report() + kp = KernProcessor( + self.a, ks.groups, ks.kerning, ks.reference_groups, + ks.left_glyph_to_group, ks.right_glyph_to_group, + self.dissolve_single, self.ignore_suffix) + + fea_data = self._make_fea_data(kp) + self.header = self.make_header(args) + output_path = 
self.a.path().parent / args.output_name + self.write_fea_data(fea_data, output_path) + if not args.no_locations and self.a.has_locations(): + locations_path = self.a.path().parent / args.locations_name + self.write_locations(self.a, locations_path, args.user_values) def make_header(self, args): try: - ps_name = self.f.info.postscriptFontName + ps_name = self.a.postscript_font_name() except Exception: ps_name = None - header = [] if args.write_timestamp: header.append(f'# Created: {time.ctime()}') @@ -714,17 +1099,14 @@ def _dict2pos(self, pair_value_dict, minimum=0, enum=False, rtl=False): trimmed = 0 for (item_1, item_2), value in pair_value_dict.items(): - if rtl: - value_str = '<{0} 0 {0} 0>'.format(value) - else: - value_str = str(value) + value_str = self.a.value_string(value, rtl) posLine = f'pos {item_1} {item_2} {value_str};' if enum: data.append('enum ' + posLine) else: - if abs(value) < minimum: + if self.a.below_minimum(value, minimum): if self.write_trimmed_pairs: data.append('# ' + posLine) trimmed += 1 @@ -904,28 +1286,53 @@ def write_fea_data(self, data, output_path): print(f'Output file written to {output_path}') + def write_locations(self, adapter, locations_path, userUnits = False): + + print(f'Saving {locations_path.name} file...') + + data = ['# Named locations', ''] + + unit = 'u' if userUnits else 'd' + for name, axisLocs in adapter.get_locations(userUnits).items(): + locationStr = ', '.join(('%s=%g%s' % (tag, val, unit) for + tag, val in axisLocs.items())) + data.append(f'locationDef {locationStr} @{name};') + + with open(locations_path, 'w') as blob: + blob.write('\n'.join(data)) + blob.write('\n') + + print(f'Output file written to {locations_path}') + def check_input_file(parser, file_name): - fn = Path(file_name) - if fn.suffix.lower() != '.ufo': - parser.error(f'{fn.name} is not a UFO file') - if not fn.exists(): - parser.error(f'{fn.name} does not exist') + file_path = Path(file_name) + if file_path.suffix.lower() == '.ufo': + if not 
file_path.exists(): + parser.error(f'{file_name} does not exist') + elif not file_path.is_dir(): + parser.error(f'{file_name} is not a directory') + elif file_path.suffix.lower() == '.designspace': + if not file_path.exists(): + parser.error(f'{file_name} does not exist') + elif not file_path.is_file(): + parser.error(f'{file_name} is not a file') + else: + parser.error(f'Unrecognized input file type') return file_name - def get_args(args=None): defaults = Defaults() parser = argparse.ArgumentParser( description=__doc__, - formatter_class=argparse.ArgumentDefaultsHelpFormatter + formatter_class=argparse.RawDescriptionHelpFormatter ) parser.add_argument( 'input_file', type=lambda f: check_input_file(parser, f), - help='input UFO file') + help='input UFO or designspace file') parser.add_argument( '-o', '--output_name', @@ -933,6 +1340,12 @@ def get_args(args=None): default=defaults.output_name, help='change the output file name') + parser.add_argument( + '-l', '--locations_name', + action='store', + default=defaults.locations_name, + help='change the locations file name (variable font only)') + parser.add_argument( '-m', '--min_value', action='store', @@ -967,6 +1380,19 @@ def get_args(args=None): default=defaults.write_timestamp, help='write time stamp in header of output file') + parser.add_argument( + '--no_locations', + action='store_true', + default=defaults.no_locations, + help='Do not write locations file (variable font only)') + + parser.add_argument( + '-u', '--user_values', + action='store_true', + default=defaults.user_values, + help='For variable fonts, output user axis locations ' + 'rather than design axis locations') + parser.add_argument( '--dissolve_single', action='store_true', @@ -990,8 +1416,14 @@ def get_args(args=None): def main(test_args=None): args = get_args(test_args) - f = defcon.Font(args.input_file) - run(f, args) + input_path = Path(args.input_file) + if input_path.is_file(): + dsDoc = DesignSpaceDocument.fromfile(input_path) + a = 
DesignspaceKernAdapter(dsDoc) + else: + a = UFOKernAdapter(defcon.Font(args.input_file)) + if a.has_data(): + run(a, args) if __name__ == '__main__': diff --git a/markFeatureWriter.py b/markFeatureWriter.py index 6f357a4..cb3eb71 100755 --- a/markFeatureWriter.py +++ b/markFeatureWriter.py @@ -45,9 +45,12 @@ #### Usage: ```zsh - # write a basic mark feature + # write a basic mark feature for a static font python markFeatureWriter.py font.ufo + # write a basic mark feature for a variable font + python markFeatureWriter.py font.designspace + # write mark and mkmk feature files python markFeatureWriter.py -m font.ufo @@ -62,12 +65,22 @@ import argparse import sys -from defcon import Font +from abc import abstractmethod from pathlib import Path +from collections import defaultdict, namedtuple +from graphlib import TopologicalSorter, CycleError +from math import inf + +from defcon import Font +from fontTools.designspaceLib import ( + DesignSpaceDocument, + DesignSpaceDocumentError, +) # ligature anchors end with 1ST, 2ND, 3RD, etc. 
ORDINALS = ['1ST', '2ND', '3RD'] + [f'{i}TH' for i in range(4, 10)] - +SHORTINSTNAMEKEY = 'com.adobe.shortInstanceName' +NONEPOS = (-inf, -inf) class Defaults(object): """ @@ -93,11 +106,19 @@ def __init__(self): def check_input_file(parser, file_name): - fn = Path(file_name) - if fn.suffix.lower() != '.ufo': - parser.error(f'{fn.name} is not a UFO file') - if not fn.exists(): - parser.error(f'{fn.name} does not exist') + file_path = Path(file_name) + if file_path.suffix.lower() == '.ufo': + if not file_path.exists(): + parser.error(f'{file_name} does not exist') + elif not file_path.is_dir(): + parser.error(f'{file_name} is not a directory') + elif file_path.suffix.lower() == '.designspace': + if not file_path.exists(): + parser.error(f'{file_name} does not exist') + elif not file_path.is_file(): + parser.error(f'{file_name} is not a file') + else: + parser.error(f'Unrecognized input file type') return file_name @@ -105,16 +126,14 @@ def get_args(args=None): defaults = Defaults() parser = argparse.ArgumentParser( - description=( - 'Mark Feature Writer' - ), - formatter_class=argparse.ArgumentDefaultsHelpFormatter, + description=__doc__, + formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( 'input_file', type=lambda f: check_input_file(parser, f), - help='input UFO file') + help='input UFO or designspace file') parser.add_argument( '-t', '--trim_tags', @@ -215,11 +234,6 @@ def process_anchor_name(anchor_name, trim=False): return anchor_name -def round_coordinate(coordinate): - rounded_coordinate = tuple(int(round(v)) for v in coordinate) - return rounded_coordinate - - class AnchorMate(object): ''' AnchorMate lifts anchors from one or more glyphs and @@ -230,6 +244,280 @@ def __init__(self, anchor): self.pos_name_dict = {} +AnchorInfo = namedtuple('AnchorInfo', 'name, position') + + +class GlyphAnchorInfo(object): + ''' + The GlyphAnchorInfo object is just an attribute-based data structure + for communicating anchor parameters, somewhat 
based on the defcon + structure. It uses three attributes: "name", which is the the name of + the glyph, "width", which is the advance width, and "anchors", which + is a list of AnchorInfo named tuples. + ''' + + def __init__(self, name, width, anchor_list): + self.name = name + self.width = width + self.anchors = anchor_list + + +class MarkAdapter(object): + ''' + Interface between underlying data source and MarkFeatureWriter + ''' + + @abstractmethod + def anchor_glyphs(self): + ''' + Returns a dict of GlyphAnchorInfo objects, one per named glyph + ''' + pass + + @abstractmethod + def glyph_order(self): + ''' + Returns a dictionary of all the glyphs in the source font where + the key is the name and the value is the order of the glyph in + the font. + ''' + pass + + @abstractmethod + def groups(self): + ''' + Returns a dict of all groups in the sources, with the name as a + key and a list of glyphs in the group as the value + ''' + pass + + @abstractmethod + def path(self): + ''' + Returns path to the top of the source as a Path() object + ''' + pass + + @abstractmethod + def unique_name(self, prefix, position): + ''' + Returns a name starting with prefix that is unique relative to + the position parameter. (Can assume it will be called once per + unique position, so it does not need to track names already + returned.) + ''' + pass + + @abstractmethod + def anchor_position_string(self, position): + ''' + Returns the position as a string that can be used in an anchor + directive in a feature file. 
+ ''' + pass + +class UFOMarkAdapter(MarkAdapter): + ''' + Adapter for a single UFO + ''' + + def __init__(self, path): + self.f = Font(path) + if not self.f: + sys.exit(f'Problem opening UFO file {path}') + + def anchor_glyphs(self): + d = {} + for g in self.f: + anchor_list = [AnchorInfo(a.name, (round(a.x), round(a.y))) + for a in g.anchors] + d[g.name] = GlyphAnchorInfo(g.name, g.width, anchor_list) + return d + + def glyph_order(self): + return {gn: i for (i, gn) + in enumerate(self.f.lib['public.glyphOrder'])} + + def groups(self): + return self.f.groups + + def path(self): + return Path(self.f.path) + + def unique_name(self, prefix, position): + # represent negative numbers with “n”, because minus is + # reserved for ranges: + str_x = str(position[0]).replace('-', 'n') + str_y = str(position[1]).replace('-', 'n') + return f'{prefix}_{str_x}_{str_y}' + + def anchor_position_string(self, position): + return str(position[0]) + ' ' + str(position[1]) + + +class DesignspaceMarkAdapter(MarkAdapter): + ''' + Adapter for a UFO-based variable font with a designspace file + ''' + + def __init__(self, dsDoc): + try: + self.fonts = dsDoc.loadSourceFonts(Font) + except DesignSpaceDocumentError as err: + sys.exit(err) + + for i, f in enumerate(self.fonts): + f.sourceIndex = i + + defaultSource = dsDoc.findDefault() + if defaultSource is not None: + defaultIndex = dsDoc.sources.index(defaultSource) + default_font = self.fonts.pop(defaultIndex) + self.fonts.insert(0, default_font) + else: + sys.exit('Error: did not find source for default instance') + + # Add name map + self.shortNames = [None] + for f in self.fonts[1:]: + if SHORTINSTNAMEKEY in f.lib: + self.shortNames.append(f.lib[SHORTINSTNAMEKEY]) + else: + self.shortNames.append(self.make_short_name(dsDoc, + f.sourceIndex)) + + self.base_names = {} + self.dsDoc = dsDoc + + # Must match function in kernFeatureWriter, which writes locations.fea + def make_short_name(self, dsDoc, sourceIndex): + source = 
dsDoc.sources[sourceIndex] + location = source.location + anames = [] + for an in dsDoc.getAxisOrder(): + avstr = "%c%g" % (an[0], location[an]) + avstr = avstr.replace('.', 'p') + avstr = avstr.replace('-', 'n') + anames.append(avstr) + return '_'.join(anames) + + def anchor_glyphs(self): + d = {} + f = self.fonts[0] + for g in f: + position_map = {} + for a in g.anchors: + # Put the default instance position first so that the + # position sorting groups those together + position_map[a.name] = [(round(a.x), round(a.y))] + anchorNameSet = set(position_map.keys()) + ni = 0 + for i, source in enumerate(self.fonts): + if i == 0: + continue + # If the glyph is absent put NONEPOS as the position + if g.name not in source: + for plist in position_map.values(): + postions.append(NONEPOS) + continue + foundNameSet = set() + for sga in source[g.name].anchors: + if sga.name not in anchorNameSet: + sys.exit(f'Error: glyph {g.name} has anchor {a.name} ' + f'in source of instance {self.shortNames[i]} ' + 'but not in source of default instance') + else: + plist = position_map[sga.name] + plist.append((round(sga.x), round(sga.y))) + foundNameSet.add(sga.name) + missingNames = anchorNameSet - foundNameSet + if missingNames: + mnamestr = ', '.join(missingNames) + sys.exit(f'Error: glyph {g.name} has anchors {mnamestr} ' + 'in source of default instance but not ' + f'source of instance {self.shortNames[i]}') + anchor_list = [AnchorInfo(a.name, tuple(position_map[a.name])) + for a in g.anchors] + d[g.name] = GlyphAnchorInfo(g.name, g.width, anchor_list) + return d + + def glyph_order(self): + # Use the glyph ordering in the source for the default instance + # as that should (always?) 
have all the glyphs + f = self.fonts[0] + return {gn: i for (i, gn) + in enumerate(f.lib['public.glyphOrder'])} + + def groups(self): + if hasattr(self, '_groups'): + return self._groups + # Calculate partial orderings for groups across all fonts + group_orderings = defaultdict(lambda: defaultdict(set)) + for i, f in enumerate(self.fonts): + for g, gl in f.groups.items(): + ordering = group_orderings[g] + for j, gn in enumerate(gl): + ordering[gn] |= set(gl[j+1:]) + + # Use the partial orderings to calculate a total ordering, + # or failing that use the order in which the glyphs were + # encountered + self._groups = {} + for g, ordering in group_orderings.items(): + try: + ts = TopologicalSorter(ordering) + l = list(ts.static_order()) + except CycleError as err: + print(f'glyphs in group {g} have different orderings across ' + 'different sources, ordering cannot be preserved') + l = ordering.keys() + self._groups[g] = l + + return self._groups + + def path(self): + return Path(self.dsDoc.path) + + def unique_name(self, prefix, position): + # represent negative numbers with “n”, because minus is + # reserved for ranges: + str_x = str(position[0][0]).replace('-', 'n') + str_y = str(position[0][1]).replace('-', 'n') + # We choose names based on the position in the default instance + # but other position values could be different. A position is + # a tuple of two-tuples, one for each source, and are always the + # same length so they can be sorted and compared for identity. + # So all we need to do here is be careful not to hand out the + # same name for two different positions. 
Because unique_name + # will only be called with a prefix,position pair once, all we + # need to do is track how many we've handed out so far and add + # a unique suffix + base_name = f'{prefix}_{str_x}_{str_y}' + if base_name not in self.base_names: + self.base_names[base_name] = 0 + return base_name + else: + rev = self.base_names[base_name] + 1 + self.base_names[base_name] = rev + return base_name + '_' + str(rev) + + + def anchor_position_string(self, position): + assert len(position) == len(self.fonts) + def_pos = position[0] + def_str = str(def_pos[0]) + ' ' + str(def_pos[1]) + if all(p == NONEPOS or p == def_pos for p in position[1:]): + return def_str + + pos_strs = ['<' + def_str + '>'] + for i, p in enumerate(position): + if i == 0 or p == NONEPOS: + continue + pos_strs.append('@' + self.shortNames[i] + ':<' + + str(p[0]) + ' ' + str(p[1]) + '>') + return '(' + ' '.join(pos_strs) + ')' + + class MarkFeatureWriter(object): def __init__(self, args=None): @@ -248,33 +536,42 @@ def __init__(self, args=None): self.write_classes = args.write_classes if args.input_file: - ufo_path = Path(args.input_file) - self.run(ufo_path) + input_path = Path(args.input_file) + if input_path.is_file(): + dsDoc = DesignSpaceDocument.fromfile(input_path) + adapter = DesignspaceMarkAdapter(dsDoc) + else: + adapter = UFOMarkAdapter(Path(args.input_file)) + self.run(adapter) - def run(self, ufo_path): - f = Font(ufo_path) - ufo_dir = ufo_path.parent - self.glyph_order = f.lib['public.glyphOrder'] + def run(self, adapter): + self.adapter = adapter + self.glyphs = adapter.anchor_glyphs() + self.glyph_order = adapter.glyph_order() + self.groups = adapter.groups() + output_dir = adapter.path().parent - combining_marks_group = f.groups.get(self.mkgrp_name, []) - if not combining_marks_group: + if self.mkgrp_name not in self.groups: sys.exit( f'No group named "{self.mkgrp_name}" found. ' 'Please add it to your UFO file ' '(and combining marks to it).' 
) - combining_marks = [f[g_name] for g_name in combining_marks_group] + combining_marks_group = self.groups[self.mkgrp_name] + + combining_marks = [self.glyphs[g_name] + for g_name in combining_marks_group] # find out which attachment anchors exist in combining marks - combining_anchor_names = set([ + combining_anchor_names = set(( process_anchor_name(a.name, self.trim_tags) for - g in combining_marks for a in g.anchors if is_attaching(a.name)]) + g in combining_marks for a in g.anchors if is_attaching(a.name))) mkmk_marks = [g for g in combining_marks if not all( [is_attaching(anchor.name) for anchor in g.anchors])] base_glyphs = [ - g for g in f if + g for g in self.glyphs.values() if g.anchors and g not in combining_marks and g.width != 0 and @@ -360,7 +657,7 @@ def run(self, ufo_path): consolidated_content = [] if self.write_classes: # write the classes into an external file if so requested - write_output(ufo_dir, self.mkclass_file, mark_class_content) + write_output(output_dir, self.mkclass_file, mark_class_content) else: # otherwise they go on top of the mark.fea file consolidated_content.extend(mark_class_content) @@ -370,15 +667,15 @@ def run(self, ufo_path): if self.write_mkmk: # write mkmk only if requested, in the adjacent mkmk.fea file - write_output(ufo_dir, self.mkmk_file, mkmk_feature_content) + write_output(output_dir, self.mkmk_file, mkmk_feature_content) if self.indic_format: # write abvm/blwm in adjacent files. 
- write_output(ufo_dir, self.abvm_file, abvm_feature_content) - write_output(ufo_dir, self.blwm_file, blwm_feature_content) + write_output(output_dir, self.abvm_file, abvm_feature_content) + write_output(output_dir, self.blwm_file, blwm_feature_content) # write the mark feature - write_output(ufo_dir, self.mark_file, consolidated_content) + write_output(output_dir, self.mark_file, consolidated_content) def make_liga_anchor_dict(self, glyph_list, attachment_list=None): ''' @@ -404,8 +701,7 @@ def make_liga_anchor_dict(self, glyph_list, attachment_list=None): trimmed_anchor_name, self.trim_tags) ap = anchor_dict.setdefault(anchor_name, {}) index_pos_dict = ap.setdefault(g.name, {}) - position = round_coordinate((anchor.x, anchor.y)) - index_pos_dict[anchor_index] = position + index_pos_dict[anchor_index] = anchor.position return anchor_dict def make_anchor_dict(self, glyph_list, attachment_list=None): @@ -425,9 +721,8 @@ def make_anchor_dict(self, glyph_list, attachment_list=None): for g in glyph_list: for anchor in g.anchors: anchor_name = process_anchor_name(anchor.name, self.trim_tags) - position = round_coordinate((anchor.x, anchor.y)) am = anchor_dict.setdefault(anchor_name, AnchorMate(anchor)) - am.pos_name_dict.setdefault(position, []).append(g.name) + am.pos_name_dict.setdefault(anchor.position, []).append(g.name) if attachment_list: # remove anchors that do not have an attachment equivalent @@ -442,7 +737,7 @@ def sort_gnames(self, glyph_list): ''' Sort list of glyph names based on the glyph order ''' - glyph_list.sort(key=lambda x: self.glyph_order.index(x)) + glyph_list.sort(key=lambda x: self.glyph_order[x]) return glyph_list def make_mark_class(self, anchor_name, a_mate): @@ -452,25 +747,22 @@ def make_mark_class(self, anchor_name, a_mate): single_attachments = [] for position, g_names in pos_gname: - pos_x, pos_y = position + position_string = self.adapter.anchor_position_string(position) if len(g_names) > 1: sorted_g_names = self.sort_gnames(g_names) - 
# represent negative numbers with “n”, because minus is - # reserved for ranges: - str_x = str(pos_x).replace('-', 'n') - str_y = str(pos_y).replace('-', 'n') - group_name = f'@mGC{anchor_name}_{str_x}_{str_y}' + group_name = self.adapter.unique_name(f'@mGC{anchor_name}', + position) group_glyphs = ' '.join(sorted_g_names) mgroup_definitions.append( f'{group_name} = [ {group_glyphs} ];') mgroup_attachments.append( - f'markClass {group_name} ' + f'markClass {group_name} ' f'@MC{anchor_name};') else: g_name = g_names[0] single_attachments.append( - f'markClass {g_name} ' + f'markClass {g_name} ' f'@MC{anchor_name};') return mgroup_definitions, mgroup_attachments, single_attachments @@ -545,7 +837,7 @@ def make_mark_lookup(self, anchor_name, a_mate): for position, g_list in a_mate.pos_name_dict.items(): pos_to_gname.append((position, self.sort_gnames(g_list))) - pos_to_gname.sort(key=lambda x: self.glyph_order.index(x[1][0])) + pos_to_gname.sort(key=lambda x: self.glyph_order[x[1][0]]) # data looks like this: # [((235, 506), ['tonos']), ((269, 506), ['dieresistonos'])] @@ -554,7 +846,7 @@ def make_mark_lookup(self, anchor_name, a_mate): single_attachments = [] for position, g_names in pos_to_gname: - pos_x, pos_y = position + position_string = self.adapter.anchor_position_string(position) if len(g_names) > 1: sorted_g_names = self.sort_gnames(g_names) # GNUFL introduces the colon as part of the glyph name, @@ -566,14 +858,14 @@ def make_mark_lookup(self, anchor_name, a_mate): mgroup_definitions.append( f'\t{group_name} = [ {group_glyphs} ];') mgroup_attachments.append( - f'\tpos base {group_name} ' + f'\tpos base {group_name} ' f'mark @MC_{anchor_name};') else: g_name = g_names[0] single_attachments.append( # pos base AE mark @MC_above; - f'\tpos base {g_name} ' + f'\tpos base {g_name} ' f'mark @MC_{anchor_name};') output = [open_lookup] @@ -597,14 +889,14 @@ def make_liga_lookup(self, anchor_name, gname_index_dict): for g_name in sorted_g_names: liga_attachment = 
f'\tpos ligature {g_name}' for a_index, position in sorted(gname_index_dict[g_name].items()): - pos_x, pos_y = position + position_string = self.adapter.anchor_position_string(position) if a_index == 0: liga_attachment += ( - f' ' + f' ' f'mark @MC_{anchor_name}') else: liga_attachment += ( - f' ligComponent ' + f' ligComponent ' f'mark @MC_{anchor_name}') liga_attachment += ';' liga_attachments.append(liga_attachment) @@ -624,16 +916,16 @@ def make_mkmk_lookup(self, anchor_name, a_mate): for position, g_list in a_mate.pos_name_dict.items(): pos_to_gname.append((position, self.sort_gnames(g_list))) - pos_to_gname.sort(key=lambda x: self.glyph_order.index(x[1][0])) + pos_to_gname.sort(key=lambda x: self.glyph_order[x[1][0]]) mkmk_attachments = [] for position, g_names in pos_to_gname: - pos_x, pos_y = position + position_string = self.adapter.anchor_position_string(position) sorted_g_names = self.sort_gnames(g_names) for g_name in sorted_g_names: mkmk_attachments.append( # pos mark acmb mark @MC_above; - f'\tpos mark {g_name} ' + f'\tpos mark {g_name} ' f'mark @MC_{anchor_name};') output = [open_lookup] diff --git a/tests/kern_mock_rtl.fea b/tests/kern_mock_rtl.fea index 246e9bd..3328eb1 100644 --- a/tests/kern_mock_rtl.fea +++ b/tests/kern_mock_rtl.fea @@ -25,6 +25,7 @@ lookupflag RightToLeft IgnoreMarks; pos V backslash <10 0 10 0>; pos backslash V <-100 0 -100 0>; pos backslash backslash <-82 0 -82 0>; +pos iotadieresistonos lambda <60 0 60 0>; pos lambda lambda <5 0 5 0>; # RTL glyph, glyph exceptions: @@ -32,7 +33,6 @@ pos Lcaron V <-57 0 -57 0>; pos V adieresis <-60 0 -60 0>; pos V atilde <-50 0 -50 0>; pos V idieresis <30 0 30 0>; -pos iotadieresistonos lambda <60 0 60 0>; pos tcaron backslash <40 0 40 0>; # RTL glyph, group: diff --git a/tests/kern_ss4_exceptions.fea b/tests/kern_ss4_exceptions.fea index 801f7e6..75e57f6 100644 --- a/tests/kern_ss4_exceptions.fea +++ b/tests/kern_ss4_exceptions.fea @@ -20,6 +20,7 @@ pos V backslash 10; pos backslash V -100; 
pos backslash backslash -82; +pos iotadieresistonos lambda 60; pos lambda lambda 5; # glyph, glyph exceptions: @@ -27,7 +28,6 @@ pos Lcaron V -57; pos V adieresis -60; pos V atilde -50; pos V idieresis 30; -pos iotadieresistonos lambda 60; pos tcaron backslash 40; # glyph, group: diff --git a/tests/test_kernFeatureWriter.py b/tests/test_kernFeatureWriter.py index 882ab21..853d7d7 100755 --- a/tests/test_kernFeatureWriter.py +++ b/tests/test_kernFeatureWriter.py @@ -14,6 +14,16 @@ TEMP_DIR = Path(get_temp_dir_path()) +def run_local(input, args): + if input is None: + a = None + elif isinstance(input, defcon.Font): + a = UFOKernAdapter(input) + else: + a = DesignspaceKernAdapter(input) + return run(a, args) + + class Dummy(object): ''' for ad-hoc arguments @@ -45,7 +55,7 @@ def test_get_args(): def test_make_header(): - kfw = run(None, None) + kfw = run_local(None, None) dummy_args = Dummy() dummy_args.min_value = 1 dummy_args.write_timestamp = False @@ -58,7 +68,9 @@ def test_make_header(): def test_dict2pos(): - kfw = run(None, None) + ufo_path = TEST_DIR / 'kern_example.ufo' + f = defcon.Font(ufo_path) + kfw = run_local(f, None) kfw.write_trimmed_pairs = False pv_dict = { @@ -90,7 +102,9 @@ def test_dict2pos(): def test_remap_name(): - kp = KernProcessor() + ufo_path = TEST_DIR / 'kern_example.ufo' + a = UFOKernAdapter(defcon.Font(ufo_path)) + kp = KernProcessor(a) assert kp._remap_name('public.kern1.example') == '@MMK_L_example' assert kp._remap_name('public.kern1.@MMK_L_example') == '@MMK_L_example' assert kp._remap_name('public.kern2.example') == '@MMK_R_example' @@ -113,7 +127,7 @@ def test_remap_groups(): gr.replace('public.kern1.', '@MMK_L_'): gl for gr, gl in groups_l.items()} expected_groups_r = { gr.replace('public.kern2.', '@MMK_R_'): gl for gr, gl in groups_r.items()} - kp = KernProcessor() + kp = KernProcessor(UFOKernAdapter(f)) assert kp._remap_groups(groups_l) == expected_groups_l assert kp._remap_groups(groups_r) == expected_groups_r @@ -137,7 
+151,7 @@ def test_remap_kerning(): lambda mo: replacements[mo.group()], ' '.join(pair)).split() remapped_pairs.append(tuple(new_pair)) - kp = KernProcessor() + kp = KernProcessor(UFOKernAdapter(f)) assert list(kp._remap_kerning(f.kerning).keys()) == remapped_pairs @@ -147,7 +161,7 @@ def test_sanityCheck(capsys): ''' ufo_path = TEST_DIR / 'kern_example.ufo' f = defcon.Font(ufo_path) - kp = KernProcessor() + kp = KernProcessor(UFOKernAdapter(f)) kp.pairs_processed = ['some pair'] kp.kerning = f.kerning kp._sanityCheck() @@ -163,7 +177,7 @@ def test_no_kerning(capsys): f = defcon.Font(ufo_path) f.kerning.clear() args = Defaults() - run(f, args) + run_local(f, args) out, err = capsys.readouterr() assert f'has no kerning' in out @@ -172,7 +186,7 @@ def test_all_zero(capsys): ufo_path = TEST_DIR / 'kern_all_zero_value.ufo' f = defcon.Font(ufo_path) args = Defaults() - run(f, args) + run_local(f, args) out, err = capsys.readouterr() assert f'All kerning values are zero' in out @@ -188,7 +202,7 @@ def test_default(): args.input_file = ufo_path args.output_name = fea_temp f = defcon.Font(ufo_path) - run(f, args) + run_local(f, args) assert read_file(fea_temp) == read_file(fea_example) ''' @@ -196,7 +210,7 @@ def test_default(): for this UFO (no single-item groups) ''' args.dissolve_single = True - run(f, args) + run_local(f, args) assert read_file(fea_temp) == read_file(fea_example) @@ -211,7 +225,7 @@ def test_default_ufo2(): args.input_file = ufo_path args.output_name = fea_temp f = defcon.Font(ufo_path) - run(f, args) + run_local(f, args) assert read_file(fea_temp) == read_file(fea_example) @@ -252,7 +266,7 @@ def test_invalid_input_file(capsys): with pytest.raises(SystemExit): main([str(ufo_path)]) out, err = capsys.readouterr() - assert 'some_file.xxx is not a UFO file' in err + assert 'Unrecognized input file type' in err def test_default_rtl(): @@ -263,7 +277,7 @@ def test_default_rtl(): args.input_file = ufo_path args.output_name = fea_temp f = 
defcon.Font(ufo_path) - run(f, args) + run_local(f, args) assert read_file(fea_temp) == read_file(fea_example) @@ -280,7 +294,7 @@ def test_subtable(): args.subtable_size = 128 args.output_name = fea_temp f = defcon.Font(ufo_path) - run(f, args) + run_local(f, args) assert read_file(fea_temp) == read_file(fea_example) @@ -297,7 +311,7 @@ def test_subtable_rtl(): args.subtable_size = 128 args.output_name = fea_temp f = defcon.Font(ufo_path) - run(f, args) + run_local(f, args) assert read_file(fea_temp) == read_file(fea_example) @@ -314,12 +328,12 @@ def test_dissolve(): args.input_file = ufo_path args.output_name = fea_temp_singletons f = defcon.Font(ufo_path) - run(f, args) + run_local(f, args) assert read_file(fea_temp_singletons) == read_file(fea_example_singletons) args.dissolve_single = True args.output_name = fea_temp_dissolved - run(f, args) + run_local(f, args) assert read_file(fea_temp_dissolved) == read_file(fea_example_dissolved) @@ -335,7 +349,7 @@ def test_left_side_exception(): args.input_file = ufo_path args.output_name = fea_temp f = defcon.Font(ufo_path) - run(f, args) + run_local(f, args) assert read_file(fea_temp) == read_file(fea_example) @@ -347,7 +361,7 @@ def test_unused_groups(): args = Defaults() args.input_file = ufo_path args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == read_file(fea_temp) @@ -362,7 +376,7 @@ def test_ignored_groups(): args = Defaults() args.input_file = ufo_path args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == read_file(fea_temp) @@ -377,7 +391,7 @@ def test_no_groups(): args = Defaults() args.input_file = ufo_path args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == read_file(fea_temp) @@ -392,7 +406,7 @@ def test_ss4_exceptions(): args = Defaults() args.input_file = ufo_path args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == 
read_file(fea_temp) @@ -407,7 +421,7 @@ def test_mock_rtl(): args = Defaults() args.input_file = ufo_path args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == read_file(fea_temp) @@ -421,7 +435,7 @@ def test_example_trim(capsys): args.output_name = fea_temp args.min_value = 100 args.write_trimmed_pairs = True - run(f, args) + run_local(f, args) out, err = capsys.readouterr() assert 'Trimmed pairs: 33' in out @@ -440,17 +454,19 @@ def test_nightmare(capsys): args = Defaults() args.input_file = ufo_path args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == read_file(fea_temp) out, err = capsys.readouterr() + print(out) expected_output = ( 'group public.kern1.empty is empty\n' 'group public.kern2.empty is empty\n' 'group public.kern1.invalid contains extraneous glyph(s): [a]\n' 'group public.kern1.lowercase contains extraneous glyph(s): [x, y, z]\n' - 'pair (A public.kern2.invalid) references non-existent group public.kern2.invalid\n' + 'pair (A public.kern2.invalid) references invalid group public.kern2.invalid\n' 'pair (public.kern1.LAT_A a) references non-existent glyph a\n' + 'pair (public.kern1.empty a) references invalid group public.kern1.empty\n' 'pair (public.kern1.empty a) references non-existent glyph a\n' ) assert expected_output in out @@ -464,12 +480,12 @@ def test_ignore_suffix(): args = Defaults() args.input_file = ufo_path args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == read_file(fea_temp) fea_example = TEST_DIR / 'kern_suffix_ignored.fea' fea_temp = TEMP_DIR / fea_example.name args.ignore_suffix = '.cxt' args.output_name = fea_temp - run(f, args) + run_local(f, args) assert read_file(fea_example) == read_file(fea_temp) diff --git a/tests/test_markFeatureWriter.py b/tests/test_markFeatureWriter.py index 9a58a6d..817d9b1 100755 --- a/tests/test_markFeatureWriter.py +++ b/tests/test_markFeatureWriter.py @@ -266,4 +266,4 
@@ def test_invalid_input_file(capsys): with pytest.raises(SystemExit): main([str(ufo_path)]) out, err = capsys.readouterr() - assert 'some_file.xxx is not a UFO file' in err + assert 'Unrecognized input file type' in err From 7e1a473cefa11c8e3bada24907fd03e731a3ed53 Mon Sep 17 00:00:00 2001 From: Skef Iterum Date: Sun, 30 Mar 2025 15:44:17 -0700 Subject: [PATCH 2/6] Classify exceptions by groupings regardless of whether a fallback is present --- kernFeatureWriter.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/kernFeatureWriter.py b/kernFeatureWriter.py index e35c89f..c117667 100755 --- a/kernFeatureWriter.py +++ b/kernFeatureWriter.py @@ -907,10 +907,10 @@ def _get_fallbacks(self, pair): if not is_group(right) and right in self.right_glyph_to_group: right_group = self.right_glyph_to_group[right] if left_group is None and right_group is None: - return [] + return ([], False) candidate_pairs = [(left_group, right), (left, right_group), (left_group, right_group)] - return [ c for c in candidate_pairs if c in self.kerning ] + return ([ c for c in candidate_pairs if c in self.kerning ], True) def _find_exceptions(self): ''' @@ -929,7 +929,7 @@ def _find_exceptions(self): for pair, value in self.kerning.items(): std_map, exp_map = self._get_class_maps(pair) - fallbacks = self._get_fallbacks(pair) + fallbacks, is_except = self._get_fallbacks(pair) if len(fallbacks) > 0: assert exp_map is not None for f in fallbacks: @@ -940,7 +940,10 @@ def _find_exceptions(self): if self.a.value_is_zero(value): self.pairs_unprocessed.append(pair) else: - std_map[pair] = value + if is_except: + exp_map[pair] = value + else: + std_map[pair] = value self.pairs_processed.append(pair) From 69b62bf6d73b863a7a73e4ce203e24975fcac7a4 Mon Sep 17 00:00:00 2001 From: Skef Iterum Date: Mon, 31 Mar 2025 16:22:15 -0700 Subject: [PATCH 3/6] Adjust tests to last commit --- tests/kern_AV_dissolved.fea | 2 +- tests/kern_mock_rtl.fea | 2 +- tests/kern_ss4_exceptions.fea | 
2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/kern_AV_dissolved.fea b/tests/kern_AV_dissolved.fea index b17c099..1ea0c68 100644 --- a/tests/kern_AV_dissolved.fea +++ b/tests/kern_AV_dissolved.fea @@ -2,7 +2,7 @@ # MinKern: +/- 3 inclusive -# glyph, glyph: +# glyph, glyph exceptions: pos A A 10; pos A V -119; pos V A -120; diff --git a/tests/kern_mock_rtl.fea b/tests/kern_mock_rtl.fea index 3328eb1..246e9bd 100644 --- a/tests/kern_mock_rtl.fea +++ b/tests/kern_mock_rtl.fea @@ -25,7 +25,6 @@ lookupflag RightToLeft IgnoreMarks; pos V backslash <10 0 10 0>; pos backslash V <-100 0 -100 0>; pos backslash backslash <-82 0 -82 0>; -pos iotadieresistonos lambda <60 0 60 0>; pos lambda lambda <5 0 5 0>; # RTL glyph, glyph exceptions: @@ -33,6 +32,7 @@ pos Lcaron V <-57 0 -57 0>; pos V adieresis <-60 0 -60 0>; pos V atilde <-50 0 -50 0>; pos V idieresis <30 0 30 0>; +pos iotadieresistonos lambda <60 0 60 0>; pos tcaron backslash <40 0 40 0>; # RTL glyph, group: diff --git a/tests/kern_ss4_exceptions.fea b/tests/kern_ss4_exceptions.fea index 75e57f6..801f7e6 100644 --- a/tests/kern_ss4_exceptions.fea +++ b/tests/kern_ss4_exceptions.fea @@ -20,7 +20,6 @@ pos V backslash 10; pos backslash V -100; pos backslash backslash -82; -pos iotadieresistonos lambda 60; pos lambda lambda 5; # glyph, glyph exceptions: @@ -28,6 +27,7 @@ pos Lcaron V -57; pos V adieresis -60; pos V atilde -50; pos V idieresis 30; +pos iotadieresistonos lambda 60; pos tcaron backslash 40; # glyph, group: From e6167b1f3aea25abd0016c749261cd631770d2ca Mon Sep 17 00:00:00 2001 From: Skef Iterum Date: Thu, 5 Jun 2025 01:42:59 -0700 Subject: [PATCH 4/6] Add ".type" to shortInstanceName key --- kernFeatureWriter.py | 2 +- markFeatureWriter.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/kernFeatureWriter.py b/kernFeatureWriter.py index c117667..b19b69e 100755 --- a/kernFeatureWriter.py +++ b/kernFeatureWriter.py @@ -63,7 +63,7 @@ # constants RTL_GROUP = 
'RTL_KERNING' RTL_TAGS = ['_ARA', '_HEB', '_RTL'] -SHORTINSTNAMEKEY = 'com.adobe.shortInstanceName' +SHORTINSTNAMEKEY = 'com.adobe.type.shortInstanceName' GROUPSPLITSUFFIX = '_split' diff --git a/markFeatureWriter.py b/markFeatureWriter.py index cb3eb71..4343485 100755 --- a/markFeatureWriter.py +++ b/markFeatureWriter.py @@ -79,7 +79,7 @@ # ligature anchors end with 1ST, 2ND, 3RD, etc. ORDINALS = ['1ST', '2ND', '3RD'] + [f'{i}TH' for i in range(4, 10)] -SHORTINSTNAMEKEY = 'com.adobe.shortInstanceName' +SHORTINSTNAMEKEY = 'com.adobe.type.shortInstanceName' NONEPOS = (-inf, -inf) class Defaults(object): From 9b591bc4ab88f1e1dd6028a39973eab6e26b1c90 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Frank=20Grie=C3=9Fhammer?= Date: Wed, 24 Sep 2025 18:36:44 +0200 Subject: [PATCH 5/6] add pyproject.toml --- pyproject.toml | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 pyproject.toml diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..7e2889f --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,97 @@ +[build-system] +requires = ["setuptools>=45", "wheel", "setuptools_scm[toml]>=6.2"] +build-backend = "setuptools.build_meta" + +[project] +name = "afdko-python-modules" +version = "2.0.0" +description = "Python modules for writing kern feature, mark feature, and GlyphOrderAndAliasDB files" +readme = "README.md" +license = {text = "MIT License"} +authors = [ + {name = "Miguel Sousa", email = "afdko@adobe.com"}, + {name = "Skef Iterum", email = "afdko@adobe.com"}, + {name = "Frank Grießhammer", email = "afdko@adobe.com"}, +] +maintainers = [ + {name = "Frank Grießhammer", email = "afdko@adobe.com"} +] +keywords = ["fonts", "typography", "afdko", "ufo", "opentype", "kerning", "mark", "goadb"] +classifiers = [ + "Development Status :: 4 - Beta", + "Environment :: Console", + "Environment :: Other Environment", + "Intended Audience :: Developers", + "Intended Audience :: End Users/Desktop", + "License :: 
OSI Approved :: MIT License", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Topic :: Text Processing :: Fonts", +] +requires-python = ">=3.6" +dependencies = [ + "afdko", +] + +[project.urls] +Homepage = "https://github.com/adobe-type-tools/python-modules" +Repository = "https://github.com/adobe-type-tools/python-modules" +Issues = "https://github.com/adobe-type-tools/python-modules/issues" + +[project.scripts] +kernFeatureWriter = "kernFeatureWriter:main" +markFeatureWriter = "markFeatureWriter:main" +goadbWriter = "goadbWriter:main" + +[tool.setuptools] +py-modules = [ + "kernFeatureWriter", + "markFeatureWriter", + "goadbWriter", +] + +[tool.setuptools_scm] +write_to = "_version.py" + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = ["test_*.py", "*_test.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = [ + "--strict-markers", + "--strict-config", + "--verbose", +] + +[tool.coverage.run] +source = ["kernFeatureWriter", "markFeatureWriter", "goadbWriter"] +omit = [ + "*/tests/*", + "*/test_*", + "*/__pycache__/*", + "*/.*", +] + +[tool.coverage.report] +exclude_lines = [ + "pragma: no cover", + "def __repr__", + "if self.debug:", + "if settings.DEBUG", + "raise AssertionError", + "raise NotImplementedError", + "if 0:", + "if __name__ == .__main__.:", + "class .*\\bProtocol\\):", + "@(abc\\.)?abstractmethod", +] From e86e994f758b728e4269cdb8a7ec12759e437d29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Frank=20Grie=C3=9Fhammer?= Date: Wed, 24 Sep 2025 18:36:53 +0200 Subject: [PATCH 6/6] update documentation --- README.md | 
87 +++++++++++++++++++++++++++++++++---------------------- 1 file changed, 52 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index b41afd5..6a23fa6 100755 --- a/README.md +++ b/README.md @@ -9,53 +9,70 @@ AFDKO Python Modules pip3 install git+https://github.com/adobe-type-tools/python-modules ``` +## `goadbWriter` +The goadbWriter extracts a UFO’s glyph order into a GlyphOrderAndAliasDB file. In the `makeotf`-workflow, this file is essential for assignment of glyph order and code points. The GOADB can also be used to filter non-exporting glyphs. [Read more about the GOADB](https://adobe-type-tools.github.io/afdko/MakeOTFUserGuide.html#glyphorderandaliasdb-goadb) -## `kernFeatureWriter.py` -This tool exports the kerning and groups data within a UFO to a +#### Usage: +```zsh + + # simple, really + goadbWriter font.ufo + +``` + + + +## `kernFeatureWriter` +The kernFeatureWriter exports the kerning and groups data within a UFO to a `makeotf`-compatible GPOS kern feature file. #### Default functionality: -- writing of a sorted kern.fea file, which organizes pairs in order of - specificity (exceptions first, then glyph-to-glyph, then group pairs) -- filtering of small pairs (often results of interpolation). - Exceptions (even though they may be small) are not filtered. -- processing of right-to-left pairs (given that kerning groups containing - those glyphs are suffixed with `_ARA`, `_HEB`, or `_RTL`) +- write a sorted kern.fea file. 
Pairs are organized in order of specificity:
+  - glyph-glyph
+  - glyph-glyph exceptions
+  - glyph-group exceptions
+  - group-glyph exceptions
+  - glyph-group
+  - glyph-group
+  - group-glyph and group-group
+
+- filter low-value pairs (<3 or custom value), which are often results of interpolation (exceptions are not filtered)
+- process right-to-left pairs (given that kerning groups containing
+  those glyphs are suffixed with `_ARA`, `_HEB`, or `_RTL`, or RTL glyphs are in a `RTL_KERNING` group)
 
 #### Optional functionality:
 
-- dissolving single-element groups into glyph pairs – this helps with
+- dissolve single-element groups into glyph pairs – this helps with
   subtable optimization, and can be seen as a means to avoid kerning overflow
 - subtable measuring and automatic insertion of subtable breaks
-- specifying a maximum subtable size
-- identification of glyph-to-glyph RTL pairs by way of a global `RTL_KERNING`
+- specify a maximum subtable size
+- identify glyph-to-glyph RTL pairs by way of a global `RTL_KERNING`
   reference group
-- specifying a glyph name suffix for glyphs to be ignored when writing the
+- specify a glyph name suffix for glyphs to be ignored when writing the
   kern feature
 
 #### Usage:
 
 ```zsh
 
     # write a basic kern feature file
-    python kernFeatureWriter.py font.ufo
+    kernFeatureWriter font.ufo
 
     # write a kern feature file with minimum absolute kerning value of 5
-    python kernFeatureWriter.py -min 5 font.ufo
+    kernFeatureWriter -min 5 font.ufo
 
     # write a kern feature with subtable breaks
-    python kernFeatureWriter.py -s font.ufo
+    kernFeatureWriter -s font.ufo
 
     # further usage information
-    python kernFeatureWriter.py -h
+    kernFeatureWriter -h
 
 ```
 
 ----
 
-## `markFeatureWriter.py`
-This tool interprets glyphs and anchor points within a UFO to write a
-`makeotf`-compatible GPOS mark feature file.
+## `markFeatureWriter`
+The markFeatureWriter interprets glyphs and anchor points within a UFO to write a `makeotf`-compatible GPOS mark feature file.
The input UFO file needs to have base glyphs and zero-width combining marks. Base- and mark glyphs attach via anchor pairs (e.g. `above` and @@ -64,9 +81,9 @@ Combining marks must be members of a `COMBINING_MARKS` reference group. #### Default functionality: -- writing a `mark.fea` file, which contains mark classes/groups, and +- write a `mark.fea` file, which contains mark classes/groups, and per-anchor mark-to-base positioning lookups (GPOS lookup type 4) -- writing mark-to-ligature positioning lookups (GPOS lookup type 5). +- write mark-to-ligature positioning lookups (GPOS lookup type 5). This requires anchor names to be suffixed with an ordinal (`1ST`, `2ND`, `3RD`, etc). For example – if a mark with an `_above` anchor is to be attached to a ligature, the ligature’s anchor names would be `above1ST`, @@ -74,12 +91,12 @@ Combining marks must be members of a `COMBINING_MARKS` reference group. #### Optional functionality: -- writing `mkmk.fea`, for mark-to-mark positioning (GPOS lookup type 6) -- writing `abvm.fea`/`blwm.fea` files, as used in Indic scripts (anchor pairs +- write `mkmk.fea`, for mark-to-mark positioning (GPOS lookup type 6) +- write `abvm.fea`/`blwm.fea` files, as used in Indic scripts (anchor pairs are `abvm`, `_abvm`, and `blwm`, `_blwm`, respectively) -- writing mark classes into a separate file (in case classes need to be +- write mark classes into a separate file (in case classes need to be shared across multiple lookup types) -- trimming casing tags (`UC`, `LC`, or `SC`) +- trim casing tags (`UC`, `LC`, or `SC`) Trimming tags is a somewhat specific feature, but it is quite essential: In a UFO, anchors can be used to build composite glyphs – for example @@ -99,16 +116,16 @@ Combining marks must be members of a `COMBINING_MARKS` reference group. 
```zsh # write a basic mark feature - python markFeatureWriter.py font.ufo + markFeatureWriter font.ufo # write mark and mkmk feature files - python markFeatureWriter.py -m font.ufo + markFeatureWriter -m font.ufo # trim casing tags - python markFeatureWriter.py -t font.ufo + markFeatureWriter -t font.ufo # further usage information - python markFeatureWriter.py -h + markFeatureWriter -h ``` @@ -123,14 +140,14 @@ feature kern{ } kern; ``` -The benefit of this is that different feature flags can be used ([example](https://github.com/adobe-fonts/source-serif/blob/main/familyGPOS.fea#L12-L13)), or that mark groups can be shared across `mark`/`mkmk` features. Also, the (sometimes volatile) GPOS feature data can be re-generated periodically without affecting the overall structure of the feature tree. +The benefit of this approach is that different feature flags can be used ([example](https://github.com/adobe-fonts/source-serif/blob/main/familyGPOS.fea#L12-L13)), or that mark groups can be shared across `mark`/`mkmk` features. Also, the (sometimes volatile) GPOS feature data can be re-generated periodically without affecting the overall structure of the feature tree. ---- ## [utilities (folder `/utilities`)](/utilities) -* `flKernExport.py` +* `flKernExport` FLS5 script to export class kerning to UFO. Superseded by [vfb3ufo](https://github.com/LucasFonts/vfbLib). @@ -138,16 +155,16 @@ FLS5 script to export class kerning to UFO. Superseded by [vfb3ufo](https://gith Other modules are FontLab scripts which were used in pre-UFO days in a FLS5 environment. Those modules are not in active development. -* `AdobeFontLabUtils.py` +* `AdobeFontLabUtils` Support module for FontLab scripts. Defines commonly used functions and globals. -* `BezChar.py` +* `BezChar` This module converts between a FontLab glyph and a bez file data string. Used by the OutlineCheck and AutoHint scripts, to convert FL glyphs to bez programs as needed by C libraries that do the hard work. 
-* `WriteFeaturesKernFDK.py` +* `WriteFeaturesKernFDK` Former kern feature writer. -* `WriteFeaturesMarkFDK.py` +* `WriteFeaturesMarkFDK` Former mark feature writer.