* src/tools/*.py: Migrate to Python 3.

Fixes #1185, closes !205. Formatting changes according to PEP8.
This commit is contained in:
Azamat Hackimov 2022-09-28 22:35:49 -04:00 committed by Alexei Podtelezhnikov
parent df2601395f
commit 3f3427c6f3
3 changed files with 708 additions and 711 deletions

View File

@@ -1,11 +1,10 @@
#!/usr/bin/env python
#!/usr/bin/env python3
#
# Check trace components in FreeType 2 source.
# Author: suzuki toshiya, 2009, 2013, 2020
#
# This code is explicitly put into the public domain.
import sys
import os
import re
@ -14,101 +13,107 @@ SRC_FILE_LIST = []
USED_COMPONENT = {}
KNOWN_COMPONENT = {}
SRC_FILE_DIRS = [ "src" ]
TRACE_DEF_FILES = [ "include/freetype/internal/fttrace.h" ]
SRC_FILE_DIRS = ["src"]
TRACE_DEF_FILES = ["include/freetype/internal/fttrace.h"]
def usage():
    """Print command-line help for this trace-component checker.

    The defaults shown are read from the module-level SRC_FILE_DIRS and
    TRACE_DEF_FILES lists, which may have been overridden by earlier
    command-line parsing.
    """
    print("Usage: %s [option]" % sys.argv[0])
    # The script reports both mismatch directions; the old text said
    # "used-but-defined", which is the opposite of what is searched for.
    print("Search used-but-not-defined and defined-but-not-used "
          "trace_XXX macros")
    print("")
    print(" --help:")
    print(" Show this help")
    print("")
    print(" --src-dirs=dir1:dir2:...")
    print(" Specify the directories of C source files to be checked")
    print(" Default is %s" % ":".join(SRC_FILE_DIRS))
    print("")
    print(" --def-files=file1:file2:...")
    print(" Specify the header files including FT_TRACE_DEF()")
    print(" Default is %s" % ":".join(TRACE_DEF_FILES))
    print("")
# --------------------------------------------------------------
# Parse command line options
#
for i in range( 1, len( sys.argv ) ):
if sys.argv[i].startswith( "--help" ):
print "Usage: %s [option]" % sys.argv[0]
print "Search used-but-defined and defined-but-not-used trace_XXX macros"
print ""
print " --help:"
print " Show this help"
print ""
print " --src-dirs=dir1:dir2:..."
print " Specify the directories of C source files to be checked"
print " Default is %s" % ":".join( SRC_FILE_DIRS )
print ""
print " --def-files=file1:file2:..."
print " Specify the header files including FT_TRACE_DEF()"
print " Default is %s" % ":".join( TRACE_DEF_FILES )
print ""
for i in range(1, len(sys.argv)):
if sys.argv[i].startswith("--help"):
usage()
exit(0)
if sys.argv[i].startswith( "--src-dirs=" ):
SRC_FILE_DIRS = sys.argv[i].replace( "--src-dirs=", "", 1 ).split( ":" )
elif sys.argv[i].startswith( "--def-files=" ):
TRACE_DEF_FILES = sys.argv[i].replace( "--def-files=", "", 1 ).split( ":" )
if sys.argv[i].startswith("--src-dirs="):
SRC_FILE_DIRS = sys.argv[i].replace("--src-dirs=", "", 1).split(":")
elif sys.argv[i].startswith("--def-files="):
TRACE_DEF_FILES = sys.argv[i].replace("--def-files=", "", 1).split(":")
# --------------------------------------------------------------
# Scan C source and header files using trace macros.
#
c_pathname_pat = re.compile( '^.*\.[ch]$', re.IGNORECASE )
trace_use_pat = re.compile( '^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+' )
c_pathname_pat = re.compile('^.*\.[ch]$', re.IGNORECASE)
trace_use_pat = re.compile('^[ \t]*#define[ \t]+FT_COMPONENT[ \t]+')
for d in SRC_FILE_DIRS:
for ( p, dlst, flst ) in os.walk( d ):
for (p, dlst, flst) in os.walk(d):
for f in flst:
if c_pathname_pat.match( f ) != None:
src_pathname = os.path.join( p, f )
if c_pathname_pat.match(f) is not None:
src_pathname = os.path.join(p, f)
line_num = 0
for src_line in open( src_pathname, 'r' ):
for src_line in open(src_pathname, 'r'):
line_num = line_num + 1
src_line = src_line.strip()
if trace_use_pat.match( src_line ) != None:
component_name = trace_use_pat.sub( '', src_line )
if trace_use_pat.match(src_line) is not None:
component_name = trace_use_pat.sub('', src_line)
if component_name in USED_COMPONENT:
USED_COMPONENT[component_name].append( "%s:%d" % ( src_pathname, line_num ) )
USED_COMPONENT[component_name]\
.append("%s:%d" % (src_pathname, line_num))
else:
USED_COMPONENT[component_name] = [ "%s:%d" % ( src_pathname, line_num ) ]
USED_COMPONENT[component_name] =\
["%s:%d" % (src_pathname, line_num)]
# --------------------------------------------------------------
# Scan header file(s) defining trace macros.
#
trace_def_pat_opn = re.compile( '^.*FT_TRACE_DEF[ \t]*\([ \t]*' )
trace_def_pat_cls = re.compile( '[ \t\)].*$' )
trace_def_pat_opn = re.compile('^.*FT_TRACE_DEF[ \t]*\([ \t]*')
trace_def_pat_cls = re.compile('[ \t\)].*$')
for f in TRACE_DEF_FILES:
line_num = 0
for hdr_line in open( f, 'r' ):
for hdr_line in open(f, 'r'):
line_num = line_num + 1
hdr_line = hdr_line.strip()
if trace_def_pat_opn.match( hdr_line ) != None:
component_name = trace_def_pat_opn.sub( '', hdr_line )
component_name = trace_def_pat_cls.sub( '', component_name )
if trace_def_pat_opn.match(hdr_line) is not None:
component_name = trace_def_pat_opn.sub('', hdr_line)
component_name = trace_def_pat_cls.sub('', component_name)
if component_name in KNOWN_COMPONENT:
print "trace component %s is defined twice, see %s and fttrace.h:%d" % \
( component_name, KNOWN_COMPONENT[component_name], line_num )
print("trace component %s is defined twice,"
" see %s and fttrace.h:%d" %
(component_name, KNOWN_COMPONENT[component_name],
line_num))
else:
KNOWN_COMPONENT[component_name] = "%s:%d" % \
( os.path.basename( f ), line_num )
KNOWN_COMPONENT[component_name] =\
"%s:%d" % (os.path.basename(f), line_num)
# --------------------------------------------------------------
# Compare the used and defined trace macros.
#
print "# Trace component used in the implementations but not defined in fttrace.h."
cmpnt = USED_COMPONENT.keys()
print("# Trace component used in the implementations but not defined in "
"fttrace.h.")
cmpnt = list(USED_COMPONENT.keys())
cmpnt.sort()
for c in cmpnt:
if c not in KNOWN_COMPONENT:
print "Trace component %s (used in %s) is not defined." % ( c, ", ".join( USED_COMPONENT[c] ) )
print("Trace component %s (used in %s) is not defined." %
(c, ", ".join(USED_COMPONENT[c])))
print "# Trace component is defined but not used in the implementations."
cmpnt = KNOWN_COMPONENT.keys()
print("# Trace component is defined but not used in the implementations.")
cmpnt = list(KNOWN_COMPONENT.keys())
cmpnt.sort()
for c in cmpnt:
if c not in USED_COMPONENT:
if c != "any":
print "Trace component %s (defined in %s) is not used." % ( c, KNOWN_COMPONENT[c] )
print("Trace component %s (defined in %s) is not used." %
(c, KNOWN_COMPONENT[c]))

View File

@@ -1,33 +1,32 @@
#!/usr/bin/env python3
# compute arctangent table for CORDIC computations in fttrigon.c
import sys, math
import math
#units = 64*65536.0 # don't change !!
units = 180 * 2**16
scale = units/math.pi
# units = 64*65536.0 # don't change !!
units = 180 * 2 ** 16
scale = units / math.pi
shrink = 1.0
comma = ""
angles2 = []
print ""
print "table of arctan( 1/2^n ) for PI = " + repr(units/65536.0) + " units"
print("")
print("table of arctan( 1/2^n ) for PI = " + repr(units / 65536.0) + " units")
for n in range(1,32):
for n in range(1, 32):
x = 0.5**n # tangent value
x = 0.5 ** n # tangent value
angle = math.atan(x) # arctangent
angle2 = round(angle*scale) # arctangent in FT_Angle units
angle2 = round(angle * scale) # arctangent in FT_Angle units
if angle2 <= 0:
break
sys.stdout.write( comma + repr( int(angle2) ) )
comma = ", "
shrink /= math.sqrt( 1 + x*x )
print
print "shrink factor = " + repr( shrink )
print "shrink factor 2 = " + repr( int( shrink * (2**32) ) )
print "expansion factor = " + repr( 1/shrink )
print ""
angles2.append(repr(int(angle2)))
shrink /= math.sqrt(1 + x * x)
print(", ".join(angles2))
print("shrink factor = " + repr(shrink))
print("shrink factor 2 = " + repr(int(shrink * (2 ** 32))))
print("expansion factor = " + repr(1 / shrink))
print("")

View File

@@ -1,11 +1,8 @@
#!/usr/bin/env python
#
#!/usr/bin/env python3
#
# FreeType 2 glyph name builder
#
# Copyright (C) 1996-2022 by
# David Turner, Robert Wilhelm, and Werner Lemberg.
#
@ -16,8 +13,7 @@
# fully.
"""\
"""
usage: %s <output-file>
This python script generates the glyph names tables defined in the
@ -26,9 +22,9 @@ usage: %s <output-file>
Its single argument is the name of the header file to be created.
"""
import sys, string, struct, re, os.path
import os.path
import struct
import sys
# This table lists the glyphs according to the Macintosh specification.
# It is used by the TrueType Postscript names table.
@ -39,8 +35,7 @@ import sys, string, struct, re, os.path
#
# for the official list.
#
mac_standard_names = \
[
mac_standard_names = [
# 0
".notdef", ".null", "nonmarkingreturn", "space", "exclam",
"quotedbl", "numbersign", "dollar", "percent", "ampersand",
@ -147,14 +142,12 @@ mac_standard_names = \
"Ccaron", "ccaron", "dcroat"
]
# The list of standard `SID' glyph names. For the official list,
# see Annex A of document at
#
# https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5176.CFF.pdf .
# https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5176.CFF.pdf
#
sid_standard_names = \
[
sid_standard_names = [
# 0
".notdef", "space", "exclam", "quotedbl", "numbersign",
"dollar", "percent", "ampersand", "quoteright", "parenleft",
@ -333,12 +326,10 @@ sid_standard_names = \
"Semibold"
]
# This table maps character codes of the Adobe Standard Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_standard_encoding = \
[
t1_standard_encoding = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@ -372,12 +363,10 @@ t1_standard_encoding = \
148, 149, 0, 0, 0, 0
]
# This table maps character codes of the Adobe Expert Type 1
# encoding to glyph indices in the sid_standard_names table.
#
t1_expert_encoding = \
[
t1_expert_encoding = [
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@ -411,7 +400,6 @@ t1_expert_encoding = \
373, 374, 375, 376, 377, 378
]
# This data has been taken literally from the files `glyphlist.txt'
# and `zapfdingbats.txt' version 2.0, Sept 2002. It is available from
#
@ -4906,7 +4894,7 @@ a9;2720
# string table management
#
class StringTable:
def __init__( self, name_list, master_table_name ):
def __init__(self, name_list, master_table_name):
self.names = name_list
self.master_table = master_table_name
self.indices = {}
@ -4914,54 +4902,54 @@ class StringTable:
for name in name_list:
self.indices[name] = index
index += len( name ) + 1
index += len(name) + 1
self.total = index
def dump( self, file ):
def dump(self, file):
write = file.write
write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
write( "#ifdef __cplusplus\n" )
write( ' extern "C"\n' )
write( "#else\n" )
write( " extern\n" )
write( "#endif\n" )
write( "#endif\n" )
write( " const char " + self.master_table +
"[" + repr( self.total ) + "]\n" )
write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
write( " =\n" )
write( " {\n" )
write("#ifndef DEFINE_PS_TABLES_DATA\n")
write("#ifdef __cplusplus\n")
write(' extern "C"\n')
write("#else\n")
write(" extern\n")
write("#endif\n")
write("#endif\n")
write(" const char " + self.master_table +
"[" + repr(self.total) + "]\n")
write("#ifdef DEFINE_PS_TABLES_DATA\n")
write(" =\n")
write(" {\n")
line = ""
for name in self.names:
line += " '"
line += string.join( ( re.findall( ".", name ) ), "','" )
line += "','".join(list(name))
line += "', 0,\n"
write( line )
write( " }\n" )
write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
write( " ;\n\n\n" )
write(line)
write(" }\n")
write("#endif /* DEFINE_PS_TABLES_DATA */\n")
write(" ;\n\n\n")
def dump_sublist( self, file, table_name, macro_name, sublist ):
def dump_sublist(self, file, table_name, macro_name, sublist):
write = file.write
write( "#define " + macro_name + " " + repr( len( sublist ) ) + "\n\n" )
write("#define " + macro_name + " " + repr(len(sublist)) + "\n\n")
write( " /* Values are offsets into the `" +
self.master_table + "' table */\n\n" )
write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
write( "#ifdef __cplusplus\n" )
write( ' extern "C"\n' )
write( "#else\n" )
write( " extern\n" )
write( "#endif\n" )
write( "#endif\n" )
write( " const short " + table_name +
"[" + macro_name + "]\n" )
write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
write( " =\n" )
write( " {\n" )
write(" /* Values are offsets into the `" +
self.master_table + "' table */\n\n")
write("#ifndef DEFINE_PS_TABLES_DATA\n")
write("#ifdef __cplusplus\n")
write(' extern "C"\n')
write("#else\n")
write(" extern\n")
write("#endif\n")
write("#endif\n")
write(" const short " + table_name +
"[" + macro_name + "]\n")
write("#ifdef DEFINE_PS_TABLES_DATA\n")
write(" =\n")
write(" {\n")
line = " "
comma = ""
@ -4976,11 +4964,11 @@ class StringTable:
col = 0
comma = ",\n "
write( line )
write( "\n" )
write( " }\n" )
write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
write( " ;\n\n\n" )
write(line)
write("\n")
write(" }\n")
write("#endif /* DEFINE_PS_TABLES_DATA */\n")
write(" ;\n\n\n")
# We now store the Adobe Glyph List in compressed form. The list is put
@ -5059,33 +5047,36 @@ class StringTable:
# The root node has first letter = 0, and no value.
#
class StringNode:
def __init__( self, letter, value ):
def __init__(self, letter, value):
self.letter = letter
self.value = value
self.children = {}
def __cmp__( self, other ):
return ord( self.letter[0] ) - ord( other.letter[0] )
def __cmp__(self, other):
return ord(self.letter[0]) - ord(other.letter[0])
def add( self, word, value ):
if len( word ) == 0:
def __lt__(self, other):
return self.letter[0] < other.letter[0]
def add(self, word, value):
if len(word) == 0:
self.value = value
return
letter = word[0]
word = word[1:]
if self.children.has_key( letter ):
if letter in self.children:
child = self.children[letter]
else:
child = StringNode( letter, 0 )
child = StringNode(letter, 0)
self.children[letter] = child
child.add( word, value )
child.add(word, value)
def optimize( self ):
def optimize(self):
# optimize all children first
children = self.children.values()
children = list(self.children.values())
self.children = {}
for child in children:
@ -5094,7 +5085,7 @@ class StringNode:
# don't optimize if there's a value,
# if we don't have any child or if we
# have more than one child
if ( self.value != 0 ) or ( not children ) or len( children ) > 1:
if (self.value != 0) or (not children) or len(children) > 1:
return self
child = children[0]
@ -5105,71 +5096,71 @@ class StringNode:
return self
def dump_debug( self, write, margin ):
def dump_debug(self, write, margin):
# this is used during debugging
line = margin + "+-"
if len( self.letter ) == 0:
if len(self.letter) == 0:
line += "<NOLETTER>"
else:
line += self.letter
if self.value:
line += " => " + repr( self.value )
line += " => " + repr(self.value)
write( line + "\n" )
write(line + "\n")
if self.children:
margin += "| "
for child in self.children.values():
child.dump_debug( write, margin )
child.dump_debug(write, margin)
def locate( self, index ):
def locate(self, index):
self.index = index
if len( self.letter ) > 0:
index += len( self.letter ) + 1
if len(self.letter) > 0:
index += len(self.letter) + 1
else:
index += 2
if self.value != 0:
index += 2
children = self.children.values()
children = list(self.children.values())
children.sort()
index += 2 * len( children )
index += 2 * len(children)
for child in children:
index = child.locate( index )
index = child.locate(index)
return index
def store( self, storage ):
def store(self, storage):
# write the letters
l = len( self.letter )
if l == 0:
storage += struct.pack( "B", 0 )
length = len(self.letter)
if length == 0:
storage += struct.pack("B", 0)
else:
for n in range( l ):
val = ord( self.letter[n] )
if n < l - 1:
for n in range(length):
val = ord(self.letter[n])
if n < length - 1:
val += 128
storage += struct.pack( "B", val )
storage += struct.pack("B", val)
# write the count
children = self.children.values()
children = list(self.children.values())
children.sort()
count = len( children )
count = len(children)
if self.value != 0:
storage += struct.pack( "!BH", count + 128, self.value )
storage += struct.pack("!BH", count + 128, self.value)
else:
storage += struct.pack( "B", count )
storage += struct.pack("B", count)
for child in children:
storage += struct.pack( "!H", child.index )
storage += struct.pack("!H", child.index)
for child in children:
storage = child.store( storage )
storage = child.store(storage)
return storage
@ -5177,23 +5168,23 @@ class StringNode:
def adobe_glyph_values():
"""return the list of glyph names and their unicode values"""
lines = string.split( adobe_glyph_list, '\n' )
lines = adobe_glyph_list.split("\n")
glyphs = []
values = []
for line in lines:
if line:
fields = string.split( line, ';' )
# print fields[1] + ' - ' + fields[0]
subfields = string.split( fields[1], ' ' )
if len( subfields ) == 1:
glyphs.append( fields[0] )
values.append( fields[1] )
fields = line.split(';')
# print fields[1] + ' - ' + fields[0]
subfields = fields[1].split(' ')
if len(subfields) == 1:
glyphs.append(fields[0])
values.append(fields[1])
return glyphs, values
def filter_glyph_names( alist, filter ):
def filter_glyph_names(alist, filter):
"""filter `alist' by taking _out_ all glyph names that are in `filter'"""
count = 0
@ -5201,30 +5192,30 @@ def filter_glyph_names( alist, filter ):
for name in alist:
try:
filtered_index = filter.index( name )
filtered_index = filter.index(name)
except:
extras.append( name )
extras.append(name)
return extras
def dump_encoding( file, encoding_name, encoding_list ):
def dump_encoding(file, encoding_name, encoding_list):
"""dump a given encoding"""
write = file.write
write( " /* the following are indices into the SID name table */\n" )
write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
write( "#ifdef __cplusplus\n" )
write( ' extern "C"\n' )
write( "#else\n" )
write( " extern\n" )
write( "#endif\n" )
write( "#endif\n" )
write( " const unsigned short " + encoding_name +
"[" + repr( len( encoding_list ) ) + "]\n" )
write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
write( " =\n" )
write( " {\n" )
write(" /* the following are indices into the SID name table */\n")
write("#ifndef DEFINE_PS_TABLES_DATA\n")
write("#ifdef __cplusplus\n")
write(' extern "C"\n')
write("#else\n")
write(" extern\n")
write("#endif\n")
write("#endif\n")
write(" const unsigned short " + encoding_name +
"[" + repr(len(encoding_list)) + "]\n")
write("#ifdef DEFINE_PS_TABLES_DATA\n")
write(" =\n")
write(" {\n")
line = " "
comma = ""
@ -5238,28 +5229,28 @@ def dump_encoding( file, encoding_name, encoding_list ):
col = 0
comma = ",\n "
write( line )
write( "\n" )
write( " }\n" )
write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
write( " ;\n\n\n" )
write(line)
write("\n")
write(" }\n")
write("#endif /* DEFINE_PS_TABLES_DATA */\n")
write(" ;\n\n\n")
def dump_array( the_array, write, array_name ):
def dump_array(the_array, write, array_name):
"""dumps a given encoding"""
write( "#ifndef DEFINE_PS_TABLES_DATA\n" )
write( "#ifdef __cplusplus\n" )
write( ' extern "C"\n' )
write( "#else\n" )
write( " extern\n" )
write( "#endif\n" )
write( "#endif\n" )
write( " const unsigned char " + array_name +
"[" + repr( len( the_array ) ) + "L]\n" )
write( "#ifdef DEFINE_PS_TABLES_DATA\n" )
write( " =\n" )
write( " {\n" )
write("#ifndef DEFINE_PS_TABLES_DATA\n")
write("#ifdef __cplusplus\n")
write(' extern "C"\n')
write("#else\n")
write(" extern\n")
write("#endif\n")
write("#endif\n")
write(" const unsigned char " + array_name +
"[" + repr(len(the_array)) + "L]\n")
write("#ifdef DEFINE_PS_TABLES_DATA\n")
write(" =\n")
write(" {\n")
line = ""
comma = " "
@ -5267,7 +5258,7 @@ def dump_array( the_array, write, array_name ):
for value in the_array:
line += comma
line += "%3d" % ord( value )
line += "%3d" % value
comma = ","
col += 1
@ -5275,91 +5266,93 @@ def dump_array( the_array, write, array_name ):
col = 0
comma = ",\n "
if len( line ) > 1024:
write( line )
if len(line) > 1024:
write(line)
line = ""
write( line )
write( "\n" )
write( " }\n" )
write( "#endif /* DEFINE_PS_TABLES_DATA */\n" )
write( " ;\n\n\n" )
write(line)
write("\n")
write(" }\n")
write("#endif /* DEFINE_PS_TABLES_DATA */\n")
write(" ;\n\n\n")
def main():
"""main program body"""
if len( sys.argv ) != 2:
print __doc__ % sys.argv[0]
sys.exit( 1 )
if len(sys.argv) != 2:
print(__doc__ % sys.argv[0])
sys.exit(1)
file = open( sys.argv[1], "wb" )
file = open(sys.argv[1], "w")
write = file.write
count_sid = len( sid_standard_names )
count_sid = len(sid_standard_names)
# `mac_extras' contains the list of glyph names in the Macintosh standard
# encoding which are not in the SID Standard Names.
#
mac_extras = filter_glyph_names( mac_standard_names, sid_standard_names )
mac_extras = filter_glyph_names(mac_standard_names, sid_standard_names)
# `base_list' contains the names of our final glyph names table.
# It consists of the `mac_extras' glyph names, followed by the SID
# standard names.
#
mac_extras_count = len( mac_extras )
mac_extras_count = len(mac_extras)
base_list = mac_extras + sid_standard_names
write( "/****************************************************************************\n" )
write( " *\n" )
write( " * %-71s\n" % os.path.basename( sys.argv[1] ) )
write( " *\n" )
write( " * PostScript glyph names.\n" )
write( " *\n" )
write( " * Copyright 2005-2019 by\n" )
write( " * David Turner, Robert Wilhelm, and Werner Lemberg.\n" )
write( " *\n" )
write( " * This file is part of the FreeType project, and may only be used,\n" )
write( " * modified, and distributed under the terms of the FreeType project\n" )
write( " * license, LICENSE.TXT. By continuing to use, modify, or distribute\n" )
write( " * this file you indicate that you have read the license and\n" )
write( " * understand and accept it fully.\n" )
write( " *\n" )
write( " */\n" )
write( "\n" )
write( "\n" )
write( " /* This file has been generated automatically -- do not edit! */\n" )
write( "\n" )
write( "\n" )
write("/*\n")
write(" *\n")
write(" * %-71s\n" % os.path.basename(sys.argv[1]))
write(" *\n")
write(" * PostScript glyph names.\n")
write(" *\n")
write(" * Copyright 2005-2022 by\n")
write(" * David Turner, Robert Wilhelm, and Werner Lemberg.\n")
write(" *\n")
write(" * This file is part of the FreeType project, and may only be "
"used,\n")
write(" * modified, and distributed under the terms of the FreeType "
"project\n")
write(" * license, LICENSE.TXT. By continuing to use, modify, or "
"distribute\n")
write(" * this file you indicate that you have read the license and\n")
write(" * understand and accept it fully.\n")
write(" *\n")
write(" */\n")
write("\n")
write("\n")
write(" /* This file has been generated automatically -- do not edit! */"
"\n")
write("\n")
write("\n")
# dump final glyph list (mac extras + sid standard names)
#
st = StringTable( base_list, "ft_standard_glyph_names" )
st = StringTable(base_list, "ft_standard_glyph_names")
st.dump( file )
st.dump_sublist( file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names )
st.dump_sublist( file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names )
st.dump(file)
st.dump_sublist(file, "ft_mac_names",
"FT_NUM_MAC_NAMES", mac_standard_names)
st.dump_sublist(file, "ft_sid_names",
"FT_NUM_SID_NAMES", sid_standard_names)
dump_encoding( file, "t1_standard_encoding", t1_standard_encoding )
dump_encoding( file, "t1_expert_encoding", t1_expert_encoding )
dump_encoding(file, "t1_standard_encoding", t1_standard_encoding)
dump_encoding(file, "t1_expert_encoding", t1_expert_encoding)
# dump the AGL in its compressed form
#
agl_glyphs, agl_values = adobe_glyph_values()
dict = StringNode( "", 0 )
dictionary = StringNode("", 0)
for g in range( len( agl_glyphs ) ):
dict.add( agl_glyphs[g], eval( "0x" + agl_values[g] ) )
for g in range(len(agl_glyphs)):
dictionary.add(agl_glyphs[g], eval("0x" + agl_values[g]))
dict = dict.optimize()
dict_len = dict.locate( 0 )
dict_array = dict.store( "" )
dictionary = dictionary.optimize()
dict_len = dictionary.locate(0)
dict_array = dictionary.store(b"")
write( """\
write("""\
/*
* This table is a compressed version of the Adobe Glyph List (AGL),
* optimized for efficient searching. It has been generated by the
@ -5371,13 +5364,13 @@ def main():
#ifdef FT_CONFIG_OPTION_ADOBE_GLYPH_LIST
""" )
""")
dump_array( dict_array, write, "ft_adobe_glyph_list" )
dump_array(dict_array, write, "ft_adobe_glyph_list")
# write the lookup routine now
#
write( """\
write("""\
#ifdef DEFINE_PS_TABLES
/*
* This function searches the compressed table efficiently.
@ -5477,38 +5470,39 @@ def main():
#endif /* FT_CONFIG_OPTION_ADOBE_GLYPH_LIST */
""" )
""")
if 0: # generate unit test, or don't
#
# now write the unit test to check that everything works OK
#
write( "#ifdef TEST\n\n" )
write("#ifdef TEST\n\n")
write( "static const char* const the_names[] = {\n" )
write("static const char* const the_names[] = {\n")
for name in agl_glyphs:
write( ' "' + name + '",\n' )
write( " 0\n};\n" )
write(' "' + name + '",\n')
write(" 0\n};\n")
write( "static const unsigned long the_values[] = {\n" )
write("static const unsigned long the_values[] = {\n")
for val in agl_values:
write( ' 0x' + val + ',\n' )
write( " 0\n};\n" )
write(' 0x' + val + ',\n')
write(" 0\n};\n")
write( """
write("""
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
int
main( void )
{
int result = 0;
const char* const* names = the_names;
const unsigned long* values = the_values;
for ( ; *names; names++, values++ )
{
for ( ; *names; names++, values++ )
{
const char* name = *names;
unsigned long reference = *values;
unsigned long value;
@ -5521,13 +5515,13 @@ def main():
fprintf( stderr, "name '%s' => %04x instead of %04x\\n",
name, value, reference );
}
}
}
return result;
}
""" )
return result;
}
""")
write( "#endif /* TEST */\n" )
write("#endif /* TEST */\n")
write("\n/* END */\n")
@ -5536,5 +5530,4 @@ def main():
#
main()
# END