forked from minhngoc25a/freetype2

* src/tools/docmaker: Remove `docmaker'.

`Docmaker' has now been upgraded to `docwriter', a pip package available at
https://pypi.org/project/docwriter/

parent 53c69ce04f
commit 195728d5ba
@@ -1 +0,0 @@
*.pyc

src/tools/docmaker/content.py
@@ -1,672 +0,0 @@
#
# content.py
#
# Parse comment blocks to build content blocks (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.

#
# This file contains routines to parse documentation comment blocks,
# building more structured objects out of them.
#


from sources import *
from utils import *

import string, re


#
# Regular expressions to detect code sequences. `Code sequences' are simply
# code fragments embedded in '{' and '}', as demonstrated in the following
# example.
#
#   {
#     x = y + z;
#     if ( zookoo == 2 )
#     {
#       foobar();
#     }
#   }
#
# Note that the indentation of the first opening brace and the last closing
# brace must be exactly the same. The code sequence itself should have a
# larger indentation than the surrounding braces.
#
re_code_start = re.compile( r"(\s*){\s*$" )
re_code_end = re.compile( r"(\s*)}\s*$" )
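#
# [Editor's illustration -- not part of the original sources.]  A minimal
# sketch of how the two patterns above cooperate: the parser remembers the
# indentation captured by `re_code_start' and closes the code sequence only
# when `re_code_end' matches at the same indentation or less.  The
# `_demo_*' names are hypothetical and exist only for this example.
#
_demo_start = re_code_start.match( "  {" )
_demo_end   = re_code_end.match( "  }" )

assert _demo_start and _demo_end
assert len( _demo_end.group( 1 ) ) <= len( _demo_start.group( 1 ) )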
|
||||
|
||||
|
||||
#
# A regular expression to isolate identifiers from other text. Two syntax
# forms are supported:
#
#   <name>
#   <name>[<id>]
#
# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
# and `-'. Use `<id>' if there are multiple, valid `<name>' entries; in the
# index, `<id>' will be appended in parentheses.
#
# For example,
#
#   stem_darkening[autofit]
#
# becomes `stem_darkening (autofit)' in the index.
#
re_identifier = re.compile( r"""
                              ((?:\w|-)+
                               (?:\[(?:\w|-)+\])?)
                            """, re.VERBOSE )
|
||||
|
||||
|
||||
#
# We collect macro names ending in `_H' (group 1), as defined in
# `freetype/config/ftheader.h'. While outputting the object data, we use
# this info together with the object's file location (group 2) to emit the
# appropriate header file macro and its associated file name before the
# object itself.
#
# Example:
#
#   #define FT_FREETYPE_H <freetype.h>
#
re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
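#
# [Editor's illustration -- not part of the original sources.]  Group 1 is
# the macro name, group 2 the file it expands to; `_demo_m' is a
# hypothetical name used only here.
#
_demo_m = re_header_macro.match( "#define FT_FREETYPE_H  <freetype.h>" )

assert _demo_m.group( 1 ) == "FT_FREETYPE_H"
assert _demo_m.group( 2 ) == "freetype.h"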
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## DOC CODE CLASS
|
||||
##
|
||||
## The `DocCode' class is used to store source code lines.
|
||||
##
|
||||
## `self.lines' contains a set of source code lines that will be dumped as
|
||||
## HTML in a <PRE> tag.
|
||||
##
|
||||
## The object is filled line by line by the parser; it strips the leading
|
||||
## `margin' space from each input line before storing it in `self.lines'.
|
||||
##
|
||||
class DocCode:
|
||||
|
||||
def __init__( self, margin, lines ):
|
||||
self.lines = []
|
||||
self.words = None
|
||||
|
||||
# remove margin spaces
|
||||
for l in lines:
|
||||
if string.strip( l[:margin] ) == "":
|
||||
l = l[margin:]
|
||||
self.lines.append( l )
|
||||
|
||||
def dump( self, prefix = "", width = 60 ):
|
||||
lines = self.dump_lines( 0, width )
|
||||
for l in lines:
|
||||
print( prefix + l )
|
||||
|
||||
def dump_lines( self, margin = 0, width = 60 ):
|
||||
result = []
|
||||
for l in self.lines:
|
||||
result.append( " " * margin + l )
|
||||
return result
|
||||
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## DOC PARA CLASS
|
||||
##
|
||||
## `Normal' text paragraphs are stored in the `DocPara' class.
|
||||
##
|
||||
## `self.words' contains the list of words that make up the paragraph.
|
||||
##
|
||||
class DocPara:
|
||||
|
||||
def __init__( self, lines ):
|
||||
self.lines = None
|
||||
self.words = []
|
||||
for l in lines:
|
||||
l = string.strip( l )
|
||||
self.words.extend( string.split( l ) )
|
||||
|
||||
def dump( self, prefix = "", width = 60 ):
|
||||
lines = self.dump_lines( 0, width )
|
||||
for l in lines:
|
||||
print( prefix + l )
|
||||
|
||||
def dump_lines( self, margin = 0, width = 60 ):
|
||||
cur = "" # current line
|
||||
col = 0 # current width
|
||||
result = []
|
||||
|
||||
for word in self.words:
|
||||
ln = len( word )
|
||||
if col > 0:
|
||||
ln = ln + 1
|
||||
|
||||
if col + ln > width:
|
||||
result.append( " " * margin + cur )
|
||||
cur = word
|
||||
col = len( word )
|
||||
else:
|
||||
if col > 0:
|
||||
cur = cur + " "
|
||||
cur = cur + word
|
||||
col = col + ln
|
||||
|
||||
if col > 0:
|
||||
result.append( " " * margin + cur )
|
||||
|
||||
return result
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## DOC FIELD CLASS
|
||||
##
|
||||
## The `DocField' class stores a list containing either `DocPara' or
|
||||
## `DocCode' objects. Each DocField object also has an optional `name'
|
||||
## that is used when the object corresponds to a field or value definition.
|
||||
##
|
||||
class DocField:
|
||||
|
||||
def __init__( self, name, lines ):
|
||||
self.name = name # can be `None' for normal paragraphs/sources
|
||||
self.items = [] # list of items
|
||||
|
||||
mode_none = 0 # start parsing mode
|
||||
mode_code = 1 # parsing code sequences
|
||||
mode_para = 3 # parsing normal paragraph
|
||||
|
||||
margin = -1 # current code sequence indentation
|
||||
cur_lines = []
|
||||
|
||||
# analyze the markup lines to check whether they contain paragraphs,
|
||||
# code sequences, or fields definitions
|
||||
#
|
||||
start = 0
|
||||
mode = mode_none
|
||||
|
||||
for l in lines:
|
||||
# are we parsing a code sequence?
|
||||
if mode == mode_code:
|
||||
m = re_code_end.match( l )
|
||||
if m and len( m.group( 1 ) ) <= margin:
|
||||
# that's it, we finished the code sequence
|
||||
code = DocCode( 0, cur_lines )
|
||||
self.items.append( code )
|
||||
margin = -1
|
||||
cur_lines = []
|
||||
mode = mode_none
|
||||
else:
|
||||
# otherwise continue the code sequence
|
||||
cur_lines.append( l[margin:] )
|
||||
else:
|
||||
# start of code sequence?
|
||||
m = re_code_start.match( l )
|
||||
if m:
|
||||
# save current lines
|
||||
if cur_lines:
|
||||
para = DocPara( cur_lines )
|
||||
self.items.append( para )
|
||||
cur_lines = []
|
||||
|
||||
# switch to code extraction mode
|
||||
margin = len( m.group( 1 ) )
|
||||
mode = mode_code
|
||||
else:
|
||||
if not string.split( l ) and cur_lines:
|
||||
# if the line is empty, we end the current paragraph,
|
||||
# if any
|
||||
para = DocPara( cur_lines )
|
||||
self.items.append( para )
|
||||
cur_lines = []
|
||||
else:
|
||||
# otherwise, simply add the line to the current
|
||||
# paragraph
|
||||
cur_lines.append( l )
|
||||
|
||||
if mode == mode_code:
|
||||
# unexpected end of code sequence
|
||||
code = DocCode( margin, cur_lines )
|
||||
self.items.append( code )
|
||||
elif cur_lines:
|
||||
para = DocPara( cur_lines )
|
||||
self.items.append( para )
|
||||
|
||||
def dump( self, prefix = "" ):
|
||||
if self.field:
|
||||
print( prefix + self.field + " ::" )
|
||||
prefix = prefix + "----"
|
||||
|
||||
first = 1
|
||||
for p in self.items:
|
||||
if not first:
|
||||
print( "" )
|
||||
p.dump( prefix )
|
||||
first = 0
|
||||
|
||||
def dump_lines( self, margin = 0, width = 60 ):
|
||||
result = []
|
||||
nl = None
|
||||
|
||||
for p in self.items:
|
||||
if nl:
|
||||
result.append( "" )
|
||||
|
||||
result.extend( p.dump_lines( margin, width ) )
|
||||
nl = 1
|
||||
|
||||
return result
|
||||
|
||||
|
||||
#
# A regular expression to detect field definitions.
#
# Examples:
#
#   foo ::
#   foo.bar ::
#
re_field = re.compile( r"""
                         \s*
                           (
                             \w*
                           |
                             \w (\w | \.)* \w
                           )
                         \s* ::
                       """, re.VERBOSE )
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## DOC MARKUP CLASS
|
||||
##
|
||||
class DocMarkup:
|
||||
|
||||
def __init__( self, tag, lines ):
|
||||
self.tag = string.lower( tag )
|
||||
self.fields = []
|
||||
|
||||
cur_lines = []
|
||||
field = None
|
||||
mode = 0
|
||||
|
||||
for l in lines:
|
||||
m = re_field.match( l )
|
||||
if m:
|
||||
# We detected the start of a new field definition.
|
||||
|
||||
# first, save the current one
|
||||
if cur_lines:
|
||||
f = DocField( field, cur_lines )
|
||||
self.fields.append( f )
|
||||
cur_lines = []
|
||||
field = None
|
||||
|
||||
field = m.group( 1 ) # record field name
|
||||
ln = len( m.group( 0 ) )
|
||||
l = " " * ln + l[ln:]
|
||||
cur_lines = [l]
|
||||
else:
|
||||
cur_lines.append( l )
|
||||
|
||||
if field or cur_lines:
|
||||
f = DocField( field, cur_lines )
|
||||
self.fields.append( f )
|
||||
|
||||
def get_name( self ):
|
||||
try:
|
||||
return self.fields[0].items[0].words[0]
|
||||
except:
|
||||
return None
|
||||
|
||||
def dump( self, margin ):
|
||||
print( " " * margin + "<" + self.tag + ">" )
|
||||
for f in self.fields:
|
||||
f.dump( " " )
|
||||
print( " " * margin + "</" + self.tag + ">" )
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## DOC CHAPTER CLASS
|
||||
##
|
||||
class DocChapter:
|
||||
|
||||
def __init__( self, block ):
|
||||
self.block = block
|
||||
self.sections = []
|
||||
if block:
|
||||
self.name = block.name
|
||||
self.title = block.get_markup_words( "title" )
|
||||
self.order = block.get_markup_words( "sections" )
|
||||
else:
|
||||
self.name = "Other"
|
||||
self.title = string.split( "Miscellaneous" )
|
||||
self.order = []
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## DOC SECTION CLASS
|
||||
##
|
||||
class DocSection:
|
||||
|
||||
def __init__( self, name = "Other" ):
|
||||
self.name = name
|
||||
self.blocks = {}
|
||||
self.block_names = [] # ordered block names in section
|
||||
self.defs = []
|
||||
self.abstract = ""
|
||||
self.description = ""
|
||||
self.order = []
|
||||
self.title = "ERROR"
|
||||
self.chapter = None
|
||||
|
||||
def add_def( self, block ):
|
||||
self.defs.append( block )
|
||||
|
||||
def add_block( self, block ):
|
||||
self.block_names.append( block.name )
|
||||
self.blocks[block.name] = block
|
||||
|
||||
def process( self ):
|
||||
# look up one block that contains a valid section description
|
||||
for block in self.defs:
|
||||
title = block.get_markup_text( "title" )
|
||||
if title:
|
||||
self.title = title
|
||||
self.abstract = block.get_markup_words( "abstract" )
|
||||
self.description = block.get_markup_items( "description" )
|
||||
self.order = block.get_markup_words_all( "order" )
|
||||
return
|
||||
|
||||
def reorder( self ):
|
||||
self.block_names = sort_order_list( self.block_names, self.order )
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## CONTENT PROCESSOR CLASS
|
||||
##
|
||||
class ContentProcessor:
|
||||
|
||||
def __init__( self ):
|
||||
"""Initialize a block content processor."""
|
||||
self.reset()
|
||||
|
||||
self.sections = {} # dictionary of documentation sections
|
||||
self.section = None # current documentation section
|
||||
|
||||
self.chapters = [] # list of chapters
|
||||
|
||||
self.headers = {} # dictionary of header macros
|
||||
|
||||
def set_section( self, section_name ):
|
||||
"""Set current section during parsing."""
|
||||
if not section_name in self.sections:
|
||||
section = DocSection( section_name )
|
||||
self.sections[section_name] = section
|
||||
self.section = section
|
||||
else:
|
||||
self.section = self.sections[section_name]
|
||||
|
||||
def add_chapter( self, block ):
|
||||
chapter = DocChapter( block )
|
||||
self.chapters.append( chapter )
|
||||
|
||||
def reset( self ):
|
||||
"""Reset the content processor for a new block."""
|
||||
self.markups = []
|
||||
self.markup = None
|
||||
self.markup_lines = []
|
||||
|
||||
def add_markup( self ):
|
||||
"""Add a new markup section."""
|
||||
if self.markup and self.markup_lines:
|
||||
|
||||
# get rid of last line of markup if it's empty
|
||||
marks = self.markup_lines
|
||||
if len( marks ) > 0 and not string.strip( marks[-1] ):
|
||||
self.markup_lines = marks[:-1]
|
||||
|
||||
m = DocMarkup( self.markup, self.markup_lines )
|
||||
|
||||
self.markups.append( m )
|
||||
|
||||
self.markup = None
|
||||
self.markup_lines = []
|
||||
|
||||
def process_content( self, content ):
|
||||
"""Process a block content and return a list of DocMarkup objects
|
||||
corresponding to it."""
|
||||
markup = None
|
||||
markup_lines = []
|
||||
first = 1
|
||||
|
||||
margin = -1
|
||||
in_code = 0
|
||||
|
||||
for line in content:
|
||||
if in_code:
|
||||
m = re_code_end.match( line )
|
||||
if m and len( m.group( 1 ) ) <= margin:
|
||||
in_code = 0
|
||||
margin = -1
|
||||
else:
|
||||
m = re_code_start.match( line )
|
||||
if m:
|
||||
in_code = 1
|
||||
margin = len( m.group( 1 ) )
|
||||
|
||||
found = None
|
||||
|
||||
if not in_code:
|
||||
for t in re_markup_tags:
|
||||
m = t.match( line )
|
||||
if m:
|
||||
found = string.lower( m.group( 1 ) )
|
||||
prefix = len( m.group( 0 ) )
|
||||
# remove markup from line
|
||||
line = " " * prefix + line[prefix:]
|
||||
break
|
||||
|
||||
# is it the start of a new markup section ?
|
||||
if found:
|
||||
first = 0
|
||||
self.add_markup() # add current markup content
|
||||
self.markup = found
|
||||
if len( string.strip( line ) ) > 0:
|
||||
self.markup_lines.append( line )
|
||||
elif first == 0:
|
||||
self.markup_lines.append( line )
|
||||
|
||||
self.add_markup()
|
||||
|
||||
return self.markups
|
||||
|
||||
def parse_sources( self, source_processor ):
|
||||
blocks = source_processor.blocks
|
||||
count = len( blocks )
|
||||
|
||||
for n in range( count ):
|
||||
source = blocks[n]
|
||||
if source.content:
|
||||
# this is a documentation comment, we need to catch
|
||||
# all following normal blocks in the "follow" list
|
||||
#
|
||||
follow = []
|
||||
m = n + 1
|
||||
while m < count and not blocks[m].content:
|
||||
follow.append( blocks[m] )
|
||||
m = m + 1
|
||||
|
||||
doc_block = DocBlock( source, follow, self )
|
||||
|
||||
def finish( self ):
|
||||
# process all sections to extract their abstract, description
|
||||
# and ordered list of items
|
||||
#
|
||||
for sec in self.sections.values():
|
||||
sec.process()
|
||||
|
||||
# process chapters to check that all sections are correctly
|
||||
# listed there
|
||||
for chap in self.chapters:
|
||||
for sec in chap.order:
|
||||
if sec in self.sections:
|
||||
section = self.sections[sec]
|
||||
section.chapter = chap
|
||||
section.reorder()
|
||||
chap.sections.append( section )
|
||||
else:
|
||||
sys.stderr.write( "WARNING: chapter '" + \
|
||||
chap.name + "' in " + chap.block.location() + \
|
||||
" lists unknown section '" + sec + "'\n" )
|
||||
|
||||
# check that all sections are in a chapter
|
||||
#
|
||||
others = []
|
||||
for sec in self.sections.values():
|
||||
if not sec.chapter:
|
||||
sec.reorder()
|
||||
others.append( sec )
|
||||
|
||||
# create a new special chapter for all remaining sections
|
||||
# when necessary
|
||||
#
|
||||
if others:
|
||||
chap = DocChapter( None )
|
||||
chap.sections = others
|
||||
self.chapters.append( chap )
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## DOC BLOCK CLASS
|
||||
##
|
||||
class DocBlock:
|
||||
|
||||
def __init__( self, source, follow, processor ):
|
||||
processor.reset()
|
||||
|
||||
self.source = source
|
||||
self.code = []
|
||||
self.type = "ERRTYPE"
|
||||
self.name = "ERRNAME"
|
||||
self.section = processor.section
|
||||
self.markups = processor.process_content( source.content )
|
||||
|
||||
# compute block type from first markup tag
|
||||
try:
|
||||
self.type = self.markups[0].tag
|
||||
except:
|
||||
pass
|
||||
|
||||
# compute block name from first markup paragraph
|
||||
try:
|
||||
markup = self.markups[0]
|
||||
para = markup.fields[0].items[0]
|
||||
name = para.words[0]
|
||||
m = re_identifier.match( name )
|
||||
if m:
|
||||
name = m.group( 1 )
|
||||
self.name = name
|
||||
except:
|
||||
pass
|
||||
|
||||
if self.type == "section":
|
||||
# detect new section starts
|
||||
processor.set_section( self.name )
|
||||
processor.section.add_def( self )
|
||||
elif self.type == "chapter":
|
||||
# detect new chapter
|
||||
processor.add_chapter( self )
|
||||
else:
|
||||
processor.section.add_block( self )
|
||||
|
||||
# now, compute the source lines relevant to this documentation
|
||||
# block. We keep normal comments in for obvious reasons (??)
|
||||
source = []
|
||||
for b in follow:
|
||||
if b.format:
|
||||
break
|
||||
for l in b.lines:
|
||||
# collect header macro definitions
|
||||
m = re_header_macro.match( l )
|
||||
if m:
|
||||
processor.headers[m.group( 2 )] = m.group( 1 );
|
||||
|
||||
# we use "/* */" as a separator
|
||||
if re_source_sep.match( l ):
|
||||
break
|
||||
source.append( l )
|
||||
|
||||
# now strip the leading and trailing empty lines from the sources
|
||||
start = 0
|
||||
end = len( source ) - 1
|
||||
|
||||
while start < end and not string.strip( source[start] ):
|
||||
start = start + 1
|
||||
|
||||
while start < end and not string.strip( source[end] ):
|
||||
end = end - 1
|
||||
|
||||
if start == end and not string.strip( source[start] ):
|
||||
self.code = []
|
||||
else:
|
||||
self.code = source[start:end + 1]
|
||||
|
||||
def location( self ):
|
||||
return self.source.location()
|
||||
|
||||
def get_markup( self, tag_name ):
|
||||
"""Return the DocMarkup corresponding to a given tag in a block."""
|
||||
for m in self.markups:
|
||||
if m.tag == string.lower( tag_name ):
|
||||
return m
|
||||
return None
|
||||
|
||||
def get_markup_words( self, tag_name ):
|
||||
try:
|
||||
m = self.get_markup( tag_name )
|
||||
return m.fields[0].items[0].words
|
||||
except:
|
||||
return []
|
||||
|
||||
def get_markup_words_all( self, tag_name ):
|
||||
try:
|
||||
m = self.get_markup( tag_name )
|
||||
words = []
|
||||
for item in m.fields[0].items:
|
||||
# We honour empty lines in an `<Order>' section element by
|
||||
# adding the sentinel `/empty/'. The formatter should then
|
||||
# convert it to an appropriate representation in the
|
||||
# `section_enter' function.
|
||||
words += item.words
|
||||
words.append( "/empty/" )
|
||||
return words
|
||||
except:
|
||||
return []
|
||||
|
||||
def get_markup_text( self, tag_name ):
|
||||
result = self.get_markup_words( tag_name )
|
||||
return string.join( result )
|
||||
|
||||
def get_markup_items( self, tag_name ):
|
||||
try:
|
||||
m = self.get_markup( tag_name )
|
||||
return m.fields[0].items
|
||||
except:
|
||||
return None
|
||||
|
||||
# eof
|
|
src/tools/docmaker/docbeauty.py
@@ -1,111 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# DocBeauty (c) 2003, 2004, 2008 David Turner <david@freetype.org>
|
||||
#
|
||||
# This program is used to beautify the documentation comments used
|
||||
# in the FreeType 2 public headers.
|
||||
#
|
||||
|
||||
from sources import *
|
||||
from content import *
|
||||
from utils import *
|
||||
|
||||
import sys, os, string, getopt
|
||||
|
||||
|
||||
content_processor = ContentProcessor()
|
||||
|
||||
|
||||
def beautify_block( block ):
|
||||
if block.content:
|
||||
content_processor.reset()
|
||||
|
||||
markups = content_processor.process_content( block.content )
|
||||
text = []
|
||||
first = 1
|
||||
|
||||
for markup in markups:
|
||||
text.extend( markup.beautify( first ) )
|
||||
first = 0
|
||||
|
||||
# now beautify the documentation "borders" themselves
|
||||
lines = [" /*************************************************************************"]
|
||||
for l in text:
|
||||
lines.append( " *" + l )
|
||||
lines.append( " */" )
|
||||
|
||||
block.lines = lines
|
||||
|
||||
|
||||
def usage():
|
||||
print( "\nDocBeauty 0.1 Usage information\n" )
|
||||
print( " docbeauty [options] file1 [file2 ...]\n" )
|
||||
print( "using the following options:\n" )
|
||||
print( " -h : print this page" )
|
||||
print( " -b : backup original files with the 'orig' extension" )
|
||||
print( "" )
|
||||
print( " --backup : same as -b" )
|
||||
|
||||
|
||||
def main( argv ):
|
||||
"""main program loop"""
|
||||
|
||||
global output_dir
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt( sys.argv[1:], \
|
||||
"hb", \
|
||||
["help", "backup"] )
|
||||
except getopt.GetoptError:
|
||||
usage()
|
||||
sys.exit( 2 )
|
||||
|
||||
if args == []:
|
||||
usage()
|
||||
sys.exit( 1 )
|
||||
|
||||
# process options
|
||||
#
|
||||
output_dir = None
|
||||
do_backup = None
|
||||
|
||||
for opt in opts:
|
||||
if opt[0] in ( "-h", "--help" ):
|
||||
usage()
|
||||
sys.exit( 0 )
|
||||
|
||||
if opt[0] in ( "-b", "--backup" ):
|
||||
do_backup = 1
|
||||
|
||||
# create context and processor
|
||||
source_processor = SourceProcessor()
|
||||
|
||||
# retrieve the list of files to process
|
||||
file_list = make_file_list( args )
|
||||
for filename in file_list:
|
||||
source_processor.parse_file( filename )
|
||||
|
||||
for block in source_processor.blocks:
|
||||
beautify_block( block )
|
||||
|
||||
new_name = filename + ".new"
|
||||
ok = None
|
||||
|
||||
try:
|
||||
file = open( new_name, "wt" )
|
||||
for block in source_processor.blocks:
|
||||
for line in block.lines:
|
||||
file.write( line )
|
||||
file.write( "\n" )
|
||||
file.close()
|
||||
except:
|
||||
ok = 0
|
||||
|
||||
|
||||
# if called from the command line
|
||||
#
|
||||
if __name__ == '__main__':
|
||||
main( sys.argv )
|
||||
|
||||
|
||||
# eof
|
|
src/tools/docmaker/docmaker.py
@@ -1,115 +0,0 @@
|
|||
#!/usr/bin/env python
|
||||
#
|
||||
# docmaker.py
|
||||
#
|
||||
# Convert source code markup to HTML documentation.
|
||||
#
|
||||
# Copyright 2002-2018 by
|
||||
# David Turner.
|
||||
#
|
||||
# This file is part of the FreeType project, and may only be used,
|
||||
# modified, and distributed under the terms of the FreeType project
|
||||
# license, LICENSE.TXT. By continuing to use, modify, or distribute
|
||||
# this file you indicate that you have read the license and
|
||||
# understand and accept it fully.
|
||||
|
||||
#
|
||||
# This program is a re-write of the original DocMaker tool used to generate
|
||||
# the API Reference of the FreeType font rendering engine by converting
|
||||
# in-source comments into structured HTML.
|
||||
#
|
||||
# This new version is capable of outputting XML data as well as accepting
|
||||
# more liberal formatting options. It also uses regular expression matching
|
||||
# and substitution to speed up operation significantly.
|
||||
#
|
||||
|
||||
from sources import *
|
||||
from content import *
|
||||
from utils import *
|
||||
from formatter import *
|
||||
from tohtml import *
|
||||
|
||||
import utils
|
||||
|
||||
import sys, glob, getopt
|
||||
|
||||
|
||||
def usage():
|
||||
print( "\nDocMaker Usage information\n" )
|
||||
print( " docmaker [options] file1 [file2 ...]\n" )
|
||||
print( "using the following options:\n" )
|
||||
print( " -h : print this page" )
|
||||
print( " -t : set project title, as in '-t \"My Project\"'" )
|
||||
print( " -o : set output directory, as in '-o mydir'" )
|
||||
print( " -p : set documentation prefix, as in '-p ft2'" )
|
||||
print( "" )
|
||||
print( " --title : same as -t, as in '--title=\"My Project\"'" )
|
||||
print( " --output : same as -o, as in '--output=mydir'" )
|
||||
print( " --prefix : same as -p, as in '--prefix=ft2'" )
|
||||
|
||||
|
||||
def main( argv ):
|
||||
"""Main program loop."""
|
||||
|
||||
global output_dir
|
||||
|
||||
try:
|
||||
opts, args = getopt.getopt( sys.argv[1:],
|
||||
"ht:o:p:",
|
||||
["help", "title=", "output=", "prefix="] )
|
||||
except getopt.GetoptError:
|
||||
usage()
|
||||
sys.exit( 2 )
|
||||
|
||||
if args == []:
|
||||
usage()
|
||||
sys.exit( 1 )
|
||||
|
||||
# process options
|
||||
project_title = "Project"
|
||||
project_prefix = None
|
||||
output_dir = None
|
||||
|
||||
for opt in opts:
|
||||
if opt[0] in ( "-h", "--help" ):
|
||||
usage()
|
||||
sys.exit( 0 )
|
||||
|
||||
if opt[0] in ( "-t", "--title" ):
|
||||
project_title = opt[1]
|
||||
|
||||
if opt[0] in ( "-o", "--output" ):
|
||||
utils.output_dir = opt[1]
|
||||
|
||||
if opt[0] in ( "-p", "--prefix" ):
|
||||
project_prefix = opt[1]
|
||||
|
||||
check_output()
|
||||
|
||||
# create context and processor
|
||||
source_processor = SourceProcessor()
|
||||
content_processor = ContentProcessor()
|
||||
|
||||
# retrieve the list of files to process
|
||||
file_list = make_file_list( args )
|
||||
for filename in file_list:
|
||||
source_processor.parse_file( filename )
|
||||
content_processor.parse_sources( source_processor )
|
||||
|
||||
# process sections
|
||||
content_processor.finish()
|
||||
|
||||
formatter = HtmlFormatter( content_processor,
|
||||
project_title,
|
||||
project_prefix )
|
||||
|
||||
formatter.toc_dump()
|
||||
formatter.index_dump()
|
||||
formatter.section_dump_all()
|
||||
|
||||
|
||||
# if called from the command line
|
||||
if __name__ == '__main__':
|
||||
main( sys.argv )
|
||||
|
||||
# eof
|
|
src/tools/docmaker/formatter.py
@@ -1,228 +0,0 @@
|
|||
#
|
||||
# formatter.py
|
||||
#
|
||||
# Convert parsed content blocks to a structured document (library file).
|
||||
#
|
||||
# Copyright 2002-2018 by
|
||||
# David Turner.
|
||||
#
|
||||
# This file is part of the FreeType project, and may only be used,
|
||||
# modified, and distributed under the terms of the FreeType project
|
||||
# license, LICENSE.TXT. By continuing to use, modify, or distribute
|
||||
# this file you indicate that you have read the license and
|
||||
# understand and accept it fully.
|
||||
|
||||
#
|
||||
# This is the base Formatter class. Its purpose is to convert a content
|
||||
# processor's data into specific documents (i.e., table of contents, global
|
||||
# index, and individual API reference indices).
|
||||
#
|
||||
# You need to sub-class it to output anything sensible. For example, the
|
||||
# file `tohtml.py' contains the definition of the `HtmlFormatter' sub-class
|
||||
# to output HTML.
|
||||
#
|
||||
|
||||
|
||||
from sources import *
|
||||
from content import *
|
||||
from utils import *
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## FORMATTER CLASS
|
||||
##
|
||||
class Formatter:
|
||||
|
||||
def __init__( self, processor ):
|
||||
self.processor = processor
|
||||
self.identifiers = {}
|
||||
self.chapters = processor.chapters
|
||||
self.sections = processor.sections.values()
|
||||
self.block_index = []
|
||||
|
||||
# store all blocks in a dictionary
|
||||
self.blocks = []
|
||||
for section in self.sections:
|
||||
for block in section.blocks.values():
|
||||
self.add_identifier( block.name, block )
|
||||
|
||||
# add enumeration values to the index, since this is useful
|
||||
for markup in block.markups:
|
||||
if markup.tag == 'values':
|
||||
for field in markup.fields:
|
||||
self.add_identifier( field.name, block )
|
||||
|
||||
self.block_index = self.identifiers.keys()
|
||||
self.block_index.sort( key = index_key )
|
||||
|
||||
# also add section names to dictionary (without making them appear
|
||||
# in the index)
|
||||
for section in self.sections:
|
||||
self.add_identifier( section.name, section )
|
||||
|
||||
def add_identifier( self, name, block ):
|
||||
if name in self.identifiers:
|
||||
# duplicate name!
|
||||
sys.stderr.write( "WARNING: duplicate definition for"
|
||||
+ " '" + name + "' "
|
||||
+ "in " + block.location() + ", "
|
||||
+ "previous definition in "
|
||||
+ self.identifiers[name].location()
|
||||
+ "\n" )
|
||||
else:
|
||||
self.identifiers[name] = block
|
||||
|
||||
#
|
||||
# formatting the table of contents
|
||||
#
|
||||
def toc_enter( self ):
|
||||
pass
|
||||
|
||||
def toc_chapter_enter( self, chapter ):
|
||||
pass
|
||||
|
||||
def toc_section_enter( self, section ):
|
||||
pass
|
||||
|
||||
def toc_section_exit( self, section ):
|
||||
pass
|
||||
|
||||
def toc_chapter_exit( self, chapter ):
|
||||
pass
|
||||
|
||||
def toc_index( self, index_filename ):
|
||||
pass
|
||||
|
||||
def toc_exit( self ):
|
||||
pass
|
||||
|
||||
def toc_dump( self, toc_filename = None, index_filename = None ):
|
||||
output = None
|
||||
if toc_filename:
|
||||
output = open_output( toc_filename )
|
||||
|
||||
self.toc_enter()
|
||||
|
||||
for chap in self.processor.chapters:
|
||||
|
||||
self.toc_chapter_enter( chap )
|
||||
|
||||
for section in chap.sections:
|
||||
self.toc_section_enter( section )
|
||||
self.toc_section_exit( section )
|
||||
|
||||
self.toc_chapter_exit( chap )
|
||||
|
||||
self.toc_index( index_filename )
|
||||
|
||||
self.toc_exit()
|
||||
|
||||
if output:
|
||||
close_output( output )
|
||||
|
||||
#
|
||||
# formatting the index
|
||||
#
|
||||
def index_enter( self ):
|
||||
pass
|
||||
|
||||
def index_name_enter( self, name ):
|
||||
pass
|
||||
|
||||
def index_name_exit( self, name ):
|
||||
pass
|
||||
|
||||
def index_exit( self ):
|
||||
pass
|
||||
|
||||
def index_dump( self, index_filename = None ):
|
||||
output = None
|
||||
if index_filename:
|
||||
output = open_output( index_filename )
|
||||
|
||||
self.index_enter()
|
||||
|
||||
for name in self.block_index:
|
||||
self.index_name_enter( name )
|
||||
self.index_name_exit( name )
|
||||
|
||||
self.index_exit()
|
||||
|
||||
if output:
|
||||
close_output( output )
|
||||
|
||||
#
|
||||
# formatting a section
|
||||
#
|
||||
def section_enter( self, section ):
|
||||
pass
|
||||
|
||||
def block_enter( self, block ):
|
||||
pass
|
||||
|
||||
def markup_enter( self, markup, block = None ):
|
||||
pass
|
||||
|
||||
def field_enter( self, field, markup = None, block = None ):
|
||||
pass
|
||||
|
||||
def field_exit( self, field, markup = None, block = None ):
|
||||
pass
|
||||
|
||||
def markup_exit( self, markup, block = None ):
|
||||
pass
|
||||
|
||||
def block_exit( self, block ):
|
||||
pass
|
||||
|
||||
def section_exit( self, section ):
|
||||
pass
|
||||
|
||||
def section_dump( self, section, section_filename = None ):
|
||||
output = None
|
||||
if section_filename:
|
||||
output = open_output( section_filename )
|
||||
|
||||
self.section_enter( section )
|
||||
|
||||
for name in section.block_names:
|
||||
skip_entry = 0
|
||||
try:
|
||||
block = self.identifiers[name]
|
||||
# `block_names' can contain field names also,
|
||||
# which we filter out
|
||||
for markup in block.markups:
|
||||
if markup.tag == 'values':
|
||||
for field in markup.fields:
|
||||
if field.name == name:
|
||||
skip_entry = 1
|
||||
except:
|
||||
skip_entry = 1 # this happens e.g. for `/empty/' entries
|
||||
|
||||
if skip_entry:
|
||||
continue
|
||||
|
||||
self.block_enter( block )
|
||||
|
||||
for markup in block.markups[1:]: # always ignore first markup!
|
||||
self.markup_enter( markup, block )
|
||||
|
||||
for field in markup.fields:
|
||||
self.field_enter( field, markup, block )
|
||||
self.field_exit( field, markup, block )
|
||||
|
||||
self.markup_exit( markup, block )
|
||||
|
||||
self.block_exit( block )
|
||||
|
||||
self.section_exit( section )
|
||||
|
||||
if output:
|
||||
close_output( output )
|
||||
|
||||
def section_dump_all( self ):
|
||||
for section in self.sections:
|
||||
self.section_dump( section )
|
||||
|
||||
# eof
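#
# [Editor's sketch -- not part of the original sources.]  The `toc_*',
# `index_*', and `section_*' methods above are deliberately empty; a
# formatter only overrides the hooks it needs.  A minimal subclass that
# simply lists every indexed identifier could look like this
# (`_DemoIndexFormatter' is a hypothetical name used only here):
#
class _DemoIndexFormatter( Formatter ):

    def index_name_enter( self, name ):
        print( name )

# Usage, assuming `processor' is a ContentProcessor that has already parsed
# the sources and run `finish()':
#
#   _DemoIndexFormatter( processor ).index_dump()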
|
|
src/tools/docmaker/sources.py
@@ -1,410 +0,0 @@
|
|||
#
|
||||
# sources.py
|
||||
#
|
||||
# Convert source code comments to multi-line blocks (library file).
|
||||
#
|
||||
# Copyright 2002-2018 by
|
||||
# David Turner.
|
||||
#
|
||||
# This file is part of the FreeType project, and may only be used,
|
||||
# modified, and distributed under the terms of the FreeType project
|
||||
# license, LICENSE.TXT. By continuing to use, modify, or distribute
|
||||
# this file you indicate that you have read the license and
|
||||
# understand and accept it fully.
|
||||
|
||||
#
|
||||
# This library file contains definitions of classes needed to decompose C
|
||||
# source code files into a series of multi-line `blocks'. There are two
|
||||
# kinds of blocks.
|
||||
#
|
||||
# - Normal blocks, which contain source code or ordinary comments.
|
||||
#
|
||||
# - Documentation blocks, which have restricted formatting, and whose text
|
||||
# always start with a documentation markup tag like `<Function>',
|
||||
# `<Type>', etc.
|
||||
#
|
||||
# The routines to process the content of documentation blocks are contained
|
||||
# in file `content.py'; the classes and methods found here only deal with
|
||||
# text parsing and basic documentation block extraction.
|
||||
#
|
||||
|
||||
|
||||
import fileinput, re, string
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## SOURCE BLOCK FORMAT CLASS
|
||||
##
|
||||
## A simple class containing compiled regular expressions to detect
|
||||
## potential documentation format block comments within C source code.
|
||||
##
|
||||
## The `column' pattern must contain a group to `unbox' the content of
|
||||
## documentation comment blocks.
|
||||
##
|
||||
## Later on, paragraphs are converted to long lines, which simplifies the
|
||||
## regular expressions that act upon the text.
|
||||
##
|
||||
class SourceBlockFormat:
|
||||
|
||||
def __init__( self, id, start, column, end ):
|
||||
"""Create a block pattern, used to recognize special documentation
|
||||
blocks."""
|
||||
self.id = id
|
||||
self.start = re.compile( start, re.VERBOSE )
|
||||
self.column = re.compile( column, re.VERBOSE )
|
||||
self.end = re.compile( end, re.VERBOSE )
|
||||
|
||||
|
||||
#
|
||||
# Format 1 documentation comment blocks.
|
||||
#
|
||||
# /************************************/ (at least 2 asterisks)
|
||||
# /* */
|
||||
# /* */
|
||||
# /* */
|
||||
# /************************************/ (at least 2 asterisks)
|
||||
#
|
||||
start = r'''
|
||||
\s* # any number of whitespace
|
||||
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
|
||||
\s*$ # probably followed by whitespace
|
||||
'''
|
||||
|
||||
column = r'''
|
||||
\s* # any number of whitespace
|
||||
/\*{1} # followed by '/' and precisely one asterisk
|
||||
([^*].*) # followed by anything (group 1)
|
||||
\*{1}/ # followed by one asterisk and a '/'
|
||||
\s*$ # probably followed by whitespace
|
||||
'''
|
||||
|
||||
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
|
||||
|
||||
|
||||
#
|
||||
# Format 2 documentation comment blocks.
|
||||
#
|
||||
# /************************************ (at least 2 asterisks)
|
||||
# *
|
||||
# * (1 asterisk)
|
||||
# *
|
||||
# */ (1 or more asterisks)
|
||||
#
|
||||
start = r'''
|
||||
\s* # any number of whitespace
|
||||
/\*{2,} # followed by '/' and at least two asterisks
|
||||
\s*$ # probably followed by whitespace
|
||||
'''
|
||||
|
||||
column = r'''
|
||||
\s* # any number of whitespace
|
||||
\*{1}(?![*/]) # followed by precisely one asterisk not followed by `/'
|
||||
(.*) # then anything (group1)
|
||||
'''
|
||||
|
||||
end = r'''
|
||||
\s* # any number of whitespace
|
||||
\*+/ # followed by at least one asterisk, then '/'
|
||||
'''
|
||||
|
||||
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
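#
# [Editor's illustration -- not part of the original sources.]  For a
# format 2 block the `column' pattern `unboxes' the payload of each comment
# line; `_demo_m' is a hypothetical name used only for this example.
#
_demo_m = re_source_block_format2.column.match( "   *  The payload text." )

assert _demo_m and _demo_m.group( 1 ).strip() == "The payload text."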
|
||||
|
||||
|
||||
#
|
||||
# The list of supported documentation block formats. We could add new ones
|
||||
# quite easily.
|
||||
#
|
||||
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
|
||||
|
||||
|
||||
#
# The following regular expressions correspond to markup tags within the
# documentation comment blocks. They are equivalent despite their different
# syntax.
#
# A markup tag consists of letters or character `-', to be found in group 1.
#
# Notice that a markup tag _must_ begin a new paragraph.
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format

#
# The list of supported markup tags. We could add new ones quite easily.
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
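#
# [Editor's illustration -- not part of the original sources.]  Both tag
# syntaxes yield the same group; hypothetical `_demo_*' names, for this
# example only.
#
_demo_m1 = re_markup_tag1.match( "  <Function>" )
_demo_m2 = re_markup_tag2.match( "  @description:" )

assert _demo_m1.group( 1 ) == "Function"
assert _demo_m2.group( 1 ) == "description"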
|
||||
|
||||
|
||||
#
# A regular expression to detect a cross reference, after markup tags have
# been stripped off.
#
# Two syntax forms are supported:
#
#   @<name>
#   @<name>[<id>]
#
# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
# and `-'. Use `<id>' if there are multiple, valid `<name>' entries.
#
# Example: @foo[bar]
#
re_crossref = re.compile( r"""
                            @
                            (?P<name>(?:\w|-)+
                                     (?:\[(?:\w|-)+\])?)
                            (?P<rest>.*)
                          """, re.VERBOSE )
|
||||
|
||||
#
# Two regular expressions to detect italic and bold markup, respectively.
# Group 1 is the markup, group 2 the rest of the line.
#
# Note that the markup is limited to words consisting of letters, digits,
# the characters `_' and `-', or an apostrophe (but not as the first
# character).
#
re_italic = re.compile( r"_((?:\w|-)(?:\w|'|-)*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*((?:\w|-)(?:\w|'|-)*)\*(.*)" ) # *bold*
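#
# [Editor's illustration -- not part of the original sources.]  A word like
# `_slightly_' is split into the emphasized part and the rest of the line;
# `_demo_m' is a hypothetical name used only for this example.
#
_demo_m = re_italic.match( "_slightly_ darker glyphs" )

assert _demo_m.group( 1 ) == "slightly"
assert _demo_m.group( 2 ) == " darker glyphs"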
|
||||
|
||||
#
|
||||
# This regular expression code to identify an URL has been taken from
|
||||
#
|
||||
# https://mail.python.org/pipermail/tutor/2002-September/017228.html
|
||||
#
|
||||
# (with slight modifications).
|
||||
#
|
||||
urls = r'(?:https?|telnet|gopher|file|wais|ftp)'
|
||||
ltrs = r'\w'
|
||||
gunk = r'/#~:.?+=&%@!\-'
|
||||
punc = r'.:?\-'
|
||||
any = "%(ltrs)s%(gunk)s%(punc)s" % { 'ltrs' : ltrs,
|
||||
'gunk' : gunk,
|
||||
'punc' : punc }
|
||||
url = r"""
|
||||
(
|
||||
\b # start at word boundary
|
||||
%(urls)s : # need resource and a colon
|
||||
[%(any)s] +? # followed by one or more of any valid
|
||||
# character, but be conservative and
|
||||
# take only what you need to...
|
||||
(?= # [look-ahead non-consumptive assertion]
|
||||
[%(punc)s]* # either 0 or more punctuation
|
||||
(?: # [non-grouping parentheses]
|
||||
[^%(any)s] | $ # followed by a non-url char
|
||||
# or end of the string
|
||||
)
|
||||
)
|
||||
)
|
||||
""" % {'urls' : urls,
|
||||
'any' : any,
|
||||
'punc' : punc }
|
||||
|
||||
re_url = re.compile( url, re.VERBOSE | re.MULTILINE )
|
||||
|
||||
#
|
||||
# A regular expression that stops collection of comments for the current
|
||||
# block.
|
||||
#
|
||||
re_source_sep = re.compile( r'\s*/\*\s*\*/' ) # /* */
|
||||
|
||||
#
|
||||
# A regular expression to find possible C identifiers while outputting
|
||||
# source code verbatim, covering things like `*foo' or `(bar'. Group 1 is
|
||||
# the prefix, group 2 the identifier -- since we scan lines from left to
|
||||
# right, sequentially splitting the source code into prefix and identifier
|
||||
# is fully sufficient for our purposes.
|
||||
#
|
||||
re_source_crossref = re.compile( r'(\W*)(\w*)' )
|
||||
|
||||
#
|
||||
# A regular expression that matches a list of reserved C source keywords.
|
||||
#
|
||||
re_source_keywords = re.compile( '''\\b ( typedef |
|
||||
struct |
|
||||
enum |
|
||||
union |
|
||||
const |
|
||||
char |
|
||||
int |
|
||||
short |
|
||||
long |
|
||||
void |
|
||||
signed |
|
||||
unsigned |
|
||||
\#include |
|
||||
\#define |
|
||||
\#undef |
|
||||
\#if |
|
||||
\#ifdef |
|
||||
\#ifndef |
|
||||
\#else |
|
||||
\#endif ) \\b''', re.VERBOSE )
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## SOURCE BLOCK CLASS
|
||||
##
|
||||
## There are two important fields in a `SourceBlock' object.
|
||||
##
|
||||
## self.lines
|
||||
## A list of text lines for the corresponding block.
|
||||
##
|
||||
## self.content
|
||||
## For documentation comment blocks only, this is the block content
|
||||
## that has been `unboxed' from its decoration. This is `None' for all
|
||||
## other blocks (i.e., sources or ordinary comments with no starting
|
||||
## markup tag)
|
||||
##
|
||||
class SourceBlock:
|
||||
|
||||
def __init__( self, processor, filename, lineno, lines ):
|
||||
self.processor = processor
|
||||
self.filename = filename
|
||||
self.lineno = lineno
|
||||
self.lines = lines[:]
|
||||
self.format = processor.format
|
||||
self.content = []
|
||||
|
||||
if self.format == None:
|
||||
return
|
||||
|
||||
words = []
|
||||
|
||||
# extract comment lines
|
||||
lines = []
|
||||
|
||||
for line0 in self.lines:
|
||||
m = self.format.column.match( line0 )
|
||||
if m:
|
||||
lines.append( m.group( 1 ) )
|
||||
|
||||
# now, look for a markup tag
|
||||
for l in lines:
|
||||
l = string.strip( l )
|
||||
if len( l ) > 0:
|
||||
for tag in re_markup_tags:
|
||||
if tag.match( l ):
|
||||
self.content = lines
|
||||
return
|
||||
|
||||
def location( self ):
|
||||
return "(" + self.filename + ":" + repr( self.lineno ) + ")"
|
||||
|
||||
# debugging only -- not used in normal operations
|
||||
def dump( self ):
|
||||
if self.content:
|
||||
print( "{{{content start---" )
|
||||
for l in self.content:
|
||||
print( l )
|
||||
print( "---content end}}}" )
|
||||
return
|
||||
|
||||
fmt = ""
|
||||
if self.format:
|
||||
fmt = repr( self.format.id ) + " "
|
||||
|
||||
for line in self.lines:
|
||||
print( line )
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## SOURCE PROCESSOR CLASS
|
||||
##
|
||||
## The `SourceProcessor' is in charge of reading a C source file and
|
||||
## decomposing it into a series of different `SourceBlock' objects.
|
||||
##
|
||||
## A SourceBlock object consists of the following data.
|
||||
##
|
||||
## - A documentation comment block using one of the layouts above. Its
|
||||
## exact format will be discussed later.
|
||||
##
|
||||
## - Normal sources lines, including comments.
|
||||
##
|
||||
##
|
||||
class SourceProcessor:
|
||||
|
||||
def __init__( self ):
|
||||
"""Initialize a source processor."""
|
||||
self.blocks = []
|
||||
self.filename = None
|
||||
self.format = None
|
||||
self.lines = []
|
||||
|
||||
def reset( self ):
|
||||
"""Reset a block processor and clean up all its blocks."""
|
||||
self.blocks = []
|
||||
self.format = None
|
||||
|
||||
def parse_file( self, filename ):
|
||||
"""Parse a C source file and add its blocks to the processor's
|
||||
list."""
|
||||
self.reset()
|
||||
|
||||
self.filename = filename
|
||||
|
||||
fileinput.close()
|
||||
self.format = None
|
||||
self.lineno = 0
|
||||
self.lines = []
|
||||
|
||||
for line in fileinput.input( filename ):
|
||||
# strip trailing newlines, important on Windows machines!
|
||||
if line[-1] == '\012':
|
||||
line = line[0:-1]
|
||||
|
||||
if self.format == None:
|
||||
self.process_normal_line( line )
|
||||
else:
|
||||
if self.format.end.match( line ):
|
||||
# A normal block end. Add it to `lines' and create a
|
||||
# new block
|
||||
self.lines.append( line )
|
||||
self.add_block_lines()
|
||||
elif self.format.column.match( line ):
|
||||
# A normal column line. Add it to `lines'.
|
||||
self.lines.append( line )
|
||||
else:
|
||||
# An unexpected block end. Create a new block, but
|
||||
# don't process the line.
|
||||
self.add_block_lines()
|
||||
|
||||
# we need to process the line again
|
||||
self.process_normal_line( line )
|
||||
|
||||
# record the last lines
|
||||
self.add_block_lines()
|
||||
|
||||
def process_normal_line( self, line ):
|
||||
"""Process a normal line and check whether it is the start of a new
|
||||
block."""
|
||||
for f in re_source_block_formats:
|
||||
if f.start.match( line ):
|
||||
self.add_block_lines()
|
||||
self.format = f
|
||||
self.lineno = fileinput.filelineno()
|
||||
|
||||
self.lines.append( line )
|
||||
|
||||
def add_block_lines( self ):
|
||||
"""Add the current accumulated lines and create a new block."""
|
||||
if self.lines != []:
|
||||
block = SourceBlock( self,
|
||||
self.filename,
|
||||
self.lineno,
|
||||
self.lines )
|
||||
|
||||
self.blocks.append( block )
|
||||
self.format = None
|
||||
self.lines = []
|
||||
|
||||
# debugging only, not used in normal operations
|
||||
def dump( self ):
|
||||
"""Print all blocks in a processor."""
|
||||
for b in self.blocks:
|
||||
b.dump()
|
||||
|
||||
# eof
|
|
src/tools/docmaker/tohtml.py
@@ -1,726 +0,0 @@
|
|||
#
|
||||
# tohtml.py
|
||||
#
|
||||
# A sub-class container of the `Formatter' class to produce HTML.
|
||||
#
|
||||
# Copyright 2002-2018 by
|
||||
# David Turner.
|
||||
#
|
||||
# This file is part of the FreeType project, and may only be used,
|
||||
# modified, and distributed under the terms of the FreeType project
|
||||
# license, LICENSE.TXT. By continuing to use, modify, or distribute
|
||||
# this file you indicate that you have read the license and
|
||||
# understand and accept it fully.
|
||||
|
||||
# The parent class is contained in file `formatter.py'.
|
||||
|
||||
|
||||
from sources import *
|
||||
from content import *
|
||||
from formatter import *
|
||||
|
||||
import time
|
||||
|
||||
|
||||
# The following strings define the HTML header used by all generated pages.
|
||||
html_header_1 = """\
|
||||
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
|
||||
"https://www.w3.org/TR/html4/loose.dtd">
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
|
||||
<title>\
|
||||
"""
|
||||
|
||||
html_header_2 = """\
|
||||
API Reference</title>
|
||||
<style type="text/css">
|
||||
a:link { color: #0000EF; }
|
||||
a:visited { color: #51188E; }
|
||||
a:hover { color: #FF0000; }
|
||||
|
||||
body { font-family: Verdana, Geneva, Arial, Helvetica, serif;
|
||||
color: #000000;
|
||||
background: #FFFFFF;
|
||||
width: 87%;
|
||||
margin: auto; }
|
||||
|
||||
div.section { width: 75%;
|
||||
margin: auto; }
|
||||
div.section hr { margin: 4ex 0 1ex 0; }
|
||||
div.section h4 { background-color: #EEEEFF;
|
||||
font-size: medium;
|
||||
font-style: oblique;
|
||||
font-weight: bold;
|
||||
margin: 3ex 0 1.5ex 9%;
|
||||
padding: 0.3ex 0 0.3ex 1%; }
|
||||
div.section p { margin: 1.5ex 0 1.5ex 10%; }
|
||||
div.section pre { margin: 3ex 0 3ex 9%;
|
||||
background-color: #D6E8FF;
|
||||
padding: 2ex 0 2ex 1%; }
|
||||
div.section table.fields { width: 90%;
|
||||
margin: 1.5ex 0 1.5ex 10%; }
|
||||
div.section table.toc { width: 95%;
|
||||
margin: 1.5ex 0 1.5ex 5%; }
|
||||
div.timestamp { text-align: center;
|
||||
font-size: 69%;
|
||||
margin: 1.5ex 0 1.5ex 0; }
|
||||
|
||||
h1 { text-align: center; }
|
||||
h3 { font-size: medium;
|
||||
margin: 4ex 0 1.5ex 0; }
|
||||
|
||||
p { text-align: justify; }
|
||||
|
||||
pre.colored { color: blue; }
|
||||
|
||||
span.keyword { font-family: monospace;
|
||||
text-align: left;
|
||||
white-space: pre;
|
||||
color: darkblue; }
|
||||
|
||||
table.fields td.val { font-weight: bold;
|
||||
text-align: right;
|
||||
width: 30%;
|
||||
vertical-align: baseline;
|
||||
padding: 1ex 1em 1ex 0; }
|
||||
table.fields td.desc { vertical-align: baseline;
|
||||
padding: 1ex 0 1ex 1em; }
|
||||
table.fields td.desc p:first-child { margin: 0; }
|
||||
table.fields td.desc p { margin: 1.5ex 0 0 0; }
|
||||
table.index { margin: 6ex auto 6ex auto;
|
||||
border: 0;
|
||||
border-collapse: separate;
|
||||
border-spacing: 1em 0.3ex; }
|
||||
table.index tr { padding: 0; }
|
||||
table.index td { padding: 0; }
|
||||
table.index-toc-link { width: 100%;
|
||||
border: 0;
|
||||
border-spacing: 0;
|
||||
margin: 1ex 0 1ex 0; }
|
||||
table.index-toc-link td.left { padding: 0 0.5em 0 0.5em;
|
||||
font-size: 83%;
|
||||
text-align: left; }
|
||||
table.index-toc-link td.middle { padding: 0 0.5em 0 0.5em;
|
||||
font-size: 83%;
|
||||
text-align: center; }
|
||||
table.index-toc-link td.right { padding: 0 0.5em 0 0.5em;
|
||||
font-size: 83%;
|
||||
text-align: right; }
|
||||
table.synopsis { margin: 6ex auto 6ex auto;
|
||||
border: 0;
|
||||
border-collapse: separate;
|
||||
border-spacing: 2em 0.6ex; }
|
||||
table.synopsis tr { padding: 0; }
|
||||
table.synopsis td { padding: 0; }
|
||||
table.toc td.link { width: 30%;
|
||||
text-align: right;
|
||||
vertical-align: baseline;
|
||||
padding: 1ex 1em 1ex 0; }
|
||||
table.toc td.desc { vertical-align: baseline;
|
||||
padding: 1ex 0 1ex 1em;
|
||||
text-align: left; }
|
||||
table.toc td.desc p:first-child { margin: 0;
|
||||
text-align: left; }
|
||||
table.toc td.desc p { margin: 1.5ex 0 0 0;
|
||||
text-align: left; }
|
||||
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
"""
|
||||
|
||||
html_header_3l = """
|
||||
<table class="index-toc-link"><tr><td class="left">[<a href="\
|
||||
"""
|
||||
|
||||
html_header_3r = """
|
||||
<table class="index-toc-link"><tr><td class="right">[<a href="\
|
||||
"""
|
||||
|
||||
html_header_4 = """\
|
||||
">Index</a>]</td><td class="right">[<a href="\
|
||||
"""
|
||||
|
||||
html_header_5t = """\
|
||||
">TOC</a>]</td></tr></table>
|
||||
<h1>\
|
||||
"""
|
||||
|
||||
html_header_5i = """\
|
||||
">Index</a>]</td></tr></table>
|
||||
<h1>\
|
||||
"""
|
||||
|
||||
html_header_6 = """\
|
||||
API Reference</h1>
|
||||
"""
|
||||
|
||||
|
||||
# The HTML footer used by all generated pages.
|
||||
html_footer = """\
|
||||
</body>
|
||||
</html>\
|
||||
"""
|
||||
|
||||
# The header and footer used for each section.
|
||||
section_title_header1 = '<h1 id="'
|
||||
section_title_header2 = '">'
|
||||
section_title_footer = "</h1>"
|
||||
|
||||
# The header and footer used for code segments.
|
||||
code_header = '<pre class="colored">'
|
||||
code_footer = '</pre>'
|
||||
|
||||
# Paragraph header and footer.
|
||||
para_header = "<p>"
|
||||
para_footer = "</p>"
|
||||
|
||||
# Block header and footer.
|
||||
block_header = '<div class="section">'
|
||||
block_footer_start = """\
|
||||
<hr>
|
||||
<table class="index-toc-link"><tr><td class="left">[<a href="\
|
||||
"""
|
||||
block_footer_middle = """\
|
||||
">Index</a>]</td>\
|
||||
<td class="middle">[<a href="#">Top</a>]</td>\
|
||||
<td class="right">[<a href="\
|
||||
"""
|
||||
block_footer_end = """\
|
||||
">TOC</a>]</td></tr></table></div>
|
||||
"""
|
||||
|
||||
# Description header/footer.
|
||||
description_header = ""
|
||||
description_footer = ""
|
||||
|
||||
# Marker header/inter/footer combination.
|
||||
marker_header = "<h4>"
|
||||
marker_inter = "</h4>"
|
||||
marker_footer = ""
|
||||
|
||||
# Header location header/footer.
|
||||
header_location_header = "<p>"
|
||||
header_location_footer = "</p>"
|
||||
|
||||
# Source code extracts header/footer.
|
||||
source_header = "<pre>"
|
||||
source_footer = "</pre>"
|
||||
|
||||
# Chapter header/inter/footer.
|
||||
chapter_header = """\
|
||||
<div class="section">
|
||||
<h2>\
|
||||
"""
|
||||
chapter_inter = '</h2>'
|
||||
chapter_footer = '</div>'
|
||||
|
||||
# Index footer.
|
||||
index_footer_start = """\
|
||||
<hr>
|
||||
<table class="index-toc-link"><tr><td class="right">[<a href="\
|
||||
"""
|
||||
index_footer_end = """\
|
||||
">TOC</a>]</td></tr></table>
|
||||
"""
|
||||
|
||||
# TOC footer.
|
||||
toc_footer_start = """\
|
||||
<hr>
|
||||
<table class="index-toc-link"><tr><td class="left">[<a href="\
|
||||
"""
|
||||
toc_footer_end = """\
|
||||
">Index</a>]</td></tr></table>
|
||||
"""
|
||||
|
||||
|
||||
# Source language keyword coloration and styling.
|
||||
keyword_prefix = '<span class="keyword">'
|
||||
keyword_suffix = '</span>'
|
||||
|
||||
section_synopsis_header = '<h2>Synopsis</h2>'
|
||||
section_synopsis_footer = ''
|
||||
|
||||
|
||||
# Translate a single line of source to HTML. This converts `<', `>', and
# `&' into `&lt;', `&gt;', and `&amp;'.
#
def html_quote( line ):
    result = string.replace( line, "&", "&amp;" )
    result = string.replace( result, "<", "&lt;" )
    result = string.replace( result, ">", "&gt;" )
    return result
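#
# [Editor's note -- not part of the original sources.]  The ampersand must
# be replaced first; otherwise the `&' introduced by `&lt;' and `&gt;'
# would itself be escaped a second time.  For example:
#
#   html_quote( 'if ( a < b && c > d )' )
#       -> 'if ( a &lt; b &amp;&amp; c &gt; d )'
#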
|
||||
|
||||
|
||||
################################################################
|
||||
##
|
||||
## HTML FORMATTER CLASS
|
||||
##
|
||||
class HtmlFormatter( Formatter ):
|
||||
|
||||
def __init__( self, processor, project_title, file_prefix ):
|
||||
Formatter.__init__( self, processor )
|
||||
|
||||
global html_header_1
|
||||
global html_header_2
|
||||
global html_header_3l, html_header_3r
|
||||
global html_header_4
|
||||
global html_header_5t, html_header_5i
|
||||
global html_header_6
|
||||
global html_footer
|
||||
|
||||
if file_prefix:
|
||||
file_prefix = file_prefix + "-"
|
||||
else:
|
||||
file_prefix = ""
|
||||
|
||||
self.headers = processor.headers
|
||||
self.project_title = project_title
|
||||
self.file_prefix = file_prefix
|
||||
self.html_header = (
|
||||
html_header_1 + project_title
|
||||
+ html_header_2
|
||||
+ html_header_3l + file_prefix + "index.html"
|
||||
+ html_header_4 + file_prefix + "toc.html"
|
||||
+ html_header_5t + project_title
|
||||
+ html_header_6 )
|
||||
self.html_index_header = (
|
||||
html_header_1 + project_title
|
||||
+ html_header_2
|
||||
+ html_header_3r + file_prefix + "toc.html"
|
||||
+ html_header_5t + project_title
|
||||
+ html_header_6 )
|
||||
self.html_toc_header = (
|
||||
html_header_1 + project_title
|
||||
+ html_header_2
|
||||
+ html_header_3l + file_prefix + "index.html"
|
||||
+ html_header_5i + project_title
|
||||
+ html_header_6 )
|
||||
self.html_footer = (
|
||||
'<div class="timestamp">generated on '
|
||||
+ time.asctime( time.localtime( time.time() ) )
|
||||
+ "</div>" + html_footer )
|
||||
|
||||
self.columns = 3
|
||||
|
||||
def make_section_url( self, section ):
|
||||
return self.file_prefix + section.name + ".html"
|
||||
|
||||
def make_block_url( self, block, name = None ):
|
||||
if name == None:
|
||||
name = block.name
|
||||
|
||||
try:
|
||||
section_url = self.make_section_url( block.section )
|
||||
except:
|
||||
# we already have a section
|
||||
section_url = self.make_section_url( block )
|
||||
|
||||
return section_url + "#" + name
|
||||
|
||||
def make_html_word( self, word ):
|
||||
"""Analyze a simple word to detect cross-references and markup."""
|
||||
# handle cross-references
|
||||
m = re_crossref.match( word )
|
||||
if m:
|
||||
try:
|
||||
name = m.group( 'name' )
|
||||
rest = m.group( 'rest' )
|
||||
block = self.identifiers[name]
|
||||
url = self.make_block_url( block )
|
||||
# display `foo[bar]' as `foo'
|
||||
name = re.sub( r'\[.*\]', '', name )
|
||||
# normalize url, following RFC 3986
|
||||
url = string.replace( url, "[", "(" )
|
||||
url = string.replace( url, "]", ")" )
|
||||
|
||||
try:
|
||||
# for sections, display title
|
||||
url = ( '‘<a href="' + url + '">'
|
||||
+ block.title + '</a>’'
|
||||
+ rest )
|
||||
except:
|
||||
url = ( '<a href="' + url + '">'
|
||||
+ name + '</a>'
|
||||
+ rest )
|
||||
|
||||
return url
|
||||
except:
|
||||
# we detected a cross-reference to an unknown item
|
||||
sys.stderr.write( "WARNING: undefined cross reference"
|
||||
+ " '" + name + "'.\n" )
|
||||
return '?' + name + '?' + rest
|
||||
|
||||
# handle markup for italic and bold
|
||||
m = re_italic.match( word )
|
||||
if m:
|
||||
name = m.group( 1 )
|
||||
rest = m.group( 2 )
|
||||
return '<i>' + name + '</i>' + rest
|
||||
|
||||
m = re_bold.match( word )
|
||||
if m:
|
||||
name = m.group( 1 )
|
||||
rest = m.group( 2 )
|
||||
return '<b>' + name + '</b>' + rest
|
||||
|
||||
return html_quote( word )
|
||||
|
||||

    def make_html_para( self, words ):
        """Convert words of a paragraph into tagged HTML text.  Also handle
           cross references."""
        line = ""
        if words:
            line = self.make_html_word( words[0] )
            for word in words[1:]:
                line = line + " " + self.make_html_word( word )
            # handle hyperlinks
            line = re_url.sub( r'<a href="\1">\1</a>', line )
            # convert `...' quotations into real left and right single quotes
            line = re.sub( r"(^|\W)`(.*?)'(\W|$)",
                           r'\1‘\2’\3',
                           line )
            # convert tilde into non-breakable space
            line = string.replace( line, "~", "&nbsp;" )

        return para_header + line + para_footer
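
    # Illustrative sketch (not part of the original file) of the two
    # substitutions performed above:
    #
    #     re.sub( r"(^|\W)`(.*?)'(\W|$)", r'\1‘\2’\3', "see `FT_Face' below" )
    #         -> 'see ‘FT_Face’ below'
    #     string.replace( "a~b", "~", "&nbsp;" )
    #         -> 'a&nbsp;b'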

    def make_html_code( self, lines ):
        """Convert a code sequence to HTML."""
        line = code_header + '\n'
        for l in lines:
            line = line + html_quote( l ).rstrip() + '\n'

        return line + code_footer

    def make_html_items( self, items ):
        """Convert a field's content into HTML."""
        lines = []
        for item in items:
            if item.lines:
                lines.append( self.make_html_code( item.lines ) )
            else:
                lines.append( self.make_html_para( item.words ) )

        return string.join( lines, '\n' )

    def print_html_items( self, items ):
        print( self.make_html_items( items ) )

    def print_html_field( self, field ):
        if field.name:
            print( '<table><tr valign="top"><td><b>'
                   + field.name
                   + "</b></td><td>" )

        print( self.make_html_items( field.items ) )

        if field.name:
            print( "</td></tr></table>" )

    def html_source_quote( self, line, block_name = None ):
        result = ""
        while line:
            m = re_source_crossref.match( line )
            if m:
                name = m.group( 2 )
                prefix = html_quote( m.group( 1 ) )
                length = len( m.group( 0 ) )

                if name == block_name:
                    # this is the current block name, if any
                    result = result + prefix + '<b>' + name + '</b>'
                elif re_source_keywords.match( name ):
                    # this is a C keyword
                    result = ( result + prefix
                               + keyword_prefix + name + keyword_suffix )
                elif name in self.identifiers:
                    # this is a known identifier
                    block = self.identifiers[name]
                    id = block.name

                    # link to a field ID if possible
                    try:
                        for markup in block.markups:
                            if markup.tag == 'values':
                                for field in markup.fields:
                                    if field.name:
                                        id = name

                        result = ( result + prefix
                                   + '<a href="'
                                   + self.make_block_url( block, id )
                                   + '">' + name + '</a>' )
                    except:
                        # sections don't have `markups'; however, we don't
                        # want references to sections here anyway
                        result = result + html_quote( line[:length] )

                else:
                    result = result + html_quote( line[:length] )

                line = line[length:]
            else:
                result = result + html_quote( line )
                line = []

        return result

    def print_html_field_list( self, fields ):
        print( '<table class="fields">' )
        for field in fields:
            print( '<tr><td class="val" id="' + field.name + '">'
                   + field.name
                   + '</td><td class="desc">' )
            self.print_html_items( field.items )
            print( "</td></tr>" )
        print( "</table>" )

    def print_html_markup( self, markup ):
        table_fields = []
        for field in markup.fields:
            if field.name:
                # We begin a new series of field or value definitions.  We
                # record them in the `table_fields' list before outputting
                # all of them as a single table.
                table_fields.append( field )
            else:
                if table_fields:
                    self.print_html_field_list( table_fields )
                    table_fields = []

                self.print_html_items( field.items )

        if table_fields:
            self.print_html_field_list( table_fields )

    #
    # formatting the index
    #
    def index_enter( self ):
        print( self.html_index_header )
        self.index_items = {}

    def index_name_enter( self, name ):
        block = self.identifiers[name]
        url = self.make_block_url( block )
        self.index_items[name] = url

    def index_exit( self ):
        # `block_index' already contains the sorted list of index names
        count = len( self.block_index )
        rows = ( count + self.columns - 1 ) // self.columns

        print( '<table class="index">' )
        for r in range( rows ):
            line = "<tr>"
            for c in range( self.columns ):
                i = r + c * rows
                if i < count:
                    bname = self.block_index[r + c * rows]
                    url = self.index_items[bname]
                    # display `foo[bar]' as `foo (bar)'
                    bname = string.replace( bname, "[", " (" )
                    bname = string.replace( bname, "]", ")" )
                    # normalize url, following RFC 3986
                    url = string.replace( url, "[", "(" )
                    url = string.replace( url, "]", ")" )
                    line = ( line + '<td><a href="' + url + '">'
                             + bname + '</a></td>' )
                else:
                    line = line + '<td></td>'
            line = line + "</tr>"
            print( line )

        print( "</table>" )

        print( index_footer_start
               + self.file_prefix + "toc.html"
               + index_footer_end )

        print( self.html_footer )

        self.index_items = {}
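
    # Worked example of the column-major layout in `index_exit' above (added
    # for illustration, not part of the original file): with count = 7 and
    # self.columns = 3 we get rows = ( 7 + 3 - 1 ) // 3 = 3, and cell (r, c)
    # shows entry i = r + c * rows:
    #
    #     row 0:  0  3  6
    #     row 1:  1  4  -
    #     row 2:  2  5  -
    #
    # Names thus run down the columns, and out-of-range cells are emitted as
    # empty `<td></td>' fillers.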

    def index_dump( self, index_filename = None ):
        if index_filename == None:
            index_filename = self.file_prefix + "index.html"

        Formatter.index_dump( self, index_filename )

    #
    # formatting the table of contents
    #
    def toc_enter( self ):
        print( self.html_toc_header )
        print( "<h1>Table of Contents</h1>" )

    def toc_chapter_enter( self, chapter ):
        print( chapter_header + string.join( chapter.title ) + chapter_inter )
        print( '<table class="toc">' )

    def toc_section_enter( self, section ):
        print( '<tr><td class="link">'
               + '<a href="' + self.make_section_url( section ) + '">'
               + section.title + '</a></td><td class="desc">' )
        print( self.make_html_para( section.abstract ) )

    def toc_section_exit( self, section ):
        print( "</td></tr>" )

    def toc_chapter_exit( self, chapter ):
        print( "</table>" )
        print( chapter_footer )

    def toc_index( self, index_filename ):
        print( chapter_header
               + '<a href="' + index_filename + '">Global Index</a>'
               + chapter_inter + chapter_footer )

    def toc_exit( self ):
        print( toc_footer_start
               + self.file_prefix + "index.html"
               + toc_footer_end )

        print( self.html_footer )

    def toc_dump( self, toc_filename = None, index_filename = None ):
        if toc_filename == None:
            toc_filename = self.file_prefix + "toc.html"

        if index_filename == None:
            index_filename = self.file_prefix + "index.html"

        Formatter.toc_dump( self, toc_filename, index_filename )

    #
    # formatting sections
    #
    def section_enter( self, section ):
        print( self.html_header )

        print( section_title_header1 + section.name + section_title_header2
               + section.title
               + section_title_footer )

        maxwidth = 0
        for b in section.blocks.values():
            if len( b.name ) > maxwidth:
                maxwidth = len( b.name )

        width = 70  # XXX magic number
        if maxwidth > 0:
            # print section synopsis
            print( section_synopsis_header )
            print( '<table class="synopsis">' )

            columns = width // maxwidth
            if columns < 1:
                columns = 1

            count = len( section.block_names )
            # don't handle last entry if it is empty
            if section.block_names[-1] == "/empty/":
                count -= 1
            rows = ( count + columns - 1 ) // columns

            for r in range( rows ):
                line = "<tr>"
                for c in range( columns ):
                    i = r + c * rows
                    line = line + '<td>'
                    if i < count:
                        name = section.block_names[i]
                        if name == "/empty/":
                            # it can happen that a complete row is empty, and
                            # without a proper `filler' the browser might
                            # collapse the row to a much smaller height (or
                            # even omit it completely)
                            line = line + "&nbsp;"
                        else:
                            url = name
                            # display `foo[bar]' as `foo'
                            name = re.sub( r'\[.*\]', '', name )
                            # normalize url, following RFC 3986
                            url = string.replace( url, "[", "(" )
                            url = string.replace( url, "]", ")" )
                            line = ( line + '<a href="#' + url + '">'
                                     + name + '</a>' )

                    line = line + '</td>'
                line = line + "</tr>"
                print( line )

            print( "</table>" )
            print( section_synopsis_footer )

        print( description_header )
        print( self.make_html_items( section.description ) )
        print( description_footer )

    def block_enter( self, block ):
        print( block_header )

        # place html anchor if needed
        if block.name:
            url = block.name
            # display `foo[bar]' as `foo'
            name = re.sub( r'\[.*\]', '', block.name )
            # normalize url, following RFC 3986
            url = string.replace( url, "[", "(" )
            url = string.replace( url, "]", ")" )
            print( '<h3 id="' + url + '">' + name + '</h3>' )

        # dump the block C source lines now
        if block.code:
            header = ''
            for f in self.headers.keys():
                header_filename = os.path.normpath( block.source.filename )
                if header_filename.find( os.path.normpath( f ) ) >= 0:
                    header = self.headers[f] + ' (' + f + ')'
                    break

            # if not header:
            #     sys.stderr.write(
            #         "WARNING: No header macro for"
            #         + " '" + block.source.filename + "'.\n" )

            if header:
                print( header_location_header
                       + 'Defined in ' + header + '.'
                       + header_location_footer )

            print( source_header )
            for l in block.code:
                print( self.html_source_quote( l, block.name ) )
            print( source_footer )

    def markup_enter( self, markup, block ):
        if markup.tag == "description":
            print( description_header )
        else:
            print( marker_header + markup.tag + marker_inter )

        self.print_html_markup( markup )

    def markup_exit( self, markup, block ):
        if markup.tag == "description":
            print( description_footer )
        else:
            print( marker_footer )

    def block_exit( self, block ):
        print( block_footer_start + self.file_prefix + "index.html"
               + block_footer_middle + self.file_prefix + "toc.html"
               + block_footer_end )

    def section_exit( self, section ):
        print( html_footer )

    def section_dump_all( self ):
        for section in self.sections:
            self.section_dump( section,
                               self.file_prefix + section.name + '.html' )

# eof
@ -1,127 +0,0 @@
#
# utils.py
#
# Auxiliary functions for the `docmaker' tool (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT.  By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.


import string, sys, os, glob, itertools


# current output directory
#
output_dir = None


# A function that generates a sorting key.  We want lexicographical order
# (primary key) except that capital letters are sorted before lowercase
# ones (secondary key).
#
# The primary key is implemented by lowercasing the input.  The secondary
# key is simply the original data appended, character by character.  For
# example, the sort key for `FT_x' is `fFtT__xx', while the sort key for
# `ft_X' is `fftt__xX'.  Since ASCII codes of uppercase letters are
# numerically smaller than the codes of lowercase letters, `fFtT__xx' gets
# sorted before `fftt__xX'.
#
def index_key( s ):
    return string.join( itertools.chain( *zip( s.lower(), s ) ) )
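
# Illustrative check (added, not part of the original file): under Python 2
# the interleaving above makes uppercase variants sort first, e.g.
#
#     sorted( ['ft_X', 'FT_x'], key = index_key )
#
# yields ['FT_x', 'ft_X'] -- both share the lowercased primary key, and the
# uppercase letters of `FT_x' win on the secondary key.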


# Sort `input_list', placing the elements of `order_list' in front.
#
def sort_order_list( input_list, order_list ):
    new_list = order_list[:]
    for id in input_list:
        if not id in order_list:
            new_list.append( id )
    return new_list


# Divert standard output to a given project documentation file.  Use
# `output_dir' to determine the filename location if necessary and save the
# old stdout handle in a tuple that is returned by this function.
#
def open_output( filename ):
    global output_dir

    if output_dir and output_dir != "":
        filename = output_dir + os.sep + filename

    old_stdout = sys.stdout
    new_file = open( filename, "w" )
    sys.stdout = new_file

    return ( new_file, old_stdout )


# Close the output that was returned by `open_output'.
#
def close_output( output ):
    output[0].close()
    sys.stdout = output[1]
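
# Typical pairing of the two helpers above (illustrative sketch only; the
# real call sites live in the formatter classes, not in this file):
#
#     out = open_output( "index.html" )   # redirect stdout to the file
#     print( "<html>..." )                # printed text lands in the file
#     close_output( out )                 # close it and restore old stdout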


# Check output directory.
#
def check_output():
    global output_dir
    if output_dir:
        if output_dir != "":
            if not os.path.isdir( output_dir ):
                sys.stderr.write( "argument"
                                  + " '" + output_dir + "' "
                                  + "is not a valid directory\n" )
                sys.exit( 2 )
        else:
            output_dir = None


def file_exists( pathname ):
    """Check that a given file exists."""
    result = 1
    try:
        file = open( pathname, "r" )
        file.close()
    except:
        result = None
        sys.stderr.write( pathname + " couldn't be accessed\n" )

    return result


def make_file_list( args = None ):
    """Build a list of input files from command-line arguments."""
    file_list = []
    # sys.stderr.write( repr( sys.argv[1 :] ) + '\n' )

    if not args:
        args = sys.argv[1:]

    for pathname in args:
        if string.find( pathname, '*' ) >= 0:
            newpath = glob.glob( pathname )
            newpath.sort()  # sort files -- this is important because
                            # of the order of files
        else:
            newpath = [pathname]

        file_list.extend( newpath )

    if len( file_list ) == 0:
        file_list = None
    else:
        # now filter the file list to remove non-existing ones
        file_list = filter( file_exists, file_list )

    return file_list

# eof