Remove `docwriter' source (now a pip package).

`docwriter' can now be found at

  https://github.com/freetype/docwriter

The PyPI project is published at

  https://pypi.org/project/docwriter/

Development will continue on the GitHub repository.

* src/tools/docwriter: Remove this folder and its contents from the
repository.
Nikhil Ramakrishnan 2018-08-04 17:22:37 +05:30
parent b83cf04d4a
commit b580cbb9db
20 changed files with 0 additions and 3340 deletions

@@ -1,115 +0,0 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Jupyter Notebook
.ipynb_checkpoints
# pyenv
.python-version
# celery beat schedule file
celerybeat-schedule
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
# Batch files
*.bat
# Header files
/include
/include_mod
/include_mark
# Movefiles personal script
movefiles.py

@@ -1,89 +0,0 @@
[![Build Status](https://travis-ci.com/nikramakrishnan/freetype-docwriter.svg?branch=master)](https://travis-ci.com/nikramakrishnan/freetype-docwriter)
[![Code Health](https://landscape.io/github/nikramakrishnan/freetype-docwriter/master/landscape.svg?style=flat)](https://landscape.io/github/nikramakrishnan/freetype-docwriter/master)
# FreeType Docwriter
Markdown documentation generator for the FreeType library.
## Setup Instructions
1. Clone this repository.
2. Clone the freetype2 repository from [here](http://git.savannah.gnu.org/cgit/freetype/freetype2.git/).
3. Convert the `include/` folder to markdown using the
[freetype-docs](https://github.com/nikramakrishnan/freetype-docs/tree/markdown) repository.
4. Copy files from `include_mark/`.
5. Run:
```bash
python -B docwriter.py --prefix=ft2 --title=FreeType-2.9.1 --output=./docs/reference \
./include_mark/freetype/*.h ./include_mark/freetype/config/*.h ./include_mark/freetype/cache/*.h
```
## Usage Information
```
docwriter [-h] [-t T] -o DIR [-p PRE] [-q | -v] files [files ...]
DocWriter Usage information
positional arguments:
files list of source files to parse, wildcards are allowed
optional arguments:
-h, --help show this help message and exit
-t T, --title T set project title, as in '-t "My Project"'
-o DIR, --output DIR set output directory, as in '-o mydir'
-p PRE, --prefix PRE set documentation prefix, as in '-p ft2'
-q, --quiet run quietly, show only errors
-v, --verbose increase output verbosity
```
## Running Tests
There are two possible test scenarios:
1. Running tests on both py27 and py36 (using tox - requires both python versions installed).
2. Running tests on the currently installed Python version.
They are detailed below.
### Test using Tox
To test on both py27 and py36:
1. Make sure `tox` is installed:
```bash
pip install tox
```
2. Ensure both py27 and py36 are installed.
3. Run tests:
```bash
tox
```
### Test on single python version
To test on current python version using pytest:
1. Make sure `pytest` is installed:
```bash
pip install pytest
```
2. Run tests:
```bash
python -m pytest
```
## License
This library is licensed under the [FreeType License](https://www.freetype.org/license.html).
## History
This library was originally written by David Turner as `docmaker`, which collected in-source
comments and presented the documentation as HTML. It has been modified many times since,
including a major refactor that added support for multiple output formats. The current
`docwriter` is the largest rewrite yet, making the tool more flexible, readable, maintainable, and usable.

@@ -1,51 +0,0 @@
#
# check.py
#
# Check if all external modules are present.
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Utility to check if all required modules are available.
The list of required modules can be modified in this file.
Usage:
import check
status = check.check()
"""
import logging
log = logging.getLogger( __name__ )
#
# Required imports
# Note that this is not the package name, but the module name as would be
# used in an import statement.
#
import_list = ["mistune", "yaml"]
def check():
    """Check if all required modules are present.
    Returns 0 on success, non-zero on error.
    """
    flag = 0
    for package in import_list:
        try:
            exec( "import " + package )
        except Exception:
            log.error( "Missing module: %s", package )
            flag = True
    if flag:
        return 1
    return 0

# eof
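A roughly equivalent check can be sketched with `importlib.import_module` instead of `exec`; the following is an illustrative standalone variant, not the module above:

```python
# Illustrative variant of check.check() using importlib instead of exec();
# `import_list' mirrors the list defined in check.py.
import importlib
import logging

log = logging.getLogger( __name__ )

import_list = ["mistune", "yaml"]

def check():
    """Return 0 if every required module can be imported, 1 otherwise."""
    missing = []
    for name in import_list:
        try:
            importlib.import_module( name )
        except ImportError:
            log.error( "Missing module: %s", name )
            missing.append( name )
    return 1 if missing else 0

if __name__ == "__main__":
    print( check() )
```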

@@ -1,689 +0,0 @@
#
# content.py
#
# Parse comment blocks to build content blocks (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""This module contains routines to parse documentation comment blocks,
building more structured objects out of them."""
from __future__ import print_function
import logging
import re
import sources
import utils
log = logging.getLogger( __name__ )
#
# Regular expressions to detect code sequences. `Code sequences' are simply
# code fragments delimited by '```' fences, as demonstrated in the following
# example. The language can optionally be specified on the first line after the
# backticks, and is used for syntax highlighting.
#
# ```c
# x = y + z;
# if ( zookoo == 2 )
# {
# foobar();
# }
# ```
#
# Note that the indentation of the first opening backticks and the last closing
# backticks must be exactly the same. The code sequence itself should have a
# larger indentation than the surrounding braces.
#
re_code_start = re.compile( r"(\s*)```([\w\+\#\-]+)?\s*$" )
re_code_end = re.compile( r"(\s*)```\s*$" )
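For illustration, a standalone sketch (re-compiling the same expressions) of how these two patterns recognize a fenced code sequence:

```python
# Sketch: the opening fence records a margin and an optional language;
# the closing fence only ends the sequence when its indentation is not deeper.
import re

re_code_start = re.compile( r"(\s*)```([\w\+\#\-]+)?\s*$" )
re_code_end   = re.compile( r"(\s*)```\s*$" )

m = re_code_start.match( "  ```c" )
print( len( m.group( 1 ) ), m.group( 2 ) )      # 2 c   (margin, language)
print( bool( re_code_end.match( "  ```" ) ) )   # True  (same margin, block ends)
```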
#
# A regular expression to isolate identifiers from other text. Two syntax
# forms are supported:
#
# <name>
# <name>[<id>]
#
# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
# and `-'. Use `<id>' if there are multiple, valid `<name>' entries; in the
# index, `<id>' will be appended in parentheses.
#
# For example,
#
# stem_darkening[autofit]
#
# becomes `stem_darkening (autofit)' in the index.
#
re_identifier = re.compile( r"""
((?:\w|-)+
(?:\[(?:\w|-)+\])?)
""", re.VERBOSE )
#
# We collect macro names ending in `_H' (group 1), as defined in
# `freetype/config/ftheader.h'. While outputting the object data, we use
# this info together with the object's file location (group 2) to emit the
# appropriate header file macro and its associated file name before the
# object itself.
#
# Example:
#
# #define FT_FREETYPE_H <freetype.h>
#
re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )
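For illustration, a standalone sketch of the two groups `re_header_macro` extracts from the example line in the comment above:

```python
# Sketch: group 1 is the `..._H' macro name, group 2 the header file name;
# DocBlock later stores them as processor.headers[group 2] = group 1.
import re

re_header_macro = re.compile( r'^#define\s{1,}(\w{1,}_H)\s{1,}<(.*)>' )

m = re_header_macro.match( "#define FT_FREETYPE_H  <freetype.h>" )
print( m.group( 1 ), m.group( 2 ) )   # FT_FREETYPE_H freetype.h
```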
################################################################
##
## DOC CODE CLASS
##
## The `DocCode' class is used to store source code lines.
##
## `self.lines' contains a set of source code lines that will be dumped as
## HTML in a <PRE> tag.
##
## The object is filled line by line by the parser; it strips the leading
## `margin' space from each input line before storing it in `self.lines'.
##
class DocCode( object ):
def __init__( self, margin, lines, lang = None ):
self.lines = []
self.words = None
self.lang = lang
# remove margin spaces
for l in lines:
if l[:margin].strip( ) == "":
l = l[margin:]
self.lines.append( l )
def dump( self, prefix = "" ):
lines = self.dump_lines( 0 )
for l in lines:
print( prefix + l )
def dump_lines( self, margin = 0 ):
result = []
for l in self.lines:
result.append( " " * margin + l )
return result
################################################################
##
## DOC PARA CLASS
##
## `Normal' text paragraphs are stored in the `DocPara' class.
##
## `self.words' contains the list of words that make up the paragraph.
##
class DocPara( object ):
def __init__( self, lines, margin = -1 ):
self.lines = None
self.words = []
self.indent = len( lines[0] ) - len( lines[0].lstrip() )
first_line = lines[0].strip()
indent_diff = self.indent - margin
if margin > 0 and indent_diff >= 4:
# if the first line has an indentation >= 4,
# add those spaces to it.
indent_list = [''] * indent_diff
self.words.extend( indent_list )
# This para is indented, the next may also be relative
# to the parent, so set indent to margin
self.indent = margin
self.words.extend( first_line.split() )
for l in lines[1:]:
l = l.strip()
self.words.extend( l.split() )
def dump( self, prefix = "" ):
lines = self.dump_lines( 0 )
for l in lines:
print( prefix + l )
def dump_lines( self, margin = 0, width = 60 ):
cur = "" # current line
col = 0 # current width
result = []
for word in self.words:
ln = len( word )
if col > 0:
ln = ln + 1
if col + ln > width:
result.append( " " * margin + cur )
cur = word
col = len( word )
else:
if col > 0:
cur = cur + " "
cur = cur + word
col = col + ln
if col > 0:
result.append( " " * margin + cur )
return result
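For illustration, a self-contained sketch of the greedy word wrapping that `DocPara.dump_lines` performs (the default width is 60 columns):

```python
# Sketch: same greedy fill as DocPara.dump_lines, shown standalone.
def wrap( words, margin = 0, width = 60 ):
    cur, col, result = "", 0, []
    for word in words:
        ln = len( word ) + ( 1 if col > 0 else 0 )
        if col + ln > width:
            result.append( " " * margin + cur )
            cur, col = word, len( word )
        else:
            cur = cur + ( " " if col > 0 else "" ) + word
            col = col + ln
    if col > 0:
        result.append( " " * margin + cur )
    return result

words = ( "This paragraph is long enough to be broken across "
          "several output lines by the greedy algorithm." ).split()
for line in wrap( words, margin = 2, width = 40 ):
    print( line )
```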
################################################################
##
## DOC FIELD CLASS
##
## The `DocField' class stores a list containing either `DocPara' or
## `DocCode' objects. Each DocField object also has an optional `name'
## that is used when the object corresponds to a field or value definition.
##
class DocField( object ):
def __init__( self, name, lines ):
self.name = name # can be `None' for normal paragraphs/sources
self.items = [] # list of items
mode_none = 0 # start parsing mode
mode_code = 1 # parsing code sequences
margin = -1 # current code sequence indentation
cur_lines = []
indent = -1
lang = None
# analyze the markup lines to check whether they contain paragraphs,
# code sequences, or fields definitions
#
mode = mode_none
for l in lines:
# are we parsing a code sequence?
if mode == mode_code:
m = re_code_end.match( l )
if m and len( m.group( 1 ) ) <= margin:
# that's it, we finished the code sequence
code = DocCode( 0, cur_lines, lang )
self.items.append( code )
margin = -1
cur_lines = []
mode = mode_none
else:
# otherwise continue the code sequence
cur_lines.append( l[margin:] )
else:
# start of code sequence?
m = re_code_start.match( l )
if m:
# save current lines
if cur_lines:
para = DocPara( cur_lines )
self.items.append( para )
cur_lines = []
# switch to code extraction mode
margin = len( m.group( 1 ) )
lang = m.group( 2 )
mode = mode_code
else:
if not l.split() and cur_lines:
# if the line is empty, we end the current paragraph,
# if any
para = DocPara( cur_lines, indent )
self.items.append( para )
# store indent value of current para
indent = para.indent
cur_lines = []
else:
# otherwise, simply add the line to the current
# paragraph
cur_lines.append( l )
if mode == mode_code:
# unexpected end of code sequence
code = DocCode( margin, cur_lines, lang )
self.items.append( code )
elif cur_lines:
para = DocPara( cur_lines, indent )
self.items.append( para )
def dump( self, prefix = "" ):
first = 1
for p in self.items:
if not first:
print( "" )
p.dump( prefix )
first = 0
def dump_lines( self, margin = 0, width = 60 ):
result = []
nl = None
for p in self.items:
if nl:
result.append( "" )
result.extend( p.dump_lines( margin, width ) )
nl = 1
return result
#
# A regular expression to detect field definitions.
#
# Examples:
#
# foo ::
# foo.bar ::
#
re_field = re.compile( r"""
\s*
(
\w*
|
\w (\w | \.)* \w
)
\s* ::
""", re.VERBOSE )
################################################################
##
## DOC MARKUP CLASS
##
class DocMarkup( object ):
def __init__( self, tag, lines ):
self.tag = tag.lower()
self.fields = []
cur_lines = []
field = None
for l in lines:
m = re_field.match( l )
if m:
# We detected the start of a new field definition.
# first, save the current one
if cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
cur_lines = []
field = None
field = m.group( 1 ) # record field name
ln = len( m.group( 0 ) )
l = " " * ln + l[ln:]
cur_lines = [l]
else:
cur_lines.append( l )
if field or cur_lines:
f = DocField( field, cur_lines )
self.fields.append( f )
def get_name( self ):
try:
return self.fields[0].items[0].words[0]
except Exception:
return None
def dump( self, margin ):
print( " " * margin + "<" + self.tag + ">" )
for f in self.fields:
f.dump( " " )
print( " " * margin + "</" + self.tag + ">" )
################################################################
##
## DOC CHAPTER CLASS
##
class DocChapter( object ):
def __init__( self, block ):
self.block = block
self.sections = []
if block:
self.name = block.name
self.title = block.get_markup_words( "title" )
self.order = block.get_markup_words( "sections" )
else:
self.name = "Other"
self.title = "Miscellaneous".split()
self.order = []
################################################################
##
## DOC SECTION CLASS
##
class DocSection( object ):
def __init__( self, name = "Other" ):
self.name = name
self.blocks = {}
self.block_names = [] # ordered block names in section
self.defs = []
self.abstract = ""
self.description = ""
self.order = []
self.title = "ERROR"
self.chapter = None
def add_def( self, block ):
self.defs.append( block )
def add_block( self, block ):
self.block_names.append( block.name )
self.blocks[block.name] = block
def process( self ):
# look up one block that contains a valid section description
for block in self.defs:
title = block.get_markup_text( "title" )
if title:
self.title = title
self.abstract = block.get_markup_words( "abstract" )
self.description = block.get_markup_items( "description" )
self.order = block.get_markup_words_all( "order" )
return
def reorder( self ):
self.block_names = utils.sort_order_list( self.block_names,
self.order )
################################################################
##
## CONTENT PROCESSOR CLASS
##
class ContentProcessor( object ):
def __init__( self ):
"""Initialize a block content processor."""
self.reset()
self.sections = {} # dictionary of documentation sections
self.section = None # current documentation section
self.chapters = [] # list of chapters
self.headers = {} # dictionary of header macros
def set_section( self, section_name ):
"""Set current section during parsing."""
if not section_name in self.sections:
section = DocSection( section_name )
self.sections[section_name] = section
self.section = section
else:
self.section = self.sections[section_name]
def add_chapter( self, block ):
chapter = DocChapter( block )
self.chapters.append( chapter )
def reset( self ):
"""Reset the content processor for a new block."""
self.markups = []
self.markup = None
self.markup_lines = []
def add_markup( self ):
"""Add a new markup section."""
if self.markup and self.markup_lines:
# get rid of last line of markup if it's empty
marks = self.markup_lines
if len( marks ) > 0 and not marks[-1].strip():
self.markup_lines = marks[:-1]
m = DocMarkup( self.markup, self.markup_lines )
self.markups.append( m )
self.markup = None
self.markup_lines = []
def process_content( self, content ):
"""Process a block content and return a list of DocMarkup objects
corresponding to it."""
first = 1
margin = -1
in_code = 0
for line in content:
if in_code:
m = re_code_end.match( line )
if m and len( m.group( 1 ) ) <= margin:
in_code = 0
margin = -1
else:
m = re_code_start.match( line )
if m:
in_code = 1
margin = len( m.group( 1 ) )
found = None
if not in_code:
for t in sources.re_markup_tags:
m = t.match( line )
if m:
found = m.group( 1 ).lower()
prefix = len( m.group( 0 ) )
# remove markup from line
line = " " * prefix + line[prefix:]
break
# is it the start of a new markup section ?
if found:
first = 0
self.add_markup() # add current markup content
self.markup = found
if len( line.strip() ) > 0:
self.markup_lines.append( line )
elif first == 0:
self.markup_lines.append( line )
self.add_markup()
return self.markups
def parse_sources( self, source_processor ):
blocks = source_processor.blocks
count = len( blocks )
for n in range( count ):
source = blocks[n]
if source.content:
# this is a documentation comment, we need to catch
# all following normal blocks in the "follow" list
#
follow = []
m = n + 1
while m < count and not blocks[m].content:
follow.append( blocks[m] )
m = m + 1
DocBlock( source, follow, self )
def finish( self ):
# process all sections to extract their abstract, description
# and ordered list of items
#
for sec in self.sections.values():
sec.process()
# process chapters to check that all sections are correctly
# listed there
for chap in self.chapters:
for sec in chap.order:
if sec in self.sections:
section = self.sections[sec]
section.chapter = chap
section.reorder()
chap.sections.append( section )
else:
log.warn( "Chapter '%s' in %s"
" lists unknown section '%s'",
chap.name, chap.block.location(), sec )
# check that all sections are in a chapter
#
others = []
for sec in self.sections.values():
if not sec.chapter:
sec.reorder()
others.append( sec )
# create a new special chapter for all remaining sections
# when necessary
#
if others:
chap = DocChapter( None )
# Assign the chapter to all sections
for section in others:
section.chapter = chap
chap.sections = others
self.chapters.append( chap )
################################################################
##
## DOC BLOCK CLASS
##
class DocBlock( object ):
def __init__( self, source, follow, processor ):
processor.reset()
self.source = source
self.code = []
self.type = "ERRTYPE"
self.name = "ERRNAME"
self.section = processor.section
self.markups = processor.process_content( source.content )
# compute block type from first markup tag
try:
self.type = self.markups[0].tag
except Exception:
pass
# compute block name from first markup paragraph
try:
markup = self.markups[0]
para = markup.fields[0].items[0]
name = para.words[0]
m = re_identifier.match( name )
if m:
name = m.group( 1 )
self.name = name
except Exception:
pass
if self.type == "section":
# detect new section starts
processor.set_section( self.name )
processor.section.add_def( self )
elif self.type == "chapter":
# detect new chapter
processor.add_chapter( self )
else:
processor.section.add_block( self )
# now, compute the source lines relevant to this documentation
# block. We keep normal comments in for obvious reasons (??)
source = []
for b in follow:
if b.format:
break
for l in b.lines:
# collect header macro definitions
m = re_header_macro.match( l )
if m:
processor.headers[m.group( 2 )] = m.group( 1 )
# we use "/* */" as a separator
if sources.re_source_sep.match( l ):
break
source.append( l )
# now strip the leading and trailing empty lines from the sources
start = 0
end = len( source ) - 1
while start < end and not source[start].strip():
start = start + 1
while start < end and not source[end].strip():
end = end - 1
if start == end and not source[start].strip():
self.code = []
else:
self.code = source[start:end + 1]
def location( self ):
return self.source.location()
def get_markup( self, tag_name ):
"""Return the DocMarkup corresponding to a given tag in a block."""
for m in self.markups:
if m.tag == tag_name.lower():
return m
return None
def get_markup_words( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items[0].words
except Exception:
return []
def get_markup_words_all( self, tag_name ):
try:
m = self.get_markup( tag_name )
words = []
for item in m.fields[0].items:
# We honour empty lines in an `<Order>' section element by
# adding the sentinel `/empty/'. The formatter should then
# convert it to an appropriate representation in the
# `section_enter' function.
words += item.words
words.append( "/empty/" )
return words
except Exception:
return []
def get_markup_text( self, tag_name ):
result = self.get_markup_words( tag_name )
return " ".join( result )
def get_markup_items( self, tag_name ):
try:
m = self.get_markup( tag_name )
return m.fields[0].items
except Exception:
return None
# eof

@@ -1,163 +0,0 @@
#!/usr/bin/env python
#
# docwriter.py
#
# Convert source code markup to Markdown documentation.
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
#
# This program is a re-write of the original DocMaker tool used to generate
# the API Reference of the FreeType font rendering engine by converting
# in-source comments into structured HTML.
#
# This new version is capable of outputting XML/Markdown data as well as
# accepting more liberal formatting options. It also uses regular expression
# matching and substitution to speed up operation significantly.
#
"""This libaray is used to Convert source code markup to Markdown
documentation."""
from __future__ import print_function
import argparse
import logging
import sys
import check
import content
import sources
import tomarkdown
import utils
logger = logging.getLogger()
log_level = logging.INFO
def setup_logger(level=logging.INFO):
"""Set up the logger."""
logger.propagate = False
stream = logging.StreamHandler()
log_format = logging.Formatter("%(levelname)-7s - %(message)s")
stream.setFormatter(log_format)
logger.addHandler(stream)
logger.setLevel(level)
def main():
"""Main program loop."""
global output_dir
global log_level
parser = argparse.ArgumentParser(description="DocWriter Usage information")
parser.add_argument(
"files",
nargs="+",
help="list of source files to parse, wildcards are allowed",
)
parser.add_argument(
"-t",
"--title",
metavar="T",
help="set project title, as in '-t \"My Project\"'",
)
parser.add_argument(
"-o",
"--output",
metavar="DIR",
required=True,
help="set output directory, as in '-o mydir'",
)
parser.add_argument(
"-p",
"--prefix",
metavar="PRE",
help="set documentation prefix, as in '-p ft2'",
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-q",
"--quiet",
help="run quietly, show only errors",
action="store_true",
)
group.add_argument(
"-v", "--verbose", help="increase output verbosity", action="store_true"
)
args = parser.parse_args()
# process options
project_title = "Project"
project_prefix = None
output_dir = None
if args.title:
project_title = args.title
if args.output:
utils.output_dir = args.output
if args.prefix:
project_prefix = args.prefix
if args.quiet:
log_level = logging.ERROR
if args.verbose:
log_level = logging.DEBUG
# set up the logger
setup_logger(level=log_level)
log = logging.getLogger("docwriter")
# check all packages
status = check.check()
if status != 0:
sys.exit(3)
utils.check_output()
# create context and processor
source_processor = sources.SourceProcessor()
content_processor = content.ContentProcessor()
# retrieve the list of files to process
file_list = utils.make_file_list(args.files)
for filename in file_list:
source_processor.parse_file(filename)
content_processor.parse_sources(source_processor)
# process sections
content_processor.finish()
# clean up directory
log.info("Cleaning output directory")
utils.clean_markdown_dir()
formatter = tomarkdown.MdFormatter(
content_processor, project_title, project_prefix
)
# build the docs
utils.build_message()
formatter.toc_dump()
formatter.index_dump()
formatter.section_dump_all()
# if called from the command line
if __name__ == "__main__":
main()
# eof

@@ -1,233 +0,0 @@
#
# formatter.py
#
# Convert parsed content blocks to a structured document (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Base formatter class.
The purpose of this module is to convert a content processor's data into
specific documents (i.e., table of contents, global index, and individual
API reference indices).
You need to sub-class it to output anything sensible. For example, the
module `tomarkdown` contains the definition of the `MdFormatter' sub-class
to output Markdown.
"""
import logging
import utils
log = logging.getLogger( __name__ )
################################################################
##
## FORMATTER CLASS
##
class Formatter( object ):
def __init__( self, processor ):
self.processor = processor
self.identifiers = {}
self.chapters = processor.chapters
self.sections = processor.sections.values()
self.block_index = []
# store all blocks in a dictionary
self.blocks = []
for section in self.sections:
for block in section.blocks.values():
self.add_identifier( block.name, block )
# add enumeration values to the index, since this is useful
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
self.add_identifier( field.name, block )
self.block_index = self.identifiers.keys()
self.block_index = sorted( self.block_index, key = str.lower )
# also add section names to dictionary (without making them appear
# in the index)
for section in self.sections:
self.add_identifier( section.name, section )
def add_identifier( self, name, block ):
if name in self.identifiers:
# duplicate name!
log.warn( "Duplicate definition for"
" '%s' in %s, "
"previous definition in %s",
name,
block.location(),
self.identifiers[name].location() )
else:
self.identifiers[name] = block
#
# formatting the table of contents
#
def toc_enter( self ):
pass
def toc_chapter_enter( self, chapter ):
pass
def toc_section_enter( self, section ):
pass
def toc_section_exit( self, section ):
pass
def toc_chapter_exit( self, chapter ):
pass
def toc_index( self, index_filename ):
pass
def toc_exit( self ):
pass
def toc_dump( self, toc_filename = None, index_filename = None ):
output = None
if toc_filename:
output = utils.open_output( toc_filename )
log.debug( "Building table of contents in %s.", toc_filename )
self.toc_enter()
for chap in self.processor.chapters:
self.toc_chapter_enter( chap )
for section in chap.sections:
self.toc_section_enter( section )
self.toc_section_exit( section )
self.toc_chapter_exit( chap )
self.toc_index( index_filename )
self.toc_exit()
if output:
utils.close_output( output )
#
# formatting the index
#
def index_enter( self ):
pass
def index_name_enter( self, name ):
pass
def index_name_exit( self, name ):
pass
def index_exit( self ):
pass
def index_dump( self, index_filename = None ):
output = None
if index_filename:
output = utils.open_output( index_filename )
log.debug("Building index in %s.", index_filename )
self.index_enter()
for name in self.block_index:
self.index_name_enter( name )
self.index_name_exit( name )
self.index_exit()
if output:
utils.close_output( output )
#
# formatting a section
#
def section_enter( self, section ):
pass
def block_enter( self, block ):
pass
def markup_enter( self, markup, block ):
pass
def field_enter( self, field, markup = None, block = None ):
pass
def field_exit( self, field, markup = None, block = None ):
pass
def markup_exit( self, markup, block ):
pass
def block_exit( self, block ):
pass
def section_exit( self, section ):
pass
def section_dump( self, section, section_filename = None ):
output = None
log.debug( "Building page %s.", section_filename )
if section_filename:
output = utils.open_output( section_filename )
self.section_enter( section )
for name in section.block_names:
skip_entry = 0
try:
block = self.identifiers[name]
# `block_names' can contain field names also,
# which we filter out
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
if field.name == name:
skip_entry = 1
except Exception:
skip_entry = 1 # this happens e.g. for `/empty/' entries
if skip_entry:
continue
self.block_enter( block )
for markup in block.markups[1:]: # always ignore first markup!
self.markup_enter( markup, block )
for field in markup.fields:
self.field_enter( field, markup, block )
self.field_exit( field, markup, block )
self.markup_exit( markup, block )
self.block_exit( block )
self.section_exit( section )
if output:
utils.close_output( output )
def section_dump_all( self ):
log.debug( "Building markdown pages for sections." )
for section in self.sections:
self.section_dump( section )
# eof
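The base class above only walks the data; as its docstring notes, a subclass supplies the actual output. As a rough sketch (assuming the docwriter modules and the bundled test sources are available, as in `docwriter.py`), a minimal subclass that merely lists chapters and sections could look like the following; `MdFormatter` in `tomarkdown.py` fills in the same hooks to emit Markdown:

```python
# Minimal sketch of a Formatter subclass; the pipeline mirrors main() in
# docwriter.py, but prints a plain listing instead of writing files.
import content
import sources
import utils
from formatter import Formatter

class ListingFormatter( Formatter ):
    def toc_chapter_enter( self, chapter ):
        print( "chapter:", " ".join( chapter.title ) )
    def toc_section_enter( self, section ):
        print( "  section:", section.name )

source_processor  = sources.SourceProcessor()
content_processor = content.ContentProcessor()
for filename in utils.make_file_list( ["./tests/assets/*.c"] ):
    source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
content_processor.finish()

ListingFormatter( content_processor ).toc_dump()
```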

@@ -1,5 +0,0 @@
mistune==0.8.3
mkdocs<=0.17.5,>=0.17.3
mkdocs-material<=2.9.1,>=2.9.0
pymdown-extensions<=4.11,>=4.10.2
PyYAML>=3.10

@@ -1,297 +0,0 @@
#
# siteconfig.py
#
# Build site configuration and write to mkdocs.yml.
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Module to generate Mkdocs config.
This module contains routines to generate the configuration file
`mkdocs.yml` required by Mkdocs to build static HTML documentation
from markdown.
More information can be found at:
<https://www.mkdocs.org/user-guide/configuration/>
"""
from __future__ import print_function
import datetime
import logging
import yaml
import utils
log = logging.getLogger( __name__ )
# Config file name
config_filename = "mkdocs.yml"
# Docs directory and site directory
docs_dir = "markdown"
site_dir = "site"
# Basic site configuration default values
site_name = "FreeType API Reference"
site_description = "API Reference documentation for FreeType"
site_author = "FreeType Contributors"
use_dir_url = False
# Theme configuration default values
theme_conf = {}
theme_conf['name'] = "material"
theme_conf['logo'] = "images/favico.ico"
theme_conf['language'] = "en"
theme_conf['favicon'] = "images/favico.ico"
theme_conf['palette'] = {}
theme_conf['font'] = {}
# Theme palette
theme_conf['palette']['primary'] = "green"
theme_conf['palette']['accent'] = "green"
# Theme fonts
theme_conf['font']['text'] = "Noto Serif"
theme_conf['font']['code'] = "Roboto Mono"
# Markdown extensions
md_extensions = '''\
markdown_extensions:
- toc:
permalink: true
- pymdownx.superfences:
disable_indented_code_blocks: true
- codehilite:
guess_lang: false
- pymdownx.betterem:
smart_enable: all
- pymdownx.magiclink
- pymdownx.smartsymbols
'''
# Extra scripts
extra_scripts = '''\
extra_css:
- 'stylesheets/extra.css'
extra_javascript:
- 'javascripts/extra.js'
'''
# Other config
year = datetime.datetime.utcnow().year
var_dict = { 'year': year }
other_config = '''\
copyright: Copyright {year} \
<a href = "https://www.freetype.org/license.html">\
The FreeType Project</a>.
'''
other_config = other_config.format( **var_dict )
def add_config( yml_string, config_name ):
config = None
try:
config = yaml.safe_load( yml_string )
except yaml.scanner.ScannerError:
log.warn( "Malformed '%s' config, ignoring.", config_name )
return config
def build_extras():
# Parse all configurations and save as Python objects
global md_extensions, yml_extra, yml_other
md_extensions = add_config( md_extensions, "markdown_extensions" )
yml_extra = add_config( extra_scripts, "extra scripts" )
yml_other = add_config( other_config, "other" )
class Chapter( object ):
def __init__( self, title ):
self.title = title
self.pages = []
def add_page( self, section_title, filename ):
"""Add a page to the chapter."""
cur_page = {}
cur_page[section_title] = filename
self.pages.append( cur_page )
def get_pages( self ):
"""Get dict of pages in the chapter."""
conf = {}
conf[self.title] = self.pages
return conf
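For illustration, a sketch of the page mapping a `Chapter` collects (assuming the module is importable as `siteconfig`; the chapter and page names here are made up):

```python
# Sketch: get_pages() returns the nested structure that ends up under the
# `pages:' key of mkdocs.yml.
import siteconfig

chap = siteconfig.Chapter( "Core API" )                    # hypothetical title
chap.add_page( "Version Information", "ft2-version.md" )   # hypothetical pages
chap.add_page( "Basic Data Types", "ft2-basic_types.md" )
print( chap.get_pages() )
# {'Core API': [{'Version Information': 'ft2-version.md'},
#               {'Basic Data Types': 'ft2-basic_types.md'}]}
```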
class SiteConfig( object ):
"""Site configuration generator class.
This class is used to generate site configuration based on supplied
and default values.
"""
def __init__( self ):
self.site_config = {}
self.pages = []
self.chapter = None
self.sections = []
self.md_extensions = []
# Set configurations
self.site_name = site_name
self.site_desc = site_description
self.site_author = site_author
self.docs_dir = docs_dir
self.site_dir = site_dir
self.theme_conf = theme_conf
self.use_dir_url = use_dir_url
def set_site_info( self, name, description = None, author = None ):
"""Set the basic site information."""
if name:
self.site_name = name
else:
# Site name is required, throw warning and revert to default
log.warn( "Site name not specified, reverting to default." )
if description:
self.site_desc = description
if author:
self.site_author = author
def add_single_page( self, section_title, filename ):
"""Add a single page to the list of pages."""
cur_page = {}
cur_page[section_title] = filename
self.pages.append( cur_page )
def add_chapter_page( self, section_title, filename ):
"""Add a page to a chapter.
Chapter must be set first using `start_chapter()` If not set,
`add_single_page()` will be called internally.
"""
if self.chapter:
self.chapter.add_page( section_title, filename )
else:
log.warn( "Section '%s' added without starting chapter.",
section_title )
self.add_single_page( section_title, filename )
def start_chapter( self, chap ):
"""Start a chapter."""
if self.chapter:
self.end_chapter()
self.chapter = Chapter( chap )
def end_chapter( self ):
"""Explicitly end a chapter."""
if self.chapter:
chap_pages = self.chapter.get_pages()
self.pages.append( chap_pages )
self.chapter = None
def build_site_config( self ):
"""Add basic Project information to config."""
self.site_config['site_name'] = self.site_name
if site_description:
self.site_config['site_description'] = self.site_desc
if site_author:
self.site_config['site_author'] = self.site_author
if docs_dir:
self.site_config['docs_dir'] = self.docs_dir
if site_dir:
self.site_config['site_dir'] = self.site_dir
if use_dir_url is not None:
self.site_config['use_directory_urls'] = self.use_dir_url
def build_theme_config( self ):
# internal: build theme config
if theme_conf != {}:
self.site_config['theme'] = self.theme_conf
def build_pages( self ):
# internal: build pages config
if self.pages != []:
self.site_config['pages'] = self.pages
def populate_config( self, data ):
# internal: Add a given not None object to site_config
if data:
self.site_config.update( data )
def write_config( self, name ):
"""Write all values in site_config to output stream."""
if self.site_config != {}:
print( "# " + name )
print( yaml.dump( self.site_config, default_flow_style=False ) )
self.site_config.clear()
def write_config_order( self, name, order ):
"""Write all values in site_config to output stream in order."""
if self.site_config != {}:
print( "# " + name )
for key in order:
if key in self.site_config:
temp_config = {}
temp_config[key] = self.site_config[key]
print( yaml.dump( temp_config, default_flow_style=False ).rstrip() )
self.site_config.pop( key, None )
if self.site_config != {}:
# Print remaining values
print( yaml.dump( self.site_config, default_flow_style=False ).rstrip() )
# print an empty line
print()
self.site_config.clear()
def build_config( self ):
"""Build the YAML configuration."""
# End chapter if started
self.end_chapter()
# Open yml file
output = utils.open_output( config_filename, config = True )
# Build basic site info
self.build_site_config()
order = ['site_name', 'site_author', 'docs_dir', 'site_dir']
self.write_config_order( "Project information", order )
# Build theme configuration
self.build_theme_config()
self.write_config( "Configuration" )
# Build pages
self.build_pages()
self.write_config( "Pages" )
# Build extra scripts
build_extras()
# Add extra CSS and Javascript
self.populate_config( yml_extra )
self.write_config( "Customization" )
# Add Markdown extensions
self.populate_config( md_extensions )
self.write_config( "Extensions" )
# Add other options
self.populate_config( yml_other )
self.write_config( "Other Options" )
# Close the file
utils.close_output( output )
# eof

@@ -1,410 +0,0 @@
#
# sources.py
#
# Convert source code comments to multi-line blocks (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Utility for parsing source files.
This library file contains definitions of classes needed to decompose C
source code files into a series of multi-line 'blocks'. There are two
kinds of blocks.
* Normal blocks, which contain source code or ordinary comments.
* Documentation blocks, which have restricted formatting, and whose text
always starts with a documentation markup tag like `<Function>',
`<Type>', etc.
The routines to process the content of documentation blocks are contained
in file `content.py'; the classes and methods found here only deal with
text parsing and basic documentation block extraction.
"""
from __future__ import print_function
import fileinput
import logging
import re
log = logging.getLogger( __name__ )
################################################################
##
## SOURCE BLOCK FORMAT CLASS
##
## A simple class containing compiled regular expressions to detect
## potential documentation format block comments within C source code.
##
## The `column' pattern must contain a group to `unbox' the content of
## documentation comment blocks.
##
## Later on, paragraphs are converted to long lines, which simplifies the
## regular expressions that act upon the text.
##
class SourceBlockFormat( object ):
def __init__( self, iden, start, column, end ):
"""Create a block pattern, used to recognize special documentation
blocks."""
self.id = iden
self.start = re.compile( start, re.VERBOSE )
self.column = re.compile( column, re.VERBOSE )
self.end = re.compile( end, re.VERBOSE )
#
# Format 1 documentation comment blocks.
#
# /************************************/ (at least 2 asterisks)
# /* */
# /* */
# /* */
# /************************************/ (at least 2 asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,}/ # followed by '/' and at least two asterisks then '/'
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
/\*{1} # followed by '/' and precisely one asterisk
([^*].*) # followed by anything (group 1)
\*{1}/ # followed by one asterisk and a '/'
\s*$ # probably followed by whitespace
'''
re_source_block_format1 = SourceBlockFormat( 1, start, column, start )
#
# Format 2 documentation comment blocks.
#
# /************************************ (at least 2 asterisks)
# *
# * (1 asterisk)
# *
# */ (1 or more asterisks)
#
start = r'''
\s* # any number of whitespace
/\*{2,} # followed by '/' and at least two asterisks
\s*$ # probably followed by whitespace
'''
column = r'''
\s* # any number of whitespace
\*{1}(?![*/]) # followed by precisely one asterisk not followed by `/'
(.*) # then anything (group1)
'''
end = r'''
\s* # any number of whitespace
\*+/ # followed by at least one asterisk, then '/'
'''
re_source_block_format2 = SourceBlockFormat( 2, start, column, end )
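For illustration, a sketch of how a format 2 comment block is recognized line by line (assuming the module is importable as `sources`):

```python
# Sketch: `start' opens the block, `column' unboxes the payload of each
# middle line (group 1), and `end' closes it.
import sources

fmt   = sources.re_source_block_format2
block = [ "  /**************************",
          "   *",
          "   * @function:",
          "   *   FT_Foo_Bar",
          "   */" ]

print( bool( fmt.start.match( block[0] ) ) )       # True
print( fmt.column.match( block[2] ).group( 1 ) )   # the unboxed ` @function:'
print( bool( fmt.end.match( block[-1] ) ) )        # True
```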
#
# The list of supported documentation block formats. We could add new ones
# quite easily.
#
re_source_block_formats = [re_source_block_format1, re_source_block_format2]
#
# The following regular expressions correspond to markup tags within the
# documentation comment blocks. They are equivalent despite their different
# syntax.
#
# A markup tag consists of letters or character `-', to be found in group 1.
#
# Notice that a markup tag _must_ begin a new paragraph.
#
re_markup_tag1 = re.compile( r'''\s*<((?:\w|-)*)>''' ) # <xxxx> format
re_markup_tag2 = re.compile( r'''\s*@((?:\w|-)*):''' ) # @xxxx: format
#
# The list of supported markup tags. We could add new ones quite easily.
#
re_markup_tags = [re_markup_tag1, re_markup_tag2]
#
# A regular expression to detect a cross reference, after markup tags have
# been stripped off.
#
# Two syntax forms are supported:
#
# @<name>
# @<name>[<id>]
#
# where both `<name>' and `<id>' consist of alphanumeric characters, `_',
# and `-'. Use `<id>' if there are multiple, valid `<name>' entries.
#
# Example: @foo[bar]
#
re_crossref = re.compile( r"""
@
(?P<name>(?:\w|-)+
(?:\[(?:\w|-)+\])?)
(?P<rest>.*)
""", re.VERBOSE )
#
# Two regular expressions to detect italic and bold markup, respectively.
# Group 1 is the markup, group 2 the rest of the line.
#
# Note that the markup is limited to words consisting of letters, digits,
# the characters `_' and `-', or an apostrophe (but not as the first
# character).
#
re_italic = re.compile( r"_((?:\w|-)(?:\w|'|-)*)_(.*)" ) # _italic_
re_bold = re.compile( r"\*((?:\w|-)(?:\w|'|-)*)\*(.*)" ) # *bold*
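For illustration, what the italic and bold patterns capture (assuming the module is importable as `sources`):

```python
# Sketch: group 1 is the emphasized word, group 2 the remainder of the line.
import sources

m = sources.re_italic.match( "_slight_ hinting mode" )
print( m.group( 1 ), "|", m.group( 2 ) )   # slight |  hinting mode

m = sources.re_bold.match( "*bold* markup works the same way" )
print( m.group( 1 ), "|", m.group( 2 ) )   # bold |  markup works the same way
```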
#
# This regular expression code to identify a URL has been taken from
#
# https://mail.python.org/pipermail/tutor/2002-September/017228.html
#
# (with slight modifications).
#
urls = r'(?:https?|telnet|gopher|file|wais|ftp)'
ltrs = r'\w'
gunk = r'/#~:.?+=&%@!\-'
punc = r'.:?\-'
any_sym = "%(ltrs)s%(gunk)s%(punc)s" % { 'ltrs' : ltrs,
'gunk' : gunk,
'punc' : punc }
url = r"""
(
\b(?![<]) # start at word boundary ignore if < found
%(urls)s : # need resource and a colon
[%(any)s] +? # followed by one or more of any valid
# character, but be conservative and
# take only what you need to...
(?= # [look-ahead non-consumptive assertion]
[%(punc)s]* # either 0 or more punctuation
(?: # [non-grouping parentheses]
[^%(any)s] | $ # followed by a non-url char
# or end of the string
)
)
)
""" % {'urls' : urls,
'any' : any_sym,
'punc' : punc }
re_url = re.compile( url, re.VERBOSE | re.MULTILINE )
#
# A regular expression that stops collection of comments for the current
# block.
#
re_source_sep = re.compile( r'\s*/\*\s*\*/' ) # /* */
#
# A regular expression to find possible C identifiers while outputting
# source code verbatim, covering things like `*foo' or `(bar'. Group 1 is
# the prefix, group 2 the identifier -- since we scan lines from left to
# right, sequentially splitting the source code into prefix and identifier
# is fully sufficient for our purposes.
#
re_source_crossref = re.compile( r'(\W*)(\w*)' )
#
# A regular expression that matches a list of reserved C source keywords.
#
re_source_keywords = re.compile( '''\\b ( typedef |
struct |
enum |
union |
const |
char |
int |
short |
long |
void |
signed |
unsigned |
include |
define |
undef |
if |
ifdef |
ifndef |
else |
endif ) \\b''', re.VERBOSE )
################################################################
##
## SOURCE BLOCK CLASS
##
## There are two important fields in a `SourceBlock' object.
##
## self.lines
## A list of text lines for the corresponding block.
##
## self.content
## For documentation comment blocks only, this is the block content
## that has been `unboxed' from its decoration. This is `None' for all
## other blocks (i.e., sources or ordinary comments with no starting
## markup tag)
##
class SourceBlock( object ):
def __init__( self, processor, filename, lineno, lines ):
self.processor = processor
self.filename = filename
self.lineno = lineno
self.lines = lines[:]
self.format = processor.format
self.content = []
if self.format == None:
return
# extract comment lines
lines = []
for line0 in self.lines:
m = self.format.column.match( line0 )
if m:
lines.append( m.group( 1 ) )
# now, look for a markup tag
for l in lines:
l = l.strip()
if len( l ) > 0:
for tag in re_markup_tags:
if tag.match( l ):
self.content = lines
return
def location( self ):
return "(" + self.filename + ":" + repr( self.lineno ) + ")"
# debugging only -- not used in normal operations
def dump( self ):
if self.content:
print( "{{{content start---" )
for l in self.content:
print( l )
print( "---content end}}}" )
return
for line in self.lines:
print( line )
################################################################
##
## SOURCE PROCESSOR CLASS
##
## The `SourceProcessor' is in charge of reading a C source file and
## decomposing it into a series of different `SourceBlock' objects.
##
## A SourceBlock object consists of the following data.
##
## - A documentation comment block using one of the layouts above. Its
## exact format will be discussed later.
##
## - Normal source lines, including comments.
##
##
class SourceProcessor( object ):
def __init__( self ):
"""Initialize a source processor."""
self.blocks = []
self.filename = None
self.format = None
self.lines = []
def reset( self ):
"""Reset a block processor and clean up all its blocks."""
self.blocks = []
self.format = None
def parse_file( self, filename ):
"""Parse a C source file and add its blocks to the processor's
list."""
self.reset()
self.filename = filename
log.debug( "Parsing file %s.", filename )
fileinput.close()
self.format = None
self.lineno = 0
self.lines = []
for line in fileinput.input( filename ):
# strip trailing newlines, important on Windows machines!
if line[-1] == '\012':
line = line[0:-1]
if self.format == None:
self.process_normal_line( line )
else:
if self.format.end.match( line ):
# A normal block end. Add it to `lines' and create a
# new block
self.lines.append( line )
self.add_block_lines()
elif self.format.column.match( line ):
# A normal column line. Add it to `lines'.
self.lines.append( line )
else:
# An unexpected block end. Create a new block, but
# don't process the line.
self.add_block_lines()
# we need to process the line again
self.process_normal_line( line )
# record the last lines
self.add_block_lines()
def process_normal_line( self, line ):
"""Process a normal line and check whether it is the start of a new
block."""
for f in re_source_block_formats:
if f.start.match( line ):
self.add_block_lines()
self.format = f
self.lineno = fileinput.filelineno()
self.lines.append( line )
def add_block_lines( self ):
"""Add the current accumulated lines and create a new block."""
if self.lines != []:
block = SourceBlock( self,
self.filename,
self.lineno,
self.lines )
self.blocks.append( block )
self.format = None
self.lines = []
# debugging only, not used in normal operations
def dump( self ):
"""Print all blocks in a processor."""
for b in self.blocks:
b.dump()
# eof
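As a rough end-to-end sketch (assuming the docwriter source tree with its bundled test asset, as used by the tests below), `SourceProcessor` can be exercised on its own:

```python
# Sketch: decompose the bundled test source into blocks and report how many
# of them are documentation comment blocks (i.e., have unboxed content).
import sources
import utils

proc = sources.SourceProcessor()
for filename in utils.make_file_list( ["./tests/assets/*.c"] ):
    proc.parse_file( filename )

doc_blocks = [b for b in proc.blocks if b.content]
print( len( proc.blocks ), "blocks,", len( doc_blocks ), "documentation blocks" )
for b in doc_blocks:
    print( " ", b.location() )
```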

@@ -1,141 +0,0 @@
/****************************************************************************
*
* ftbbox.h
*
* Test header file for docwriter.
*
* Copyright 2018 by
* David Turner, Robert Wilhelm, and Werner Lemberg.
*
* This file is part of the FreeType project, and may only be used,
* modified, and distributed under the terms of the FreeType project
* license, LICENSE.TXT. By continuing to use, modify, or distribute
* this file you indicate that you have read the license and
* understand and accept it fully.
*
*/
/**************************************************************************
*
* This component has a _single_ role: to test docwriter
*
* This file is ONLY used to test docwriter, and should not be taken
* seriously.
*
*/
#ifndef FTBBOX_H_
#define FTBBOX_H_
#include <ft2build.h>
#include FT_FREETYPE_H
#ifdef FREETYPE_H
#error "freetype.h of FreeType 1 has been loaded!"
#error "Please fix the directory search order for header files"
#error "so that freetype.h of FreeType 2 is found first."
#endif
FT_BEGIN_HEADER
/**************************************************************************
*
* @section:
* outline_processing
*
* @title:
* Outline Processing
*
* @abstract:
* Functions to create, transform, and render vectorial glyph images.
*
* @description:
* This section contains routines used to create and destroy scalable
* glyph images known as 'outlines'. These can also be measured,
* transformed, and converted into bitmaps and pixmaps.
*
/**************************************************************************
*
* @function:
* FT_Foo_Bar
*
* @description:
* Compute the exact bar for the given foo.
*
* @input:
* foo ::
* A pointer to the source foo.
*
* @values:
* FT_FOO ::
* The foo.
*
* FT_BAR ::
* The bar.
*
* @output:
* bar ::
* The foo's exact bar.
*
* @return:
* FreeType error code. 0~means success.
*
* @note:
* If the foo is tricky and the bar has been loaded with
* @FT_FOO, the resulting bar is meaningless. To get
* reasonable values for the bar it is necessary to load the foo
* at a large baz value (so that the hinting instructions can
* properly shift and scale the subfoos), then extracting the bar,
* which can be eventually converted back to baz units.
*/
FT_EXPORT( FT_Error )
FT_Outline_Get_BBox( FT_Outline* outline,
FT_BBox *abbox );
/* */
FT_END_HEADER
#endif /* FTBBOX_H_ */
/****************************************************************************
*
* @chapter:
* support_api
*
* @title:
* Support API
*
* @sections:
* outline_processing
*
*/
/*************************************************************************
*
* @macro:
* FT_BBOX_H
*
* @description:
* A macro used in #include statements to name the file containing the
* API of the optional exact bounding box computation routines.
*
*/
#define FT_BBOX_H <freetype/ftbbox.h>
/* */
/* END */
/* Local Variables: */
/* coding: utf-8 */
/* End: */

@@ -1 +0,0 @@
*.yml

@@ -1 +0,0 @@
*.md

@@ -1,55 +0,0 @@
#
# test_integration.py
#
# Integration test for docwriter.
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Docwriter Integration tests.
This is a simple integration test that builds Docwriter
documentation against a test file.
From the root of the Docwriter git repo, use:
python -m pytest tests/test_integration.py
"""
import logging
import subprocess
log = logging.getLogger('docwriter')
def test_integration( capfd ):
log.propagate = False
stream = logging.StreamHandler()
formatter = logging.Formatter(
"\033[1m\033[1;32m *** %(message)s *** \033[0m")
stream.setFormatter(formatter)
log.addHandler(stream)
log.setLevel(logging.DEBUG)
base_cmd = ['python', 'docwriter.py', '--prefix=test',
'--title=Docwriter Test', '--output=./tests/output',
'--verbose' ]
folders = ['./tests/assets/*.c']
log.debug("Building markdown docs.")
command = base_cmd + folders
# run the command
subprocess.check_call( command )
# capture output to check for warnings
captured = capfd.readouterr()
# print the logs on failure
print( captured.err )
# fail if there are warnings
assert not "WARNING" in captured.err
# eof

@@ -1,57 +0,0 @@
#
# test_parse.py
#
# Tests for docwriter parsing (sources.py and content.py).
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Docwriter parse tests.
The tests in this module use the `SourceProcessor` and
`ContentProcessor` classes to test file and content parsing.
"""
import content
import sources
import utils
# create context and processor
source_processor = sources.SourceProcessor()
content_processor = content.ContentProcessor()
def test_parse_file():
# retrieve the list of files to process
file_list = utils.make_file_list( ['./tests/assets/*.c'] )
for filename in file_list:
source_processor.parse_file( filename )
# get blocks
blocks = source_processor.blocks
count = len( blocks )
# there must be 12 blocks in file
assert count == 12
def test_parse_source():
# retrieve the list of files to process
file_list = utils.make_file_list( ['./tests/assets/*.c'] )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
# get headers
headers = content_processor.headers
# expected values
expected_key = 'freetype/ftbbox.h'
expected_val = 'FT_BBOX_H'
assert headers[expected_key] == expected_val
# eof

@@ -1,72 +0,0 @@
#
# test_siteconfig.py
#
# Tests for site config generation (siteconfig.py).
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Docwriter site config tests.
This module tests the validity of the `yml` configuration
generated by `siteconfig.py`.
"""
import os
import yaml
import siteconfig
import utils
config = siteconfig.SiteConfig()
# Config vars
site_name = "Foo Bar Test"
site_description = "Test documentation for Foo Bar."
site_author = "Pytest"
toc_filename = "foo-toc.md"
index_filename = "foo-index.md"
# Add chapters and pages
c1_sec = ["c1s1", "c1s2", "c1s3"]
c2_sec = ["c2s1", "c2s2"]
pages = {}
pages['chap1'] = c1_sec
pages['chap2'] = c2_sec
def test_config( tmpdir, caplog ):
utils.output_dir = str( tmpdir )
# Set site config
config.set_site_info( site_name, site_description,
site_author )
# Add toc and index
config.add_single_page( "TOC", toc_filename )
config.add_single_page( "Index", index_filename )
# Add chapters and pages
for chap, parts in pages.items():
config.start_chapter( chap )
for sec in parts:
config.add_chapter_page( sec, sec + ".md" )
config.end_chapter()
# Done, Build config
config.build_config()
# Open file and parse yml
filepath = os.path.join( str( tmpdir ), 'mkdocs.yml' )
result = open( filepath, 'rb' ).read()
data = yaml.safe_load(result)
# Assertions
assert data is not None
for record in caplog.records:
# Strict build - there should be no warnings
assert record.levelname != 'WARNING'

@@ -1,90 +0,0 @@
#
# test_tomarkdown.py
#
# Tests for markdown formatter (tomarkdown.py).
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Unit tests for `tomarkdown`.
This module contains tests for functions in `tomarkdown.py`.
"""
import content
import sources
import tomarkdown
import utils
# Create test objects
# create context and processor
source_processor = sources.SourceProcessor()
content_processor = content.ContentProcessor()
# Names
project_title = 'Test Docs'
project_prefix = 'test'
# retrieve the list of files to process
file_list = utils.make_file_list( ['./tests/assets/*.c'] )
for filename in file_list:
source_processor.parse_file( filename )
content_processor.parse_sources( source_processor )
# process sections
content_processor.finish()
formatter = tomarkdown.MdFormatter( content_processor,
project_title,
project_prefix )
def test_html_quote():
test_string = '7 & 9 < 4 & 5 but 12 & 15 > 4 & 5'
expt_string = '7 &amp; 9 &lt; 4 &amp; 5 but 12 &amp; 15 &gt; 4 &amp; 5'
assert tomarkdown.html_quote(test_string) == expt_string
def test_normalize_url():
global formatter
url = 'protocol://test-url-with/[square-brackets]?and-query'
expected_url = 'protocol://test-url-with/(square-brackets)?and-query'
assert formatter.normalize_url( url ) == expected_url
def test_slugify():
global formatter
name = 'FT_HAS_MULTIPLE_MASTERS'
expected = 'ft_has_multiple_masters'
assert formatter.slugify( name ) == expected
def test_slugify2():
global formatter
name = 'FT_GetFilePath_From_Mac_ATS_Name'
expected = 'ft_getfilepath_from_mac_ats_name'
assert formatter.slugify( name ) == expected
def test_slugify3():
global formatter
name = 'default-script'
expected = 'default-script'
assert formatter.slugify( name ) == expected
def test_make_section_url():
global formatter
expected_url = '../test-outline_processing/index.html'
section = list(formatter.sections)[0]
out_url = formatter.make_section_url( section, code = True )
assert out_url == expected_url
def test_make_chapter_url():
global formatter
expected_text = '[Support API](test-toc.md#support-api)'
section = list(formatter.sections)[0]
out_text = formatter.make_chapter_url( section.chapter.title )
assert out_text == expected_text
# eof

View File

@ -1,67 +0,0 @@
#
# test_utils.py
#
# Tests for utility functions (utils.py).
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Unit tests for `utils`.
This module contains tests for functions in `utils.py`.
"""
import utils
import sys
def test_index_key():
test_dict = {"hello": "world", "foo": "bar", "FOO": "BAZ",
"HELLO": "WORLD", "zzz": "sleep"}
# expected output
out_list = ["FOO", "foo", "HELLO", "hello", "zzz"]
block_index = test_dict.keys()
block_index = sorted( block_index, key = utils.index_key )
assert block_index == out_list
def test_sort_order_list():
input_list = ["z", "b", "a"]
order_list = ["b", "c", "d"]
# expected output
expected = ["b", "c", "d", "z", "a"]
out_list = utils.sort_order_list(input_list, order_list)
assert out_list == expected
def test_output( tmpdir ):
# check if sys.stdout is diverting to file
# this tests both open_output and close_output
utils.output_dir = str( tmpdir )
old_std = sys.stdout
out = utils.open_output("test.txt", config=True)
assert sys.stdout != old_std
utils.close_output( out )
assert sys.stdout == old_std
def test_make_file_list( tmpdir ):
utils.output_dir = tmpdir
f1 = tmpdir.join( "test1.c" )
f2 = tmpdir.join( "test2.c" )
f3 = tmpdir.join( "test3.txt" )
f1.write( "foo" )
f2.write( "bar" )
f3.write( "baz" )
args = [str( tmpdir + '/*.c' )]
expected = ['test1.c', 'test2.c']
out_list = utils.make_file_list( args )
out_list = [f for f in out_list]
for i in range( len( expected ) ):
assert expected[i] in out_list[i]
# eof

View File

@ -1,632 +0,0 @@
#
# tomarkdown.py
#
# A sub-class container of the `Formatter' class to produce Markdown.
#
# Copyright 2018 by
# Nikhil Ramakrishnan.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
# The parent class is contained in file `formatter.py'.
"""Subclass of `formatter` to generate Markdown.
This module subclasses `formatter` and implements syntax-specific
routines to build markdown output.
"""
import logging
import os
import re
import time
import mistune
from formatter import Formatter
import siteconfig
import sources
log = logging.getLogger( __name__ )
#---------------------------------------------------------------
# Begin initial configuration
# Docs Config.
api_ref_text = "API Reference"
docs_author = "FreeType Contributors"
# Breadcrumbs Navigation config.
md_crumbs_sep = " &raquo; "
md_header_1 = """\
[FreeType](//www.freetype.org) &raquo; \
"""
md_header_2 = """\
[Docs](../) &raquo; \
"""
md_line_sep = """
-------------------------------\
"""
# Heading Default Text.
md_api_ref = """\
API Reference
"""
# Chapter header/inter/footer.
chapter_header = """\
## \
"""
chapter_footer = ''
# Synopsis text
section_synopsis_header = '''
## Synopsis\
'''
section_synopsis_footer = ''
# Description header/footer.
description_header = ""
description_footer = ""
# Source code extracts header/footer.
source_header = """
<div class = "codehilite">
<pre>\
"""
source_footer = """\
</pre>
</div>\
"""
code_header = "```"
code_footer = "```"
# Source language keyword coloration and styling.
keyword_prefix = '<span class="keyword">'
keyword_suffix = '</span>'
# HTML paragraph header and footer.
para_header = "<p>"
para_footer = "</p>"
# General Markdown.
md_newline = "\n"
md_h1 = "# "
md_h2 = "## "
md_h3 = "### "
md_h4 = "<h4>"
md_h4_inter = "</h4>"
md_hr = """\
<hr>
"""
# End of initial configuration
#---------------------------------------------------------------
def html_quote( line ):
"""Change HTML special characters to their codes.
Follows ISO 8859-1. Characters changed: `&`, `<`, and `>`.
"""
result = line
if "`" not in result:
result = line.replace( "&", "&amp;" )
result = result.replace( "<", "&lt;" )
result = result.replace( ">", "&gt;" )
return result
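# --- Illustrative example (not part of the original file) ---------------
# Only `&`, `<`, and `>` are rewritten, and a line containing a backtick
# is returned unchanged so that code spans keep their literal characters:
#
#   >>> html_quote( "7 < 9 & x > y" )
#   '7 &lt; 9 &amp; x &gt; y'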
################################################################
##
## MARKDOWN FORMATTER CLASS
##
class MdFormatter( Formatter ):
def __init__( self, processor, project_title, file_prefix ):
Formatter.__init__( self, processor )
if file_prefix:
file_prefix = file_prefix + "-"
else:
file_prefix = ""
self.headers = processor.headers
self.project_title = project_title
self.file_prefix = file_prefix
self.toc_filename = self.file_prefix + "toc.md"
self.index_filename = self.file_prefix + "index.md"
self.markdown = mistune.Markdown()
self.config = siteconfig.SiteConfig()
self.md_index_header = (
md_header_1 + md_header_2
+ "Global Index"
+ md_line_sep + md_h1
+ project_title + md_api_ref
)
self.md_toc_header = (
md_header_1 + md_header_2
+ "Table of Contents"
+ md_line_sep + md_h1
+ project_title + md_api_ref
)
self.time_footer = (
'<div class="timestamp">generated on '
+ time.asctime( time.gmtime() ) + " UTC"
+ "</div>" )
self.columns = 3
self.site_name = project_title + " " + api_ref_text
self.site_description = api_ref_text + " Documentation for " + project_title
self.site_author = docs_author
# Set site config
self.config.set_site_info( self.site_name, self.site_description,
self.site_author )
# Add toc and index
self.config.add_single_page( "TOC", self.toc_filename )
self.config.add_single_page( "Index", self.index_filename )
def normalize_url( self, url ):
# normalize url, following RFC 3986
url = url.replace( "[", "(" )
url = url.replace( "]", ")" )
return url
def slugify( self, name ):
"""Slugify a cross-reference.
Python-Markdown uses a similar approach when processing links, so we
need to do the same to produce valid cross-references.
"""
name = name.lower().strip()
name = name.replace( " ", "-")
return name
def make_section_url( self, section, code = False ):
if code:
return "../" + self.file_prefix + section.name + "/index.html"
return self.file_prefix + section.name + ".md"
def make_block_url( self, block, name = None, code = False ):
if name is None:
name = block.name
name = self.slugify( name )
try:
# if it is a field def, link to its parent section
section_url = self.make_section_url( block.section, code )
except Exception:
# we already have a section
section_url = self.make_section_url( block, code )
return section_url + "#" + name
def make_chapter_url( self, chapter ):
chapter = ' '.join( chapter )
slug_chapter = self.slugify( chapter )
chapter_url = ( "[" + chapter + "]("
+ self.toc_filename + "#" + slug_chapter + ")"
)
return chapter_url
def make_md_word( self, word ):
"""Analyze a simple word to detect cross-references and markup."""
# handle cross-references
m = sources.re_crossref.match( word )
if m:
try:
name = m.group( 'name' )
rest = m.group( 'rest' )
block = self.identifiers[name]
url = self.make_block_url( block, code = True )
# display `foo[bar]' as `foo'
name = re.sub( r'\[.*\]', '', name )
# normalize url
url = self.normalize_url( url )
try:
# for sections, display title
url = ( '&lsquo;<a href="' + url + '">'
+ block.title + '</a>&rsquo;'
+ rest )
except Exception:
url = ( '<a href="' + url + '">'
+ name + '</a>'
+ rest )
return url
except Exception:
# we detected a cross-reference to an unknown item
log.warning( "Undefined cross reference '%s'.", name )
return '?' + name + '?' + rest
return html_quote( word )
def make_md_para( self, words, in_html = False ):
"""Convert words of a paragraph into tagged Markdown text.
Also handle cross references.
"""
line = ""
if words:
line = self.make_md_word( words[0] )
for word in words[1:]:
line = line + " " + self.make_md_word( word )
# handle hyperlinks
line = sources.re_url.sub( r'<\1>', line )
# convert '...' quotations into real left and right single quotes
line = re.sub( r"(^|\W)'(.*?)'(\W|$)",
r'\1&lsquo;\2&rsquo;\3',
line )
# convert tilde into non-breaking space
line = line.replace( "~", "&nbsp;" )
# Return
if in_html:
# If we are in an HTML tag, return with newline after para
return line + md_newline
# Otherwise return a Markdown paragraph
return md_newline + line
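# --- Illustrative example (not part of the original file) ---------------
# Given an MdFormatter instance `formatter` (hypothetical) and assuming
# none of the words matches the URL or cross-reference patterns, ASCII
# quotes and tildes are rewritten on output:
#
#   formatter.make_md_para( ["a", "'quoted'", "word~here"] )
#   # -> "\na &lsquo;quoted&rsquo; word&nbsp;here"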
def make_md_code( self, lines, lang ):
"""Convert a code sequence to markdown."""
if not lang:
lang = ''
line = code_header + lang + '\n'
for l in lines:
# NOTE: Markdown keeps special characters verbatim inside code blocks,
# so lines are copied without HTML quoting.
line = line + l.rstrip() + '\n'
return line + code_footer
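# --- Illustrative example (not part of the original file) ---------------
# The method wraps the raw lines in a fenced block tagged with the given
# language:
#
#   make_md_code( ["FT_Library  library;"], "c" )
#   # -> '```c\nFT_Library  library;\n```'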
def make_md_items( self, items, in_html = False ):
"""Convert a field's content into markdown."""
lines = []
for item in items:
if item.lines:
lines.append( self.make_md_code( item.lines, item.lang ) )
else:
lines.append( self.make_md_para( item.words, in_html ) )
return '\n'.join( lines )
def print_md_items( self, items, in_html = False ):
content = self.make_md_items( items, in_html )
if in_html:
# Parse markdown in content
content = self.markdown( content ).rstrip()
print( content )
def print_md_para( self, words, in_html = False ):
content = self.make_md_para( words, in_html )
if in_html:
# Parse markdown in content
content = self.markdown( content ).rstrip()
return content
def print_html_field( self, field ):
if field.name:
print( '<table><tr valign="top"><td><b>'
+ field.name
+ "</b></td><td>" )
print( self.make_md_items( field.items ) )
if field.name:
print( "</td></tr></table>" )
def source_quote( self, line, block_name = None ):
result = ""
while line:
m = sources.re_source_crossref.match( line )
if m:
name = m.group( 2 )
prefix = html_quote( m.group( 1 ) )
length = len( m.group( 0 ) )
if name == block_name:
# this is the current block name, if any
result = result + prefix + '<b>' + name + '</b>'
# result = result + prefix + name
# Keyword highlighting
elif sources.re_source_keywords.match( name ):
# this is a C keyword
result = ( result + prefix
+ keyword_prefix + name + keyword_suffix )
elif name in self.identifiers:
# this is a known identifier
block = self.identifiers[name]
iden = block.name
# link to a field ID if possible
try:
for markup in block.markups:
if markup.tag == 'values':
for field in markup.fields:
if field.name:
iden = name
result = ( result + prefix
+ '<a href="'
+ self.make_block_url( block, iden, code = True )
+ '">' + name + '</a>' )
except Exception:
# sections don't have `markups'; however, we don't
# want references to sections here anyway
result = result + html_quote( line[:length] )
else:
result = result + html_quote( line[:length] )
line = line[length:]
else:
result = result + html_quote( line )
line = []
return result
def print_md_field_list( self, fields ):
is_long = False
for field in fields:
# if any field name is longer than
# 25 chars change to long table
if len( field.name ) > 25:
is_long = True
break
# if any line has a code sequence
# change to long table
for item in field.items:
if item.lines:
is_long = True
break
if is_long:
print( '<table class="fields long">' )
else:
print( '<table class="fields">' )
for field in fields:
print( '<tr><td class="val" id="' + self.slugify( field.name ) + '">'
+ field.name
+ '</td><td class="desc">' )
self.print_md_items( field.items, in_html = True )
print( "</td></tr>" )
print( "</table>" )
def print_md_markup( self, markup ):
table_fields = []
for field in markup.fields:
if field.name:
# We begin a new series of field or value definitions. We
# record them in the `table_fields' list before outputting
# all of them as a single table.
table_fields.append( field )
else:
if table_fields:
self.print_md_field_list( table_fields )
table_fields = []
self.print_md_items( field.items )
if table_fields:
self.print_md_field_list( table_fields )
#
# formatting the index
#
def index_enter( self ):
print( self.md_index_header )
self.index_items = {}
def index_name_enter( self, name ):
block = self.identifiers[name]
url = self.make_block_url( block )
self.index_items[name] = url
def index_exit( self ):
# `block_index' already contains the sorted list of index names
letter = ''
for bname in self.block_index:
if letter != bname[0].upper():
# print letter heading
letter = bname[0].upper()
print( '\n' + md_h3 + letter + '\n' )
url = self.index_items[bname]
# display `foo[bar]' as `foo (bar)'
bname = bname.replace( "[", " (" )
bname = bname.replace( "]", ")" )
# normalize url
url = self.normalize_url( url )
line = ( '[' + bname + ']' + '(' + url + ')' + ' ' )
print( line )
# TODO Remove commented code once the above is ready
# count = len( self.block_index )
# rows = ( count + self.columns - 1 ) // self.columns
# print( '<table class="index">' )
# for r in range( rows ):
# line = "<tr>"
# for c in range( self.columns ):
# i = r + c * rows
# if i < count:
# bname = self.block_index[r + c * rows]
# url = self.index_items[bname]
# # display `foo[bar]' as `foo (bar)'
# bname = bname.replace( "[", " (" )
# bname = bname.replace( "]", ")" )
# # normalize url
# url = self.normalize_url( url )
# line = ( line + '<td><a href="' + url + '">'
# + bname + '</a></td>' )
# else:
# line = line + '<td></td>'
# line = line + "</tr>"
# print( line )
# print( "</table>" )
print( md_line_sep )
print( self.time_footer )
self.index_items = {}
def index_dump( self, index_filename = None ):
if index_filename is None:
index_filename = self.file_prefix + "index.md"
Formatter.index_dump( self, index_filename )
#
# Formatting the table of contents and
# config file for MkDocs.
#
def toc_enter( self ):
print( self.md_toc_header )
print( "# Table of Contents" )
def toc_chapter_enter( self, chapter ):
print( chapter_header + " ".join( chapter.title ) + md_newline )
print( '<table class="toc">' )
# add a chapter
self.config.start_chapter( " ".join( chapter.title ) )
def toc_section_enter( self, section ):
print( '<tr><td class="link">'
+ '<a href="'
+ self.make_section_url( section, code = True ) + '">'
+ section.title + '</a></td><td class="desc">' )
print( self.print_md_para( section.abstract, in_html = True ) )
# add section to chapter
self.config.add_chapter_page( section.title,
self.make_section_url( section ) )
def toc_section_exit( self, section ):
print( "</td></tr>" )
def toc_chapter_exit( self, chapter ):
print( "</table>" )
#print( chapter_footer )
# End the chapter
self.config.end_chapter()
def toc_index( self, index_filename ):
print( chapter_header
+ '[Global Index](' + index_filename + ')'
)
def toc_exit( self ):
print( md_line_sep )
print( self.time_footer )
# Build and flush MkDocs config
self.config.build_config()
def toc_dump( self, toc_filename = None, index_filename = None ):
if toc_filename is None:
toc_filename = self.file_prefix + "toc.md"
if index_filename is None:
index_filename = self.file_prefix + "index.md"
Formatter.toc_dump( self, toc_filename, index_filename )
#
# formatting sections
#
def section_enter( self, section ):
if section.chapter:
print( md_header_1 + md_header_2
+ self.make_chapter_url( section.chapter.title )
+ md_crumbs_sep + section.title
+ md_line_sep )
else:
# this should never happen!
log.warning( "No chapter name for Section '%s'.", section.title )
# Print section title
print( md_h1 + section.title )
# print section synopsis
print( section_synopsis_header )
#print( section_synopsis_footer )
#print( description_header )
print( self.make_md_items( section.description ) )
print( description_footer )
def block_enter( self, block ):
# place anchor if needed
if block.name:
url = block.name
# display `foo[bar]' as `foo'
name = re.sub( r'\[.*\]', '', block.name )
# normalize url
url = self.normalize_url( url )
print( md_h2 + name + md_newline )
# dump the block C source lines now
if block.code:
header = ''
for f in self.headers.keys():
header_filename = os.path.normpath( block.source.filename )
if header_filename.find( os.path.normpath( f ) ) >= 0:
header = self.headers[f] + ' (' + f + ')'
break
# Warn if header macro not found
# if not header:
# log.warn(
# "No header macro for"
# " '%s'.", block.source.filename )
if header:
print( 'Defined in ' + header + '.' )
print( source_header )
for l in block.code:
print( self.source_quote( l, block.name ) )
print( source_footer )
def markup_enter( self, markup, block ):
if markup.tag == "description":
print( description_header )
else:
print( md_h4 + markup.tag + md_h4_inter )
self.print_md_markup( markup )
def markup_exit( self, markup, block ):
if markup.tag == "description":
print( description_footer )
else:
print( "" )
def block_exit( self, block ):
print( md_hr )
def section_exit( self, section ):
pass
def section_dump_all( self ):
log.debug( "Building markdown pages for sections." )
for section in self.sections:
self.section_dump( section,
self.file_prefix + section.name + '.md' )
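# --- Illustrative usage sketch (not part of the original file) ----------
# One plausible way to drive the dump entry points defined above, once a
# content processor has parsed the sources (the title and prefix below
# are hypothetical):
#
#   formatter = MdFormatter( content_processor, "My Project", "ft2" )
#   formatter.toc_dump()          # writes ft2-toc.md and the MkDocs config
#   formatter.index_dump()        # writes ft2-index.md
#   formatter.section_dump_all()  # writes one markdown page per section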
# eof

View File

@ -1,10 +0,0 @@
[tox]
envlist = py27, py36
skipsdist = True
[testenv]
deps =
pytest
-rrequirements.txt
commands =
python -m pytest -v

View File

@ -1,162 +0,0 @@
#
# utils.py
#
# Auxiliary functions for the `docmaker' tool (library file).
#
# Copyright 2002-2018 by
# David Turner.
#
# This file is part of the FreeType project, and may only be used,
# modified, and distributed under the terms of the FreeType project
# license, LICENSE.TXT. By continuing to use, modify, or distribute
# this file you indicate that you have read the license and
# understand and accept it fully.
"""Utility functions for Docwriter.
This module provides various utility functions for Docwriter.
"""
import glob
import itertools
import logging
import os
import sys
log = logging.getLogger( __name__ )
# current output directory
#
output_dir = None
markdown_dir = "markdown"
def build_message():
"""Print build message to console."""
path = os.path.join( output_dir, markdown_dir )
path = os.path.normpath(path)
log.info("Building markdown documentation to directory: %s", path)
def index_key( s ):
"""Generate a sorting key.
We want lexicographical order (primary key) except that capital
letters are sorted before lowercase ones (secondary key).
The primary key is implemented by lowercasing the input. The
secondary key is simply the original data appended, character by
character. For example, the sort key for `FT_x` is `fFtT__xx`,
while the sort key for `ft_X` is `fftt__xX`. Since ASCII codes of
uppercase letters are numerically smaller than the codes of
lowercase letters, `fFtT__xx` gets sorted before `fftt__xX`.
"""
return " ".join( itertools.chain( *zip( s.lower(), s ) ) )
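# --- Illustrative example (not part of the original module) -------------
# The interleaved key makes capitals win ties against lowercase letters:
#
#   >>> sorted( ["ft_X", "FT_x"], key = index_key )
#   ['FT_x', 'ft_X']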
def sort_order_list( input_list, order_list ):
"""Sort `input_list`, placing the elements of `order_list` in front."""
new_list = order_list[:]
for name in input_list:
if name not in order_list:
new_list.append( name )
return new_list
def open_output( filename, config = False ):
"""Divert standard output to a given project documentation file.
Use `output_dir` to determine the filename location if necessary and
save the old stdout handle in a tuple that is returned by this function.
If `config` is set to True, the file is written directly to the
output directory (the parent of the markdown directory), because
MkDocs (and other generators) require configuration files to live there.
"""
if output_dir and output_dir != "":
if not config:
filename = output_dir + os.sep + markdown_dir + os.sep + filename
else:
filename = output_dir + os.sep + filename
old_stdout = sys.stdout
new_file = open( filename, "w" )
sys.stdout = new_file
return ( new_file, old_stdout )
def close_output( output ):
"""Close the output that was returned by `open_output`."""
output[0].close()
sys.stdout = output[1]
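# --- Illustrative usage sketch (not part of the original module) --------
# The two helpers are meant to be used as a pair, as seen from a calling
# module (compare test_output in test_utils.py); the filename below is
# hypothetical:
#
#   out = utils.open_output( "ft2-toc.md" )  # stdout now goes to the file
#   print( "# Table of Contents" )
#   utils.close_output( out )                # close the file, restore stdout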
def check_output():
"""Check if output directory is valid."""
global output_dir
if output_dir:
if output_dir != "":
if not os.path.isdir( output_dir ):
log.error( "Argument"
" '%s' is not a valid directory.",
output_dir )
sys.exit( 2 )
else:
output_dir = None
def file_exists( pathname ):
"""Check that a given file exists."""
result = 1
try:
file_handle = open( pathname, "r" )
file_handle.close()
except Exception:
result = None
log.error( "%s couldn't be accessed.", pathname )
return result
def clean_markdown_dir( ):
"""Remove markdown and yml files from a directory."""
directory = output_dir + os.sep + markdown_dir
if not os.path.exists(directory):
return
for entry in os.listdir(directory):
# Don't remove hidden files from the directory.
if entry.startswith('.'):
continue
path = os.path.join(directory, entry)
if os.path.isdir(path):
continue
if entry.endswith('.md') or entry.endswith('.yml'):
os.unlink(path)
def make_file_list( args = None ):
"""Build a list of input files from a list of paths or, if none is
given, from the command-line arguments."""
file_list = []
if not args:
args = sys.argv[1:]
for pathname in args:
if pathname.find( '*' ) >= 0:
newpath = glob.glob( pathname )
newpath.sort()  # sort the expanded wildcard matches, since the
# input order determines the processing order of the files
else:
newpath = [pathname]
file_list.extend( newpath )
if len( file_list ) == 0:
file_list = None
else:
# now filter the file list to remove non-existing ones
file_list = filter( file_exists, file_list )
return file_list
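# --- Illustrative example (not part of the original module) -------------
# Wildcards are expanded and missing files are filtered out; the path
# below is hypothetical:
#
#   for name in make_file_list( ["./include/freetype/*.h"] ) or []:
#       print( name )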
# eof