mirror of
https://github.com/SheffieldML/GPy.git
synced 2026-04-27 22:06:22 +02:00
Added plot_directive and mathmpl extensions
This commit is contained in:
parent
62fe0f1cca
commit
73f132fe19
11 changed files with 3533 additions and 0 deletions
2
doc/sphinxext/__init__.py
Normal file
2
doc/sphinxext/__init__.py
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
from __future__ import print_function
|
||||
|
||||
427
doc/sphinxext/apigen.py
Normal file
427
doc/sphinxext/apigen.py
Normal file
|
|
@ -0,0 +1,427 @@
|
|||
"""Attempt to generate templates for module reference with Sphinx
|
||||
|
||||
XXX - we exclude extension modules
|
||||
|
||||
To include extension modules, first identify them as valid in the
|
||||
``_uri2path`` method, then handle them in the ``_parse_module`` script.
|
||||
|
||||
We get functions and classes by parsing the text of .py files.
|
||||
Alternatively we could import the modules for discovery, and we'd have
|
||||
to do that for extension modules. This would involve changing the
|
||||
``_parse_module`` method to work via import and introspection, and
|
||||
might involve changing ``discover_modules`` (which determines which
|
||||
files are modules, and therefore which module URIs will be passed to
|
||||
``_parse_module``).
|
||||
|
||||
NOTE: this is a modified version of a script originally shipped with the
|
||||
PyMVPA project, which we've adapted for NIPY use. PyMVPA is an MIT-licensed
|
||||
project."""
|
||||
|
||||
# Stdlib imports
|
||||
import os
|
||||
import re
|
||||
|
||||
# Functions and classes
|
||||
class ApiDocWriter(object):
    ''' Class for automatic detection and parsing of API docs
    to Sphinx-parsable reST format'''

    # only separating first two levels
    rst_section_levels = ['*', '=', '-', '~', '^']

    def __init__(self,
                 package_name,
                 rst_extension='.rst',
                 package_skip_patterns=None,
                 module_skip_patterns=None,
                 ):
        r''' Initialize package for parsing

        Parameters
        ----------
        package_name : string
            Name of the top-level package.  *package_name* must be the
            name of an importable package
        rst_extension : string, optional
            Extension for reST files, default '.rst'
        package_skip_patterns : None or sequence of {strings, regexps}
            Sequence of strings giving URIs of packages to be excluded
            Operates on the package path, starting at (including) the
            first dot in the package path, after *package_name* - so,
            if *package_name* is ``sphinx``, then ``sphinx.util`` will
            result in ``.util`` being passed for searching by these
            regexps.  If is None, gives default. Default is:
            ['\.tests$']
        module_skip_patterns : None or sequence
            Sequence of strings giving URIs of modules to be excluded
            Operates on the module name including preceding URI path,
            back to the first dot after *package_name*.  For example
            ``sphinx.util.console`` results in the string to search of
            ``.util.console``
            If is None, gives default. Default is:
            ['\.setup$', '\._']
        '''
        # Raw-string patterns: these are regexps, not literal text.
        if package_skip_patterns is None:
            package_skip_patterns = [r'\.tests$']
        if module_skip_patterns is None:
            module_skip_patterns = [r'\.setup$', r'\._']
        self.package_name = package_name
        self.rst_extension = rst_extension
        self.package_skip_patterns = package_skip_patterns
        self.module_skip_patterns = module_skip_patterns

    def get_package_name(self):
        return self._package_name

    def set_package_name(self, package_name):
        ''' Set package_name

        >>> docwriter = ApiDocWriter('sphinx')
        >>> import sphinx
        >>> docwriter.root_path == sphinx.__path__[0]
        True
        >>> docwriter.package_name = 'docutils'
        >>> import docutils
        >>> docwriter.root_path == docutils.__path__[0]
        True
        '''
        # It's also possible to imagine caching the module parsing here
        self._package_name = package_name
        self.root_module = __import__(package_name)
        self.root_path = self.root_module.__path__[0]
        self.written_modules = None

    package_name = property(get_package_name, set_package_name, None,
                            'get/set package_name')

    def _get_object_name(self, line):
        ''' Get second token in line
        >>> docwriter = ApiDocWriter('sphinx')
        >>> docwriter._get_object_name("  def func():  ")
        'func'
        >>> docwriter._get_object_name("  class Klass(object):  ")
        'Klass'
        >>> docwriter._get_object_name("  class Klass:  ")
        'Klass'
        '''
        name = line.split()[1].split('(')[0].strip()
        # in case we have classes which are not derived from object
        # ie. old style classes
        return name.rstrip(':')

    def _uri2path(self, uri):
        ''' Convert uri to absolute filepath

        Parameters
        ----------
        uri : string
            URI of python module to return path for

        Returns
        -------
        path : None or string
            Returns None if there is no valid path for this URI
            Otherwise returns absolute file system path for URI

        Examples
        --------
        >>> docwriter = ApiDocWriter('sphinx')
        >>> import sphinx
        >>> modpath = sphinx.__path__[0]
        >>> res = docwriter._uri2path('sphinx.builder')
        >>> res == os.path.join(modpath, 'builder.py')
        True
        >>> res = docwriter._uri2path('sphinx')
        >>> res == os.path.join(modpath, '__init__.py')
        True
        >>> docwriter._uri2path('sphinx.does_not_exist')

        '''
        if uri == self.package_name:
            return os.path.join(self.root_path, '__init__.py')
        path = uri.replace('.', os.path.sep)
        path = path.replace(self.package_name + os.path.sep, '')
        path = os.path.join(self.root_path, path)
        # XXX maybe check for extensions as well?
        if os.path.exists(path + '.py'):  # file
            path += '.py'
        elif os.path.exists(os.path.join(path, '__init__.py')):
            path = os.path.join(path, '__init__.py')
        else:
            return None
        return path

    def _path2uri(self, dirpath):
        ''' Convert directory path to uri '''
        relpath = dirpath.replace(self.root_path, self.package_name)
        if relpath.startswith(os.path.sep):
            relpath = relpath[1:]
        return relpath.replace(os.path.sep, '.')

    def _parse_module(self, uri):
        ''' Parse module defined in *uri* '''
        filename = self._uri2path(uri)
        if filename is None:
            # nothing that we could handle here.
            return ([], [])
        # Context manager guarantees the file is closed even if parsing
        # raises (the original leaked the handle on error).
        with open(filename, 'rt') as f:
            functions, classes = self._parse_lines(f)
        return functions, classes

    def _parse_lines(self, linesource):
        ''' Parse lines of text for functions and classes '''
        functions = []
        classes = []
        for line in linesource:
            if line.startswith('def ') and line.count('('):
                # exclude private stuff
                name = self._get_object_name(line)
                if not name.startswith('_'):
                    functions.append(name)
            elif line.startswith('class '):
                # exclude private stuff
                name = self._get_object_name(line)
                if not name.startswith('_'):
                    classes.append(name)
            else:
                pass
        functions.sort()
        classes.sort()
        return functions, classes

    def generate_api_doc(self, uri):
        '''Make autodoc documentation template string for a module

        Parameters
        ----------
        uri : string
            python location of module - e.g 'sphinx.builder'

        Returns
        -------
        S : string
            Contents of API doc
        '''
        # get the names of all classes and functions
        functions, classes = self._parse_module(uri)
        if not len(functions) and not len(classes):
            # FIX: was a Python-2 print statement (syntax error on py3).
            print('WARNING: Empty -', uri)  # dbg
            return ''

        # Make a shorter version of the uri that omits the package name for
        # titles
        uri_short = re.sub(r'^%s\.' % self.package_name, '', uri)

        ad = '.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n'

        chap_title = uri_short
        ad += (chap_title + '\n' + self.rst_section_levels[1] * len(chap_title)
               + '\n\n')

        # Set the chapter title to read 'module' for all modules except for the
        # main packages
        if '.' in uri:
            title = 'Module: :mod:`' + uri_short + '`'
        else:
            title = ':mod:`' + uri_short + '`'
        ad += title + '\n' + self.rst_section_levels[2] * len(title)

        if len(classes):
            ad += '\nInheritance diagram for ``%s``:\n\n' % uri
            ad += '.. inheritance-diagram:: %s \n' % uri
            ad += '   :parts: 3\n'

        ad += '\n.. automodule:: ' + uri + '\n'
        ad += '\n.. currentmodule:: ' + uri + '\n'
        multi_class = len(classes) > 1
        multi_fx = len(functions) > 1
        if multi_class:
            ad += '\n' + 'Classes' + '\n' + \
                  self.rst_section_levels[2] * 7 + '\n'
        elif len(classes) and multi_fx:
            ad += '\n' + 'Class' + '\n' + \
                  self.rst_section_levels[2] * 5 + '\n'
        for c in classes:
            ad += '\n:class:`' + c + '`\n' \
                  + self.rst_section_levels[multi_class + 2] * \
                  (len(c) + 9) + '\n\n'
            ad += '\n.. autoclass:: ' + c + '\n'
            # must NOT exclude from index to keep cross-refs working
            ad += '  :members:\n' \
                  '  :undoc-members:\n' \
                  '  :show-inheritance:\n' \
                  '  :inherited-members:\n' \
                  '\n' \
                  '  .. automethod:: __init__\n'
        if multi_fx:
            ad += '\n' + 'Functions' + '\n' + \
                  self.rst_section_levels[2] * 9 + '\n\n'
        elif len(functions) and multi_class:
            ad += '\n' + 'Function' + '\n' + \
                  self.rst_section_levels[2] * 8 + '\n\n'
        for f in functions:
            # must NOT exclude from index to keep cross-refs working
            ad += '\n.. autofunction:: ' + uri + '.' + f + '\n\n'
        return ad

    def _survives_exclude(self, matchstr, match_type):
        ''' Returns True if *matchstr* does not match patterns

        ``self.package_name`` removed from front of string if present

        Examples
        --------
        >>> dw = ApiDocWriter('sphinx')
        >>> dw._survives_exclude('sphinx.okpkg', 'package')
        True
        >>> dw.package_skip_patterns.append('^\\.badpkg$')
        >>> dw._survives_exclude('sphinx.badpkg', 'package')
        False
        >>> dw._survives_exclude('sphinx.badpkg', 'module')
        True
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        True
        >>> dw.module_skip_patterns.append('^\\.badmod$')
        >>> dw._survives_exclude('sphinx.badmod', 'module')
        False
        '''
        if match_type == 'module':
            patterns = self.module_skip_patterns
        elif match_type == 'package':
            patterns = self.package_skip_patterns
        else:
            raise ValueError('Cannot interpret match type "%s"'
                             % match_type)
        # Match to URI without package name
        L = len(self.package_name)
        if matchstr[:L] == self.package_name:
            matchstr = matchstr[L:]
        for pat in patterns:
            try:
                # Patterns may be plain strings or pre-compiled regexps.
                pat.search
            except AttributeError:
                pat = re.compile(pat)
            if pat.search(matchstr):
                return False
        return True

    def discover_modules(self):
        r''' Return module sequence discovered from ``self.package_name``


        Parameters
        ----------
        None

        Returns
        -------
        mods : sequence
            Sequence of module names within ``self.package_name``

        Examples
        --------
        >>> dw = ApiDocWriter('sphinx')
        >>> mods = dw.discover_modules()
        >>> 'sphinx.util' in mods
        True
        >>> dw.package_skip_patterns.append('\.util$')
        >>> 'sphinx.util' in dw.discover_modules()
        False
        >>>
        '''
        modules = [self.package_name]
        # raw directory parsing
        for dirpath, dirnames, filenames in os.walk(self.root_path):
            # Check directory names for packages
            root_uri = self._path2uri(os.path.join(self.root_path,
                                                   dirpath))
            for dirname in dirnames[:]:  # copy list - we modify inplace
                package_uri = '.'.join((root_uri, dirname))
                if (self._uri2path(package_uri) and
                        self._survives_exclude(package_uri, 'package')):
                    modules.append(package_uri)
                else:
                    # prune: os.walk will not descend into removed dirs
                    dirnames.remove(dirname)
            # Check filenames for modules
            for filename in filenames:
                module_name = filename[:-3]
                module_uri = '.'.join((root_uri, module_name))
                if (self._uri2path(module_uri) and
                        self._survives_exclude(module_uri, 'module')):
                    modules.append(module_uri)
        return sorted(modules)

    def write_modules_api(self, modules, outdir):
        ''' Write one reST file per module in *modules* into *outdir*

        Sets ``self.written_modules`` to the list of modules that
        produced non-empty documentation.
        '''
        # write the list
        written_modules = []
        for m in modules:
            api_str = self.generate_api_doc(m)
            if not api_str:
                continue
            # write out to file; context manager closes on error too
            outfile = os.path.join(outdir,
                                   m + self.rst_extension)
            with open(outfile, 'wt') as fileobj:
                fileobj.write(api_str)
            written_modules.append(m)
        self.written_modules = written_modules

    def write_api_docs(self, outdir):
        """Generate API reST files.

        Parameters
        ----------
        outdir : string
            Directory name in which to store files
            We create automatic filenames for each module

        Returns
        -------
        None

        Notes
        -----
        Sets self.written_modules to list of written modules
        """
        if not os.path.exists(outdir):
            os.mkdir(outdir)
        # compose list of modules
        modules = self.discover_modules()
        self.write_modules_api(modules, outdir)

    def write_index(self, outdir, froot='gen', relative_to=None):
        """Make a reST API index file from written files

        Parameters
        ----------
        path : string
            Filename to write index to
        outdir : string
            Directory to which to write generated index file
        froot : string, optional
            root (filename without extension) of filename to write to
            Defaults to 'gen'.  We add ``self.rst_extension``.
        relative_to : string
            path to which written filenames are relative.  This
            component of the written file path will be removed from
            outdir, in the generated index.  Default is None, meaning,
            leave path as it is.
        """
        if self.written_modules is None:
            raise ValueError('No modules written')
        # Get full filename path
        path = os.path.join(outdir, froot + self.rst_extension)
        # Path written into index is relative to rootpath
        if relative_to is not None:
            relpath = outdir.replace(relative_to + os.path.sep, '')
        else:
            relpath = outdir
        # Context manager closes the index even if a write raises.
        with open(path, 'wt') as idx:
            w = idx.write
            w('.. AUTO-GENERATED FILE -- DO NOT EDIT!\n\n')
            w('.. toctree::\n\n')
            for f in self.written_modules:
                w('   %s\n' % os.path.join(relpath, f))
|
||||
497
doc/sphinxext/docscrape.py
Normal file
497
doc/sphinxext/docscrape.py
Normal file
|
|
@ -0,0 +1,497 @@
|
|||
"""Extract reference documentation from the NumPy source tree.
|
||||
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import textwrap
|
||||
import re
|
||||
import pydoc
|
||||
from StringIO import StringIO
|
||||
from warnings import warn
|
||||
4
|
||||
class Reader(object):
    """A line-based string reader.

    Wraps either a list of lines or a newline-separated string and
    exposes cursor-style access for the docstring parsers.
    """

    def __init__(self, data):
        """
        Parameters
        ----------
        data : str
           String with lines separated by '\n'.

        """
        # Keep the text internally as a list of lines.
        self._str = data if isinstance(data, list) else data.split('\n')
        self.reset()

    def __getitem__(self, n):
        return self._str[n]

    def reset(self):
        # Cursor: index of the next line to be read.
        self._l = 0

    def read(self):
        """Return the current line and advance; '' once at end of input."""
        if self.eof():
            return ''
        line = self._str[self._l]
        self._l += 1
        return line

    def seek_next_non_empty_line(self):
        """Advance the cursor past any blank lines."""
        while not self.eof() and not self._str[self._l].strip():
            self._l += 1

    def eof(self):
        return self._l >= len(self._str)

    def read_to_condition(self, condition_func):
        """Consume and return lines until condition_func(line) is true."""
        start = self._l
        while not self.eof():
            if condition_func(self._str[self._l]):
                return self._str[start:self._l]
            self._l += 1
        # Reached end of input (or started there): everything remaining.
        return self._str[start:]

    def read_to_next_empty_line(self):
        """Return the next paragraph (run of non-blank lines)."""
        self.seek_next_non_empty_line()
        return self.read_to_condition(lambda line: not line.strip())

    def read_to_next_unindented_line(self):
        """Return lines up to (excluding) the next flush-left line."""
        return self.read_to_condition(
            lambda line: line.strip() and len(line.lstrip()) == len(line))

    def peek(self, n=0):
        """Look at the line *n* past the cursor without consuming it."""
        pos = self._l + n
        return self._str[pos] if pos < len(self._str) else ''

    def is_empty(self):
        """True if the whole input is blank."""
        return not ''.join(self._str).strip()
|
||||
|
||||
|
||||
class NumpyDocString(object):
    """Parse a NumPy-format docstring into named, queryable sections.

    Sections are read and written via mapping access, e.g.
    ``doc['Parameters']``; ``str(doc)`` re-renders the docstring.
    """

    def __init__(self, docstring):
        docstring = textwrap.dedent(docstring).split('\n')

        self._doc = Reader(docstring)
        # Recognised sections and their empty defaults.
        self._parsed_data = {
            'Signature': '',
            'Summary': [''],
            'Extended Summary': [],
            'Parameters': [],
            'Returns': [],
            'Raises': [],
            'Warns': [],
            'Other Parameters': [],
            'Attributes': [],
            'Methods': [],
            'See Also': [],
            'Notes': [],
            'Warnings': [],
            'References': '',
            'Examples': '',
            'index': {}
            }

        self._parse()

    def __getitem__(self, key):
        return self._parsed_data[key]

    def __setitem__(self, key, val):
        # FIX: dict.has_key was removed in Python 3; use `in` instead.
        if key not in self._parsed_data:
            warn("Unknown section %s" % key)
        else:
            self._parsed_data[key] = val

    def _is_at_section(self):
        """True if the cursor sits on a section header (title + underline)."""
        self._doc.seek_next_non_empty_line()

        if self._doc.eof():
            return False

        l1 = self._doc.peek().strip()  # e.g. Parameters

        if l1.startswith('.. index::'):
            return True

        l2 = self._doc.peek(1).strip()  # ---------- or ==========
        return l2.startswith('-' * len(l1)) or l2.startswith('=' * len(l1))

    def _strip(self, doc):
        """Trim leading and trailing blank lines from a list of lines."""
        i = 0
        j = 0
        for i, line in enumerate(doc):
            if line.strip():
                break

        for j, line in enumerate(doc[::-1]):
            if line.strip():
                break

        return doc[i:len(doc) - j]

    def _read_to_next_section(self):
        """Read lines up to (excluding) the next section header."""
        section = self._doc.read_to_next_empty_line()

        while not self._is_at_section() and not self._doc.eof():
            if not self._doc.peek(-1).strip():  # previous line was empty
                section += ['']

            section += self._doc.read_to_next_empty_line()

        return section

    def _read_sections(self):
        """Yield (section_name, content_lines) pairs."""
        while not self._doc.eof():
            data = self._read_to_next_section()
            name = data[0].strip()

            if name.startswith('..'):  # index section
                yield name, data[1:]
            elif len(data) < 2:
                # FIX: the original did ``yield StopIteration``, which
                # yields the exception *class* and crashes the tuple
                # unpacking in _parse; simply end the generator instead.
                return
            else:
                yield name, self._strip(data[2:])

    def _parse_param_list(self, content):
        """Parse 'name : type' entries with indented descriptions."""
        r = Reader(content)
        params = []
        while not r.eof():
            header = r.read().strip()
            if ' : ' in header:
                arg_name, arg_type = header.split(' : ')[:2]
            else:
                arg_name, arg_type = header, ''

            desc = r.read_to_next_unindented_line()
            desc = dedent_lines(desc)

            params.append((arg_name, arg_type, desc))

        return params

    _name_rgx = re.compile(r"^\s*(:(?P<role>\w+):`(?P<name>[a-zA-Z0-9_.-]+)`|"
                           r" (?P<name2>[a-zA-Z0-9_.-]+))\s*", re.X)

    def _parse_see_also(self, content):
        """
        func_name : Descriptive text
            continued text
        another_func_name : Descriptive text
        func_name1, func_name2, :meth:`func_name`, func_name3

        """
        items = []

        def parse_item_name(text):
            """Match ':role:`name`' or 'name'"""
            m = self._name_rgx.match(text)
            if m:
                g = m.groups()
                if g[1] is None:
                    return g[3], None
                else:
                    return g[2], g[1]
            raise ValueError("%s is not a item name" % text)

        def push_item(name, rest):
            # Record the accumulated entry and clear the description buffer.
            if not name:
                return
            name, role = parse_item_name(name)
            items.append((name, list(rest), role))
            del rest[:]

        current_func = None
        rest = []

        for line in content:
            if not line.strip():
                continue

            m = self._name_rgx.match(line)
            if m and line[m.end():].strip().startswith(':'):
                # 'name : description' on one line
                push_item(current_func, rest)
                current_func, line = line[:m.end()], line[m.end():]
                rest = [line.split(':', 1)[1].strip()]
                if not rest[0]:
                    rest = []
            elif not line.startswith(' '):
                # flush-left: either a comma-separated name list or one name
                push_item(current_func, rest)
                current_func = None
                if ',' in line:
                    for func in line.split(','):
                        push_item(func, [])
                elif line.strip():
                    current_func = line
            elif current_func is not None:
                # indented continuation of the current description
                rest.append(line.strip())
        push_item(current_func, rest)
        return items

    def _parse_index(self, section, content):
        """
        .. index: default
           :refguide: something, else, and more

        """
        def strip_each_in(lst):
            return [s.strip() for s in lst]

        out = {}
        section = section.split('::')
        if len(section) > 1:
            out['default'] = strip_each_in(section[1].split(','))[0]
        for line in content:
            line = line.split(':')
            if len(line) > 2:
                out[line[1]] = strip_each_in(line[2].split(','))
        return out

    def _parse_summary(self):
        """Grab signature (if given) and summary"""
        if self._is_at_section():
            return

        summary = self._doc.read_to_next_empty_line()
        summary_str = " ".join([s.strip() for s in summary]).strip()
        # A summary that looks like 'name(args)' is taken as the signature.
        if re.compile(r'^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str):
            self['Signature'] = summary_str
            if not self._is_at_section():
                self['Summary'] = self._doc.read_to_next_empty_line()
        else:
            self['Summary'] = summary

        if not self._is_at_section():
            self['Extended Summary'] = self._read_to_next_section()

    def _parse(self):
        self._doc.reset()
        self._parse_summary()

        for (section, content) in self._read_sections():
            if not section.startswith('..'):
                # Normalise header capitalisation, e.g. 'see also' -> 'See Also'
                section = ' '.join([s.capitalize()
                                    for s in section.split(' ')])
            if section in ('Parameters', 'Attributes', 'Methods',
                           'Returns', 'Raises', 'Warns'):
                self[section] = self._parse_param_list(content)
            elif section.startswith('.. index::'):
                self['index'] = self._parse_index(section, content)
            elif section == 'See Also':
                self['See Also'] = self._parse_see_also(content)
            else:
                self[section] = content

    # string conversion routines

    def _str_header(self, name, symbol='-'):
        return [name, len(name) * symbol]

    def _str_indent(self, doc, indent=4):
        out = []
        for line in doc:
            out += [' ' * indent + line]
        return out

    def _str_signature(self):
        if self['Signature']:
            return [self['Signature'].replace('*', r'\*')] + ['']
        else:
            return ['']

    def _str_summary(self):
        if self['Summary']:
            return self['Summary'] + ['']
        else:
            return []

    def _str_extended_summary(self):
        if self['Extended Summary']:
            return self['Extended Summary'] + ['']
        else:
            return []

    def _str_param_list(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            for param, param_type, desc in self[name]:
                out += ['%s : %s' % (param, param_type)]
                out += self._str_indent(desc)
            out += ['']
        return out

    def _str_section(self, name):
        out = []
        if self[name]:
            out += self._str_header(name)
            out += self[name]
            out += ['']
        return out

    def _str_see_also(self, func_role):
        if not self['See Also']:
            return []
        out = []
        out += self._str_header("See Also")
        last_had_desc = True
        for func, desc, role in self['See Also']:
            if role:
                link = ':%s:`%s`' % (role, func)
            elif func_role:
                link = ':%s:`%s`' % (func_role, func)
            else:
                link = "`%s`_" % func
            if desc or last_had_desc:
                out += ['']
                out += [link]
            else:
                # chain undescribed names onto the previous line
                out[-1] += ", %s" % link
            if desc:
                out += self._str_indent([' '.join(desc)])
                last_had_desc = True
            else:
                last_had_desc = False
        out += ['']
        return out

    def _str_index(self):
        idx = self['index']
        out = []
        out += ['.. index:: %s' % idx.get('default', '')]
        # FIX: dict.iteritems() was removed in Python 3; items() is equivalent.
        for section, references in idx.items():
            if section == 'default':
                continue
            out += ['   :%s: %s' % (section, ', '.join(references))]
        return out

    def __str__(self, func_role=''):
        out = []
        out += self._str_signature()
        out += self._str_summary()
        out += self._str_extended_summary()
        for param_list in ('Parameters', 'Returns', 'Raises'):
            out += self._str_param_list(param_list)
        out += self._str_section('Warnings')
        out += self._str_see_also(func_role)
        for s in ('Notes', 'References', 'Examples'):
            out += self._str_section(s)
        out += self._str_index()
        return '\n'.join(out)
|
||||
|
||||
|
||||
def indent(str, indent=4):
    """Return *str* with every line prefixed by *indent* spaces.

    A ``None`` input yields a bare indent string.
    """
    pad = ' ' * indent
    if str is None:
        return pad
    return '\n'.join(pad + line for line in str.split('\n'))
|
||||
|
||||
def dedent_lines(lines):
    """Deindent a list of lines maximally"""
    joined = "\n".join(lines)
    return textwrap.dedent(joined).split("\n")
|
||||
|
||||
def header(text, style='-'):
    """Return *text* underlined with *style* characters (reST header)."""
    underline = style * len(text)
    return '%s\n%s\n' % (text, underline)
|
||||
|
||||
|
||||
class FunctionDoc(NumpyDocString):
    """NumPy docstring wrapper for functions and methods.

    Falls back to introspecting the function's signature when the
    docstring does not carry one.
    """

    def __init__(self, func, role='func', doc=None):
        self._f = func
        self._role = role  # e.g. "func" or "meth"
        if doc is None:
            doc = inspect.getdoc(func) or ''
        try:
            NumpyDocString.__init__(self, doc)
        # FIX: 'except ValueError, e' is Python-2-only syntax.
        except ValueError as e:
            print('*' * 78)
            print("ERROR: '%s' while parsing `%s`" % (e, self._f))
            print('*' * 78)
            #print "Docstring follows:"
            #print doclines
            #print '='*78

        if not self['Signature']:
            func, func_name = self.get_func()
            try:
                # try to read signature by introspection.
                # FIX: inspect.getargspec/formatargspec were removed in
                # Python 3.11; inspect.signature is the supported API.
                argspec = str(inspect.signature(func))
                argspec = argspec.replace('*', r'\*')
                signature = '%s%s' % (func_name, argspec)
            except (TypeError, ValueError):
                # builtins and some extension callables expose no signature
                signature = '%s()' % func_name
            self['Signature'] = signature

    def get_func(self):
        """Return (callable, name) for the wrapped object.

        For classes the callable is ``__call__`` if defined, else
        ``__init__``.
        """
        func_name = getattr(self._f, '__name__', self.__class__.__name__)
        if inspect.isclass(self._f):
            func = getattr(self._f, '__call__', self._f.__init__)
        else:
            func = self._f
        return func, func_name

    def __str__(self):
        out = ''

        func_name = self.get_func()[1]

        roles = {'func': 'function',
                 'meth': 'method'}

        if self._role:
            # FIX: dict.has_key removed in Python 3; py2 print statement.
            if self._role not in roles:
                print("Warning: invalid role %s" % self._role)
            out += '.. %s:: %s\n    \n\n' % (roles.get(self._role, ''),
                                             func_name)

        out += super(FunctionDoc, self).__str__(func_role=self._role)
        return out
|
||||
|
||||
|
||||
class ClassDoc(NumpyDocString):
    """NumPy docstring wrapper for classes."""

    def __init__(self, cls, modulename='', func_doc=FunctionDoc, doc=None):
        if not inspect.isclass(cls):
            raise ValueError("Initialise using a class. Got %r" % cls)
        self._cls = cls

        # Normalise the module prefix so it always ends with a dot.
        if modulename and not modulename.endswith('.'):
            modulename += '.'
        self._mod = modulename
        self._name = cls.__name__
        self._func_doc = func_doc

        NumpyDocString.__init__(
            self, pydoc.getdoc(cls) if doc is None else doc)

    @property
    def methods(self):
        """Names of the class's public callable attributes."""
        return [name
                for name, func in inspect.getmembers(self._cls)
                if not name.startswith('_') and callable(func)]

    def __str__(self):
        # Per-method rendering was disabled upstream; only the class-level
        # docstring is emitted, followed by a blank separator.
        return super(ClassDoc, self).__str__() + "\n\n"
|
||||
|
||||
|
||||
136
doc/sphinxext/docscrape_sphinx.py
Normal file
136
doc/sphinxext/docscrape_sphinx.py
Normal file
|
|
@ -0,0 +1,136 @@
|
|||
import re, inspect, textwrap, pydoc
|
||||
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
|
||||
|
||||
class SphinxDocString(NumpyDocString):
|
||||
# string conversion routines
|
||||
def _str_header(self, name, symbol='`'):
|
||||
return ['.. rubric:: ' + name, '']
|
||||
|
||||
def _str_field_list(self, name):
|
||||
return [':' + name + ':']
|
||||
|
||||
def _str_indent(self, doc, indent=4):
|
||||
out = []
|
||||
for line in doc:
|
||||
out += [' '*indent + line]
|
||||
return out
|
||||
|
||||
def _str_signature(self):
|
||||
return ['']
|
||||
if self['Signature']:
|
||||
return ['``%s``' % self['Signature']] + ['']
|
||||
else:
|
||||
return ['']
|
||||
|
||||
def _str_summary(self):
|
||||
return self['Summary'] + ['']
|
||||
|
||||
def _str_extended_summary(self):
|
||||
return self['Extended Summary'] + ['']
|
||||
|
||||
def _str_param_list(self, name):
|
||||
out = []
|
||||
if self[name]:
|
||||
out += self._str_field_list(name)
|
||||
out += ['']
|
||||
for param,param_type,desc in self[name]:
|
||||
out += self._str_indent(['**%s** : %s' % (param.strip(),
|
||||
param_type)])
|
||||
out += ['']
|
||||
out += self._str_indent(desc,8)
|
||||
out += ['']
|
||||
return out
|
||||
|
||||
def _str_section(self, name):
|
||||
out = []
|
||||
if self[name]:
|
||||
out += self._str_header(name)
|
||||
out += ['']
|
||||
content = textwrap.dedent("\n".join(self[name])).split("\n")
|
||||
out += content
|
||||
out += ['']
|
||||
return out
|
||||
|
||||
def _str_see_also(self, func_role):
|
||||
out = []
|
||||
if self['See Also']:
|
||||
see_also = super(SphinxDocString, self)._str_see_also(func_role)
|
||||
out = ['.. seealso::', '']
|
||||
out += self._str_indent(see_also[2:])
|
||||
return out
|
||||
|
||||
def _str_warnings(self):
|
||||
out = []
|
||||
if self['Warnings']:
|
||||
out = ['.. warning::', '']
|
||||
out += self._str_indent(self['Warnings'])
|
||||
return out
|
||||
|
||||
def _str_index(self):
|
||||
idx = self['index']
|
||||
out = []
|
||||
if len(idx) == 0:
|
||||
return out
|
||||
|
||||
out += ['.. index:: %s' % idx.get('default','')]
|
||||
for section, references in idx.iteritems():
|
||||
if section == 'default':
|
||||
continue
|
||||
elif section == 'refguide':
|
||||
out += [' single: %s' % (', '.join(references))]
|
||||
else:
|
||||
out += [' %s: %s' % (section, ','.join(references))]
|
||||
return out
|
||||
|
||||
def _str_references(self):
|
||||
out = []
|
||||
if self['References']:
|
||||
out += self._str_header('References')
|
||||
if isinstance(self['References'], str):
|
||||
self['References'] = [self['References']]
|
||||
out.extend(self['References'])
|
||||
out += ['']
|
||||
return out
|
||||
|
||||
def __str__(self, indent=0, func_role="obj"):
    """Render the whole parsed docstring as Sphinx-flavoured reST text."""
    sections = []
    sections.extend(self._str_signature())
    sections.extend(self._str_index() + [''])
    sections.extend(self._str_summary())
    sections.extend(self._str_extended_summary())
    for list_name in ('Parameters', 'Attributes', 'Methods',
                      'Returns', 'Raises'):
        sections.extend(self._str_param_list(list_name))
    sections.extend(self._str_warnings())
    sections.extend(self._str_see_also(func_role))
    sections.extend(self._str_section('Notes'))
    sections.extend(self._str_references())
    sections.extend(self._str_section('Examples'))
    return '\n'.join(self._str_indent(sections, indent))
|
||||
|
||||
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
    """FunctionDoc rendered with Sphinx-flavoured reST output."""
    pass
|
||||
|
||||
class SphinxClassDoc(SphinxDocString, ClassDoc):
    """ClassDoc rendered with Sphinx-flavoured reST output."""
    pass
|
||||
|
||||
def get_doc_object(obj, what=None, doc=None):
    """Build the appropriate Sphinx docstring wrapper for *obj*.

    *what* may force the kind ('class', 'module', 'function', 'method',
    'object'); when None it is inferred with ``inspect``.  *doc* overrides
    the docstring text; for plain objects it defaults to ``pydoc.getdoc``.
    """
    if what is None:
        if inspect.isclass(obj):
            what = 'class'
        elif inspect.ismodule(obj):
            what = 'module'
        elif callable(obj):
            what = 'function'
        else:
            what = 'object'

    if what == 'class':
        return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
    if what in ('function', 'method'):
        return SphinxFunctionDoc(obj, '', doc=doc)
    if doc is None:
        doc = pydoc.getdoc(obj)
    return SphinxDocString(doc)
|
||||
|
||||
407
doc/sphinxext/inheritance_diagram.py
Normal file
407
doc/sphinxext/inheritance_diagram.py
Normal file
|
|
@ -0,0 +1,407 @@
|
|||
"""
|
||||
Defines a docutils directive for inserting inheritance diagrams.
|
||||
|
||||
Provide the directive with one or more classes or modules (separated
|
||||
by whitespace). For modules, all of the classes in that module will
|
||||
be used.
|
||||
|
||||
Example::
|
||||
|
||||
Given the following classes:
|
||||
|
||||
class A: pass
|
||||
class B(A): pass
|
||||
class C(A): pass
|
||||
class D(B, C): pass
|
||||
class E(B): pass
|
||||
|
||||
.. inheritance-diagram: D E
|
||||
|
||||
Produces a graph like the following:
|
||||
|
||||
A
|
||||
/ \
|
||||
B C
|
||||
/ \ /
|
||||
E D
|
||||
|
||||
The graph is inserted as a PNG+image map into HTML and a PDF in
|
||||
LaTeX.
|
||||
"""
|
||||
|
||||
import inspect
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
try:
|
||||
from hashlib import md5
|
||||
except ImportError:
|
||||
from md5 import md5
|
||||
|
||||
from docutils.nodes import Body, Element
|
||||
from docutils.parsers.rst import directives
|
||||
from sphinx.roles import xfileref_role
|
||||
|
||||
def my_import(name):
    """Import a (possibly dotted) module path and return the *leaf* module.

    Taken from the Python documentation: unlike plain ``__import__``,
    which returns the top-level package, this walks the dotted components
    so ``my_import('a.b.c')`` returns the ``a.b.c`` module.
    """
    module = __import__(name)
    for part in name.split('.')[1:]:
        module = getattr(module, part)
    return module
|
||||
|
||||
class DotException(Exception):
    """Raised when invoking graphviz ``dot`` fails or misbehaves."""
    pass
|
||||
|
||||
class InheritanceGraph(object):
    """
    Given a list of classes, determine the set of classes they inherit
    from all the way up to the root ``object``, and generate a graphviz
    dot graph from them.
    """

    # Matches an optional dotted path followed by a final identifier.
    py_sig_re = re.compile(r'''^([\w.]*\.)?    # class names
                           (\w+) \s* $        # optionally arguments
                           ''', re.VERBOSE)

    def __init__(self, class_names, show_builtins=False):
        """
        *class_names* is a list of child classes to show bases from.

        If *show_builtins* is True, then Python builtins will be shown
        in the graph.

        Raises ValueError if no classes can be resolved.
        """
        self.class_names = class_names
        self.classes = self._import_classes(class_names)
        self.all_classes = self._all_classes(self.classes)
        if len(self.all_classes) == 0:
            raise ValueError("No classes found for inheritance diagram")
        self.show_builtins = show_builtins

    def _import_class_or_module(self, name):
        """Import the class (or every class of the module) named by *name*.

        Returns a list of class objects; raises ValueError on any failure.
        """
        try:
            path, base = self.py_sig_re.match(name).groups()
        except AttributeError:
            # py_sig_re.match() returned None, so .groups() raised.
            # (Was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            raise ValueError(
                "Invalid class or module '%s' specified for inheritance diagram" % name)
        fullname = (path or '') + base
        path = (path and path.rstrip('.'))
        if not path:
            path = base
        try:
            module = __import__(path, None, None, [])
            # We must do an import of the fully qualified name.  Otherwise if a
            # subpackage 'a.b' is requested where 'import a' does NOT provide
            # 'a.b' automatically, then 'a.b' will not be found below.  This
            # second call will force the equivalent of 'import a.b' to happen
            # after the top-level import above.
            my_import(fullname)
        except ImportError:
            raise ValueError(
                "Could not import class or module '%s' specified for inheritance diagram" % name)

        try:
            todoc = module
            for comp in fullname.split('.')[1:]:
                todoc = getattr(todoc, comp)
        except AttributeError:
            raise ValueError(
                "Could not find class or module '%s' specified for inheritance diagram" % name)

        # If a class, just return it
        if inspect.isclass(todoc):
            return [todoc]
        elif inspect.ismodule(todoc):
            # A module: collect the classes defined *in* it (not re-exports).
            classes = []
            for cls in todoc.__dict__.values():
                if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
                    classes.append(cls)
            return classes
        raise ValueError(
            "'%s' does not resolve to a class or module" % name)

    def _import_classes(self, class_names):
        """Import every entry of *class_names*, flattening module expansions."""
        classes = []
        for name in class_names:
            classes.extend(self._import_class_or_module(name))
        return classes

    def _all_classes(self, classes):
        """Return all classes that are ancestors of *classes* (including them)."""
        all_classes = {}

        def recurse(cls):
            all_classes[cls] = None
            for c in cls.__bases__:
                if c not in all_classes:
                    recurse(c)

        for cls in classes:
            recurse(cls)

        return all_classes.keys()

    def class_name(self, cls, parts=0):
        """
        Given a class object, return a fully-qualified name.  This
        works for things I've tested in matplotlib so far, but may not
        be completely general.

        *parts* > 0 keeps only the last *parts* dotted components.
        """
        module = cls.__module__
        if module == '__builtin__':
            fullname = cls.__name__
        else:
            fullname = "%s.%s" % (module, cls.__name__)
        if parts == 0:
            return fullname
        name_parts = fullname.split('.')
        return '.'.join(name_parts[-parts:])

    def get_all_class_names(self):
        """Get all of the class names involved in the graph."""
        return [self.class_name(x) for x in self.all_classes]

    # These are the default options for graphviz
    default_graph_options = {
        "rankdir": "LR",
        "size": '"8.0, 12.0"'
    }
    default_node_options = {
        "shape": "box",
        "fontsize": 10,
        "height": 0.25,
        "fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
        "style": '"setlinewidth(0.5)"'
    }
    default_edge_options = {
        "arrowsize": 0.5,
        "style": '"setlinewidth(0.5)"'
    }

    def _format_node_options(self, options):
        # "key=value,key=value" -- attribute list for a node/edge statement.
        return ','.join(["%s=%s" % x for x in options.items()])

    def _format_graph_options(self, options):
        # "key=value;\n" lines -- graph-level attribute statements.
        return ''.join(["%s=%s;\n" % x for x in options.items()])

    def generate_dot(self, fd, name, parts=0, urls=None,
                     graph_options=None, node_options=None,
                     edge_options=None):
        """
        Generate a graphviz dot graph from the classes that
        were passed in to __init__.

        *fd* is a Python file-like object to write to.

        *name* is the name of the graph.

        *urls* is a dictionary mapping class names to http urls.

        *graph_options*, *node_options*, *edge_options* are
        dictionaries containing key/value pairs to pass on as graphviz
        properties.

        The dict parameters default to None (treated as empty) instead of
        the original mutable ``{}`` defaults -- same behavior, without
        the shared-mutable-default pitfall.
        """
        urls = urls or {}
        g_options = self.default_graph_options.copy()
        g_options.update(graph_options or {})
        n_options = self.default_node_options.copy()
        n_options.update(node_options or {})
        e_options = self.default_edge_options.copy()
        e_options.update(edge_options or {})

        fd.write('digraph %s {\n' % name)
        fd.write(self._format_graph_options(g_options))

        for cls in self.all_classes:
            if not self.show_builtins and cls in __builtins__.values():
                continue

            name = self.class_name(cls, parts)

            # Write the node
            this_node_options = n_options.copy()
            url = urls.get(self.class_name(cls))
            if url is not None:
                this_node_options['URL'] = '"%s"' % url
            fd.write('  "%s" [%s];\n' %
                     (name, self._format_node_options(this_node_options)))

            # Write the edges.  NOTE: edge options are formatted with the
            # node formatter -- the "k=v,k=v" syntax is identical for both.
            for base in cls.__bases__:
                if not self.show_builtins and base in __builtins__.values():
                    continue

                base_name = self.class_name(base, parts)
                fd.write('  "%s" -> "%s" [%s];\n' %
                         (base_name, name,
                          self._format_node_options(e_options)))
        fd.write('}\n')

    def run_dot(self, args, name, parts=0, urls=None,
                graph_options=None, node_options=None, edge_options=None):
        """
        Run graphviz 'dot' over this graph, returning whatever 'dot'
        writes to stdout.

        *args* will be passed along as commandline arguments.

        *name* is the name of the graph.

        *urls* is a dictionary mapping class names to http urls.

        Raises DotException for any of the many os and
        installation-related errors that may occur.
        """
        try:
            dot = subprocess.Popen(['dot'] + list(args),
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                   close_fds=True)
        except OSError:
            raise DotException("Could not execute 'dot'.  Are you sure you have 'graphviz' installed?")
        except ValueError:
            raise DotException("'dot' called with invalid arguments")
        except Exception:
            raise DotException("Unexpected error calling 'dot'")

        self.generate_dot(dot.stdin, name, parts, urls, graph_options,
                          node_options, edge_options)
        dot.stdin.close()
        result = dot.stdout.read()
        returncode = dot.wait()
        if returncode != 0:
            raise DotException("'dot' returned the errorcode %d" % returncode)
        return result
|
||||
|
||||
class inheritance_diagram(Body, Element):
    """
    A docutils placeholder node; it is replaced with the rendered
    inheritance diagram when the document is output.
    """
    pass
|
||||
|
||||
def inheritance_diagram_directive(name, arguments, options, content, lineno,
                                  content_offset, block_text, state,
                                  state_machine):
    """
    Run when the inheritance_diagram directive is first encountered:
    build the class graph and return a placeholder node carrying it.
    """
    node = inheritance_diagram()
    class_names = arguments

    # Create a graph starting with the list of classes
    graph = InheritanceGraph(class_names)

    # Create xref nodes for each target of the graph's image map and
    # add them to the doc tree so that Sphinx can resolve the
    # references to real URLs later.  These nodes will eventually be
    # removed from the doctree after we're done with them.
    # (Loop variable renamed from ``name`` to avoid shadowing the
    # directive-name parameter.)
    for cls_name in graph.get_all_class_names():
        refnodes, x = xfileref_role(
            'class', ':class:`%s`' % cls_name, cls_name, 0, state)
        node.extend(refnodes)
    # Store the graph object so we can use it to generate the
    # dot file later
    node['graph'] = graph
    # Store the original content for use as a hash
    node['parts'] = options.get('parts', 0)
    node['content'] = " ".join(class_names)
    return [node]
|
||||
|
||||
def get_graph_hash(node):
    """Return a short, stable hash identifying a diagram by its classes and parts option.

    The text is encoded before hashing: on Python 3, md5() requires
    bytes (the original passed a str, a TypeError there); .encode() is
    harmless on Python 2 ASCII strings.
    """
    key = (node['content'] + str(node['parts'])).encode('utf-8')
    return md5(key).hexdigest()[-10:]
|
||||
|
||||
def html_output_graph(self, node):
    """
    Output the graph for HTML: a PNG file on disk plus a clickable
    image map returned inline.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash
    dest_path = os.path.join(setup.app.builder.outdir, '_images')
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    png_path = os.path.join(dest_path, name + ".png")
    img_path = setup.app.builder.imgpath

    # Create a mapping from fully-qualified class names to URLs, using
    # the xref children the directive attached earlier.
    urls = {}
    for child in node:
        if child.get('refuri') is not None:
            urls[child['reftitle']] = child.get('refuri')
        elif child.get('refid') is not None:
            urls[child['reftitle']] = '#' + child.get('refid')

    # -Tpng -o... saves the PNG to disk; -Tcmapx writes an HTML image
    # map to stdout, which run_dot returns.
    image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
                              name, parts, urls)
    return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
            (img_path, name, name, image_map))
|
||||
|
||||
def latex_output_graph(self, node):
    """
    Output the graph for LaTeX: render a PDF and return an
    ``\\includegraphics`` stanza referencing it.
    """
    graph = node['graph']
    parts = node['parts']

    graph_hash = get_graph_hash(node)
    name = "inheritance%s" % graph_hash
    dest_path = os.path.abspath(
        os.path.join(setup.app.builder.outdir, '_images'))
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))

    graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
                  name, parts, graph_options={'size': '"6.0,6.0"'})
    return '\n\\includegraphics{%s}\n\n' % pdf_path
|
||||
|
||||
def visit_inheritance_diagram(inner_func):
    """
    Wrap html/latex_output_graph so a DotException is reported as a
    document warning instead of aborting the build.
    """
    def visitor(self, node):
        try:
            content = inner_func(self, node)
        except DotException as e:
            # ``except DotException as e`` replaces the Python-2-only
            # ``except DotException, e``, which is a SyntaxError on
            # Python 3.
            # Insert the exception as a warning in the document
            warning = self.document.reporter.warning(str(e), line=node.line)
            warning.parent = node
            node.children = [warning]
        else:
            # (Removed an unused local that read
            # self.document.attributes['source'] for no effect.)
            self.body.append(content)
            node.children = []
    return visitor
|
||||
|
||||
def do_nothing(self, node):
    """No-op node visitor, used as the departure handler."""
    pass
|
||||
|
||||
def setup(app):
    """Sphinx extension entry point: register the node type and directive.

    The app and confdir are stashed on the function object so the
    output handlers (which have no other channel) can reach them.
    """
    setup.app = app
    setup.confdir = app.confdir

    app.add_node(
        inheritance_diagram,
        latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
        html=(visit_inheritance_diagram(html_output_graph), do_nothing))
    app.add_directive(
        'inheritance-diagram', inheritance_diagram_directive,
        False, (1, 100, 0), parts=directives.nonnegative_int)
|
||||
115
doc/sphinxext/ipython_console_highlighting.py
Normal file
115
doc/sphinxext/ipython_console_highlighting.py
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
"""reST directive for syntax-highlighting ipython interactive sessions.
|
||||
|
||||
XXX - See what improvements can be made based on the new (as of Sept 2009)
|
||||
'pycon' lexer for the python console. At the very least it will give better
|
||||
highlighted tracebacks.
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Needed modules
|
||||
|
||||
# Standard library
|
||||
import re
|
||||
|
||||
# Third party
|
||||
from pygments.lexer import Lexer, do_insertions
|
||||
from pygments.lexers.agile import (PythonConsoleLexer, PythonLexer,
|
||||
PythonTracebackLexer)
|
||||
from pygments.token import Comment, Generic
|
||||
|
||||
from sphinx import highlighting
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Global constants
|
||||
line_re = re.compile('.*?\n')
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Code begins - classes and functions
|
||||
|
||||
class IPythonConsoleLexer(Lexer):
    """
    For IPython console output or doctests, such as:

    .. sourcecode:: ipython

      In [1]: a = 'foo'

      In [2]: a
      Out[2]: 'foo'

      In [3]: print a
      foo

      In [4]: 1 / 0

    Notes:

    - Tracebacks are not currently supported.

    - It assumes the default IPython prompts, not customized ones.
    """

    name = 'IPython console session'
    aliases = ['ipython']
    mimetypes = ['text/x-ipython-console']
    # Raw strings for the patterns: the originals relied on '\[' and '\.'
    # surviving inside plain string literals, which emits invalid-escape
    # warnings on modern Python (and would break under future semantics).
    input_prompt = re.compile(r"(In \[[0-9]+\]: )|(   \.\.\.+:)")
    output_prompt = re.compile(r"(Out\[[0-9]+\]: )|(   \.\.\.+:)")
    continue_prompt = re.compile(r"   \.\.\.+:")
    tb_start = re.compile(r"\-+")

    def get_tokens_unprocessed(self, text):
        """Split a console transcript into prompt/code/output token streams."""
        pylexer = PythonLexer(**self.options)
        tblexer = PythonTracebackLexer(**self.options)

        curcode = ''
        insertions = []
        for match in line_re.finditer(text):
            line = match.group()
            input_prompt = self.input_prompt.match(line)
            continue_prompt = self.continue_prompt.match(line.rstrip())
            output_prompt = self.output_prompt.match(line)
            if line.startswith("#"):
                insertions.append((len(curcode),
                                   [(0, Comment, line)]))
            elif input_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, input_prompt.group())]))
                curcode += line[input_prompt.end():]
            elif continue_prompt is not None:
                insertions.append((len(curcode),
                                   [(0, Generic.Prompt, continue_prompt.group())]))
                curcode += line[continue_prompt.end():]
            elif output_prompt is not None:
                # Use the 'error' token for output.  We should probably make
                # our own token, but error is typicaly in a bright color like
                # red, so it works fine for our output prompts.
                insertions.append((len(curcode),
                                   [(0, Generic.Error, output_prompt.group())]))
                curcode += line[output_prompt.end():]
            else:
                # A plain output line: flush any buffered code first so
                # token offsets stay consistent.
                if curcode:
                    for item in do_insertions(insertions,
                                              pylexer.get_tokens_unprocessed(curcode)):
                        yield item
                    curcode = ''
                    insertions = []
                yield match.start(), Generic.Output, line
        if curcode:
            for item in do_insertions(insertions,
                                      pylexer.get_tokens_unprocessed(curcode)):
                yield item
|
||||
|
||||
|
||||
def setup(app):
|
||||
"""Setup as a sphinx extension."""
|
||||
|
||||
# This is only a lexer, so adding it below to pygments appears sufficient.
|
||||
# But if somebody knows that the right API usage should be to do that via
|
||||
# sphinx, by all means fix it here. At least having this setup.py
|
||||
# suppresses the sphinx warning we'd get without it.
|
||||
pass
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Register the extension as a valid pygments lexer
|
||||
highlighting.lexers['ipython'] = IPythonConsoleLexer()
|
||||
830
doc/sphinxext/ipython_directive.py
Normal file
830
doc/sphinxext/ipython_directive.py
Normal file
|
|
@ -0,0 +1,830 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
"""Sphinx directive to support embedded IPython code.
|
||||
|
||||
This directive allows pasting of entire interactive IPython sessions, prompts
|
||||
and all, and their code will actually get re-executed at doc build time, with
|
||||
all prompts renumbered sequentially. It also allows you to input code as a pure
|
||||
python input by giving the argument python to the directive. The output looks
|
||||
like an interactive ipython section.
|
||||
|
||||
To enable this directive, simply list it in your Sphinx ``conf.py`` file
|
||||
(making sure the directory where you placed it is visible to sphinx, as is
|
||||
needed for all Sphinx directives).
|
||||
|
||||
By default this directive assumes that your prompts are unchanged IPython ones,
|
||||
but this can be customized. The configurable options that can be placed in
|
||||
conf.py are
|
||||
|
||||
ipython_savefig_dir:
|
||||
The directory in which to save the figures. This is relative to the
|
||||
Sphinx source directory. The default is `html_static_path`.
|
||||
ipython_rgxin:
|
||||
The compiled regular expression to denote the start of IPython input
|
||||
lines. The default is re.compile('In \[(\d+)\]:\s?(.*)\s*'). You
|
||||
shouldn't need to change this.
|
||||
ipython_rgxout:
|
||||
The compiled regular expression to denote the start of IPython output
|
||||
lines. The default is re.compile('Out\[(\d+)\]:\s?(.*)\s*'). You
|
||||
shouldn't need to change this.
|
||||
ipython_promptin:
|
||||
The string to represent the IPython input prompt in the generated ReST.
|
||||
The default is 'In [%d]:'. This expects that the line numbers are used
|
||||
in the prompt.
|
||||
ipython_promptout:
|
||||
|
||||
The string to represent the IPython prompt in the generated ReST. The
|
||||
default is 'Out [%d]:'. This expects that the line numbers are used
|
||||
in the prompt.
|
||||
|
||||
ToDo
|
||||
----
|
||||
|
||||
- Turn the ad-hoc test() function into a real test suite.
|
||||
- Break up ipython-specific functionality from matplotlib stuff into better
|
||||
separated code.
|
||||
|
||||
Authors
|
||||
-------
|
||||
|
||||
- John D Hunter: orignal author.
|
||||
- Fernando Perez: refactoring, documentation, cleanups, port to 0.11.
|
||||
- VáclavŠmilauer <eudoxos-AT-arcig.cz>: Prompt generalizations.
|
||||
- Skipper Seabold, refactoring, cleanups, pure python addition
|
||||
"""
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Imports
|
||||
#-----------------------------------------------------------------------------
|
||||
|
||||
# Stdlib
|
||||
import cStringIO
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import tempfile
|
||||
import ast
|
||||
|
||||
# To keep compatibility with various python versions
|
||||
try:
|
||||
from hashlib import md5
|
||||
except ImportError:
|
||||
from md5 import md5
|
||||
|
||||
# Third-party
|
||||
import matplotlib
|
||||
import sphinx
|
||||
from docutils.parsers.rst import directives
|
||||
from docutils import nodes
|
||||
from sphinx.util.compat import Directive
|
||||
|
||||
matplotlib.use('Agg')
|
||||
|
||||
# Our own
|
||||
from IPython import Config, InteractiveShell
|
||||
from IPython.core.profiledir import ProfileDir
|
||||
from IPython.utils import io
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
# Globals
|
||||
#-----------------------------------------------------------------------------
|
||||
# for tokenizing blocks
COMMENT, INPUT, OUTPUT = range(3)


def block_parser(part, rgxin, rgxout, fmtin, fmtout):
    """
    part is a string of ipython text, comprised of at most one
    input, one ouput, comments, and blank lines.  The block parser
    parses the text into a list of::

      blocks = [ (TOKEN0, data0), (TOKEN1, data1), ...]

    where TOKEN is one of [COMMENT | INPUT | OUTPUT ] and
    data is, depending on the type of token::

      COMMENT : the comment string

      INPUT: the (DECORATOR, INPUT_LINE, REST) where
         DECORATOR: the input decorator (or None)
         INPUT_LINE: the input as string (possibly multi-line)
         REST : any stdout generated by the input line (not OUTPUT)

      OUTPUT: the output string, possibly multi-line
    """
    block = []
    lines = part.split('\n')
    N = len(lines)
    i = 0
    decorator = None
    while 1:
        if i == N:
            # nothing left to parse -- the last line
            break

        line = lines[i]
        i += 1
        line_stripped = line.strip()
        if line_stripped.startswith('#'):
            block.append((COMMENT, line))
            continue

        if line_stripped.startswith('@'):
            # we're assuming at most one decorator -- may need to
            # rethink
            decorator = line_stripped
            continue

        # does this look like an input line?
        matchin = rgxin.match(line)
        if matchin:
            lineno, inputline = int(matchin.group(1)), matchin.group(2)

            # the ....: continuation string
            continuation = '   %s:' % ''.join(['.'] * (len(str(lineno)) + 2))
            Nc = len(continuation)
            # input lines can continue on for more than one line, if
            # we have a '\' line continuation char or a function call
            # echo line 'print'.  The input line can only be
            # terminated by the end of the block or an output line, so
            # we parse out the rest of the input line if it is
            # multiline as well as any echo text
            rest = []
            while i < N:
                # look ahead; if the next line is blank, or a comment, or
                # an output line, we're done
                nextline = lines[i]
                matchout = rgxout.match(nextline)
                if matchout or nextline.startswith('#'):
                    break
                elif nextline.startswith(continuation):
                    inputline += '\n' + nextline[Nc:]
                else:
                    rest.append(nextline)
                i += 1

            block.append((INPUT, (decorator, inputline, '\n'.join(rest))))
            continue

        # if it looks like an output line grab all the text to the end
        # of the block
        matchout = rgxout.match(line)
        if matchout:
            lineno, output = int(matchout.group(1)), matchout.group(2)
            if i < N - 1:
                output = '\n'.join([output] + lines[i:])

            block.append((OUTPUT, output))
            break

    return block
|
||||
|
||||
class EmbeddedSphinxShell(object):
|
||||
"""An embedded IPython instance to run inside Sphinx"""
|
||||
|
||||
def __init__(self):
    """Create the captive IPython instance and redirect its output."""
    # Buffer that captures everything the embedded shell writes.
    self.cout = cStringIO.StringIO()

    # Create config object for IPython
    config = Config()
    config.Global.display_banner = False
    config.Global.exec_lines = ['import numpy as np',
                                'from pylab import *'
                                ]
    config.InteractiveShell.autocall = False
    config.InteractiveShell.autoindent = False
    config.InteractiveShell.colors = 'NoColor'

    # create a profile so instance history isn't saved
    tmp_profile_dir = tempfile.mkdtemp(prefix='profile_')
    profname = 'auto_profile_sphinx_build'
    pdir = os.path.join(tmp_profile_dir, profname)
    profile = ProfileDir.create_profile_dir(pdir)

    # Create and initialize ipython, but don't start its mainloop
    IP = InteractiveShell.instance(config=config, profile_dir=profile)
    # io.stdout redirect must be done *after* instantiating InteractiveShell
    io.stdout = self.cout
    io.stderr = self.cout

    # For debugging, so we can see normal output, use this:
    #from IPython.utils.io import Tee
    #io.stdout = Tee(self.cout, channel='stdout') # dbg
    #io.stderr = Tee(self.cout, channel='stderr') # dbg

    # Store a few parts of IPython we'll need.
    self.IP = IP
    self.user_ns = self.IP.user_ns
    self.user_global_ns = self.IP.user_global_ns

    self.input = ''
    self.output = ''

    # Per-directive rendering flags, toggled by the directive options.
    self.is_verbatim = False
    self.is_doctest = False
    self.is_suppress = False

    # on the first call to the savefig decorator, we'll import
    # pyplot as plt so we can make a call to the plt.gcf().savefig
    self._pyplot_imported = False
|
||||
|
||||
def clear_cout(self):
    """Reset the captured-output buffer to empty."""
    self.cout.seek(0)
    self.cout.truncate(0)
|
||||
|
||||
def process_input_line(self, line, store_history=True):
    """Feed *line* to the embedded shell, capturing its stdout into self.cout.

    The splitter buffers continuation lines; the cell is only executed
    once the splitter reports the input is complete.
    """
    saved_stdout = sys.stdout
    splitter = self.IP.input_splitter
    try:
        sys.stdout = self.cout
        splitter.push(line)
        if not splitter.push_accepts_more():
            source_raw = splitter.source_raw_reset()[1]
            self.IP.run_cell(source_raw, store_history=store_history)
    finally:
        sys.stdout = saved_stdout
|
||||
|
||||
def process_image(self, decorator):
    """
    Build an image directive like

      .. image:: somefile.png
         :width 4in

    from an input like

      savefig somefile.png width=4in

    Returns ``(image_file, image_directive)`` where image_file is the
    bare file name and image_directive the full reST text.
    """
    savefig_dir = self.savefig_dir
    source_dir = self.source_dir
    saveargs = decorator.split(' ')
    filename = saveargs[1]
    # insert relative path to image file in source
    outfile = os.path.relpath(os.path.join(savefig_dir, filename),
                              source_dir)

    imagerows = ['.. image:: %s' % outfile]

    # every remaining "key=value" argument becomes a directive option
    for kwarg in saveargs[2:]:
        arg, val = kwarg.split('=')
        imagerows.append('   :%s: %s' % (arg.strip(), val.strip()))

    image_file = os.path.basename(outfile)  # only return file name
    image_directive = '\n'.join(imagerows)
    return image_file, image_directive
|
||||
|
||||
|
||||
# Callbacks for each type of token
|
||||
def process_input(self, data, input_prompt, lineno):
    """Process data block for INPUT token.

    Runs the input through the embedded shell (unless verbatim),
    honouring the @verbatim/@doctest/@suppress/@savefig decorators, and
    returns (ret_lines, input_lines, output, is_doctest, image_file,
    image_directive).
    """
    # ``input`` renamed to ``input_text`` to avoid shadowing the builtin.
    decorator, input_text, rest = data
    image_file = None
    image_directive = None

    is_verbatim = decorator == '@verbatim' or self.is_verbatim
    is_doctest = decorator == '@doctest' or self.is_doctest
    is_suppress = decorator == '@suppress' or self.is_suppress
    is_savefig = decorator is not None and \
                 decorator.startswith('@savefig')

    input_lines = input_text.split('\n')
    if len(input_lines) > 1:
        if input_lines[-1] != "":
            # make sure there's a blank line so the splitter buffer
            # gets reset
            input_lines.append('')

    continuation = '   %s:' % ''.join(['.'] * (len(str(lineno)) + 2))
    Nc = len(continuation)

    if is_savefig:
        image_file, image_directive = self.process_image(decorator)

    ret = []
    is_semicolon = False

    for i, line in enumerate(input_lines):
        if line.endswith(';'):
            is_semicolon = True

        if i == 0:
            # process the first input line
            if is_verbatim:
                self.process_input_line('')
                self.IP.execution_count += 1  # increment it anyway
            else:
                # only submit the line in non-verbatim mode
                self.process_input_line(line, store_history=True)
            formatted_line = '%s %s' % (input_prompt, line)
        else:
            # process a continuation line
            if not is_verbatim:
                self.process_input_line(line, store_history=True)

            formatted_line = '%s %s' % (continuation, line)

        if not is_suppress:
            ret.append(formatted_line)

    if not is_suppress and len(rest.strip()) and is_verbatim:
        # the "rest" is the standard output of the input, which needs
        # to be added in verbatim mode
        ret.append(rest)

    self.cout.seek(0)
    output = self.cout.read()
    if not is_suppress and not is_semicolon:
        ret.append(output)
    elif is_semicolon:  # get spacing right
        ret.append('')

    self.cout.truncate(0)
    return (ret, input_lines, output, is_doctest, image_file,
            image_directive)
|
||||
#print 'OUTPUT', output # dbg
|
||||
|
||||
def process_output(self, data, output_prompt,
|
||||
input_lines, output, is_doctest, image_file):
|
||||
"""Process data block for OUTPUT token."""
|
||||
if is_doctest:
|
||||
submitted = data.strip()
|
||||
found = output
|
||||
if found is not None:
|
||||
found = found.strip()
|
||||
|
||||
# XXX - fperez: in 0.11, 'output' never comes with the prompt
|
||||
# in it, just the actual output text. So I think all this code
|
||||
# can be nuked...
|
||||
|
||||
# the above comment does not appear to be accurate... (minrk)
|
||||
|
||||
ind = found.find(output_prompt)
|
||||
if ind<0:
|
||||
e='output prompt="%s" does not match out line=%s' % \
|
||||
(output_prompt, found)
|
||||
raise RuntimeError(e)
|
||||
found = found[len(output_prompt):].strip()
|
||||
|
||||
if found!=submitted:
|
||||
e = ('doctest failure for input_lines="%s" with '
|
||||
'found_output="%s" and submitted output="%s"' %
|
||||
(input_lines, found, submitted) )
|
||||
raise RuntimeError(e)
|
||||
#print 'doctest PASSED for input_lines="%s" with found_output="%s" and submitted output="%s"'%(input_lines, found, submitted)
|
||||
|
||||
def process_comment(self, data):
|
||||
"""Process data fPblock for COMMENT token."""
|
||||
if not self.is_suppress:
|
||||
return [data]
|
||||
|
||||
def save_image(self, image_file):
|
||||
"""
|
||||
Saves the image file to disk.
|
||||
"""
|
||||
self.ensure_pyplot()
|
||||
command = 'plt.gcf().savefig("%s")'%image_file
|
||||
#print 'SAVEFIG', command # dbg
|
||||
self.process_input_line('bookmark ipy_thisdir', store_history=False)
|
||||
self.process_input_line('cd -b ipy_savedir', store_history=False)
|
||||
self.process_input_line(command, store_history=False)
|
||||
self.process_input_line('cd -b ipy_thisdir', store_history=False)
|
||||
self.process_input_line('bookmark -d ipy_thisdir', store_history=False)
|
||||
self.clear_cout()
|
||||
|
||||
|
||||
def process_block(self, block):
|
||||
"""
|
||||
process block from the block_parser and return a list of processed lines
|
||||
"""
|
||||
ret = []
|
||||
output = None
|
||||
input_lines = None
|
||||
lineno = self.IP.execution_count
|
||||
|
||||
input_prompt = self.promptin%lineno
|
||||
output_prompt = self.promptout%lineno
|
||||
image_file = None
|
||||
image_directive = None
|
||||
|
||||
for token, data in block:
|
||||
if token==COMMENT:
|
||||
out_data = self.process_comment(data)
|
||||
elif token==INPUT:
|
||||
(out_data, input_lines, output, is_doctest, image_file,
|
||||
image_directive) = \
|
||||
self.process_input(data, input_prompt, lineno)
|
||||
elif token==OUTPUT:
|
||||
out_data = \
|
||||
self.process_output(data, output_prompt,
|
||||
input_lines, output, is_doctest,
|
||||
image_file)
|
||||
if out_data:
|
||||
ret.extend(out_data)
|
||||
|
||||
# save the image files
|
||||
if image_file is not None:
|
||||
self.save_image(image_file)
|
||||
|
||||
return ret, image_directive
|
||||
|
||||
def ensure_pyplot(self):
|
||||
if self._pyplot_imported:
|
||||
return
|
||||
self.process_input_line('import matplotlib.pyplot as plt',
|
||||
store_history=False)
|
||||
|
||||
def process_pure_python(self, content):
|
||||
"""
|
||||
content is a list of strings. it is unedited directive conent
|
||||
|
||||
This runs it line by line in the InteractiveShell, prepends
|
||||
prompts as needed capturing stderr and stdout, then returns
|
||||
the content as a list as if it were ipython code
|
||||
"""
|
||||
output = []
|
||||
savefig = False # keep up with this to clear figure
|
||||
multiline = False # to handle line continuation
|
||||
multiline_start = None
|
||||
fmtin = self.promptin
|
||||
|
||||
ct = 0
|
||||
|
||||
for lineno, line in enumerate(content):
|
||||
|
||||
line_stripped = line.strip()
|
||||
if not len(line):
|
||||
output.append(line)
|
||||
continue
|
||||
|
||||
# handle decorators
|
||||
if line_stripped.startswith('@'):
|
||||
output.extend([line])
|
||||
if 'savefig' in line:
|
||||
savefig = True # and need to clear figure
|
||||
continue
|
||||
|
||||
# handle comments
|
||||
if line_stripped.startswith('#'):
|
||||
output.extend([line])
|
||||
continue
|
||||
|
||||
# deal with lines checking for multiline
|
||||
continuation = u' %s:'% ''.join(['.']*(len(str(ct))+2))
|
||||
if not multiline:
|
||||
modified = u"%s %s" % (fmtin % ct, line_stripped)
|
||||
output.append(modified)
|
||||
ct += 1
|
||||
try:
|
||||
ast.parse(line_stripped)
|
||||
output.append(u'')
|
||||
except Exception: # on a multiline
|
||||
multiline = True
|
||||
multiline_start = lineno
|
||||
else: # still on a multiline
|
||||
modified = u'%s %s' % (continuation, line)
|
||||
output.append(modified)
|
||||
try:
|
||||
mod = ast.parse(
|
||||
'\n'.join(content[multiline_start:lineno+1]))
|
||||
if isinstance(mod.body[0], ast.FunctionDef):
|
||||
# check to see if we have the whole function
|
||||
for element in mod.body[0].body:
|
||||
if isinstance(element, ast.Return):
|
||||
multiline = False
|
||||
else:
|
||||
output.append(u'')
|
||||
multiline = False
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if savefig: # clear figure if plotted
|
||||
self.ensure_pyplot()
|
||||
self.process_input_line('plt.clf()', store_history=False)
|
||||
self.clear_cout()
|
||||
savefig = False
|
||||
|
||||
return output
|
||||
|
||||
class IpythonDirective(Directive):
|
||||
|
||||
has_content = True
|
||||
required_arguments = 0
|
||||
optional_arguments = 4 # python, suppress, verbatim, doctest
|
||||
final_argumuent_whitespace = True
|
||||
option_spec = { 'python': directives.unchanged,
|
||||
'suppress' : directives.flag,
|
||||
'verbatim' : directives.flag,
|
||||
'doctest' : directives.flag,
|
||||
}
|
||||
|
||||
shell = EmbeddedSphinxShell()
|
||||
|
||||
def get_config_options(self):
|
||||
# contains sphinx configuration variables
|
||||
config = self.state.document.settings.env.config
|
||||
|
||||
# get config variables to set figure output directory
|
||||
confdir = self.state.document.settings.env.app.confdir
|
||||
savefig_dir = config.ipython_savefig_dir
|
||||
source_dir = os.path.dirname(self.state.document.current_source)
|
||||
if savefig_dir is None:
|
||||
savefig_dir = config.html_static_path
|
||||
if isinstance(savefig_dir, list):
|
||||
savefig_dir = savefig_dir[0] # safe to assume only one path?
|
||||
savefig_dir = os.path.join(confdir, savefig_dir)
|
||||
|
||||
# get regex and prompt stuff
|
||||
rgxin = config.ipython_rgxin
|
||||
rgxout = config.ipython_rgxout
|
||||
promptin = config.ipython_promptin
|
||||
promptout = config.ipython_promptout
|
||||
|
||||
return savefig_dir, source_dir, rgxin, rgxout, promptin, promptout
|
||||
|
||||
def setup(self):
|
||||
# reset the execution count if we haven't processed this doc
|
||||
#NOTE: this may be borked if there are multiple seen_doc tmp files
|
||||
#check time stamp?
|
||||
seen_docs = [i for i in os.listdir(tempfile.tempdir)
|
||||
if i.startswith('seen_doc')]
|
||||
if seen_docs:
|
||||
fname = os.path.join(tempfile.tempdir, seen_docs[0])
|
||||
docs = open(fname).read().split('\n')
|
||||
if not self.state.document.current_source in docs:
|
||||
self.shell.IP.history_manager.reset()
|
||||
self.shell.IP.execution_count = 1
|
||||
else: # haven't processed any docs yet
|
||||
docs = []
|
||||
|
||||
|
||||
# get config values
|
||||
(savefig_dir, source_dir, rgxin,
|
||||
rgxout, promptin, promptout) = self.get_config_options()
|
||||
|
||||
# and attach to shell so we don't have to pass them around
|
||||
self.shell.rgxin = rgxin
|
||||
self.shell.rgxout = rgxout
|
||||
self.shell.promptin = promptin
|
||||
self.shell.promptout = promptout
|
||||
self.shell.savefig_dir = savefig_dir
|
||||
self.shell.source_dir = source_dir
|
||||
|
||||
# setup bookmark for saving figures directory
|
||||
|
||||
self.shell.process_input_line('bookmark ipy_savedir %s'%savefig_dir,
|
||||
store_history=False)
|
||||
self.shell.clear_cout()
|
||||
|
||||
# write the filename to a tempfile because it's been "seen" now
|
||||
if not self.state.document.current_source in docs:
|
||||
fd, fname = tempfile.mkstemp(prefix="seen_doc", text=True)
|
||||
fout = open(fname, 'a')
|
||||
fout.write(self.state.document.current_source+'\n')
|
||||
fout.close()
|
||||
|
||||
return rgxin, rgxout, promptin, promptout
|
||||
|
||||
|
||||
def teardown(self):
|
||||
# delete last bookmark
|
||||
self.shell.process_input_line('bookmark -d ipy_savedir',
|
||||
store_history=False)
|
||||
self.shell.clear_cout()
|
||||
|
||||
def run(self):
|
||||
debug = False
|
||||
|
||||
#TODO, any reason block_parser can't be a method of embeddable shell
|
||||
# then we wouldn't have to carry these around
|
||||
rgxin, rgxout, promptin, promptout = self.setup()
|
||||
|
||||
options = self.options
|
||||
self.shell.is_suppress = 'suppress' in options
|
||||
self.shell.is_doctest = 'doctest' in options
|
||||
self.shell.is_verbatim = 'verbatim' in options
|
||||
|
||||
|
||||
# handle pure python code
|
||||
if 'python' in self.arguments:
|
||||
content = self.content
|
||||
self.content = self.shell.process_pure_python(content)
|
||||
|
||||
parts = '\n'.join(self.content).split('\n\n')
|
||||
|
||||
lines = ['.. code-block:: ipython','']
|
||||
figures = []
|
||||
|
||||
for part in parts:
|
||||
|
||||
block = block_parser(part, rgxin, rgxout, promptin, promptout)
|
||||
|
||||
if len(block):
|
||||
rows, figure = self.shell.process_block(block)
|
||||
for row in rows:
|
||||
lines.extend([' %s'%line for line in row.split('\n')])
|
||||
|
||||
if figure is not None:
|
||||
figures.append(figure)
|
||||
|
||||
#text = '\n'.join(lines)
|
||||
#figs = '\n'.join(figures)
|
||||
|
||||
for figure in figures:
|
||||
lines.append('')
|
||||
lines.extend(figure.split('\n'))
|
||||
lines.append('')
|
||||
|
||||
#print lines
|
||||
if len(lines)>2:
|
||||
if debug:
|
||||
print '\n'.join(lines)
|
||||
else: #NOTE: this raises some errors, what's it for?
|
||||
#print 'INSERTING %d lines'%len(lines)
|
||||
self.state_machine.insert_input(
|
||||
lines, self.state_machine.input_lines.source(0))
|
||||
|
||||
text = '\n'.join(lines)
|
||||
txtnode = nodes.literal_block(text, text)
|
||||
txtnode['language'] = 'ipython'
|
||||
#imgnode = nodes.image(figs)
|
||||
|
||||
# cleanup
|
||||
self.teardown()
|
||||
|
||||
return []#, imgnode]
|
||||
|
||||
# Enable as a proper Sphinx directive
|
||||
def setup(app):
|
||||
setup.app = app
|
||||
|
||||
app.add_directive('ipython', IpythonDirective)
|
||||
app.add_config_value('ipython_savefig_dir', None, True)
|
||||
app.add_config_value('ipython_rgxin',
|
||||
re.compile('In \[(\d+)\]:\s?(.*)\s*'), True)
|
||||
app.add_config_value('ipython_rgxout',
|
||||
re.compile('Out\[(\d+)\]:\s?(.*)\s*'), True)
|
||||
app.add_config_value('ipython_promptin', 'In [%d]:', True)
|
||||
app.add_config_value('ipython_promptout', 'Out[%d]:', True)
|
||||
|
||||
|
||||
# Simple smoke test, needs to be converted to a proper automatic test.
|
||||
def test():
|
||||
|
||||
examples = [
|
||||
r"""
|
||||
In [9]: pwd
|
||||
Out[9]: '/home/jdhunter/py4science/book'
|
||||
|
||||
In [10]: cd bookdata/
|
||||
/home/jdhunter/py4science/book/bookdata
|
||||
|
||||
In [2]: from pylab import *
|
||||
|
||||
In [2]: ion()
|
||||
|
||||
In [3]: im = imread('stinkbug.png')
|
||||
|
||||
@savefig mystinkbug.png width=4in
|
||||
In [4]: imshow(im)
|
||||
Out[4]: <matplotlib.image.AxesImage object at 0x39ea850>
|
||||
|
||||
""",
|
||||
r"""
|
||||
|
||||
In [1]: x = 'hello world'
|
||||
|
||||
# string methods can be
|
||||
# used to alter the string
|
||||
@doctest
|
||||
In [2]: x.upper()
|
||||
Out[2]: 'HELLO WORLD'
|
||||
|
||||
@verbatim
|
||||
In [3]: x.st<TAB>
|
||||
x.startswith x.strip
|
||||
""",
|
||||
r"""
|
||||
|
||||
In [130]: url = 'http://ichart.finance.yahoo.com/table.csv?s=CROX\
|
||||
.....: &d=9&e=22&f=2009&g=d&a=1&br=8&c=2006&ignore=.csv'
|
||||
|
||||
In [131]: print url.split('&')
|
||||
['http://ichart.finance.yahoo.com/table.csv?s=CROX', 'd=9', 'e=22', 'f=2009', 'g=d', 'a=1', 'b=8', 'c=2006', 'ignore=.csv']
|
||||
|
||||
In [60]: import urllib
|
||||
|
||||
""",
|
||||
r"""\
|
||||
|
||||
In [133]: import numpy.random
|
||||
|
||||
@suppress
|
||||
In [134]: numpy.random.seed(2358)
|
||||
|
||||
@doctest
|
||||
In [135]: numpy.random.rand(10,2)
|
||||
Out[135]:
|
||||
array([[ 0.64524308, 0.59943846],
|
||||
[ 0.47102322, 0.8715456 ],
|
||||
[ 0.29370834, 0.74776844],
|
||||
[ 0.99539577, 0.1313423 ],
|
||||
[ 0.16250302, 0.21103583],
|
||||
[ 0.81626524, 0.1312433 ],
|
||||
[ 0.67338089, 0.72302393],
|
||||
[ 0.7566368 , 0.07033696],
|
||||
[ 0.22591016, 0.77731835],
|
||||
[ 0.0072729 , 0.34273127]])
|
||||
|
||||
""",
|
||||
|
||||
r"""
|
||||
In [106]: print x
|
||||
jdh
|
||||
|
||||
In [109]: for i in range(10):
|
||||
.....: print i
|
||||
.....:
|
||||
.....:
|
||||
0
|
||||
1
|
||||
2
|
||||
3
|
||||
4
|
||||
5
|
||||
6
|
||||
7
|
||||
8
|
||||
9
|
||||
""",
|
||||
|
||||
r"""
|
||||
|
||||
In [144]: from pylab import *
|
||||
|
||||
In [145]: ion()
|
||||
|
||||
# use a semicolon to suppress the output
|
||||
@savefig test_hist.png width=4in
|
||||
In [151]: hist(np.random.randn(10000), 100);
|
||||
|
||||
|
||||
@savefig test_plot.png width=4in
|
||||
In [151]: plot(np.random.randn(10000), 'o');
|
||||
""",
|
||||
|
||||
r"""
|
||||
# use a semicolon to suppress the output
|
||||
In [151]: plt.clf()
|
||||
|
||||
@savefig plot_simple.png width=4in
|
||||
In [151]: plot([1,2,3])
|
||||
|
||||
@savefig hist_simple.png width=4in
|
||||
In [151]: hist(np.random.randn(10000), 100);
|
||||
|
||||
""",
|
||||
r"""
|
||||
# update the current fig
|
||||
In [151]: ylabel('number')
|
||||
|
||||
In [152]: title('normal distribution')
|
||||
|
||||
|
||||
@savefig hist_with_text.png
|
||||
In [153]: grid(True)
|
||||
|
||||
""",
|
||||
]
|
||||
# skip local-file depending first example:
|
||||
examples = examples[1:]
|
||||
|
||||
#ipython_directive.DEBUG = True # dbg
|
||||
#options = dict(suppress=True) # dbg
|
||||
options = dict()
|
||||
for example in examples:
|
||||
content = example.split('\n')
|
||||
ipython_directive('debug', arguments=None, options=options,
|
||||
content=content, lineno=0,
|
||||
content_offset=None, block_text=None,
|
||||
state=None, state_machine=None,
|
||||
)
|
||||
|
||||
# Run test suite as a script
|
||||
if __name__=='__main__':
|
||||
if not os.path.isdir('_static'):
|
||||
os.mkdir('_static')
|
||||
test()
|
||||
print 'All OK? Check figures in _static/'
|
||||
120
doc/sphinxext/mathmpl.py
Normal file
120
doc/sphinxext/mathmpl.py
Normal file
|
|
@ -0,0 +1,120 @@
|
|||
from __future__ import print_function
|
||||
import os
|
||||
import sys
|
||||
try:
|
||||
from hashlib import md5
|
||||
except ImportError:
|
||||
from md5 import md5
|
||||
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst import directives
|
||||
import warnings
|
||||
|
||||
from matplotlib import rcParams
|
||||
from matplotlib.mathtext import MathTextParser
|
||||
rcParams['mathtext.fontset'] = 'cm'
|
||||
mathtext_parser = MathTextParser("Bitmap")
|
||||
|
||||
# Define LaTeX math node:
|
||||
class latex_math(nodes.General, nodes.Element):
|
||||
pass
|
||||
|
||||
def fontset_choice(arg):
|
||||
return directives.choice(arg, ['cm', 'stix', 'stixsans'])
|
||||
|
||||
options_spec = {'fontset': fontset_choice}
|
||||
|
||||
def math_role(role, rawtext, text, lineno, inliner,
|
||||
options={}, content=[]):
|
||||
i = rawtext.find('`')
|
||||
latex = rawtext[i+1:-1]
|
||||
node = latex_math(rawtext)
|
||||
node['latex'] = latex
|
||||
node['fontset'] = options.get('fontset', 'cm')
|
||||
return [node], []
|
||||
math_role.options = options_spec
|
||||
|
||||
def math_directive(name, arguments, options, content, lineno,
|
||||
content_offset, block_text, state, state_machine):
|
||||
latex = ''.join(content)
|
||||
node = latex_math(block_text)
|
||||
node['latex'] = latex
|
||||
node['fontset'] = options.get('fontset', 'cm')
|
||||
return [node]
|
||||
|
||||
# This uses mathtext to render the expression
|
||||
def latex2png(latex, filename, fontset='cm'):
|
||||
latex = "$%s$" % latex
|
||||
orig_fontset = rcParams['mathtext.fontset']
|
||||
rcParams['mathtext.fontset'] = fontset
|
||||
if os.path.exists(filename):
|
||||
depth = mathtext_parser.get_depth(latex, dpi=100)
|
||||
else:
|
||||
try:
|
||||
depth = mathtext_parser.to_png(filename, latex, dpi=100)
|
||||
except:
|
||||
warnings.warn("Could not render math expression %s" % latex,
|
||||
Warning)
|
||||
depth = 0
|
||||
rcParams['mathtext.fontset'] = orig_fontset
|
||||
sys.stdout.write("#")
|
||||
sys.stdout.flush()
|
||||
return depth
|
||||
|
||||
# LaTeX to HTML translation stuff:
|
||||
def latex2html(node, source):
|
||||
inline = isinstance(node.parent, nodes.TextElement)
|
||||
latex = node['latex']
|
||||
name = 'math-%s' % md5(latex).hexdigest()[-10:]
|
||||
|
||||
destdir = os.path.join(setup.app.builder.outdir, '_images', 'mathmpl')
|
||||
if not os.path.exists(destdir):
|
||||
os.makedirs(destdir)
|
||||
dest = os.path.join(destdir, '%s.png' % name)
|
||||
path = os.path.join(setup.app.builder.imgpath, 'mathmpl')
|
||||
|
||||
depth = latex2png(latex, dest, node['fontset'])
|
||||
|
||||
if inline:
|
||||
cls = ''
|
||||
else:
|
||||
cls = 'class="center" '
|
||||
if inline and depth != 0:
|
||||
style = 'style="position: relative; bottom: -%dpx"' % (depth + 1)
|
||||
else:
|
||||
style = ''
|
||||
|
||||
return '<img src="%s/%s.png" %s%s/>' % (path, name, cls, style)
|
||||
|
||||
def setup(app):
|
||||
setup.app = app
|
||||
|
||||
app.add_node(latex_math)
|
||||
app.add_role('math', math_role)
|
||||
|
||||
# Add visit/depart methods to HTML-Translator:
|
||||
def visit_latex_math_html(self, node):
|
||||
source = self.document.attributes['source']
|
||||
self.body.append(latex2html(node, source))
|
||||
def depart_latex_math_html(self, node):
|
||||
pass
|
||||
|
||||
# Add visit/depart methods to LaTeX-Translator:
|
||||
def visit_latex_math_latex(self, node):
|
||||
inline = isinstance(node.parent, nodes.TextElement)
|
||||
if inline:
|
||||
self.body.append('$%s$' % node['latex'])
|
||||
else:
|
||||
self.body.extend(['\\begin{equation}',
|
||||
node['latex'],
|
||||
'\\end{equation}'])
|
||||
def depart_latex_math_latex(self, node):
|
||||
pass
|
||||
|
||||
app.add_node(latex_math, html=(visit_latex_math_html,
|
||||
depart_latex_math_html))
|
||||
app.add_node(latex_math, latex=(visit_latex_math_latex,
|
||||
depart_latex_math_latex))
|
||||
app.add_role('math', math_role)
|
||||
app.add_directive('math', math_directive,
|
||||
True, (0, 0, 0), **options_spec)
|
||||
116
doc/sphinxext/numpydoc.py
Normal file
116
doc/sphinxext/numpydoc.py
Normal file
|
|
@ -0,0 +1,116 @@
|
|||
"""
|
||||
========
|
||||
numpydoc
|
||||
========
|
||||
|
||||
Sphinx extension that handles docstrings in the Numpy standard format. [1]
|
||||
|
||||
It will:
|
||||
|
||||
- Convert Parameters etc. sections to field lists.
|
||||
- Convert See Also section to a See also entry.
|
||||
- Renumber references.
|
||||
- Extract the signature from the docstring, if it can't be determined otherwise.
|
||||
|
||||
.. [1] http://projects.scipy.org/scipy/numpy/wiki/CodingStyleGuidelines#docstring-standard
|
||||
|
||||
"""
|
||||
|
||||
import os, re, pydoc
|
||||
from docscrape_sphinx import get_doc_object, SphinxDocString
|
||||
import inspect
|
||||
|
||||
def mangle_docstrings(app, what, name, obj, options, lines,
|
||||
reference_offset=[0]):
|
||||
if what == 'module':
|
||||
# Strip top title
|
||||
title_re = re.compile(r'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
|
||||
re.I|re.S)
|
||||
lines[:] = title_re.sub('', "\n".join(lines)).split("\n")
|
||||
else:
|
||||
doc = get_doc_object(obj, what, "\n".join(lines))
|
||||
lines[:] = str(doc).split("\n")
|
||||
|
||||
if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
|
||||
obj.__name__:
|
||||
if hasattr(obj, '__module__'):
|
||||
v = dict(full_name="%s.%s" % (obj.__module__, obj.__name__))
|
||||
else:
|
||||
v = dict(full_name=obj.__name__)
|
||||
lines += ['', '.. htmlonly::', '']
|
||||
lines += [' %s' % x for x in
|
||||
(app.config.numpydoc_edit_link % v).split("\n")]
|
||||
|
||||
# replace reference numbers so that there are no duplicates
|
||||
references = []
|
||||
for l in lines:
|
||||
l = l.strip()
|
||||
if l.startswith('.. ['):
|
||||
try:
|
||||
references.append(int(l[len('.. ['):l.index(']')]))
|
||||
except ValueError:
|
||||
print "WARNING: invalid reference in %s docstring" % name
|
||||
|
||||
# Start renaming from the biggest number, otherwise we may
|
||||
# overwrite references.
|
||||
references.sort()
|
||||
if references:
|
||||
for i, line in enumerate(lines):
|
||||
for r in references:
|
||||
new_r = reference_offset[0] + r
|
||||
lines[i] = lines[i].replace('[%d]_' % r,
|
||||
'[%d]_' % new_r)
|
||||
lines[i] = lines[i].replace('.. [%d]' % r,
|
||||
'.. [%d]' % new_r)
|
||||
|
||||
reference_offset[0] += len(references)
|
||||
|
||||
def mangle_signature(app, what, name, obj, options, sig, retann):
|
||||
# Do not try to inspect classes that don't define `__init__`
|
||||
if (inspect.isclass(obj) and
|
||||
'initializes x; see ' in pydoc.getdoc(obj.__init__)):
|
||||
return '', ''
|
||||
|
||||
if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
|
||||
if not hasattr(obj, '__doc__'): return
|
||||
|
||||
doc = SphinxDocString(pydoc.getdoc(obj))
|
||||
if doc['Signature']:
|
||||
sig = re.sub("^[^(]*", "", doc['Signature'])
|
||||
return sig, ''
|
||||
|
||||
def initialize(app):
|
||||
try:
|
||||
app.connect('autodoc-process-signature', mangle_signature)
|
||||
except:
|
||||
monkeypatch_sphinx_ext_autodoc()
|
||||
|
||||
def setup(app, get_doc_object_=get_doc_object):
|
||||
global get_doc_object
|
||||
get_doc_object = get_doc_object_
|
||||
|
||||
app.connect('autodoc-process-docstring', mangle_docstrings)
|
||||
app.connect('builder-inited', initialize)
|
||||
app.add_config_value('numpydoc_edit_link', None, True)
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
def monkeypatch_sphinx_ext_autodoc():
|
||||
global _original_format_signature
|
||||
import sphinx.ext.autodoc
|
||||
|
||||
if sphinx.ext.autodoc.format_signature is our_format_signature:
|
||||
return
|
||||
|
||||
print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
|
||||
_original_format_signature = sphinx.ext.autodoc.format_signature
|
||||
sphinx.ext.autodoc.format_signature = our_format_signature
|
||||
|
||||
def our_format_signature(what, obj):
|
||||
r = mangle_signature(None, what, None, obj, None, None, None)
|
||||
if r is not None:
|
||||
return r[0]
|
||||
else:
|
||||
return _original_format_signature(what, obj)
|
||||
64
doc/sphinxext/only_directives.py
Normal file
64
doc/sphinxext/only_directives.py
Normal file
|
|
@ -0,0 +1,64 @@
|
|||
#
|
||||
# A pair of directives for inserting content that will only appear in
|
||||
# either html or latex.
|
||||
#
|
||||
|
||||
from __future__ import print_function
|
||||
from docutils.nodes import Body, Element
|
||||
from docutils.parsers.rst import directives
|
||||
|
||||
class only_base(Body, Element):
|
||||
def dont_traverse(self, *args, **kwargs):
|
||||
return []
|
||||
|
||||
class html_only(only_base):
|
||||
pass
|
||||
|
||||
class latex_only(only_base):
|
||||
pass
|
||||
|
||||
def run(content, node_class, state, content_offset):
|
||||
text = '\n'.join(content)
|
||||
node = node_class(text)
|
||||
state.nested_parse(content, content_offset, node)
|
||||
return [node]
|
||||
|
||||
def html_only_directive(name, arguments, options, content, lineno,
|
||||
content_offset, block_text, state, state_machine):
|
||||
return run(content, html_only, state, content_offset)
|
||||
|
||||
def latex_only_directive(name, arguments, options, content, lineno,
|
||||
content_offset, block_text, state, state_machine):
|
||||
return run(content, latex_only, state, content_offset)
|
||||
|
||||
def builder_inited(app):
|
||||
if app.builder.name == 'html':
|
||||
latex_only.traverse = only_base.dont_traverse
|
||||
else:
|
||||
html_only.traverse = only_base.dont_traverse
|
||||
|
||||
def setup(app):
|
||||
app.add_directive('htmlonly', html_only_directive, True, (0, 0, 0))
|
||||
app.add_directive('latexonly', latex_only_directive, True, (0, 0, 0))
|
||||
app.add_node(html_only)
|
||||
app.add_node(latex_only)
|
||||
|
||||
# This will *really* never see the light of day As it turns out,
|
||||
# this results in "broken" image nodes since they never get
|
||||
# processed, so best not to do this.
|
||||
# app.connect('builder-inited', builder_inited)
|
||||
|
||||
# Add visit/depart methods to HTML-Translator:
|
||||
def visit_perform(self, node):
|
||||
pass
|
||||
def depart_perform(self, node):
|
||||
pass
|
||||
def visit_ignore(self, node):
|
||||
node.children = []
|
||||
def depart_ignore(self, node):
|
||||
node.children = []
|
||||
|
||||
app.add_node(html_only, html=(visit_perform, depart_perform))
|
||||
app.add_node(html_only, latex=(visit_ignore, depart_ignore))
|
||||
app.add_node(latex_only, latex=(visit_perform, depart_perform))
|
||||
app.add_node(latex_only, html=(visit_ignore, depart_ignore))
|
||||
819
doc/sphinxext/plot_directive.py
Normal file
819
doc/sphinxext/plot_directive.py
Normal file
|
|
@ -0,0 +1,819 @@
|
|||
"""
|
||||
A directive for including a matplotlib plot in a Sphinx document.
|
||||
|
||||
By default, in HTML output, `plot` will include a .png file with a
|
||||
link to a high-res .png and .pdf. In LaTeX output, it will include a
|
||||
.pdf.
|
||||
|
||||
The source code for the plot may be included in one of three ways:
|
||||
|
||||
1. **A path to a source file** as the argument to the directive::
|
||||
|
||||
.. plot:: path/to/plot.py
|
||||
|
||||
When a path to a source file is given, the content of the
|
||||
directive may optionally contain a caption for the plot::
|
||||
|
||||
.. plot:: path/to/plot.py
|
||||
|
||||
This is the caption for the plot
|
||||
|
||||
Additionally, one my specify the name of a function to call (with
|
||||
no arguments) immediately after importing the module::
|
||||
|
||||
.. plot:: path/to/plot.py plot_function1
|
||||
|
||||
2. Included as **inline content** to the directive::
|
||||
|
||||
.. plot::
|
||||
|
||||
import matplotlib.pyplot as plt
|
||||
import matplotlib.image as mpimg
|
||||
import numpy as np
|
||||
img = mpimg.imread('_static/stinkbug.png')
|
||||
imgplot = plt.imshow(img)
|
||||
|
||||
3. Using **doctest** syntax::
|
||||
|
||||
.. plot::
|
||||
A plotting example:
|
||||
>>> import matplotlib.pyplot as plt
|
||||
>>> plt.plot([1,2,3], [4,5,6])
|
||||
|
||||
Options
|
||||
-------
|
||||
|
||||
The ``plot`` directive supports the following options:
|
||||
|
||||
format : {'python', 'doctest'}
|
||||
Specify the format of the input
|
||||
|
||||
include-source : bool
|
||||
Whether to display the source code. The default can be changed
|
||||
using the `plot_include_source` variable in conf.py
|
||||
|
||||
encoding : str
|
||||
If this source file is in a non-UTF8 or non-ASCII encoding,
|
||||
the encoding must be specified using the `:encoding:` option.
|
||||
The encoding will not be inferred using the ``-*- coding -*-``
|
||||
metacomment.
|
||||
|
||||
context : bool
|
||||
If provided, the code will be run in the context of all
|
||||
previous plot directives for which the `:context:` option was
|
||||
specified. This only applies to inline code plot directives,
|
||||
not those run from files.
|
||||
|
||||
nofigs : bool
|
||||
If specified, the code block will be run, but no figures will
|
||||
be inserted. This is usually useful with the ``:context:``
|
||||
option.
|
||||
|
||||
Additionally, this directive supports all of the options of the
|
||||
`image` directive, except for `target` (since plot will add its own
|
||||
target). These include `alt`, `height`, `width`, `scale`, `align` and
|
||||
`class`.
|
||||
|
||||
Configuration options
|
||||
---------------------
|
||||
|
||||
The plot directive has the following configuration options:
|
||||
|
||||
plot_include_source
|
||||
Default value for the include-source option
|
||||
|
||||
plot_pre_code
|
||||
Code that should be executed before each plot.
|
||||
|
||||
plot_basedir
|
||||
Base directory, to which ``plot::`` file names are relative
|
||||
to. (If None or empty, file names are relative to the
|
||||
directoly where the file containing the directive is.)
|
||||
|
||||
plot_formats
|
||||
File formats to generate. List of tuples or strings::
|
||||
|
||||
[(suffix, dpi), suffix, ...]
|
||||
|
||||
that determine the file format and the DPI. For entries whose
|
||||
DPI was omitted, sensible defaults are chosen.
|
||||
|
||||
plot_html_show_formats
|
||||
Whether to show links to the files in HTML.
|
||||
|
||||
plot_rcparams
|
||||
A dictionary containing any non-standard rcParams that should
|
||||
be applied before each plot.
|
||||
|
||||
plot_apply_rcparams
|
||||
By default, rcParams are applied when `context` option is not used in
|
||||
a plot directive. This configuration option overrides this behaviour
|
||||
and applies rcParams before each plot.
|
||||
|
||||
plot_working_directory
|
||||
By default, the working directory will be changed to the directory of
|
||||
the example, so the code can get at its data files, if any. Also its
|
||||
path will be added to `sys.path` so it can import any helper modules
|
||||
sitting beside it. This configuration option can be used to specify
|
||||
a central directory (also added to `sys.path`) where data files and
|
||||
helper modules for all code are located.
|
||||
|
||||
plot_template
|
||||
Provide a customized template for preparing resturctured text.
|
||||
|
||||
|
||||
"""
|
||||
from __future__ import print_function
|
||||
|
||||
import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, \
|
||||
traceback, exceptions
|
||||
|
||||
from docutils.parsers.rst import directives
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst.directives.images import Image
|
||||
align = Image.align
|
||||
import sphinx
|
||||
|
||||
sphinx_version = sphinx.__version__.split(".")
|
||||
# The split is necessary for sphinx beta versions where the string is
|
||||
# '6b1'
|
||||
sphinx_version = tuple([int(re.split('[a-z]', x)[0])
|
||||
for x in sphinx_version[:2]])
|
||||
|
||||
try:
|
||||
# Sphinx depends on either Jinja or Jinja2
|
||||
import jinja2
|
||||
def format_template(template, **kw):
|
||||
return jinja2.Template(template).render(**kw)
|
||||
except ImportError:
|
||||
import jinja
|
||||
def format_template(template, **kw):
|
||||
return jinja.from_string(template, **kw)
|
||||
|
||||
import matplotlib
|
||||
import matplotlib.cbook as cbook
|
||||
matplotlib.use('Agg')
|
||||
import matplotlib.pyplot as plt
|
||||
from matplotlib import _pylab_helpers
|
||||
|
||||
__version__ = 2
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Relative pathnames
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# os.path.relpath is new in Python 2.6
try:
    from os.path import relpath
except ImportError:
    # Copied from Python 2.7 so the extension still works on 2.5.
    # Two variants: POSIX (case-sensitive, no drives) and NT (drive
    # letters / UNC shares, case-insensitive comparison).
    if 'posix' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir

            if not path:
                raise ValueError("no path specified")

            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)

            # Work out how much of the filepath is shared by start and path.
            i = len(commonprefix([start_list, path_list]))

            # Climb out of `start` with `..` entries, then descend into path.
            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    elif 'nt' in sys.builtin_module_names:
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir, splitunc

            if not path:
                raise ValueError("no path specified")
            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            if start_list[0].lower() != path_list[0].lower():
                # Different drive / UNC root: no relative path exists.
                unc_path, rest = splitunc(path)
                unc_start, rest = splitunc(start)
                if bool(unc_path) ^ bool(unc_start):
                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                     % (path, start))
                else:
                    raise ValueError("path is on drive %s, start on drive %s"
                                     % (path_list[0], start_list[0]))
            # Work out how much of the filepath is shared by start and path.
            # Comparison is case-insensitive, as Windows paths are.
            for i in range(min(len(start_list), len(path_list))):
                if start_list[i].lower() != path_list[i].lower():
                    break
            else:
                i += 1

            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    else:
        raise RuntimeError("Unsupported platform (no relpath available!)")
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Registration hook
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
def plot_directive(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    # Thin docutils directive entry point: all real work happens in run().
    result = run(arguments, content, options, state_machine, state, lineno)
    return result
# Expose the module docstring (the directive's usage docs) on the directive.
plot_directive.__doc__ = __doc__
|
||||
|
||||
def _option_boolean(arg):
|
||||
if not arg or not arg.strip():
|
||||
# no argument given, assume used as a flag
|
||||
return True
|
||||
elif arg.strip().lower() in ('no', '0', 'false'):
|
||||
return False
|
||||
elif arg.strip().lower() in ('yes', '1', 'true'):
|
||||
return True
|
||||
else:
|
||||
raise ValueError('"%s" unknown boolean' % arg)
|
||||
|
||||
def _option_format(arg):
    """Validate the :format: option: only 'python' or 'doctest' allowed."""
    valid_formats = ('python', 'doctest')
    return directives.choice(arg, valid_formats)
|
||||
|
||||
def _option_align(arg):
    """Validate the :align: option against the values the image
    directive accepts."""
    valid_alignments = ("top", "middle", "bottom", "left", "center", "right")
    return directives.choice(arg, valid_alignments)
|
||||
|
||||
def mark_plot_labels(app, document):
    """
    To make plots referenceable, we need to move the reference from
    the "htmlonly" (or "latexonly") node to the actual figure node
    itself.
    """
    # .items() instead of Python-2-only .iteritems(); the dict itself is
    # not mutated in the loop (only node attributes are), so this is safe
    # and keeps the file importable under Python 3.
    for name, explicit in document.nametypes.items():
        if not explicit:
            # Only explicitly-named targets are candidates for relocation.
            continue
        labelid = document.nameids[name]
        if labelid is None:
            continue
        node = document.ids[labelid]
        if node.tagname in ('html_only', 'latex_only'):
            for n in node:
                if n.tagname == 'figure':
                    # Use the caption text, when present, as the section
                    # name shown by :ref:; fall back to the label itself.
                    sectname = name
                    for c in n:
                        if c.tagname == 'caption':
                            sectname = c.astext()
                            break

                    # Move the label from the wrapper node to the figure.
                    node['ids'].remove(labelid)
                    node['names'].remove(name)
                    n['ids'].append(labelid)
                    n['names'].append(name)
                    document.settings.env.labels[name] = \
                        document.settings.env.docname, labelid, sectname
                    break
|
||||
|
||||
def setup(app):
    """Sphinx extension entry point: register the ``plot`` directive,
    its options, and all ``plot_*`` config values on *app*."""
    # Stash app/config/confdir on the function object so module-level
    # helpers (run_code, run, ...) can reach them without globals.
    setup.app = app
    setup.config = app.config
    setup.confdir = app.confdir

    # Option spec for the directive: image-style presentation options
    # plus the plot-specific ones (include-source, format, context, ...).
    options = {'alt': directives.unchanged,
               'height': directives.length_or_unitless,
               'width': directives.length_or_percentage_or_unitless,
               'scale': directives.nonnegative_int,
               'align': _option_align,
               'class': directives.class_option,
               'include-source': _option_boolean,
               'format': _option_format,
               'context': directives.flag,
               'nofigs': directives.flag,
               'encoding': directives.encoding
               }

    # (0, 2, False): zero required args, up to two optional args
    # (filename and function name), no whitespace in final argument.
    app.add_directive('plot', plot_directive, True, (0, 2, False), **options)
    app.add_config_value('plot_pre_code', None, True)
    app.add_config_value('plot_include_source', False, True)
    app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True)
    app.add_config_value('plot_basedir', None, True)
    app.add_config_value('plot_html_show_formats', True, True)
    app.add_config_value('plot_rcparams', {}, True)
    app.add_config_value('plot_apply_rcparams', False, True)
    app.add_config_value('plot_working_directory', None, True)
    app.add_config_value('plot_template', None, True)

    # Relocate labels onto figure nodes after each doctree is read.
    app.connect('doctree-read', mark_plot_labels)
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Doctest handling
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
def contains_doctest(text):
    """Return True when *text* is doctest input rather than plain Python."""
    try:
        # Anything that compiles as ordinary Python is not doctest-formatted.
        compile(text, '<string>', 'exec')
    except SyntaxError:
        # Not valid plain source: call it a doctest iff a '>>>' prompt
        # starts some line.
        return re.search(r'^\s*>>>', text, re.M) is not None
    return False
|
||||
|
||||
def unescape_doctest(text):
    """
    Extract runnable code from *text*, which holds either plain Python
    or a doctest transcript.  Prompt lines become statements; expected
    output lines are preserved as comments; blank lines pass through.
    """
    if not contains_doctest(text):
        return text

    prompt = re.compile(r'^\s*(>>>|\.\.\.) (.*)$')
    pieces = []
    for line in text.split("\n"):
        match = prompt.match(line)
        if match:
            # Keep the statement that followed the doctest prompt.
            pieces.append(match.group(2) + "\n")
        elif line.strip():
            # Expected output survives as a comment, keeping line context.
            pieces.append("# " + line.strip() + "\n")
        else:
            pieces.append("\n")
    return "".join(pieces)
|
||||
|
||||
def split_code_at_show(text):
    """Split *text* into pieces, breaking after each ``plt.show()`` line."""
    is_doctest = contains_doctest(text)
    # The sentinel line depends on whether the input is doctest-formatted.
    marker = '>>> plt.show()' if is_doctest else 'plt.show()'

    parts = []
    current = []
    for line in text.split("\n"):
        current.append(line)
        if line.strip() == marker:
            # Close the piece, keeping the show() line inside it.
            parts.append("\n".join(current))
            current = []
    # A trailing piece is kept only if it contains something non-blank.
    remainder = "\n".join(current)
    if remainder.strip():
        parts.append(remainder)
    return parts
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Template
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
|
||||
TEMPLATE = """
|
||||
{{ source_code }}
|
||||
|
||||
{{ only_html }}
|
||||
|
||||
{% if source_link or (html_show_formats and not multi_image) %}
|
||||
(
|
||||
{%- if source_link -%}
|
||||
`Source code <{{ source_link }}>`__
|
||||
{%- endif -%}
|
||||
{%- if html_show_formats and not multi_image -%}
|
||||
{%- for img in images -%}
|
||||
{%- for fmt in img.formats -%}
|
||||
{%- if source_link or not loop.first -%}, {% endif -%}
|
||||
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
|
||||
{%- endfor -%}
|
||||
{%- endfor -%}
|
||||
{%- endif -%}
|
||||
)
|
||||
{% endif %}
|
||||
|
||||
{% for img in images %}
|
||||
.. figure:: {{ build_dir }}/{{ img.basename }}.png
|
||||
{%- for option in options %}
|
||||
{{ option }}
|
||||
{% endfor %}
|
||||
|
||||
{% if html_show_formats and multi_image -%}
|
||||
(
|
||||
{%- for fmt in img.formats -%}
|
||||
{%- if not loop.first -%}, {% endif -%}
|
||||
`{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__
|
||||
{%- endfor -%}
|
||||
)
|
||||
{%- endif -%}
|
||||
|
||||
{{ caption }}
|
||||
{% endfor %}
|
||||
|
||||
{{ only_latex }}
|
||||
|
||||
{% for img in images %}
|
||||
.. image:: {{ build_dir }}/{{ img.basename }}.pdf
|
||||
{% endfor %}
|
||||
|
||||
{{ only_texinfo }}
|
||||
|
||||
{% for img in images %}
|
||||
.. image:: {{ build_dir }}/{{ img.basename }}.png
|
||||
{%- for option in options %}
|
||||
{{ option }}
|
||||
{% endfor %}
|
||||
|
||||
{% endfor %}
|
||||
|
||||
"""
|
||||
|
||||
# Fallback reST emitted when executing the plot code raised: link the
# source and note the failure instead of embedding images.
exception_template = """
.. htmlonly::

   [`source code <%(linkdir)s/%(basename)s.py>`__]

Exception occurred rendering plot.

"""

# the context of the plot for all directives specified with the
# :context: option
plot_context = dict()
|
||||
|
||||
class ImageFile(object):
    """One output image: a base name, an output directory, and the list
    of formats that have been rendered for it so far."""

    def __init__(self, basename, dirname):
        self.basename = basename
        self.dirname = dirname
        # Extensions ('png', 'pdf', ...) appended as files are produced.
        self.formats = []

    def filename(self, format):
        """Full path of this image rendered in *format*."""
        name = "%s.%s" % (self.basename, format)
        return os.path.join(self.dirname, name)

    def filenames(self):
        """Full paths for every format rendered so far."""
        paths = []
        for ext in self.formats:
            paths.append(self.filename(ext))
        return paths
|
||||
|
||||
def out_of_date(original, derived):
    """
    Return True if *derived* is missing or older than *original*;
    both arguments are full file paths.
    """
    if not os.path.exists(derived):
        return True
    if not os.path.exists(original):
        # No original to compare against: treat the derived file as current.
        return False
    return os.stat(derived).st_mtime < os.stat(original).st_mtime
|
||||
|
||||
class PlotError(RuntimeError):
    """Raised when executing a plot script or saving its figures fails;
    the message carries the formatted traceback."""
    pass
|
||||
|
||||
def run_code(code, code_path, ns=None, function_name=None):
    """
    Execute *code* (a string) in namespace *ns*, then call
    *function_name* from that namespace if it is not None.

    The working directory, sys.path, sys.argv and sys.stdout are saved,
    adjusted for the duration of the run, and restored afterwards.
    Returns the namespace the code ran in; raises PlotError (carrying
    the formatted traceback) if the code fails.
    """
    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            # BUGFIX: the original message concatenated to "option inSphinx".
            raise OSError(str(err) + '\n`plot_working_directory` option in '
                          'Sphinx configuration file must be a valid '
                          'directory path')
        except TypeError as err:
            raise TypeError(str(err) + '\n`plot_working_directory` option in '
                            'Sphinx configuration file must be a string or '
                            'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Redirect stdout so print output from the example is swallowed.
    stdout = sys.stdout
    sys.stdout = cStringIO.StringIO()

    # Reset sys.argv so the example sees itself as the script being run.
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                # Fresh namespace: seed it with the configured (or default)
                # pre-code.  exec() call form works on Python 2.6+ and 3.
                if setup.config.plot_pre_code is None:
                    exec("import numpy as np\n"
                         "from matplotlib import pyplot as plt\n", ns)
                else:
                    exec(setup.config.plot_pre_code, ns)
            if "__main__" in code:
                # Let `if __name__ == '__main__':` guards in the example fire.
                ns['__name__'] = '__main__'
            exec(code, ns)
            if function_name is not None:
                exec(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            # Wrap any failure (including sys.exit) with the full traceback.
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
|
||||
|
||||
def clear_state(plot_rcparams):
    # Start each (non-context) plot from a clean slate: close all open
    # figures, restore matplotlib defaults, then apply configured rcParams.
    plt.close('all')
    matplotlib.rc_file_defaults()
    matplotlib.rcParams.update(plot_rcparams)
|
||||
|
||||
def render_figures(code, code_path, output_dir, output_base, context,
                   function_name, config):
    """
    Run a pyplot script and save the low and high res PNGs and a PDF
    in *output_dir*.

    Images are saved under *output_dir* with file names derived from
    *output_base*.  Returns a list of (code_piece, [ImageFile, ...])
    pairs; raises PlotError if the script or a savefig call fails.
    """
    # -- Parse format list
    default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 200}
    formats = []
    plot_formats = config.plot_formats
    # NOTE(review): `unicode` is Python-2-only, and eval() here runs text
    # taken from conf.py -- already-executable trusted config, so no new
    # trust boundary, but both need attention for a Python 3 port.
    if isinstance(plot_formats, (str, unicode)):
        plot_formats = eval(plot_formats)
    for fmt in plot_formats:
        if isinstance(fmt, str):
            formats.append((fmt, default_dpi.get(fmt, 80)))
        elif type(fmt) in (tuple, list) and len(fmt) == 2:
            formats.append((str(fmt[0]), int(fmt[1])))
        else:
            raise PlotError('invalid image format "%r" in plot_formats' % fmt)

    # -- Try to determine if all images already exist

    code_pieces = split_code_at_show(code)

    # Look for single-figure output files first
    all_exists = True
    img = ImageFile(output_base, output_dir)
    for format, dpi in formats:
        if out_of_date(code_path, img.filename(format)):
            all_exists = False
            break
        img.formats.append(format)

    if all_exists:
        return [(code, [img])]

    # Then look for multi-figure output files
    results = []
    all_exists = True
    for i, code_piece in enumerate(code_pieces):
        images = []
        # range() instead of Python-2-only xrange(); 1000 is an arbitrary
        # cap on figures per code piece.
        for j in range(1000):
            if len(code_pieces) > 1:
                img = ImageFile('%s_%02d_%02d' % (output_base, i, j),
                                output_dir)
            else:
                img = ImageFile('%s_%02d' % (output_base, j), output_dir)
            for format, dpi in formats:
                if out_of_date(code_path, img.filename(format)):
                    all_exists = False
                    break
                img.formats.append(format)

            # assume that if we have one, we have them all
            if not all_exists:
                all_exists = (j > 0)
                break
            images.append(img)
        if not all_exists:
            break
        results.append((code_piece, images))

    if all_exists:
        return results

    # We didn't find the files, so build them

    results = []
    if context:
        # All :context: plots in a document share one persistent namespace.
        ns = plot_context
    else:
        ns = {}

    for i, code_piece in enumerate(code_pieces):
        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams)
        run_code(code_piece, code_path, ns, function_name)

        images = []
        fig_managers = _pylab_helpers.Gcf.get_all_fig_managers()
        for j, figman in enumerate(fig_managers):
            # Pick a file name that is unique across pieces and figures,
            # but keep the plain base name for the common 1x1 case.
            if len(fig_managers) == 1 and len(code_pieces) == 1:
                img = ImageFile(output_base, output_dir)
            elif len(code_pieces) == 1:
                img = ImageFile("%s_%02d" % (output_base, j), output_dir)
            else:
                img = ImageFile("%s_%02d_%02d" % (output_base, i, j),
                                output_dir)
            images.append(img)
            for format, dpi in formats:
                try:
                    figman.canvas.figure.savefig(img.filename(format), dpi=dpi)
                except Exception:
                    # `except Exception, err:` was Python-2-only syntax and
                    # the binding was unused; format_exc() has the details.
                    raise PlotError(traceback.format_exc())
                img.formats.append(format)

        results.append((code_piece, images))

        if not context or config.plot_apply_rcparams:
            clear_state(config.plot_rcparams)

    return results
|
||||
|
||||
def run(arguments, content, options, state_machine, state, lineno):
    """
    Body of the plot directive: locate or receive the code, render the
    figures (reusing cached images when up to date), emit the generated
    reST via *state_machine*, and copy images/scripts into the builder's
    output tree.  Returns a list of docutils system messages (errors).
    """
    # The user may provide a filename *or* Python code content, but not both
    if arguments and content:
        raise RuntimeError("plot:: directive can't have both args and content")

    document = state_machine.document
    config = document.settings.env.config
    # `in` instead of the deprecated dict.has_key() (removed in Python 3).
    nofigs = 'nofigs' in options

    options.setdefault('include-source', config.plot_include_source)
    context = 'context' in options

    rst_file = document.attributes['source']
    rst_dir = os.path.dirname(rst_file)

    if len(arguments):
        if not config.plot_basedir:
            source_file_name = os.path.join(setup.app.builder.srcdir,
                                            directives.uri(arguments[0]))
        else:
            source_file_name = os.path.join(setup.confdir, config.plot_basedir,
                                            directives.uri(arguments[0]))

        # If there is content, it will be passed as a caption.
        caption = '\n'.join(content)

        # If the optional function name is provided, use it
        if len(arguments) == 2:
            function_name = arguments[1]
        else:
            function_name = None

        with open(source_file_name, 'r') as fd:
            code = fd.read()
        output_base = os.path.basename(source_file_name)
    else:
        # Inline code: derive a unique output name from a per-document counter.
        source_file_name = rst_file
        code = textwrap.dedent("\n".join(map(str, content)))
        counter = document.attributes.get('_plot_counter', 0) + 1
        document.attributes['_plot_counter'] = counter
        base, ext = os.path.splitext(os.path.basename(source_file_name))
        output_base = '%s-%d.py' % (base, counter)
        function_name = None
        caption = ''

    base, source_ext = os.path.splitext(output_base)
    if source_ext in ('.py', '.rst', '.txt'):
        output_base = base
    else:
        source_ext = ''

    # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames
    output_base = output_base.replace('.', '-')

    # is it in doctest format?
    is_doctest = contains_doctest(code)
    if 'format' in options:
        if options['format'] == 'python':
            is_doctest = False
        else:
            is_doctest = True

    # determine output directory name fragment
    source_rel_name = relpath(source_file_name, setup.confdir)
    source_rel_dir = os.path.dirname(source_rel_name)
    while source_rel_dir.startswith(os.path.sep):
        source_rel_dir = source_rel_dir[1:]

    # build_dir: where to place output files (temporarily)
    build_dir = os.path.join(os.path.dirname(setup.app.doctreedir),
                             'plot_directive',
                             source_rel_dir)
    # get rid of .. in paths, also changes pathsep
    # see note in Python docs for warning about symbolic links on Windows.
    # need to compare source and dest paths at end
    build_dir = os.path.normpath(build_dir)

    if not os.path.exists(build_dir):
        os.makedirs(build_dir)

    # output_dir: final location in the builder's directory
    dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir,
                                            source_rel_dir))
    if not os.path.exists(dest_dir):
        os.makedirs(dest_dir)

    # how to link to files from the RST file
    dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir),
                                 source_rel_dir).replace(os.path.sep, '/')
    build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/')
    source_link = dest_dir_link + '/' + output_base + source_ext

    # make figures
    try:
        results = render_figures(code, source_file_name, build_dir, output_base,
                                 context, function_name, config)
        errors = []
    except PlotError as err:
        # Report the failure as a docutils system message, but still emit
        # the source code block below.
        reporter = state.memo.reporter
        sm = reporter.system_message(
            2, "Exception occurred in plotting %s\n from %s:\n%s" % (output_base,
                                            source_file_name, err),
            line=lineno)
        results = [(code, [])]
        errors = [sm]

    # Properly indent the caption
    caption = '\n'.join(' ' + line.strip()
                        for line in caption.split('\n'))

    # generate output restructuredtext
    total_lines = []
    for j, (code_piece, images) in enumerate(results):
        if options['include-source']:
            if is_doctest:
                lines = ['']
                lines += [row.rstrip() for row in code_piece.split('\n')]
            else:
                lines = ['.. code-block:: python', '']
                lines += [' %s' % row.rstrip()
                          for row in code_piece.split('\n')]
            source_code = "\n".join(lines)
        else:
            source_code = ""

        if nofigs:
            images = []

        opts = [':%s: %s' % (key, val) for key, val in options.items()
                if key in ('alt', 'height', 'width', 'scale', 'align', 'class')]

        only_html = ".. only:: html"
        only_latex = ".. only:: latex"
        only_texinfo = ".. only:: texinfo"

        # Only the first piece links back to the source file.
        if j == 0:
            src_link = source_link
        else:
            src_link = None

        result = format_template(
            config.plot_template or TEMPLATE,
            dest_dir=dest_dir_link,
            build_dir=build_dir_link,
            source_link=src_link,
            multi_image=len(images) > 1,
            only_html=only_html,
            only_latex=only_latex,
            only_texinfo=only_texinfo,
            options=opts,
            images=images,
            source_code=source_code,
            html_show_formats=config.plot_html_show_formats,
            caption=caption)

        total_lines.extend(result.split("\n"))
        # Note: extending with the one-character string "\n" appends a
        # single blank-line element between pieces.
        total_lines.extend("\n")

    if total_lines:
        state_machine.insert_input(total_lines, source=source_file_name)

    # copy image files to builder's output directory, if necessary
    if not os.path.exists(dest_dir):
        cbook.mkdirs(dest_dir)

    for code_piece, images in results:
        for img in images:
            for fn in img.filenames():
                destimg = os.path.join(dest_dir, os.path.basename(fn))
                if fn != destimg:
                    shutil.copyfile(fn, destimg)

    # copy script (if necessary)
    target_name = os.path.join(dest_dir, output_base + source_ext)
    with open(target_name, 'w') as f:
        if source_file_name == rst_file:
            # Inline code may be doctest-formatted; store runnable Python.
            code_escaped = unescape_doctest(code)
        else:
            code_escaped = code
        f.write(code_escaped)

    return errors
|
||||
Loading…
Add table
Add a link
Reference in a new issue