pytorch/docs/source/scripts/build_activation_images.py
Vishwak Srinivasan 32b3841553 [ready] General documentation improvements (#5450)
* Improve documentation
1. Add formula for erf, erfinv
2. Make exp, expm1 similar to log, log1p
3. Symbol change in ge, le, ne, isnan

* Fix minor nit in the docstring

* More doc improvements
1. Added some formulae
2. Complete scanning till "Other Operations" in Tensor docs

* Add more changes
1. Modify all torch.Tensor references wherever required

* Fix Conv docs
1. Fix minor nits in the references for LAPACK routines

* Improve Pooling docs
1. Fix lint error

* Improve docs for RNN, Normalization and Padding
1. Fix flake8 error for pooling

* Final fixes for torch.nn.* docs.
1. Improve Loss Function documentation
2. Improve Vision Layers documentation

* Fix lint error

* Improve docstrings in torch.nn.init

* Fix lint error

* Fix minor error in torch.nn.init.sparse

* Fix Activation and Utils Docs
1. Fix Math Errors
2. Add explicit clean to Makefile in docs to prevent running the graph generation script while cleaning
3. Fix utils docs

* Make PYCMD a Makefile argument, clear up prints in the build_activation_images.py

* Fix batch norm doc error
2018-03-08 13:21:12 -05:00


"""
This script will generate input-out plots for all of the activation
functions. These are for use in the documentation, and potentially in
online tutorials.
"""
import os.path
import torch.nn.modules.activation
import torch.autograd
import matplotlib
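# Select a non-interactive backend so the script can run headless (for
# example on a CI machine); the backend must be chosen before pylab is
# imported.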
matplotlib.use('Agg')
import pylab
# Create a directory for the images, if it doesn't exist
DOCS_PATH = os.path.realpath(os.path.join(__file__, "../../.."))
ACTIVATION_IMAGE_PATH = os.path.join(
    DOCS_PATH,
    "source/_static/img/activation/"
)
print(ACTIVATION_IMAGE_PATH)
if not os.path.exists(ACTIVATION_IMAGE_PATH):
    os.mkdir(ACTIVATION_IMAGE_PATH)
# In a refactor, these ought to go into their own module or entry
# points so we can generate this list programmatically (a sketch of
# that follows the list below)
functions = [
    'ELU',
    'Hardshrink',
    'Hardtanh',
    'LeakyReLU',  # Perhaps we should add text explaining slight slope?
    'LogSigmoid',
    'PReLU',
    'ReLU',
    'ReLU6',
    'RReLU',
    'SELU',
    'Sigmoid',
    'Softplus',
    'Softshrink',
    'Softsign',
    'Tanh',
    'Tanhshrink',
    # 'Threshold' Omit, pending cleanup. See PR5457
]
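
# A possible shape for that refactor (a sketch, not used below): discover
# the activation classes by introspecting the module rather than
# maintaining the list by hand. `_discover_activations` is a hypothetical
# helper, not an existing torch API; note that some classes (e.g.
# Threshold) require constructor arguments, which is one reason for the
# curated list above.
def _discover_activations():
    import inspect
    return sorted(
        name for name, obj in vars(torch.nn.modules.activation).items()
        if inspect.isclass(obj)
        and issubclass(obj, torch.nn.Module)
        and obj is not torch.nn.Module
        and not name.startswith('_')
    )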

def plot_function(function, **args):
    """
    Plot a function on the current plot. The additional arguments may
    be used to specify color, alpha, etc.
    """
    xrange = torch.arange(-7.0, 7.0, 0.01)  # We need to go beyond 6 for ReLU6
    pylab.plot(
        xrange.numpy(),
        function(torch.autograd.Variable(xrange)).data.numpy(),
        **args
    )
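
# Note: wrapping the input in torch.autograd.Variable (and reading
# `.data`) reflects the pre-0.4 torch API this script targets; on
# torch >= 0.4, Variable is merged into Tensor and
# `function(xrange).numpy()` would suffice.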

# Step through all the functions
for function_name in functions:
    plot_path = os.path.join(ACTIVATION_IMAGE_PATH, function_name + ".png")
    if not os.path.exists(plot_path):
        function = torch.nn.modules.activation.__dict__[function_name]()
        # Start a new plot
        pylab.clf()
        # Add an overlay on the background of faint traces of all the other
        # functions. This is nice when flipping through images
        for background_function in functions:
            plot_function(
                torch.nn.modules.activation.__dict__[background_function](),
                alpha=0.03, color='k'
            )
        # Plot the current function
        plot_function(function)
        # The titles are a little redundant, given context?
        pylab.title(function_name + " activation function")
        pylab.xlabel("Input")
        pylab.ylabel("Output")
        pylab.xlim([-7, 7])
        pylab.ylim([-7, 7])
        # And save it
        pylab.savefig(plot_path)
        print('Saved activation image for {} at {}'.format(function_name, plot_path))