Here's the command I used to invoke autopep8 (in parallel!):
git ls-files | grep '\.py$' | xargs -n1 -P`nproc` autopep8 -i
Several rules are ignored in setup.cfg. The goal is to let autopep8
handle everything it can handle safely, and to disable any rules
that are tricky or controversial to address. We may want to come back
and re-enable some of these rules later, but I'm trying to keep this
patch as safe as possible.
Also configures flake8 to match pep8's behavior.
Also configures TravisCI to check the whole project for lint.
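For reference, a sketch of what such a setup.cfg might look like; the section names and rule codes below are illustrative placeholders, not the actual list from this patch:

[pep8]
max-line-length = 120
ignore = E402,E721

[flake8]
max-line-length = 120
ignore = E402,E721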
64 lines
1.9 KiB
Python
import math

import torch
from torch.nn.parameter import Parameter

from .module import Module

class Linear(Module):
    r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`

    Args:
        in_features: size of each input sample
        out_features: size of each output sample
        bias: If set to False, the layer will not learn an additive bias. Default: True

    Shape:
        - Input: :math:`(N, in\_features)`
        - Output: :math:`(N, out\_features)`

    Attributes:
        weight: the learnable weights of the module of shape (out_features x in_features)
        bias: the learnable bias of the module of shape (out_features)

    Examples::

        >>> m = nn.Linear(20, 30)
        >>> input = autograd.Variable(torch.randn(128, 20))
        >>> output = m(input)
        >>> print(output.size())
    """
    def __init__(self, in_features, out_features, bias=True):
        super(Linear, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # Weight is stored as (out_features, in_features); forward applies its transpose.
        self.weight = Parameter(torch.Tensor(out_features, in_features))
        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            # Registering None keeps the 'bias' attribute defined while marking it absent.
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Fan-in scaling: weight.size(1) is in_features, so entries are drawn from
        # U(-1/sqrt(in_features), 1/sqrt(in_features)); e.g. in_features=20 gives ~0.2236.
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, input):
        # The backend supplies the autograd Function; call it with or without bias.
        if self.bias is None:
            return self._backend.Linear()(input, self.weight)
        else:
            return self._backend.Linear()(input, self.weight, self.bias)
    def __repr__(self):
        return self.__class__.__name__ + ' (' \
            + str(self.in_features) + ' -> ' \
            + str(self.out_features) + ')'

# TODO: Bilinear
# TODO: PartialLinear - maybe in sparse?