Summary:
The current optimizer code in c2/python has the following issues:
(1) the optimizers in sgd.py cannot be configured per param-blob;
(2) sgd.py is a poor file name; optimizer.py is a better one;
(3) layer_model_helper.py has a separate set of optimizer code (which does support per-param-blob optimizers).

This diff does the following:
(1) creates optimizer objects so that per-param-blob optimizers can be configured, while staying compatible with the existing optimizer code;
(2) makes the new optimizer code much more modular;
(3) moves the optimizer code to a file with a better name (optimizer.py);
(4) replaces the optimizer imports in the existing code.

Will do in next diffs:
(1) optimizers with structured parameters for dper2;
(2) get rid of the optimizer code in layer_model_helper.py.

Reviewed By: salexspb

Differential Revision: D4609013

fbshipit-source-id: 2e2d6dfa8685d10498f89069157453d9feca3f27
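The build_* helpers introduced here configure one optimizer for every trainable param-blob in a model in a single call. The sketch below shows that usage in context and is illustrative only: the ModelHelper/brew network, blob names, shapes, and workspace calls are assumptions chosen to make the example self-contained, and only build_sgd with its base_learning_rate argument is taken from the test file below.

from caffe2.python import brew, model_helper, workspace
from caffe2.python.optimizer import build_sgd
import numpy as np

# Toy inputs; names and shapes are arbitrary.
workspace.FeedBlob("data", np.random.rand(16, 10).astype(np.float32))
workspace.FeedBlob("label", np.random.randint(0, 2, size=16).astype(np.int32))

# A tiny classifier, just so the model has param-blobs with gradients.
model = model_helper.ModelHelper(name="sgd_example")
fc = brew.fc(model, "data", "fc", dim_in=10, dim_out=2)
softmax, loss = model.net.SoftmaxWithLoss([fc, "label"], ["softmax", "loss"])
model.AddGradientOperators([loss])

# One call attaches SGD update operators for every param-blob that has a
# gradient; this is the entry point the tests below exercise.
build_sgd(model, base_learning_rate=0.1)

workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.RunNet(model.net.Proto().name)

The per-param-blob configuration that this diff adds goes through the optimizer objects these build_* functions wrap; that interface is not shown here because its exact call signature is not visible in this file.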
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from caffe2.python.optimizer import build_sgd, build_ftrl, build_adagrad, build_adam
from caffe2.python.optimizer_test_util import TestBase
from caffe2.python.test_util import TestCase


class TestSgd(TestBase, TestCase):
    def build_optimizer(self, model):
        build_sgd(model, base_learning_rate=0.1)


class TestFtrl(TestBase, TestCase):
    def build_optimizer(self, model):
        build_ftrl(
            model, engine=None, alpha=1.0, beta=0.1, lambda1=0.0, lambda2=0.0)


class TestAdagrad(TestBase, TestCase):
    def build_optimizer(self, model):
        build_adagrad(model, base_learning_rate=1.0)


class TestAdam(TestBase, TestCase):
    def build_optimizer(self, model):
        build_adam(model, base_learning_rate=0.1)
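The test module stays this small because TestBase (imported from caffe2.python.optimizer_test_util) evidently carries the shared model-building and assertion logic, so each optimizer only needs a subclass that overrides build_optimizer. A hypothetical sketch of extending the suite in the same style, assuming a build_rmsprop helper with a build_sgd-like signature (neither the helper nor the class below is part of this diff):

class TestRmsProp(TestBase, TestCase):
    def build_optimizer(self, model):
        # Hypothetical: build_rmsprop and its base_learning_rate argument are
        # assumed to mirror build_sgd; they are not defined in this diff.
        build_rmsprop(model, base_learning_rate=0.1)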