Remove old code that is unused in test/ (#66331)

Summary:
.

Pull Request resolved: https://github.com/pytorch/pytorch/pull/66331

Reviewed By: gchanan

Differential Revision: D31533549

Pulled By: albanD

fbshipit-source-id: 5addd11edc4199a88f10f0ff236be59ec2289903
Commit: d3b29afbb6 (parent 4775419850)
Author: Alban Desmaison, 2021-10-11 08:41:04 -07:00, committed by Facebook GitHub Bot
4 changed files with 0 additions and 229 deletions

Deleted file: driver shell script (name not shown)

@@ -1,14 +0,0 @@
th test.lua > lua.out
python3 test.py > python.out

diff lua.out python.out >/dev/null 2>&1
RESULT=$?

if [[ RESULT -eq 0 ]]; then
    echo "PASS"
else
    echo "FAIL"
    echo "Press ENTER to open vimdiff"
    read
    vimdiff lua.out python.out
fi

Deleted file: test.lua

@@ -1,32 +0,0 @@
local cjson = require 'cjson'
require 'optim'

function rosenbrock(t)
    x, y = t[1], t[2]
    return (1 - x) ^ 2 + 100 * (y - x^2)^2
end

function drosenbrock(t)
    x, y = t[1], t[2]
    return torch.DoubleTensor({-400 * x * (y - x^2) - 2 * (1 - x), 200 * (y - x^2)})
end

local fd = io.open('tests.json', 'r')
local tests = cjson.decode(fd:read('*a'))
fd:close()

for i, test in ipairs(tests) do
    print(test.algorithm)
    algorithm = optim[test.algorithm]
    for i, config in ipairs(test.config) do
        print('================================================================================')
        params = torch.DoubleTensor({1.5, 1.5})
        for i = 1, 100 do
            function closure(x)
                return rosenbrock(x), drosenbrock(x)
            end
            algorithm(closure, params, config)
            print(string.format('%.8f\t%.8f', params[1], params[2]))
        end
    end
end

Deleted file: test.py

@@ -1,41 +0,0 @@
import json

import torch
import torch.legacy.optim as optim


def rosenbrock(tensor):
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2


def drosenbrock(tensor):
    x, y = tensor
    return torch.DoubleTensor((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))


algorithms = {
    'adadelta': optim.adadelta,
    'adagrad': optim.adagrad,
    'adam': optim.adam,
    'adamw': optim.adamw,
    'adamax': optim.adamax,
    'asgd': optim.asgd,
    'cg': optim.cg,
    'nag': optim.nag,
    'rmsprop': optim.rmsprop,
    'rprop': optim.rprop,
    'sgd': optim.sgd,
    'lbfgs': optim.lbfgs,
}

with open('tests.json', 'r') as f:
    tests = json.loads(f.read())

for test in tests:
    print(test['algorithm'] + '\t')
    algorithm = algorithms[test['algorithm']]
    for config in test['config']:
        print('================================================================================\t')
        params = torch.DoubleTensor((1.5, 1.5))
        for i in range(100):
            algorithm(lambda x: (rosenbrock(x), drosenbrock(x)), params, config)
            print('{:.8f}\t{:.8f}\t'.format(params[0], params[1]))
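
For context, a minimal sketch (not part of the removed files, written against the current torch API rather than torch.legacy) checking that the analytic drosenbrock above really is the gradient of rosenbrock, compared against torch.autograd; torch.stack stands in for torch.DoubleTensor so the result has the same shape and dtype as params.grad:

import torch

def rosenbrock(tensor):
    x, y = tensor
    return (1 - x) ** 2 + 100 * (y - x ** 2) ** 2

def drosenbrock(tensor):
    x, y = tensor
    # analytic gradient: (d/dx, d/dy) of the Rosenbrock function
    return torch.stack((-400 * x * (y - x ** 2) - 2 * (1 - x), 200 * (y - x ** 2)))

# same starting point the removed scripts use
params = torch.tensor([1.5, 1.5], dtype=torch.float64, requires_grad=True)
rosenbrock(params).backward()
assert torch.allclose(params.grad, drosenbrock(params.detach()))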

Deleted file: tests.json

@@ -1,142 +0,0 @@
[
{
"algorithm": "adadelta",
"config": [
{},
{"rho": 0.95},
{"rho": 0.95, "eps": 1e-3},
{"weightDecay": 0.2}
]
},
{
"algorithm": "adagrad",
"config": [
{}
]
},
{
"algorithm": "adam",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "beta1": 0.92},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96, "epsilon": 1e-3},
{"learningRate": 1e-4, "weightDecay": 0.1}
]
},
{
"algorithm": "radam",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "beta1": 0.92},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96, "epsilon": 1e-3},
{"learningRate": 1e-4, "weightDecay": 0.1}
]
},
{
"algorithm": "adamw",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "beta1": 0.92},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96, "epsilon": 1e-3},
{"learningRate": 1e-4, "weightDecay": 0.1}
]
},
{
"algorithm": "nadam",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "beta1": 0.92},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96, "epsilon": 1e-3},
{"learningRate": 1e-4, "weightDecay": 0.1}
]
},
{
"algorithm": "adamax",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "beta1": 0.92},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96},
{"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96, "epsilon": 1e-3}
]
},
{
"algorithm": "asgd",
"config": [
{},
{"eta0": 1e-4},
{"eta0": 1e-4, "lambda": 1e-2},
{"eta0": 1e-4, "lambda": 1e-2, "alpha": 0.9},
{"eta0": 1e-4, "lambda": 1e-2, "alpha": 0.9, "t0": 10}
]
},
{
"algorithm": "cg",
"config": [
{},
{"rho": 0.02},
{"sig": 0.06},
{"int": 0.12},
{"ext": 3.2},
{"maxIter": 5},
{"ratio": 95}
]
},
{
"algorithm": "nag",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "learningRateDecay": 0.1},
{"learningRate": 1e-4, "weightDecay": 0.3},
{"learningRate": 1e-4, "momentum": 0.95},
{"learningRate": 1e-4, "momentum": 0.95, "dampening": 0.8}
]
},
{
"algorithm": "rmsprop",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "alpha": 0.95},
{"learningRate": 1e-4, "alpha": 0.95, "epsilon": 1e-3},
{"weightDecay": 0.2}
]
},
{
"algorithm": "rprop",
"config": [
{},
{"stepsize": 0.05},
{"stepsize": 0.05, "etaplus": 1.15},
{"stepsize": 0.05, "etaplus": 1.15, "etaminus": 0.6},
{"stepsize": 0.05, "etaplus": 1.15, "etaminus": 0.6, "stepsizemax": 1, "stepsizemin": 1e-3},
{"stepsize": 0.05, "etaplus": 1.15, "etaminus": 0.6, "niter": 10}
]
},
{
"algorithm": "sgd",
"config": [
{},
{"learningRate": 1e-4},
{"learningRate": 1e-4, "momentum": 0.95, "dampening": 0.9},
{"learningRate": 1e-4, "nesterov": true, "momentum": 0.95, "dampening": 0},
{"weightDecay": 0.2}
]
},
{
"algorithm": "lbfgs",
"config": [
{},
{"learningRate": 1e-1}
]
}
]
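
The config entries above use the legacy Lua-style option names. As rough orientation only (my assumption about the closest modern equivalents, not something this commit adds), one of the adam entries would translate to today's torch.optim with learningRate -> lr, beta1/beta2 -> betas, epsilon -> eps, weightDecay -> weight_decay:

import torch

# the removed tests start every run from (1.5, 1.5) on the Rosenbrock function
params = torch.nn.Parameter(torch.tensor([1.5, 1.5], dtype=torch.float64))

# legacy: {"learningRate": 1e-4, "beta1": 0.92, "beta2": 0.96, "epsilon": 1e-3, "weightDecay": 0.1}
opt = torch.optim.Adam([params], lr=1e-4, betas=(0.92, 0.96), eps=1e-3, weight_decay=0.1)

def closure():
    opt.zero_grad()
    x, y = params
    loss = (1 - x) ** 2 + 100 * (y - x ** 2) ** 2
    loss.backward()
    return loss

for _ in range(100):
    opt.step(closure)
print('{:.8f}\t{:.8f}'.format(params[0].item(), params[1].item()))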