[BE] Get rid of future (#92596)
PyTorch has been Python-3.X+ for ages, so it's a shame to still rely on `future.utils`, even in a deprecated Caffe2 codebase. For reference: https://peps.python.org/pep-0469/#migrating-directly-to-python-3

Pull Request resolved: https://github.com/pytorch/pytorch/pull/92596
Approved by: https://github.com/kit1980, https://github.com/orionr
This commit is contained in:
parent 1bc60c6b31
commit 1906eaf22f
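The change below is mechanical and follows the PEP 469 recipe linked above: every `viewitems`/`viewkeys`/`viewvalues` call from `future.utils` maps one-to-one onto the native dict methods available on Python 3. A minimal sketch of the pattern, with illustrative names only (nothing here is code from this commit):

```python
# Illustrative only: `params` is a made-up dict, not a name from this patch.
params = {"fc_w": 0, "fc_b": 1}

# Before (Python 2/3 compatibility via the `future` package):
#     from future.utils import viewitems, viewkeys, viewvalues
#     for name, value in viewitems(params): ...

# After (Python 3 only): the built-in dict methods already return views.
for name, value in params.items():   # replaces viewitems(params)
    print(name, value)

names = list(params.keys())          # replaces list(viewkeys(params))
values = list(params.values())       # replaces list(viewvalues(params))
```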
@@ -75,7 +75,7 @@ if [ -n "$ANACONDA_PYTHON_VERSION" ]; then
   }
 
   # Install PyTorch conda deps, as per https://github.com/pytorch/pytorch README
-  CONDA_COMMON_DEPS="astunparse pyyaml mkl=2022.0.1 mkl-include=2022.0.1 setuptools cffi future six"
+  CONDA_COMMON_DEPS="astunparse pyyaml mkl=2022.0.1 mkl-include=2022.0.1 setuptools cffi six"
   if [ "$ANACONDA_PYTHON_VERSION" = "3.10" ]; then
     # Install llvm-8 as it is required to compile llvmlite-0.30.0 from source
     conda_install numpy=1.21.2 ${CONDA_COMMON_DEPS} llvmdev=8.0.0

@@ -184,7 +184,7 @@ Other potentially useful environment variables may be found in `setup.py`.
 **Common**
 
 ```bash
-conda install astunparse numpy ninja pyyaml setuptools cmake cffi typing_extensions future six requests dataclasses
+conda install astunparse numpy ninja pyyaml setuptools cmake cffi typing_extensions six requests dataclasses
 ```
 
 **On Linux**
@@ -17,7 +17,6 @@ Implement functions for controlling execution of nets and steps, including
 
 
 from caffe2.python import core
-from future.utils import viewitems
 
 
 # Used to generate names of the steps created by the control functions.

@@ -201,7 +200,7 @@ def MergeConditionNets(name, condition_nets, relation):
         else:
             last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
         # merge attributes
-        for k, v in viewitems(condition_nets[i]._attr_dict):
+        for k, v in condition_nets[i]._attr_dict.items():
             merged_net._attr_dict[k] += v
 
     merged_net.AddExternalOutput(last_cond)

@@ -7,7 +7,6 @@
 from collections import namedtuple, OrderedDict, defaultdict
 from past.builtins import basestring
-from future.utils import viewitems, viewkeys, viewvalues
 from itertools import chain
 from six import binary_type, string_types, text_type
 
 

@@ -311,7 +310,7 @@ class BlobReference(object):
             if '_ENGINE_' not in op or '_ENGINE_CUDNN' in op]
         return sorted(set(chain(
             dir(type(self)),
-            viewkeys(self.__dict__),
+            self.__dict__.keys(),
             additional_methods
         )))
 

@@ -414,7 +413,7 @@ def CreateOperator(
     if arg is not None:
         operator.arg.extend(arg)
     # Add all other arguments
-    for key, value in viewitems(kwargs):
+    for key, value in kwargs.items():
         if value is not None:
             operator.arg.add().CopyFrom(utils.MakeArgument(key, value))
 
@@ -627,8 +626,8 @@ StopGradient. Op:\n\n{}""".format(op.output[0], str(op)))
 
     def AppendSparseGenerators(self, sparse_generators):
         # merge indices and values generators for sparse gradients
-        for name, input_generators in viewitems(sparse_generators):
-            for version, generators in viewitems(input_generators):
+        for name, input_generators in sparse_generators.items():
+            for version, generators in input_generators.items():
                 if len(generators) == 1:
                     # either indices or values are generated (but not both)
                     generator = generators[0]

@@ -995,7 +994,7 @@ StopGradient. Op:\n\n{}""".format(op.output[0], str(op)))
         input_to_grad = {}
         gradient_ops = []
 
-        for y, g in viewitems(ys):
+        for y, g in ys.items():
             autograd_op = None
             if g is None:
                 autograd_op = CreateOperator(

@@ -1062,7 +1061,7 @@ StopGradient. Op:\n\n{}""".format(op.output[0], str(op)))
 
         # Set the gradient frontier with the initialized external
         # gradients.
-        for y in viewkeys(ys):
+        for y in ys.keys():
             self.gradient_frontier[y] = self.frontier[y]
             self.input_usages[str(y)][self.frontier[str(y)]].append(
                 len(self.ssa))

@@ -1094,7 +1093,7 @@ StopGradient. Op:\n\n{}""".format(op.output[0], str(op)))
         # operators ready. For the output map, we will convert everything to
         # BlobReferences for easier handling in python.
         all_input_to_grad_out = {}
-        for key, val in viewitems(all_input_to_grad):
+        for key, val in all_input_to_grad.items():
             if val is not None:
                 if (isinstance(val, string_types) or
                         isinstance(val, binary_type)):
@@ -1412,7 +1411,7 @@ def clone_and_bind_net(net, name, prefix, blob_remap=None, inputs=None,
     ssa, blob_versions = get_ssa(proto)
     undef_blobs = get_undefined_blobs(ssa)
 
-    for blob in viewkeys(blob_versions):
+    for blob in blob_versions.keys():
         if blob in blob_remap:
             continue
         elif blob in undef_blobs:

@@ -1824,7 +1823,7 @@ class Net(object):
             OrderedDict(zip(inputs, inputs)))
         for output in outputs:
             assert self.BlobIsDefined(output), "{} is not defined".format(output)
-        input_names = {str(k): str(v) for k, v in viewitems(inputs)}
+        input_names = {str(k): str(v) for k, v in inputs.items()}
         output_names = [str(o) for o in outputs]
         proto = self._net
         blob_versions = {str(i): 0 for i in inputs}

@@ -1836,7 +1835,7 @@ class Net(object):
                 'generate the given input.')
 
         sub_ssa = [op for i, op in enumerate(ssa) if i in used_op_ids]
-        undef_blobs = get_undefined_blobs(sub_ssa) - set(viewkeys(input_names))
+        undef_blobs = get_undefined_blobs(sub_ssa) - set(input_names.keys())
         prefix = (name + '/') if name else ''
 
         def remap(blob_name):

@@ -1847,10 +1846,10 @@ class Net(object):
             else:
                 return prefix + blob_name
 
-        blob_mapping = {b: remap(b) for b in viewkeys(blob_versions)}
+        blob_mapping = {b: remap(b) for b in blob_versions.keys()}
         new_net = self.Clone(name, blob_mapping, used_op_ids, remap_funcs)
         new_in = [
-            blob_mapping[i] for i in viewkeys(input_names)] + list(undef_blobs)
+            blob_mapping[i] for i in input_names.keys()] + list(undef_blobs)
         new_out = [blob_mapping[o] for o in output_names]
         del new_net.Proto().external_input[:]
         new_net.Proto().external_input.extend(new_in)
@@ -2285,7 +2284,7 @@ class Net(object):
             if '_ENGINE_' not in op]
         return sorted(set(chain(
             dir(type(self)),
-            viewkeys(self.__dict__),
+            self.__dict__.keys(),
             additional_methods
         )))
 

@@ -2366,7 +2365,7 @@ class Net(object):
             grad_output_indices=grad_output_indices,
             grad_input_indices=grad_input_indices,
             *args,
-            **dict(chain(viewitems(kwargs), viewitems(core_kwargs)))
+            **dict(chain(kwargs.items(), core_kwargs.items()))
         )
 
     def is_external_input(self, blob):

@@ -2725,7 +2724,7 @@ class ExecutionStep(object):
                 len(self._step.substep) > 0)
 
     def Nets(self):
-        return list(viewvalues(self._net_dict))
+        return list(self._net_dict.values())
 
     def Substeps(self):
         return self._substeps

@@ -2805,7 +2804,7 @@ class ExecutionStep(object):
         """
         return [
             attr
-            for net in viewvalues(self._net_dict)
+            for net in self._net_dict.values()
             for attr in net.get_attributes(name)
         ]
 

@@ -2902,7 +2901,7 @@ class Plan(object):
         self._plan.network.add().CopyFrom(net.Proto())
 
     def Nets(self):
-        return list(viewvalues(self._net_dict))
+        return list(self._net_dict.values())
 
     def AddStep(self, step):
         assert isinstance(step, ExecutionStep)

@@ -2926,7 +2925,7 @@ class Plan(object):
         """
         return [
             attr
-            for net in viewvalues(self._net_dict)
+            for net in self._net_dict.values()
             for attr in net.get_attributes(name)
         ]
 
@@ -5,7 +5,6 @@
 
 
 from collections import OrderedDict
-from future.utils import viewitems, viewkeys, viewvalues
 import logging
 import copy
 

@@ -258,9 +257,9 @@ def Parallelize(
     model_helper_obj._device_grouped_blobs.update(computed_params_grouped)
 
     model_helper_obj._param_names =\
-        list(viewkeys(model_helper_obj._device_grouped_blobs))
+        list(model_helper_obj._device_grouped_blobs.keys())
     model_helper_obj._computed_param_names =\
-        list(viewkeys(computed_params_grouped))
+        list(computed_params_grouped.keys())
 
     if pre_grad_net_transformer_fun:
         pre_grad_net_transformer_fun(model_helper_obj)

@@ -305,7 +304,7 @@ def Parallelize(
         non_datapar_grads
     )
     model_helper_obj._device_grouped_blobs.update(gradients_grouped)
-    model_helper_obj._grad_names = list(viewkeys(gradients_grouped))
+    model_helper_obj._grad_names = list(gradients_grouped.keys())
    model_helper_obj._losses_by_gpu = losses_by_gpu
 
     _InferBlobDevice(model_helper_obj)

@@ -554,7 +553,7 @@ def Parallelize_BMUF(
         model_helper_obj.params, non_datapar_params)
 
     model_helper_obj._param_names =\
-        list(viewkeys(model_helper_obj._device_grouped_blobs))
+        list(model_helper_obj._device_grouped_blobs.keys())
 
     _AddGradientOperators(
         devices, model_helper_obj, model_helper_obj._losses_by_gpu

@@ -574,7 +573,7 @@ def Parallelize_BMUF(
     )
 
     model_parameter_names = list(
-        viewkeys(model_helper_obj._device_grouped_blobs)
+        model_helper_obj._device_grouped_blobs.keys()
     )
     if warmup_iterations is not None:
         model_helper_obj._warmup_iterations = warmup_iterations
@@ -594,13 +593,13 @@ def Parallelize_BMUF(
             model_parameter_names,
             max_concurrent_distributed_ops
         )
-        for param_name in viewkeys(model_helper_obj._device_grouped_blobs):
+        for param_name in model_helper_obj._device_grouped_blobs.keys():
             param = model_helper_obj._device_grouped_blobs[param_name][master_device]
             with core.DeviceScope(master_dev_opt):
                 model_helper_obj._warmup_broadcast.Copy(param, _g(param))
 
     # (Step-0) Initialize momentum parameters on master device.
-    for param_name in viewkeys(model_helper_obj._device_grouped_blobs):
+    for param_name in model_helper_obj._device_grouped_blobs.keys():
         param = model_helper_obj._device_grouped_blobs[param_name][master_device]
         with core.DeviceScope(master_dev_opt):
             model_helper_obj._global_model_init_net.ConstantFill(

@@ -1019,8 +1018,8 @@ def _Broadcast(devices, model, net, param, use_nccl=False):
             # _device_. Thus we always use root=0, regardless of the
             # devices used.
             net.NCCLBroadcast(
-                list(viewvalues(model._device_grouped_blobs[param])),
-                list(viewvalues(model._device_grouped_blobs[param])),
+                list(model._device_grouped_blobs[param].values()),
+                list(model._device_grouped_blobs[param].values()),
                 root=0,
             )
             return

@@ -1039,7 +1038,7 @@ def _Broadcast(devices, model, net, param, use_nccl=False):
 
 
 def _AllReduce(devices, model, net, param, use_nccl=False, control_input=None):
-    blobs_group = list(viewvalues(model._device_grouped_blobs[param]))
+    blobs_group = list(model._device_grouped_blobs[param].values())
     if model._device_type == caffe2_pb2.CUDA and use_nccl:
         # TODO: for _shared_model, do only NCCLReduce
         model.NCCLAllreduce(

@@ -1195,7 +1194,7 @@ def _SyncAllParamsDistributed(
 
     for param_name in sorted(unique_param_names):
         master_param = model._device_grouped_blobs[param_name][devices[0]]
-        params_group = list(viewvalues(model._device_grouped_blobs[param_name]))
+        params_group = list(model._device_grouped_blobs[param_name].values())
 
         def broadcast(params):
             comm_world, control_input = context.get_control_and_context(params)

@@ -1372,7 +1371,7 @@ def _AllReduceBlobsDistributed(
 
     for blob_name in blob_names:
         master_blob = model._device_grouped_blobs[blob_name][devices[0]]
-        blobs_group = list(viewvalues(model._device_grouped_blobs[blob_name]))
+        blobs_group = list(model._device_grouped_blobs[blob_name].values())
 
         assert master_blob in blobs_group
 

@@ -1439,7 +1438,7 @@ def _AllReduceBlobsSingleHost(blob_names, devices, model, net, use_nccl):
 
     for blob_name in blob_names:
         # Group by blob_name for reduce.
-        blobs_group = list(viewvalues(model._device_grouped_blobs[blob_name]))
+        blobs_group = list(model._device_grouped_blobs[blob_name].values())
         if len(blobs_group) == 1:
             # Non-reducible
             continue
@@ -1478,7 +1477,7 @@ def _AllReduceBlobsSingleHost(blob_names, devices, model, net, use_nccl):
                 axis=0,
                 name="note:data_parallel_model")
 
-            for gpu, g in viewitems(model._device_grouped_blobs[blob_name]):
+            for gpu, g in model._device_grouped_blobs[blob_name].items():
                 device_opt = core.DeviceOption(model._device_type, gpu)
                 with core.DeviceScope(device_opt):
                     model.Copy(grad_idx_concat, g.indices)

@@ -1490,7 +1489,7 @@ def _AllReduceBlobsSingleHost(blob_names, devices, model, net, use_nccl):
                 "{}/{}_val_splitinfo".format(master_ns, blob_name)],
                 axis=0, name="note:data_parallel_model")
 
-            for gpu, g in viewitems(model._device_grouped_blobs[blob_name]):
+            for gpu, g in model._device_grouped_blobs[blob_name].items():
                 device_opt = core.DeviceOption(model._device_type, gpu)
                 with core.DeviceScope(device_opt):
                     model.Copy(grad_val_concat, g.values)

@@ -1740,7 +1739,7 @@ def _OptimizeGradientMemorySimple(model, losses_by_gpu, devices):
         model.net._net = memonger.share_grad_blobs(
             model.net,
             losses_by_gpu[device],
-            set(viewvalues(model.param_to_grad)),
+            set(model.param_to_grad.values()),
             namescope,
             share_activations=False,
         )

@@ -1760,7 +1759,7 @@ def _AddDynamicMemoryOptimization(model, blobs_to_keep, devices):
         # iterations so we need to remove param grads from the dynamic memory
         # management.
         blobs_to_keep_all_devices.update(
-            [str(b) for b in viewvalues(model.param_to_grad)]
+            [str(b) for b in model.param_to_grad.values()]
         )
 
     model.net._net = memonger.release_blobs_when_used(

@@ -1784,7 +1783,7 @@ def OptimizeGradientMemory(model,
     """
     if input_shapes is not None:
         input_shapes_all_devices = {}
-        for b, shp in viewitems(input_shapes):
+        for b, shp in input_shapes.items():
             for d in model._devices:
                 input_shapes_all_devices["{}_{}/{}".
                                          format(model._device_prefix, d, b)] = shp

@@ -1802,7 +1801,7 @@ def OptimizeGradientMemory(model,
         model.net._net = memonger.share_grad_blobs(
             model.net,
             model._losses_by_gpu[device],
-            set(viewvalues(model.param_to_grad)),
+            set(model.param_to_grad.values()),
             namescope,
             dont_share_blobs=excluded_blobs_by_device,
             share_activations=recycle_activations,
@@ -2,7 +2,6 @@
 
 
 
-from future.utils import viewkeys
 from multiprocessing import Process, Queue
 import numpy as np
 import os

@@ -1175,7 +1174,7 @@ class ParallelizeBMUFTest(TestCase):
 
         # Check initial momentum params are zeros
         self.assertEqual(
-            list(viewkeys(model._device_grouped_blobs)), ['fc_w', 'fc_b']
+            list(model._device_grouped_blobs.keys()), ['fc_w', 'fc_b']
        )
         self.assertEqual(workspace.FetchBlob('{}_0/fc_b_v'.format(device_prefix)), 0)
         np.testing.assert_equal(

@@ -4,7 +4,6 @@ import numpy as np
 import copy
 from caffe2.python import workspace
 from caffe2.python.core import InferOpBlobDevicesAsDict
-from future.utils import viewitems
 
 
 class DeviceChecker(object):

@@ -88,7 +87,7 @@ class DeviceChecker(object):
         blobs_to_check = [b for b in blobs_to_check if b not in ignore]
         workspace.SwitchWorkspace("_device_check_", True)
         for device_option in self._device_options:
-            for name, arr in viewitems(inputs):
+            for name, arr in inputs.items():
                 # print 'feeding', name
                 workspace.FeedBlob(name, arr, device_option)
             for op in net.op:

@@ -8,7 +8,6 @@ import argparse
 import os
 from caffe2.python import core, workspace
 from caffe2.python.docs.formatter import Markdown
-from future.utils import viewitems, viewvalues
 
 OpSchema = workspace.C.OpSchema
 

@@ -76,13 +75,13 @@ class OpDocGenerator(DocGenerator):
                 priority = 4
             self.operators[name] = self.getOperatorDoc(name, schema, priority)
 
-        for name, engines in viewitems(self.engines):
+        for name, engines in self.engines.items():
             if name in self.operators:
                 self.operators[name].addEngines(engines)
 
         # Generate a sorted list of operators
         return sorted(
-            viewvalues(self.operators),
+            self.operators.values(),
             key=lambda op: (op.priority, op.name)
         )
 
@@ -12,7 +12,6 @@ import socket
 import abc
 
 from collections import OrderedDict
-from future.utils import viewkeys, viewvalues
 
 '''
 Utilities for logging experiment run stats, such as accuracy

@@ -96,15 +95,15 @@ class ModelTrainerLog():
         else:
             logdict['inputs_per_sec'] = 0.0
 
-        for k in sorted(viewkeys(additional_values)):
+        for k in sorted(additional_values.keys()):
             logdict[k] = additional_values[k]
 
         # Write the headers if they are not written yet
         if self.headers is None:
-            self.headers = list(viewkeys(logdict))
+            self.headers = list(logdict.keys())
             self.logstr(",".join(self.headers))
 
-        self.logstr(",".join(str(v) for v in viewvalues(logdict)))
+        self.logstr(",".join(str(v) for v in logdict.values()))
 
         for logger in self.external_loggers:
             try:

@@ -2,7 +2,6 @@ import numpy as np
 import copy
 import time
 from functools import partial, reduce
-from future.utils import viewitems, viewkeys
 from hypothesis import assume, given, settings, HealthCheck
 import hypothesis.strategies as st
 import unittest

@@ -130,7 +129,7 @@ class TestOperators(hu.HypothesisTestCase):
                "LE": lambda x1, x2: [x1 <= x2],
                "GT": lambda x1, x2: [x1 > x2],
                "GE": lambda x1, x2: [x1 >= x2]}
-        for name, ref in viewitems(ops):
+        for name, ref in ops.items():
             _test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
             _test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
 
@@ -2116,8 +2115,8 @@ class TestOperators(hu.HypothesisTestCase):
 
 
     @given(a=hu.tensor(),
-           src=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
-           dst=st.sampled_from(list(viewkeys(_NUMPY_TYPE_TO_ENUM))),
+           src=st.sampled_from(list(_NUMPY_TYPE_TO_ENUM.keys())),
+           dst=st.sampled_from(list(_NUMPY_TYPE_TO_ENUM.keys())),
            use_name=st.booleans(),
            **hu.gcs)
     @settings(deadline=1000)

@@ -2284,7 +2283,7 @@ class TestOperators(hu.HypothesisTestCase):
         backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
             step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
         backward_mapping = {
-            str(k): str(v) for k, v in viewitems(backward_mapping)
+            str(k): str(v) for k, v in backward_mapping.items()
         }
         backward_step_net = core.Net("ElmanBackward")
         del backward_step_net.Proto().op[:]

@@ -17,7 +17,6 @@ from caffe2.python.modeling.net_modifier import NetModifier
 from caffe2.python.optimizer import get_param_device, Optimizer
 from caffe2.python.regularizer import Regularizer, RegularizationBy
 from caffe2.python.layers import layers
-from future.utils import viewitems, viewvalues
 
 import logging
 import numpy as np

@@ -222,7 +221,7 @@ class LayerModelHelper(model_helper.ModelHelper):
         self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')
 
     def _add_global_constants(self, init_net):
-        for initializer_op in viewvalues(self.global_constant_initializers):
+        for initializer_op in self.global_constant_initializers.values():
             init_net._net.op.extend([initializer_op])
 
     def create_init_net(self, name):

@@ -632,7 +631,7 @@ class LayerModelHelper(model_helper.ModelHelper):
         blob_to_device=None,
     ):
         logger.info("apply regularizer on loss")
-        for param, regularizer in viewitems(self.param_to_reg):
+        for param, regularizer in self.param_to_reg.items():
             if regularizer is None:
                 continue
             logger.info("add regularizer {0} for param {1} to loss".format(regularizer, param))
@@ -657,7 +656,7 @@ class LayerModelHelper(model_helper.ModelHelper):
         CPU = muji.OnCPU()
         # if given, blob_to_device is a map from blob to device_option
         blob_to_device = blob_to_device or {}
-        for param, regularizer in viewitems(self.param_to_reg):
+        for param, regularizer in self.param_to_reg.items():
             if regularizer is None:
                 continue
             assert isinstance(regularizer, Regularizer)

@@ -713,7 +712,7 @@ class LayerModelHelper(model_helper.ModelHelper):
         CPU = muji.OnCPU()
         # if given, blob_to_device is a map from blob to device_option
         blob_to_device = blob_to_device or {}
-        for param, optimizer in viewitems(self.param_to_optim):
+        for param, optimizer in self.param_to_optim.items():
             assert optimizer is not None, \
                 "default optimizer must have been set in add_layer"
             # note that not all params has gradient and thus we sent None if

@@ -9,7 +9,6 @@ from caffe2.python import schema
 from caffe2.python.layers.layers import (
     ModelLayer,
 )
-from future.utils import viewitems
 import numpy as np
 from collections import defaultdict
 

@@ -76,7 +75,7 @@ class Concat(ModelLayer):
             format(input_record)
 
         shapes = []
-        for field_name, field_type in viewitems(input_record.fields):
+        for field_name, field_type in input_record.fields.items():
             assert isinstance(field_type, schema.Scalar),\
                 "Incorrect input type for {}. Expected Scalar, but got: {}".\
                     format(field_name, field_type)

@@ -13,7 +13,6 @@ from caffe2.python import workspace, core
 from caffe2.proto import caffe2_pb2
 import enum
 import logging
-from future.utils import viewitems, viewvalues
 import caffe2.python._import_c_extension as C
 
 log = logging.getLogger("memonger")

@@ -439,7 +438,7 @@ def topological_sort_traversal_longest_path(g):
     gt = _add_single_target_ifneeded(g)
     source_nodes = _find_source_nodes(gt)
     lpaths = _get_longest_paths(gt, source_nodes)
-    tree, root = _build_tree(list(viewvalues(lpaths)))
+    tree, root = _build_tree(list(lpaths.values()))
     sorted_sources = _sort_tree_leaves(tree, root)
     assert(sorted(sorted_sources) == sorted(source_nodes))
 
@@ -729,7 +728,7 @@ def compute_assignments(ranges, static_blobs, algo):
     # be consumed externally. Sort these to the end of the list as opposed
     # to the beginning so that they can be shared as well.
     ranges = sorted(
-        viewitems(ranges),
+        ranges.items(),
         key=lambda p: (p[1].used is None, p[1].used),
     )
     # Update None values

@@ -822,7 +821,7 @@ def apply_recurrent_blob_assignments(op, blob_assignments, canonical_name):
                 step_arg.n.external_input[i] = canonical_name(einp)
 
     # Store renamings
-    for blob, renamed in viewitems(blob_assignments):
+    for blob, renamed in blob_assignments.items():
         if blob in list(op.input) + list(op.output):
             a = caffe2_pb2.Argument()
             a.name = blob + ".rename"

@@ -983,7 +982,7 @@ def compute_statistics(assignments):
     blob_bytes = {
         blob: blob_nbytes(blob) for assignment in assignments
         for (blob, _) in assignment}
-    baseline_nbytes = sum(viewvalues(blob_bytes))
+    baseline_nbytes = sum(blob_bytes.values())
     optimized_nbytes = sum(
         max(blob_bytes[blob] for (blob, _) in assignment)
         for assignment in assignments)

@@ -3,7 +3,6 @@ import numpy as np
 from caffe2.python import workspace, memonger, core, model_helper, brew
 from caffe2.proto import caffe2_pb2
 import caffe2.python.hypothesis_test_util as hu
-from future.utils import viewvalues
 import hypothesis.strategies as st
 from hypothesis import given, settings
 import unittest

@@ -168,7 +167,7 @@ class MemongerTest(hu.HypothesisTestCase):
         optim_proto = memonger.share_grad_blobs(
             m.net,
             ["name_x/loss"],
-            set(viewvalues(m.param_to_grad)),
+            set(m.param_to_grad.values()),
             "name_x/",
             share_activations=False,
         )

@@ -178,7 +177,7 @@ class MemongerTest(hu.HypothesisTestCase):
         optim_proto_wacts = memonger.share_grad_blobs(
             m.net,
             ["name_x/loss"],
-            set(viewvalues(m.param_to_grad)),
+            set(m.param_to_grad.values()),
             "name_x/",
             share_activations=True,
             dont_share_blobs=set([str(input_to_grad["name_x/fc1_w"])]),
@@ -244,7 +243,7 @@ class MemongerTest(hu.HypothesisTestCase):
         optim_proto = memonger.share_grad_blobs(
             m.net,
             ["loss"],
-            set(viewvalues(m.param_to_grad)),
+            set(m.param_to_grad.values()),
             "",
             share_activations=True,
             dont_share_blobs=set(),

@@ -293,7 +292,7 @@ class MemongerTest(hu.HypothesisTestCase):
         optim_proto = memonger.share_grad_blobs(
             m.net,
             ["name_x/loss1", "name_x/loss2"],
-            set(viewvalues(m.param_to_grad)),
+            set(m.param_to_grad.values()),
             "name_x", # "name_x//shared_gradinp_0_shared" if using "name_x/"
             share_activations=True,
             dont_share_blobs=set(['name_x/fc6', 'name_x/fc5',

@@ -572,7 +571,7 @@ class MemongerTest(hu.HypothesisTestCase):
         optim_proto = memonger.share_grad_blobs(
             model.net,
             ["loss"],
-            set(viewvalues(model.param_to_grad)),
+            set(model.param_to_grad.values()),
             "",
             share_activations=True,
             dont_share_blobs=set(),

@@ -17,7 +17,6 @@ from caffe2.python.optimizer_context import (
 )
 from caffe2.python.regularizer_context import RegularizerContext
 
-from future.utils import viewitems, viewkeys
 from itertools import chain
 
 import logging

@@ -360,7 +359,7 @@ class ModelHelper(object):
         param_to_grad = self.get_param_to_grad(params)
 
         return [
-            self.get_param_info(param) for param, grad in viewitems(param_to_grad)
+            self.get_param_info(param) for param, grad in param_to_grad.items()
             if (
                 not self.skip_sparse_optim or
                 not isinstance(grad, core.GradientSlice)

@@ -445,7 +444,7 @@ class ModelHelper(object):
     def __dir__(self):
         return sorted(set(chain(
             dir(type(self)),
-            viewkeys(self.__dict__),
+            self.__dict__.keys(),
             _known_working_ops
         )))
 
@@ -8,7 +8,6 @@
 
 
 import collections
-from future.utils import viewitems
 
 import caffe2.proto.caffe2_pb2 as caffe2_pb2
 from caffe2.python import attention, core, rnn_cell, brew

@@ -38,7 +37,7 @@ def gen_vocab(corpus, unk_threshold):
             tokens = sentence.strip().split()
             for token in tokens:
                 freqs[token] += 1
-    for token, freq in viewitems(freqs):
+    for token, freq in freqs.items():
         if freq > unk_threshold:
             vocab[token]
 

@@ -7,7 +7,6 @@
 
 from abc import ABCMeta, abstractmethod
 import argparse
-from future.utils import viewitems
 import logging
 import numpy as np
 import sys

@@ -538,7 +537,7 @@ def run_seq2seq_beam_decoder(args, model_params, decoding_params):
         args.target_corpus,
         args.unk_threshold,
     )
-    inversed_target_vocab = {v: k for (k, v) in viewitems(target_vocab)}
+    inversed_target_vocab = {v: k for (k, v) in target_vocab.items()}
     logger.info('Target vocab size {}'.format(len(target_vocab)))
 
     decoder = Seq2SeqModelCaffe2EnsembleDecoder(

@@ -9,7 +9,6 @@ import json
 import logging
 from collections import defaultdict
 from caffe2.python import utils
-from future.utils import viewitems
 
 logger = logging.getLogger(__name__)
 logger.setLevel(logging.INFO)

@@ -379,7 +378,7 @@ def main():
             caffe2_pb2.NetDef: lambda x: {x.name: x.op},
         }
     )
-    for key, operators in viewitems(graphs):
+    for key, operators in graphs.items():
         if args.minimal:
             graph = GetPydotGraphMinimal(
                 operators,
@@ -12,7 +12,6 @@ from caffe2.python.task import Task, TaskGroup, WorkspaceType, TaskOutput
 from collections import defaultdict
 from contextlib import contextmanager
 from copy import copy
-from future.utils import viewkeys
 from itertools import chain
 from six import binary_type, text_type
 

@@ -109,7 +108,7 @@ def analyze_step(analyzer, step):
             if proto.should_stop_blob:
                 analyzer.need_blob(proto.should_stop_blob)
             if proto.concurrent_substeps:
-                new_blobs = set(viewkeys(ws_in)) - set(viewkeys(analyzer.workspace))
+                new_blobs = set(ws_in.keys()) - set(analyzer.workspace.keys())
                 assert len(all_new_blobs & new_blobs) == 0, (
                     'Error: Blobs created by multiple parallel steps: %s' % (
                         ', '.join(all_new_blobs & new_blobs)))

@@ -4,7 +4,6 @@
 
 
 from caffe2.python import model_helper, workspace, core, rnn_cell
-from future.utils import viewitems
 import numpy as np
 
 import unittest

@@ -116,9 +115,9 @@ class TestLSTMs(unittest.TestCase):
         cudnn_lstm_params = {
             input_type: {
                 k: workspace.FetchBlob(v[0])
-                for k, v in viewitems(pars)
+                for k, v in pars.items()
             }
-            for input_type, pars in viewitems(param_extract_mapping)
+            for input_type, pars in param_extract_mapping.items()
         }
 
         # Run the model 3 times, so that some parameter updates are done

@@ -7,7 +7,6 @@ import tempfile
 import unittest
 import numpy as np
 from caffe2.python import cnn, workspace, core
-from future.utils import viewitems
 
 from caffe2.python.predictor_constants import predictor_constants as pc
 import caffe2.python.predictor.predictor_exporter as pe

@@ -113,7 +112,7 @@ class PredictorExporterTest(unittest.TestCase):
         )
 
     def test_meta_net_def_net_runs(self):
-        for param, value in viewitems(self.params):
+        for param, value in self.params.items():
             workspace.FeedBlob(param, value)
 
         extra_init_net = core.Net('extra_init')
@@ -6,7 +6,6 @@
 
 
 from caffe2.python import core, workspace
-from future.utils import viewitems, viewkeys
 
 def recurrent_net(
         net, cell_net, inputs, initial_cell_inputs,

@@ -76,7 +75,7 @@ def recurrent_net(
     if not forward_only:
         backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
             cell_net.Proto().op, inner_outputs_map)
-        backward_mapping = {str(k): v for k, v in viewitems(backward_mapping)}
+        backward_mapping = {str(k): v for k, v in backward_mapping.items()}
 
         backward_cell_net = core.Net("RecurrentBackwardStep")
         del backward_cell_net.Proto().op[:]

@@ -106,7 +105,7 @@ def recurrent_net(
         ssa, blob_versions = core.get_ssa(cell_net.Proto())
         scratches = [
             blob
-            for blob, ver in viewitems(blob_versions)
+            for blob, ver in blob_versions.items()
             if (ver > 0 and
                 blob in undefined and
                 blob not in cell_net.Proto().external_output)

@@ -233,7 +232,7 @@ def recurrent_net(
 
     backward_args = {}
     if backward_cell_net is not None:
-        backward_mapping_keys = set(viewkeys(backward_mapping))
+        backward_mapping_keys = set(backward_mapping.keys())
         backward_link_internal, backward_link_external, backward_link_offset = \
             unpack_triple(backward_links)
         params = [x for x in references if x in backward_mapping_keys]

@@ -10,7 +10,6 @@ import inspect
 import logging
 import numpy as np
 import random
-from future.utils import viewkeys
 
 from caffe2.proto import caffe2_pb2
 from caffe2.python.attention import (

@@ -1677,7 +1676,7 @@ def InitFromLSTMParams(lstm_pblobs, param_values):
     '''
     weight_params = GetLSTMParamNames()['weights']
     bias_params = GetLSTMParamNames()['biases']
-    for input_type in viewkeys(param_values):
+    for input_type in param_values.keys():
         weight_values = [
             param_values[input_type][w].flatten()
             for w in weight_params
@@ -25,7 +25,6 @@ from caffe2.python import workspace
 from caffe2.python.core import BlobReference
 from collections import OrderedDict, namedtuple
 from past.builtins import basestring
-from future.utils import viewitems, viewkeys, viewvalues
 from itertools import islice
 from six import StringIO
 from typing import Sequence

@@ -417,9 +416,9 @@ class Struct(Field):
             ):
                 raise ValueError('Duplicate field name: %s' % name)
             self.fields[name] = self.fields[name] + field
-        for id, (_, field) in enumerate(viewitems(self.fields)):
+        for id, (_, field) in enumerate(self.fields.items()):
             field._set_parent(self, id)
-        super(Struct, self).__init__(viewvalues(self.fields))
+        super(Struct, self).__init__(self.fields.values())
         self._frozen = True
 
     def _struct_from_nested_name(self, nested_name, field):
@@ -436,45 +435,45 @@ class Struct(Field):
         return names[0], create_internal(names[1], field)
 
     def get_children(self):
-        return list(viewitems(self.fields))
+        return list(self.fields.items())
 
     def field_names(self):
         names = []
-        for name, field in viewitems(self.fields):
+        for name, field in self.fields.items():
             names += [_join_field_name(name, f) for f in field.field_names()]
         return names
 
     def field_types(self):
         types = []
-        for _, field in viewitems(self.fields):
+        for field in self.fields.values():
             types += field.field_types()
         return types
 
     def field_metadata(self):
         metadata = []
-        for _, field in viewitems(self.fields):
+        for field in self.fields.values():
             metadata += field.field_metadata()
         return metadata
 
     def field_blobs(self):
         blobs = []
-        for _, field in viewitems(self.fields):
+        for field in self.fields.values():
             blobs += field.field_blobs()
         return blobs
 
     def all_scalars(self):
         scalars = []
-        for _, field in viewitems(self.fields):
+        for field in self.fields.values():
             scalars += field.all_scalars()
         return scalars
 
     def has_blobs(self):
-        return all(field.has_blobs() for field in viewvalues(self.fields))
+        return all(field.has_blobs() for field in self.fields.values())
 
     def clone(self, keep_blobs=True):
         normalized_fields = [
             (k, _normalize_field(v, keep_blobs=keep_blobs))
-            for k, v in viewitems(self.fields)
+            for k, v in self.fields.items()
         ]
         return type(self)(*normalized_fields)
 
@@ -495,7 +494,7 @@ class Struct(Field):
 
     def _pprint_impl(self, indent, str_buffer):
         str_buffer.write(' ' * indent + "Struct( \n")
-        for name, field in viewitems(self.fields):
+        for name, field in self.fields.items():
             str_buffer.write(' ' * (indent + 1) + "{}=".format(name) + "\n")
             field._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
         str_buffer.write(' ' * indent + ") \n")

@@ -515,7 +514,7 @@ class Struct(Field):
         Struct.
         """
         if isinstance(item, list) or isinstance(item, tuple):
-            keys = list(viewkeys(self.fields))
+            keys = list(self.fields.keys())
             return Struct(
                 * [
                     (

@@ -525,7 +524,7 @@ class Struct(Field):
                 ]
             )
         elif isinstance(item, int):
-            return next(islice(viewvalues(self.fields), item, None))
+            return next(islice(self.fields.values(), item, None))
         else:
             field = self._get_field_by_nested_name(item)
             if field is None:

@@ -603,7 +602,7 @@ class Struct(Field):
                     ", must both the Struct to allow merging of the field, " + name)
             children[name] = left_field + right_field
 
-        return Struct(*(viewitems(children)))
+        return Struct(*(children.items()))
 
     def __sub__(self, other):
         """

@@ -1137,7 +1136,7 @@ def as_record(value):
         else:
             return Tuple(* [as_record(f) for f in value])
     elif isinstance(value, dict):
-        return Struct(* [(k, as_record(v)) for k, v in viewitems(value)])
+        return Struct(* [(k, as_record(v)) for k, v in value.items()])
     else:
         return _normalize_field(value)
 
@@ -5,7 +5,6 @@ from caffe2.python import core, context
 from caffe2.python.schema import Field, from_blob_list
 from collections import defaultdict
 from copy import copy
-from future.utils import viewitems
 
 
 def _merge_node_kwargs(a, b):

@@ -276,7 +275,7 @@ class TaskGroup(context.Managed):
             return tasks_by_node
 
         # now we have report_steps. report_net is deprecated
-        for node, (net, interval) in viewitems(self._report_nets):
+        for node, (net, interval) in self._report_nets.items():
             self.report_step(net, node=node, interval_ms=interval * 1000)
         self._report_nets = {}
 

@@ -290,7 +289,7 @@ class TaskGroup(context.Managed):
             report_steps_by_node[node_map[original_node]].append(step)
 
         grouped_by_node = TaskGroup()
-        for node, tasks in viewitems(tasks_by_node):
+        for node, tasks in tasks_by_node.items():
             report_steps = report_steps_by_node[node]
             node_inits, node_exits = get_setup_nets(
                 TaskGroup.LOCAL_SETUP,

@@ -11,7 +11,6 @@ import os
 import time
 import signal
 import logging
-from future.utils import viewitems
 
 
 '''

@@ -59,7 +58,7 @@ class WatcherThread(threading.Thread):
         import sys
         import traceback
         code = []
-        for threadId, stack in viewitems(sys._current_frames()):
+        for threadId, stack in sys._current_frames().items():
             if threadId == self.caller_thread.ident:
                 code.append("\n# ThreadID: %s" % threadId)
                 for filename, lineno, name, line in traceback.extract_stack(stack):

@@ -81,7 +80,7 @@ class WatcherThread(threading.Thread):
         import sys
         import traceback
         code = []
-        for threadId, stack in viewitems(sys._current_frames()):
+        for threadId, stack in sys._current_frames().items():
             code.append("\n# ThreadID: %s" % threadId)
             for filename, lineno, name, line in traceback.extract_stack(stack):
                 code.append('File: "%s", line %d, in %s' % (filename, lineno, name))

@@ -6,7 +6,6 @@
 
 
 from caffe2.proto import caffe2_pb2
-from future.utils import viewitems
 from google.protobuf.message import DecodeError, Message
 from google.protobuf import text_format
 
@@ -221,13 +220,13 @@ def TryReadProtoWithClass(cls, s):
 def GetContentFromProto(obj, function_map):
     """Gets a specific field from a protocol buffer that matches the given class
     """
-    for cls, func in viewitems(function_map):
+    for cls, func in function_map.items():
         if type(obj) is cls:
             return func(obj)
 
 
 def GetContentFromProtoString(s, function_map):
-    for cls, func in viewitems(function_map):
+    for cls, func in function_map.items():
         try:
             obj = TryReadProtoWithClass(cls, s)
             return func(obj)

@@ -1,7 +1,6 @@
 # Python dependencies required for development
 astunparse
 expecttest
-future
 hypothesis
 numpy
 psutil