mirror of
https://github.com/zebrajr/pytorch.git
synced 2025-12-07 12:21:27 +01:00
* Track checkpoint performance in scuba As title. * [C2/CUDA]: fix cross entropy sigmoid with logits when adding log_d_trick, I forgot to add it to the cuda impl; this diff fixes it. * Back out "[caffe2] Unregister MKL fallbacks for NCHW conversions" Original commit changeset: 8918dd40205a Will land after @jongsoo's diff https://phabricator.intern.facebook.com/D7596315 lands * [Easy][C2] Don't add blob to external outputs from output_record if it's already external output As desc. * On Mobile phones, call GlobalInit with no arguments in predictor in case we need to perform initialization FACEBOOK: The QPL logger needs the initialization code. In the past, the initialization code is put in the pipeline calling Caffe2. However, those places become obsolete quickly, as the product teams change places to call Caffe2 from time to time. We also need to track which teams use Caffe2 so that we can put the initialization code there. With this diff, the initialization code is put in the predictor constructor, only enabled for mobile phones. This way, we can always enable QPL logging. Once we do this, we can check how many times Caffe2 inference is called in production, and which models are more popular in production. This way, we can prioritize our effort supporting those models. Will clean up the old code calling the init in the product in a separate diff. * add padding op for sparse length tensor to pad length-based sparse tensor with padding_value * Add conv_op with cudaconvnet engine Add conv_op with cudaconvnet engine * [numa] Fix simple NUMA copy benchmark Move XavierFill into init_net and also compute BW * call roundf (device function) instead of round (host function) * [caffe2_benchmark][observer] Make caffe2_benchmark use its own observer 1. Add ClearGlobalNetObservers() 2. 
Make caffe2_benchmark use its own observer and observer_reporter * [detectron] Use roundf instead of round in the detectron module ops * allow K larger than number of elements in top k op one use case is to use this op together with PackSegments for sparse tensors, where the number of elements in each slice is not statistically defined. * add ChannelShuffle DNNLOWP op * fixup math_cpu.cc break
70 lines
2.3 KiB
Python
from __future__ import absolute_import
|
|
from __future__ import division
|
|
from __future__ import print_function
|
|
|
|
from caffe2.python import core, workspace
|
|
from caffe2.proto import caffe2_pb2
|
|
import time
|
|
|
|
# Side length of each square benchmark blob (presumably float32, the
# XavierFill default — confirm); one blob is SHAPE_LEN * SHAPE_LEN elements.
SHAPE_LEN = 4096

# Number of net executions per timed measurement.
NUM_ITER = 1000

# Bytes per gigabyte, used to report bandwidth in GB/s.
GB = 1024 * 1024 * 1024

# Number of independent input/output blob pairs copied by each net run.
NUM_REPLICAS = 48


|
def build_net(net_name, cross_socket):
    """Build the init net and the benchmark net for one measurement.

    The init net fills NUM_REPLICAS input blobs on NUMA node 0; the
    benchmark net copies each input blob to an output blob. When
    `cross_socket` is True the copies are placed on NUMA node 1, so the
    data moves across the socket interconnect; otherwise they stay on
    node 0.

    Returns:
        (init_net, net) pair of core.Net objects.
    """
    device_opt = caffe2_pb2.DeviceOption()
    device_opt.device_type = caffe2_pb2.CPU
    device_opt.numa_node_id = 0

    init_net = core.Net(net_name + "_init")
    init_net.Proto().type = "async_scheduling"
    for idx in range(NUM_REPLICAS):
        init_net.XavierFill(
            [],
            net_name + "/input_blob_" + str(idx),
            shape=[SHAPE_LEN, SHAPE_LEN],
            device_option=device_opt,
        )

    net = core.Net(net_name)
    net.Proto().type = "async_scheduling"
    if cross_socket:
        # Copy destinations land on the second NUMA node.
        device_opt.numa_node_id = 1
    for idx in range(NUM_REPLICAS):
        net.Copy(
            net_name + "/input_blob_" + str(idx),
            net_name + "/output_blob_" + str(idx),
            device_option=device_opt,
        )

    return init_net, net

def _run_benchmark(net, label):
    """Run `net` NUM_ITER times, print timing/bandwidth, return BW in GB/s.

    `label` is "Single" or "Cross" and is interpolated into the report
    strings so the output matches the original per-case messages.
    """
    start = time.time()
    workspace.RunNet(net.Name(), NUM_ITER)
    dt = time.time() - start
    print("{} socket time:".format(label), dt)
    # 4 bytes per (float) element, per replica, per iteration.
    bw = 4 * SHAPE_LEN * SHAPE_LEN * NUM_REPLICAS * NUM_ITER / dt / GB
    print("{} socket BW: {} GB/s".format(label, bw))
    return bw


def main():
    """Compare same-socket vs cross-socket copy bandwidth on a NUMA machine.

    Builds one net that copies blobs within NUMA node 0 and one that
    copies from node 0 to node 1, runs each 4 times, and prints the
    bandwidth and the single/cross ratio for each round.

    Raises:
        RuntimeError: if NUMA support is disabled or fewer than two
            NUMA nodes are available. (Was an `assert`, which would be
            silently stripped under `python -O`.)
    """
    if not (workspace.IsNUMAEnabled() and workspace.GetNumNUMANodes() >= 2):
        raise RuntimeError(
            "NUMA support must be enabled and at least two NUMA nodes "
            "must be available")

    single_init, single_net = build_net("single_net", False)
    cross_init, cross_net = build_net("cross_net", True)

    # Init nets are run once to materialize the input blobs.
    workspace.CreateNet(single_init)
    workspace.RunNet(single_init.Name())
    workspace.CreateNet(cross_init)
    workspace.RunNet(cross_init.Name())

    workspace.CreateNet(single_net)
    workspace.CreateNet(cross_net)

    # Several rounds to expose warm-up effects and run-to-run variance.
    for _ in range(4):
        single_bw = _run_benchmark(single_net, "Single")
        cross_bw = _run_benchmark(cross_net, "Cross")
        print("Single BW / Cross BW: {}".format(single_bw / cross_bw))

|
if __name__ == '__main__':
    # NUMA-aware allocation must be enabled via GlobalInit before any
    # nets are built or run.
    core.GlobalInit(["caffe2", "--caffe2_cpu_numa_enabled=1"])
    main()