Mirror of https://github.com/zebrajr/pytorch.git
* Also pass torch includes to nvcc build
* Export ATen/cuda headers with install
* Refactor flags common to C++ and CUDA
* Improve tests for C++/CUDA extensions
* Export .cuh files under THC
* Refactor and clean cpp_extension.py slightly
* Include ATen in cuda extension test
* Clarifying comment in cuda_extension.cu
* Replace cuda_extension.cu with cuda_extension_kernel.cu in setup.py
* Copy compile args in C++ extension and add second kernel
* Conditionally add -std=c++11 to cuda_flags
* Also export cuDNN headers
* Add comment about deepcopy
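Several of these changes concern how include paths and compile flags reach nvcc through torch.utils.cpp_extension. As a rough illustration of the build this enables, here is a minimal setup.py sketch; the source file names follow the test files named in the changelog above, and the extra flags are illustrative placeholders, not the flags the build itself injects:

# Minimal sketch of building a mixed C++/CUDA extension with
# torch.utils.cpp_extension. File names match the test files referenced
# in the commit message; the extra compile flags are placeholders.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

setup(
    name='cuda_extension',
    ext_modules=[
        CUDAExtension(
            name='cuda_extension',
            sources=['cuda_extension.cpp', 'cuda_extension_kernel.cu'],
            # Separate flag lists for the C++ and CUDA compilers; torch
            # and ATen include paths are added by the build itself.
            extra_compile_args={'cxx': ['-g'],
                                'nvcc': ['-O2']}),
    ],
    cmdclass={'build_ext': BuildExtension})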
24 lines
710 B
Plaintext
#include <cuda.h>
#include <cuda_runtime.h>

#include <ATen/ATen.h>

// Elementwise kernel: output[i] = tanh(x[i]) + tanh(y[i]).
__global__ void tanh_add_kernel(
    const float* __restrict__ x,
    const float* __restrict__ y,
    float* __restrict__ output,
    const int size) {
  const int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index < size) {
    // tanh(a) via the identity tanh(a) = 2 / (1 + e^(-2a)) - 1, using the
    // fast __expf intrinsic rather than tanhf.
    const float tanh_x = 2.0f / (1.0f + __expf(-2.0f * x[index])) - 1;
    const float tanh_y = 2.0f / (1.0f + __expf(-2.0f * y[index])) - 1;
    output[index] = tanh_x + tanh_y;
  }
}

// Host-side launcher: one thread per element, block count rounded up.
void tanh_add_cuda(const float* x, const float* y, float* output, int size) {
  const int threads = 1024;
  const int blocks = (size + threads - 1) / threads;
  tanh_add_kernel<<<blocks, threads>>>(x, y, output, size);
}
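The launcher above takes raw device pointers, so it is normally wrapped by a host function that validates the tensors and is exposed to Python. A sketch of JIT-compiling and calling the extension, assuming a companion cuda_extension.cpp binds a tanh_add(x, y) function (via pybind11) that allocates the output and forwards the data pointers to tanh_add_cuda:

# Sketch of compiling and calling the extension on the fly with
# torch.utils.cpp_extension.load. Assumes cuda_extension.cpp exposes a
# tanh_add(x, y) binding that calls tanh_add_cuda.
import torch
from torch.utils.cpp_extension import load

# Compiles both sources and imports the resulting module.
ext = load(
    name='cuda_extension',
    sources=['cuda_extension.cpp', 'cuda_extension_kernel.cu'],
    verbose=True)

x = torch.randn(4, 4, device='cuda')
y = torch.randn(4, 4, device='cuda')

# Compare against the eager-mode reference; a loose atol accounts for the
# reduced precision of the __expf intrinsic in the kernel.
out = ext.tanh_add(x, y)
print(torch.allclose(out, torch.tanh(x) + torch.tanh(y), atol=1e-5))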