mirror of
https://github.com/zebrajr/tensorflow.git
synced 2025-12-07 00:20:20 +01:00
Change 109240606 Fix typo Change 109240358 Fix bug in Concat's shape inference due to legacy scalar handling. The shape function was inadvertently converting outputs of unknown shape (rank=None) to vectors of unknown length (rank=1), due to inability to distinguish between legacy scalars and vectors, because `max(1, None)` is 1. Change 109237152 Remove numarray requirement in python_config. Change 109234003 Fix typo in elu documentation. Change 109232946 Python must now be configured via ./configure script Change 109232134 Backported fixes to the tensor comparison operators from the public Eigen repository Change 109231761 Test invalid inputs to softmax_cross_entropy_with_logits. Change 109230218 Backported fixes to the tensor comparison operators from the public Eigen repository Change 109229915 Correct comments in seq2seq to show the right input types for embedding models. (Thanks to hugman@github for bringing this up.) Change 109229118 Fix resize_images example in documentation and allow resize_images to run on a single image with partially-known shape. Change 109228940 Fix demo and node add/remove button spacing Change 109227909 Include Elu in the NN docs. Change 109227059 Adds variable_op_scope and makes variable_scope always add a name_scope. This creates an op scope for variables that makes it easy to create independent operations with a default name by making that name unique for the current scope and it allows explicit names that are not made unique. Change 109224492 Streamline yuv -> rgb conversion to be done in one pass in native code. The entire process now takes ~2ms (including the ByteBuffer.get() calls), down from 10+ ms when the arrays were being interleaved in Java prior to conversion. Also abstracting common yuv->rgb color conversion into helper method. Change 109224389 Add ability to move nodes in and out of auxiliary nodes in graph. Change 109217177 Update generated Op docs. 
Change 109215030 Implementation of the ELU activation function: http://arxiv.org/abs/1511.07289 Change 109209848 When GPUBFCAllocator runs out of memory, also log a summary of chunks in use by size. Change 109206569 Switched to the public version of the Eigen::sign method since it supports complex numbers. Change 109199813 Modify tensorflow.SequenceExample to support multiple-length sequences. Base CL: 109241553
173 lines
6.1 KiB
Bash
Executable File
173 lines
6.1 KiB
Bash
Executable File
#!/bin/bash

## Set up python-related environment settings

# Prompt for (or validate a pre-set) PYTHON_BIN_PATH.  If the caller
# exported PYTHON_BIN_PATH and it is invalid, fail hard; if the value
# came from the interactive prompt, clear it and re-prompt.
while true; do
  fromuser=""
  if [ -z "$PYTHON_BIN_PATH" ]; then
    # 'command -v' is the portable replacement for 'which'.
    default_python_bin_path=$(command -v python)
    read -p "Please specify the location of python. [Default is $default_python_bin_path]: " PYTHON_BIN_PATH
    fromuser="1"
    if [ -z "$PYTHON_BIN_PATH" ]; then
      PYTHON_BIN_PATH=$default_python_bin_path
    fi
  fi
  if [ -e "$PYTHON_BIN_PATH" ]; then
    break
  fi
  echo "Invalid python path. ${PYTHON_BIN_PATH} cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    # Non-interactive value was wrong: abort instead of looping forever.
    exit 1
  fi
  PYTHON_BIN_PATH=""
  # Retry
done

# Invoke python_config and set up symlinks to python includes.
# "exit 1" rather than the non-portable "exit -1" (bash maps -1 to 255).
(./util/python/python_config.sh --setup "$PYTHON_BIN_PATH";) || exit 1
## Set up Cuda-related environment settings

# Keep asking until we get a recognizable answer, unless the caller
# already exported TF_NEED_CUDA.
while [ -z "$TF_NEED_CUDA" ]; do
  read -p "Do you wish to build TensorFlow with GPU support? [y/N] " INPUT
  case "$INPUT" in
    [Yy]* )
      echo "GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=1;;
    [Nn]* | "" )
      # An empty answer takes the default (no GPU support).
      echo "No GPU support will be enabled for TensorFlow"; TF_NEED_CUDA=0;;
    * )
      echo "Invalid selection: " $INPUT;;
  esac
done

# CPU-only builds need no further configuration.
if [ "$TF_NEED_CUDA" == "0" ]; then
  echo "Configuration finished"
  exit
fi
# Find out where the CUDA toolkit is installed.
# Same prompt/validate/retry pattern as the python-path loop above:
# a bad pre-set CUDA_TOOLKIT_PATH aborts, a bad interactive answer retries.
while true; do
  fromuser=""
  if [ -z "$CUDA_TOOLKIT_PATH" ]; then
    default_cuda_path=/usr/local/cuda
    read -p "Please specify the location where CUDA 7.0 toolkit is installed. Refer to README.md for more details. [Default is $default_cuda_path]: " CUDA_TOOLKIT_PATH
    fromuser="1"
    if [ -z "$CUDA_TOOLKIT_PATH" ]; then
      CUDA_TOOLKIT_PATH=$default_cuda_path
    fi
  fi
  # The toolkit is identified by the presence of the cudart 7.0 runtime.
  if [ -e "$CUDA_TOOLKIT_PATH/lib64/libcudart.so.7.0" ]; then
    break
  fi
  # Report the error on stderr, consistent with the python check above.
  echo "Invalid path to CUDA 7.0 toolkit. ${CUDA_TOOLKIT_PATH}/lib64/libcudart.so.7.0 cannot be found" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  CUDA_TOOLKIT_PATH=""
  # Retry
done
# Find out where the CUDNN library is installed.
# Defaults to the CUDA toolkit directory chosen above.
while true; do
  fromuser=""
  if [ -z "$CUDNN_INSTALL_PATH" ]; then
    default_cudnn_path=${CUDA_TOOLKIT_PATH}
    read -p "Please specify the location where CUDNN 6.5 V2 library is installed. Refer to README.md for more details. [Default is $default_cudnn_path]: " CUDNN_INSTALL_PATH
    fromuser="1"
    if [ -z "$CUDNN_INSTALL_PATH" ]; then
      CUDNN_INSTALL_PATH=$default_cudnn_path
    fi
    # The result returned from "read" is used unexpanded, which makes "~"
    # unusable.  Go through one more level of shell expansion (deliberately
    # unquoted inside bash -c) to handle that.
    CUDNN_INSTALL_PATH=$(bash -c "readlink -f $CUDNN_INSTALL_PATH")
  fi
  # cudnn may live either directly in the install dir or under lib64/.
  if [ -e "$CUDNN_INSTALL_PATH/libcudnn.so.6.5" -o -e "$CUDNN_INSTALL_PATH/lib64/libcudnn.so.6.5" ]; then
    break
  fi
  # Report errors on stderr, consistent with the python check above.
  echo "Invalid path to CUDNN 6.5 V2 toolkit. Neither of the following two files can be found:" 1>&2
  echo "$CUDNN_INSTALL_PATH/lib64/libcudnn.so.6.5" 1>&2
  echo "$CUDNN_INSTALL_PATH/libcudnn.so.6.5" 1>&2
  if [ -z "$fromuser" ]; then
    exit 1
  fi
  CUDNN_INSTALL_PATH=""
  # Retry
done
# Record the validated paths in TensorFlow's canonical Cuda config file
# so that later build steps can read them back.
# (Typo fix in the emitted comment: "requries" -> "requires".)
cat > third_party/gpus/cuda/cuda.config <<EOF
# CUDA_TOOLKIT_PATH refers to the CUDA toolkit. Tensorflow requires Cuda 7.0
# at the moment.
CUDA_TOOLKIT_PATH="$CUDA_TOOLKIT_PATH"

# CUDNN_INSTALL_PATH refers to the CUDNN toolkit. The cudnn header and library
# files can be either in this directory, or under include/ and lib64/
# directories separately.
CUDNN_INSTALL_PATH="$CUDNN_INSTALL_PATH"
EOF
# Interactively collect a list of Cuda compute capabilities and patch the
# crosstool wrapper and gpu_device.cc to build for them.  Unofficial,
# largely untested path.
function UnofficialSetting() {
  echo -e "\nWARNING: You are configuring unofficial settings in TensorFlow. Because some external libraries are not backward compatible, these settings are largely untested and unsupported. \n" 1>&2

  # Configure the compute capabilities that TensorFlow builds for.
  # Since Cuda toolkit is not backward-compatible, this is not guaranteed to work.
  while true; do
    fromuser=""
    if [ -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
      cat << EOF
Please specify a list of comma-separated Cuda compute capabilities you want to build with.
You can find the compute capability of your device at: https://developer.nvidia.com/cuda-gpus.
Please note that each additional compute capability significantly increases your build time and binary size.
EOF
      read -p "[Default is: \"3.5,5.2\"]: " TF_CUDA_COMPUTE_CAPABILITIES
      # NOTE(review): an empty answer leaves TF_CUDA_COMPUTE_CAPABILITIES
      # empty, so the advertised "3.5,5.2" default is never actually
      # applied and the patching below is skipped -- confirm intended.
      fromuser=1
    fi
    # Check that every comma-separated capability looks like
    # "<major>.<minor>" (digits, a literal dot, digits).
    COMPUTE_CAPABILITIES=${TF_CUDA_COMPUTE_CAPABILITIES//,/ }
    ALL_VALID=1
    for CAPABILITY in $COMPUTE_CAPABILITIES; do
      # Bug fix: escape the dot and anchor the pattern.  The previous
      # unanchored "[0-9]+.[0-9]+" matched any substring and "." matched
      # any character, so inputs such as "3.5x" or "a3z5b" passed.
      if [[ ! "$CAPABILITY" =~ ^[0-9]+\.[0-9]+$ ]]; then
        echo "Invalid compute capability: " $CAPABILITY
        ALL_VALID=0
        break
      fi
    done
    if [ "$ALL_VALID" == "0" ]; then
      # A bad pre-set (non-interactive) value is fatal.
      if [ -z "$fromuser" ]; then
        exit 1
      fi
    else
      break
    fi
    TF_CUDA_COMPUTE_CAPABILITIES=""
  done

  if [ ! -z "$TF_CUDA_COMPUTE_CAPABILITIES" ]; then
    export WARNING="Unofficial setting. DO NOT"" SUBMIT!!!"
    # Render the capabilities as quoted, comma-separated python list
    # elements, e.g. '"3.5", "5.2", ', for the crosstool wrapper.
    function CudaGenCodeOpts() {
      OUTPUT=""
      for CAPABILITY in $@; do
        OUTPUT=${OUTPUT}" \"${CAPABILITY}\", "
      done
      echo $OUTPUT
    }
    export CUDA_GEN_CODES_OPTS=$(CudaGenCodeOpts ${TF_CUDA_COMPUTE_CAPABILITIES//,/ })
    # Splice the list into supported_cuda_compute_capabilities = [...] in place.
    perl -pi -0 -e 's,\n( *)([^\n]*supported_cuda_compute_capabilities\s*=\s*\[).*?(\]),\n\1# $ENV{WARNING}\n\1\2$ENV{CUDA_GEN_CODES_OPTS}\3,s' third_party/gpus/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc
    # Render the capabilities as C++ initializers, e.g.
    # 'CudaVersion("3.5"), CudaVersion("5.2"), ', for gpu_device.cc.
    function CudaVersionOpts() {
      OUTPUT=""
      for CAPABILITY in $@; do
        OUTPUT=$OUTPUT"CudaVersion(\"${CAPABILITY}\"), "
      done
      echo $OUTPUT
    }
    export CUDA_VERSION_OPTS=$(CudaVersionOpts ${TF_CUDA_COMPUTE_CAPABILITIES//,/ })
    # Splice the initializers into supported_cuda_compute_capabilities = {...} in place.
    perl -pi -0 -e 's,\n( *)([^\n]*supported_cuda_compute_capabilities\s*=\s*\{).*?(\}),\n\1// $ENV{WARNING}\n\1\2$ENV{CUDA_VERSION_OPTS}\3,s' tensorflow/core/common_runtime/gpu/gpu_device.cc
  fi
}
# Only run the unofficial settings when users explicitly choose to.
if [ "$TF_UNOFFICIAL_SETTING" == "1" ]; then
  UnofficialSetting
fi

# Invoke cuda_config.sh to set up TensorFlow's canonical view of the Cuda
# libraries.  Run in a subshell so the cd does not leak into this script.
# "exit 1" rather than the non-portable "exit -1" (bash maps -1 to 255).
(cd third_party/gpus/cuda; ./cuda_config.sh;) || exit 1

echo "Configuration finished"