Merge branch 'terrytangyuan-multGPU'
Commit cff8e6e57e
21 README.md
@@ -106,6 +106,27 @@ score = metrics.accuracy_score(classifier.predict(iris.data), iris.target)
print("Accuracy: %f" % score)
```
### Custom model with multiple GPUs
To use multiple GPUs to build a custom model, everything else is the same as in the example above,
except that in the definition of the custom model you'll need to specify the devices:
```Python
import skflow
import tensorflow as tf

def my_model(X, y):
    """
    This is a DNN with three hidden layers (10, 20 and 10 units) and dropout
    with keep probability 0.5.

    Note: if you want to run this example with multiple GPUs, CUDA Toolkit 7.0 and
    cuDNN 6.5 v2 from NVIDIA need to be installed beforehand.
    """
    with tf.device('/gpu:1'):
        layers = skflow.ops.dnn(X, [10, 20, 10], keep_prob=0.5)
    with tf.device('/gpu:2'):
        return skflow.models.logistic_regression(layers, y)
```
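For orientation, a minimal sketch of how such a model function is then handed to the estimator, mirroring the `examples/multiple_gpu.py` file added in this commit; aside from the device placements inside `my_model`, nothing about fitting or prediction changes:

```Python
from sklearn import datasets, metrics

import skflow

# Train the multi-GPU model defined above on the Iris data; the workflow is
# identical to that of a single-device custom model.
iris = datasets.load_iris()
classifier = skflow.TensorFlowEstimator(model_fn=my_model, n_classes=3)
classifier.fit(iris.data, iris.target)
score = metrics.accuracy_score(classifier.predict(iris.data), iris.target)
print("Accuracy: %f" % score)
```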
## Coming soon
* Easy way to handle categorical variables
42 examples/multiple_gpu.py (new file)
@@ -0,0 +1,42 @@
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random

import skflow
import tensorflow as tf
from sklearn import cross_validation, datasets, metrics

iris = datasets.load_iris()
X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target,
    test_size=0.2, random_state=42)

random.seed(42)
def my_model(X, y):
    """
    This is a DNN with three hidden layers (10, 20 and 10 units) and dropout
    with keep probability 0.5.

    Note: if you want to run this example with multiple GPUs, CUDA Toolkit 7.0 and
    cuDNN 6.5 v2 from NVIDIA need to be installed beforehand.
    """
    with tf.device('/gpu:1'):
        layers = skflow.ops.dnn(X, [10, 20, 10], keep_prob=0.5)
    with tf.device('/gpu:2'):
        return skflow.models.logistic_regression(layers, y)
classifier = skflow.TensorFlowEstimator(model_fn=my_model, n_classes=3)
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(classifier.predict(X_test), y_test)
print('Accuracy: {0:f}'.format(score))
@@ -46,7 +46,7 @@ class TensorFlowEstimator(BaseEstimator):
"""
|
||||
|
||||
def __init__(self, model_fn, n_classes, tf_master="", batch_size=32, steps=50, optimizer="SGD",
|
||||
learning_rate=0.1, tf_random_seed=42, continue_training=False):
|
||||
learning_rate=0.1, tf_random_seed=42, continue_training=False, log_device_placement=True):
|
||||
self.n_classes = n_classes
|
||||
self.tf_master = tf_master
|
||||
self.batch_size = batch_size
|
||||
@@ -56,6 +56,7 @@ class TensorFlowEstimator(BaseEstimator):
        self.tf_random_seed = tf_random_seed
        self.model_fn = model_fn
        self.continue_training = continue_training
+       self.log_device_placement = log_device_placement
        self._initialized = False

    def _setup_data_feeder(self, X, y):
@@ -93,7 +94,8 @@ class TensorFlowEstimator(BaseEstimator):
        # Create trainer and augment graph with gradients and optimizer.
        self._trainer = TensorFlowTrainer(self._model_loss,
            self._global_step, self.optimizer, self.learning_rate)
-       self._session = tf.Session(self.tf_master)
+       self._session = tf.Session(self.tf_master,
+           config=tf.ConfigProto(log_device_placement=self.log_device_placement))

    def fit(self, X, y):
        """Builds a neural network model given provided `model_fn` and training