Add TensorFlow multi-gpu computation notebook.

Donne Martin 2015-12-28 08:01:59 -05:00
parent 438c18dd78
commit 4b403ccca2
2 changed files with 185 additions and 0 deletions

@@ -102,6 +102,7 @@ IPython Notebook(s) demonstrating deep learning functionality.
| [tsf-cnn](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/3_neural_networks/convolutional_network.ipynb) | Implement convolutional neural networks in TensorFlow. |
| [tsf-mlp](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/3_neural_networks/multilayer_perceptron.ipynb) | Implement multilayer perceptrons in TensorFlow. |
| [tsf-rnn](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/3_neural_networks/recurrent_network.ipynb) | Implement recurrent neural networks in TensorFlow. |
| [tsf-gpu](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/4_multi_gpu/multigpu_basics.ipynb) | Learn about basic multi-GPU computation in TensorFlow. |
### tensor-flow-exercises

@@ -0,0 +1,184 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Basic Multi GPU Computation in TensorFlow\n",
"\n",
"Credits: Forked from [TensorFlow-Examples](https://github.com/aymericdamien/TensorFlow-Examples) by Aymeric Damien\n",
"\n",
"## Setup\n",
"\n",
"Refer to the [setup instructions](http://nbviewer.ipython.org/github/donnemartin/data-science-ipython-notebooks/blob/master/deep-learning/tensor-flow-examples/Setup_TensorFlow.md)"
]
},
{
"cell_type": "markdown",
"metadata": {
"collapsed": true
},
"source": [
"This tutorial requires your machine to have 2 GPUs\n",
"* \"/cpu:0\": The CPU of your machine.\n",
"* \"/gpu:0\": The first GPU of your machine\n",
"* \"/gpu:1\": The second GPU of your machine\n",
"* For this example, we are using 2 GTX-980"
]
},
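{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check, the cell below sketches one way to list the devices TensorFlow can see. It assumes the `device_lib` helper in `tensorflow.python.client` is available in your TensorFlow build; if it is not, you can skip this cell."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch: list the devices visible to TensorFlow (assumes the\n",
"# tensorflow.python.client.device_lib helper exists in this build).\n",
"from tensorflow.python.client import device_lib\n",
"\n",
"for device in device_lib.list_local_devices():\n",
"    print(device.name)"
]
},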
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import numpy as np\n",
"import tensorflow as tf\n",
"import datetime"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"#Processing Units logs\n",
"log_device_placement = True\n",
"\n",
"#num of multiplications to perform\n",
"n = 10"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Example: compute A^n + B^n on 2 GPUs\n",
"\n",
"# Create random large matrix\n",
"A = np.random.rand(1e4, 1e4).astype('float32')\n",
"B = np.random.rand(1e4, 1e4).astype('float32')\n",
"\n",
"# Creates a graph to store results\n",
"c1 = []\n",
"c2 = []\n",
"\n",
"# Define matrix power\n",
"def matpow(M, n):\n",
" if n < 1: #Abstract cases where n < 1\n",
" return M\n",
" else:\n",
" return tf.matmul(M, matpow(M, n-1))"
]
},
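{
"cell_type": "markdown",
"metadata": {},
"source": [
"As a quick sanity check of `matpow`, the sketch below runs it on a tiny matrix on the CPU and compares the result against NumPy's `matrix_power`. Note that with the base case above (`matpow(M, 0)` returns `M`), `matpow(M, k)` chains `k` multiplications starting from `M`, so it yields the (k+1)-th power of `M`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# Sketch: check matpow on a small matrix, on the CPU.\n",
"M_small = np.random.rand(3, 3).astype('float32')\n",
"with tf.device('/cpu:0'):\n",
"    small_power = matpow(tf.constant(M_small), 2)\n",
"with tf.Session() as sess:\n",
"    result = sess.run(small_power)\n",
"# matpow(M_small, 2) chains two matmuls, i.e. it computes M_small^3\n",
"print(np.allclose(result, np.linalg.matrix_power(M_small, 3), atol=1e-4))"
]
},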
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Single GPU computing\n",
"\n",
"with tf.device('/gpu:0'):\n",
" a = tf.constant(A)\n",
" b = tf.constant(B)\n",
" #compute A^n and B^n and store results in c1\n",
" c1.append(matpow(a, n))\n",
" c1.append(matpow(b, n))\n",
"\n",
"with tf.device('/cpu:0'):\n",
" sum = tf.add_n(c1) #Addition of all elements in c1, i.e. A^n + B^n\n",
"\n",
"t1_1 = datetime.datetime.now()\n",
"with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n",
" # Runs the op.\n",
" sess.run(sum)\n",
"t2_1 = datetime.datetime.now()"
]
},
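{
"cell_type": "markdown",
"metadata": {},
"source": [
"The multi-GPU version below places work on both `/gpu:0` and `/gpu:1`. If your machine has only one GPU, the `/gpu:1` placement will fail at run time. A minimal workaround, assuming the `allow_soft_placement` option of `tf.ConfigProto`, is sketched in the next cell: it lets TensorFlow fall back to an available device instead of raising an error (pass `config=soft_config` to `tf.Session` if you need it), though without a second GPU you will not see a multi-GPU speedup."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Optional sketch: a session config that falls back to an available device\n",
"# when a requested one (e.g. /gpu:1) does not exist on this machine.\n",
"soft_config = tf.ConfigProto(allow_soft_placement=True,\n",
"                             log_device_placement=log_device_placement)"
]
},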
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"# Multi GPU computing\n",
"# GPU:0 computes A^n\n",
"with tf.device('/gpu:0'):\n",
" #compute A^n and store result in c2\n",
" a = tf.constant(A)\n",
" c2.append(matpow(a, n))\n",
"\n",
"#GPU:1 computes B^n\n",
"with tf.device('/gpu:1'):\n",
" #compute B^n and store result in c2\n",
" b = tf.constant(B)\n",
" c2.append(matpow(b, n))\n",
"\n",
"with tf.device('/cpu:0'):\n",
" sum = tf.add_n(c2) #Addition of all elements in c2, i.e. A^n + B^n\n",
"\n",
"t1_2 = datetime.datetime.now()\n",
"with tf.Session(config=tf.ConfigProto(log_device_placement=log_device_placement)) as sess:\n",
" # Runs the op.\n",
" sess.run(sum)\n",
"t2_2 = datetime.datetime.now()"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Single GPU computation time: 0:00:11.833497\n",
"Multi GPU computation time: 0:00:07.085913\n"
]
}
],
"source": [
"print \"Single GPU computation time: \" + str(t2_1-t1_1)\n",
"print \"Multi GPU computation time: \" + str(t2_2-t1_2)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.4.3"
}
},
"nbformat": 4,
"nbformat_minor": 0
}