Skip to content
This repository was archived by the owner on Mar 12, 2021. It is now read-only.

Value function iteration on the GPU tutorial #281

Open
wants to merge 3 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 8 additions & 2 deletions tutorials/make.jl
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,12 @@ cd(joinpath(@__DIR__, "src")) do
rm("../build"; force=true, recursive=true)

# intro tutorial
# NOTE(review): the two active lines below also appear in commented-out form
# immediately after them — this looks like a diff artifact of disabling the
# intro build; confirm which pair should survive before merging.
weave("intro.jl", out_path="../build", doctype="md2html")
cp("intro1.png", "../build/intro1.png")
# weave("intro.jl", out_path="../build", doctype="md2html")
# cp("intro1.png", "../build/intro1.png")

# value function iteration tutorial: render vfi.jl to HTML (with the custom
# stylesheet) and copy the figures it references into the build directory.
weave("vfi.jl", out_path="../build", doctype="md2html",css="style.css")
cp("addprocs.png", "../build/addprocs.png")
cp("iterms.png", "../build/iterms.png")
cp("threads.png", "../build/threads.png")
end
55 changes: 55 additions & 0 deletions tutorials/src/addprocs-vfi.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
include(joinpath(@__DIR__, "common.jl"))
using Distributions
using BenchmarkTools
using SharedArrays
using Distributed

# Build the model parameter tuple (defined in common.jl, included above).
p = param()

addprocs(5) # add 5 workers
# NOTE(review): `using Weave` looks stray here — nothing in this script calls
# Weave (it is used by make.jl); confirm before removing.
using Weave
@everywhere function v(ix::Int,ie::Int,age::Int,Vnext::Matrix{Float64},p::NamedTuple)
    # Bellman operator for one state: given assets index `ix`, shock index `ie`
    # and `age`, return max over next-period assets jx of
    #   u(cons) + beta * E[ Vnext[jx, e'] | e ].
    # `Vnext` is next period's value function on the (nx, ne) grid; `p` is the
    # parameter tuple from param().
    out = typemin(eltype(Vnext))
    for jx in 1:p.nx
        # Expected continuation value of choosing jx.
        # FIX: this accumulator must be reset for every jx — the original code
        # initialized it once before the loop, so expectations of earlier
        # choices leaked into later ones.
        expected = 0.0
        if age < p.T
            # At the terminal age (age == p.T) there is no continuation value.
            for je in 1:p.ne
                expected += p.P[ie,je] * Vnext[jx,je]
            end
        end
        # Budget constraint: income today minus savings chosen for tomorrow.
        cons = (1+p.r)*p.xgrid[ix] + p.egrid[ie]*p.w - p.xgrid[jx]
        if cons <= 0
            # Infeasible choice: penalize without evaluating cons^(1-sigma),
            # which would be 0^-1 == Inf at cons == 0 and a DomainError for
            # negative cons with fractional sigma.
            utility = typemin(eltype(Vnext))
        else
            # CRRA flow utility plus discounted continuation value.
            utility = (cons^(1-p.sigma))/(1-p.sigma) + p.beta * expected
        end
        if utility >= out
            out = utility
        end
    end
    return out
end

function cpu_lifecycle_multicore(p::NamedTuple)
    # Backward induction over ages, with each age's value function computed in
    # parallel across the worker pool via @distributed. Workers write their
    # results into a SharedArray visible to the master process.
    println("julia running with $(length(workers())) workers")
    V = zeros(p.nx, p.ne, p.T)
    Vnext = zeros(p.nx, p.ne)
    Vshared = SharedArray{Float64}(p.nx, p.ne) # note!
    grid = CartesianIndices((p.nx, p.ne))
    for age in p.T:-1:1
        # Flatten the (ix, ie) state space to one index so a single
        # @distributed loop covers it; @sync blocks until all workers finish.
        @sync @distributed for idx in 1:(p.nx * p.ne)
            ci = grid[idx]
            Vshared[ci[1], ci[2]] = v(ci[1], ci[2], age, Vnext, p)
        end
        V[:, :, age] = Vshared
        Vnext[:, :] = Vshared
    end
    return V
end

# First call runs the solver once (this includes JIT compilation) and prints a
# few values as a sanity check.
println("@time cpu_lifecycle_multicore(p)")
V = cpu_lifecycle_multicore(p);
println("first three shock states at age 1: $(V[1,1:3,1])")
# Second call measures pure runtime, compilation already done.
@time cpu_lifecycle_multicore(p)
Binary file added tutorials/src/addprocs.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
52 changes: 52 additions & 0 deletions tutorials/src/common.jl
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,55 @@ function script(code; wrapper=``, args=``)
end
nothing
end


#' Let's setup a named tuple to hold our model parameters.
function param(;nx=1500,ne=15,T=10)
    # Build the model's parameter tuple:
    #   nx — number of points on the asset grid
    #   ne — number of points on the productivity grid
    #   T  — number of life-cycle periods
    # The grids (xgrid, egrid) and the Tauchen (1986) transition matrix P are
    # filled in place below (the tuple is immutable, but its arrays are not).
    p = (
        nx = nx,
        xmax = 4.0,           # upper bound of asset grid
        xmin = 0.1,           # lower bound of asset grid
        ne = ne,
        sigma_eps = 0.02058,  # std. dev. of productivity innovations
        lambda_eps = 0.99,    # AR(1) persistence of log productivity
        m = 1.5,              # Tauchen grid width, in unconditional std. devs.
        sigma = 2,            # CRRA risk aversion
        beta = 0.97,          # discount factor
        T = T,                # FIX: was hard-coded to 10, silently ignoring the keyword argument
        r = 0.07,             # interest rate
        w = 5,                # wage
        xgrid = zeros(Float32,nx),
        egrid = zeros(Float32,ne),
        P = zeros(Float32,ne,ne))

    # Grid for capital (x): evenly spaced between xmin and xmax.
    p.xgrid[:] = collect(range(p.xmin,stop=p.xmax,length=p.nx));

    # Grid for productivity (e) with Tauchen (1986): +/- m unconditional
    # standard deviations of the AR(1) process around zero.
    sigma_y = sqrt((p.sigma_eps^2) / (1 - (p.lambda_eps^2)));
    estep = 2*sigma_y*p.m / (p.ne-1);
    for i = 1:p.ne
        # Reuse sigma_y (the original recomputed the same expression inline).
        p.egrid[i] = -p.m*sigma_y + (i-1)*estep;
    end

    # Transition probability matrix (P), Tauchen (1986): integrate the normal
    # innovation density over each destination grid cell.
    mm = p.egrid[2] - p.egrid[1];
    for j = 1:p.ne
        for k = 1:p.ne
            if k == 1
                p.P[j, k] = cdf(Normal(), (p.egrid[k] - p.lambda_eps*p.egrid[j] + (mm/2))/p.sigma_eps);
            elseif k == p.ne   # FIX: was bare `ne`; use the tuple field for consistency
                p.P[j, k] = 1 - cdf(Normal(), (p.egrid[k] - p.lambda_eps*p.egrid[j] - (mm/2))/p.sigma_eps);
            else
                p.P[j, k] = cdf(Normal(), (p.egrid[k] - p.lambda_eps*p.egrid[j] + (mm/2))/p.sigma_eps) -
                            cdf(Normal(), (p.egrid[k] - p.lambda_eps*p.egrid[j] - (mm/2))/p.sigma_eps);
            end
        end
    end

    # Convert log productivity to levels.
    p.egrid[:] = exp.(p.egrid)
    return p
end
12 changes: 6 additions & 6 deletions tutorials/src/intro.jl
Original file line number Diff line number Diff line change
Expand Up @@ -123,8 +123,8 @@ using BenchmarkTools

using CuArrays

x_d = cufill(1.0f0, N) # a vector stored on the GPU filled with 1.0 (Float32)
y_d = cufill(2.0f0, N) # a vector stored on the GPU filled with 2.0
x_d = CuArray(fill(1.0f0, N)) # a vector stored on the GPU filled with 1.0 (Float32)
y_d = CuArray(fill(2.0f0, N)) # a vector stored on the GPU filled with 2.0

#' Here the `d` means "device," in contrast with "host". Now let's do the increment:

Expand Down Expand Up @@ -239,8 +239,8 @@ code = """
end

N = 2^20
x_d = cufill(1.0f0, N)
y_d = cufill(2.0f0, N)
x_d = CuArray(fill(1.0f0, N))
y_d = CuArray(fill(2.0f0, N))

bench_gpu1!(y_d, x_d)
CUDAdrv.@profile bench_gpu1!(y_d, x_d)
Expand Down Expand Up @@ -383,8 +383,8 @@ code = """
end

N = 2^20
x_d = cufill(1.0f0, N)
y_d = cufill(2.0f0, N)
x_d = CuArray(fill(1.0f0, N))
y_d = CuArray(fill(2.0f0, N))

bench_gpu3!(y_d, x_d)
CUDAdrv.@profile bench_gpu3!(y_d, x_d)
Expand Down
Binary file added tutorials/src/iterms.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Loading