@@ -120,7 +120,11 @@ function finite_difference_gradient!(df::AbstractArray{<:Number}, f, x::Abstract
120
120
if fdtype == Val{:forward }
121
121
@inbounds for i ∈ eachindex (x)
122
122
c2[i] += c1[i]
123
- df[i] = (f (c2) - f (x)) / c1[i]
123
+ if typeof (fx) != Void
124
+ df[i] = (f (c2) - fx) / c1[i]
125
+ else
126
+ df[i] = (f (c2) - f (x)) / c1[i]
127
+ end
124
128
c2[i] -= c1[i]
125
129
end
126
130
elseif fdtype == Val{:central }
@@ -177,14 +181,63 @@ function finite_difference_gradient!(df::AbstractArray{<:Number}, f, x::Number,
177
181
df
178
182
end
179
183
180
-
184
# Gradient of f : C^n -> C with respect to each component of the vector x,
# using real/imaginary perturbations so that complex-valued derivatives are
# assembled as  d/dx_i = real-part difference + im * imag-part difference.
function finite_difference_gradient!(df::AbstractArray{<:Number}, f, x::AbstractArray{<:Number},
    cache::GradientCache{T1,T2,T3,fdtype,Val{:Complex}}) where {T1,T2,T3,fdtype}

    # NOTE: in this case epsilon is a vector, so we need two work arrays:
    # c1 holds the per-component epsilons (pre-computed by the cache constructor),
    # c2 is the perturbed point x1, pre-set to the values of x by the cache constructor.
    fx, c1, c2 = cache.fx, cache.c1, cache.c2
    if fdtype == Val{:forward}
        # f(x) is loop-invariant in the forward branch (x is never mutated here),
        # so evaluate it once when the cache does not already carry it.  This
        # replaces a per-iteration typeof(fx) check plus two redundant f(x)
        # calls per component in the original.
        fx0 = fx === nothing ? f(x) : fx
        @inbounds for i ∈ eachindex(x)
            epsilon = c1[i]
            # Real-direction perturbation -> real part of the derivative.
            c2[i] += epsilon
            df[i] = real(f(c2) - fx0) / epsilon
            c2[i] -= epsilon
            # Imaginary-direction perturbation -> imaginary part.
            c2[i] += im * epsilon
            df[i] += im * imag(f(c2) - fx0) / epsilon
            c2[i] -= im * epsilon   # restore c2 so it again mirrors x
        end
    elseif fdtype == Val{:central}
        @inbounds for i ∈ eachindex(x)
            epsilon = c1[i]
            # Central difference: perturb c2 forward and x backward, then
            # restore both.  NOTE(review): x itself is mutated and restored
            # in place here, so f must not retain a reference to x across calls.
            c2[i] += epsilon
            x[i] -= epsilon
            df[i] = real(f(c2) - f(x)) / (2 * epsilon)
            c2[i] -= epsilon
            x[i] += epsilon
            # Same stencil along the imaginary axis.
            c2[i] += im * epsilon
            x[i] -= im * epsilon
            df[i] += im * imag(f(c2) - f(x)) / (2 * epsilon)
            c2[i] -= im * epsilon
            x[i] += im * epsilon
        end
    elseif fdtype == Val{:complex}
        # Complex-step differentiation: exact to machine precision for
        # real-analytic f, so a single tiny epsilon suffices.
        epsilon_elemtype = compute_epsilon_elemtype(nothing, x)
        epsilon_complex = eps(epsilon_elemtype)
        # We use c1 here to avoid typing issues with x.
        @inbounds for i ∈ eachindex(x)
            c1[i] += im * epsilon_complex
            df[i] = imag(f(c1)) / epsilon_complex
            c1[i] -= im * epsilon_complex
        end
    else
        fdtype_error(Val{:Complex})
    end
    df
end
187
238
239
+ # vector of derivatives of f : C -> C^n
240
+ # this is effectively a vector of partial derivatives, but we still call it a gradient
188
241
function finite_difference_gradient! (df:: AbstractArray{<:Number} , f, x:: Number ,
189
242
cache:: GradientCache{T1,T2,T3,fdtype,Val{:Complex}} ) where {T1,T2,T3,fdtype}
190
243
0 commit comments