
Commit fb44c9a

- Remove hard torch dependency
- Tweaking geotop
1 parent 999cdc7 commit fb44c9a

File tree

1 file changed: +0 −136 lines


gempy_engine/modules/activator/activator_interface.py

Lines changed: 0 additions & 136 deletions
@@ -85,39 +85,8 @@ def activate_formation_block_from_args_hard_sigmoid(Z_x, ids, scalar_value_at_sp
     return sigm.reshape(1, -1)


-import torch


-class HardSigmoidModified2(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, input, a, b, id):
-        ctx.save_for_backward(input)
-        ctx.bounds = (a, b)
-        ctx.id = id
-        output = bt.t.zeros_like(input)
-        slope_up = 1 / (b - a)
-
-        # For x in the range [a, b]
-        b_ = (input > a) & (input <= b)
-        pos = slope_up * (input[b_] - a)
-
-        output[b_] = id + pos
-
-        return output
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        input = ctx.saved_tensors[0]
-        a, b = ctx.bounds
-        slope_up = 1 / (b - a)
-
-        b_ = (input > a) & (input <= b)
-
-        grad_input = grad_output.clone()
-        # Apply gradient only within the range [a, b]
-        grad_input[b_] = grad_input[b_] * slope_up
-
-        return grad_input, None, None, None


 def _compute_sigmoid(Z_x, scale_0, scale_1, drift_0, drift_1, drift_id, sigmoid_slope):
@@ -138,108 +107,3 @@ def _compute_sigmoid(Z_x, scale_0, scale_1, drift_0, drift_1, drift_id, sigmoid_

     sigm = activation_sig + drift_id.reshape((-1, 1))
     return sigm
-
-
-def _add_relu():
-    # ReLU_up = T.switch(Z_x < scalar_field_iter[1], 0,
-    #                    - 0.01 * (Z_x - scalar_field_iter[1]))
-    # ReLU_down = T.switch(Z_x > scalar_field_iter[-2], 0,
-    #                      0.01 * T.abs_(Z_x - scalar_field_iter[-2]))
-    # formations_block += ReLU_down + ReLU_up
-    pass
-
-
-# * This gets the scalar gradient
-
-
-class HardSigmoidModified(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, input, a, b, id):
-        ctx.save_for_backward(input)
-        ctx.bounds = (a, b)
-        output = torch.zeros_like(input)
-        slope_up = 100 / (b - a)
-        midpoint = (a + b) / 2
-
-        # For x in the range [a, b]
-        b_ = (input > a) & (input <= b)
-
-        pos = slope_up * (input[b_] - a)
-
-        neg = -slope_up * (input[b_] - b)
-
-        print("Max min:", pos.max(), pos.min())
-        foo = id * pos - (id - 1) * neg
-
-        # output[b_] = id * pos
-        output[b_] = id + pos
-
-        # output[(input >= a) & (input <= b)] = torch.clamp(neg, min=0, max=1)
-        # output[(input >= a) & (input <= b)] = torch.clamp(pos + neg, min=0, max=1)
-        # output[(input >= a) & (input <= b)] = torch.clamp(pos + neg, min=0, max=1)
-
-        # Clamping the values outside the range [a, c] to zero
-        # output[input < a] = 0
-        # output[input >= b] = 0
-
-        # output[b_] *= id
-
-        return output
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        input, = ctx.saved_tensors
-        a, b = ctx.bounds
-        midpoint = (a + b) / 2
-        grad_input = grad_output.clone()
-
-        # Gradient is 1/(b-a) for x in [a, midpoint), -1/(b-a) for x in (midpoint, b], and 0 elsewhere
-        grad_input[input < a] = 0
-        grad_input[input > b] = 0
-        grad_input[(input >= a) & (input < midpoint)] = 1 / (b - a)
-        grad_input[(input > midpoint) & (input <= b)] = -1 / (b - a)
-
-        return grad_input, None, None, None
-
-
-class HardSigmoid(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, input, a, b, c):
-        ctx.save_for_backward(input)
-        ctx.bounds = (a, b)
-        slope = 1 / (b - a)
-        return torch.clamp(slope * (input - a) + 0.5, min=0, max=1)
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        input, = ctx.saved_tensors
-        a, b = ctx.bounds
-        grad_input = grad_output.clone()
-        grad_input[input < a] = 0
-        grad_input[input > b] = 0
-        grad_input[(input >= a) & (input <= b)] = 1 / (b - a)
-        return grad_input, None, None
-
-
-class CustomSigmoidFunction(torch.autograd.Function):
-    @staticmethod
-    def forward(ctx, Z_x, scale_0, scale_1, drift_0, drift_1, drift_id, sigmoid_slope, epsilon=1e-7):
-        sigmoid_slope_tensor = sigmoid_slope
-
-        active_sig = -scale_0 / (1 + torch.exp(-sigmoid_slope_tensor * (Z_x - drift_0)).clamp(min=epsilon))
-        deactive_sig = -scale_1 / (1 + torch.exp(sigmoid_slope_tensor * (Z_x - drift_1)).clamp(min=epsilon))
-        activation_sig = active_sig + deactive_sig
-
-        sigm = activation_sig + drift_id
-
-        ctx.save_for_backward(sigm)
-        return sigm
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        sigm, = ctx.saved_tensors
-        # Here you need to compute the actual gradient of your function with respect to the inputs.
-        # The following is just a placeholder to illustrate replacing NaNs with zeros.
-        # grad_input = torch.nan_to_num(grad_output)  # Replace NaNs with zeros
-        # Do the actual gradient computation here
-        return grad_output, None, None, None, None, None, None
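The deleted classes above are subclasses of torch.autograd.Function with hand-written forward and backward passes, which is what tied this module to torch. As a reference for how such a class is exercised, here is a small self-contained sketch in the same spirit as the removed HardSigmoid; the class name, bounds, and the 0-to-1 ramp (without the +0.5 offset) are assumptions for illustration, not code from this commit. Custom Functions are invoked through .apply rather than instantiated.

import torch


class HardSigmoidSketch(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, a, b):
        # Piecewise-linear ramp from 0 to 1 between the scalar-field bounds a and b.
        ctx.save_for_backward(input)
        ctx.bounds = (a, b)
        slope = 1 / (b - a)
        return torch.clamp(slope * (input - a), min=0.0, max=1.0)

    @staticmethod
    def backward(ctx, grad_output):
        # Constant gradient 1 / (b - a) inside [a, b], zero outside.
        input, = ctx.saved_tensors
        a, b = ctx.bounds
        grad_input = grad_output.clone()
        grad_input[(input < a) | (input > b)] = 0
        grad_input[(input >= a) & (input <= b)] *= 1 / (b - a)
        return grad_input, None, None


z = torch.linspace(-2.0, 2.0, steps=9, requires_grad=True)
out = HardSigmoidSketch.apply(z, -1.0, 1.0)   # custom Functions are called via .apply
out.sum().backward()
print(out.detach())   # ramp values clamped to [0, 1]
print(z.grad)         # 0.5 inside [-1, 1], 0 outside

The piecewise-constant gradient (1/(b − a) inside [a, b], zero elsewhere) mirrors the behaviour the removed classes implemented by hand.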

0 commit comments
