
Commit 686c157

Merge pull request #742 from edejong-caltech/collision_merge
Add breakup process
2 parents (e15e51e + fa07366), commit 686c157


64 files changed: +1324 -299 lines

.gitignore

Lines changed: 3 additions & 0 deletions
@@ -129,3 +129,6 @@ dmypy.json
 
 # Pyre type checker
 .pyre/
+
+# VSCode stuff
+.code-workspace

PySDM/backends/impl_common/pairwise_storage.py

Lines changed: 3 additions & 0 deletions
@@ -27,4 +27,7 @@ def sort(self, other, is_first_in_pair):
         def sum(self, other, is_first_in_pair):
             backend.sum_pair(self, other, is_first_in_pair, other.idx)
 
+        def multiply(self, other, is_first_in_pair):
+            backend.multiply_pair(self, other, is_first_in_pair, other.idx)
+
     return PairwiseStorage

PySDM/backends/impl_numba/methods/collisions_methods.py

Lines changed: 198 additions & 37 deletions
@@ -3,23 +3,98 @@
 """
 import numba
 import numpy as np
+from PySDM.physics.constants import sqrt_pi, sqrt_two
 from PySDM.backends.impl_numba import conf
 from PySDM.backends.impl_numba.storage import Storage
 from PySDM.backends.impl_common.backend_methods import BackendMethods
+from PySDM.backends.impl_numba.atomic_operations import atomic_add
 
 
 @numba.njit(**{**conf.JIT_FLAGS, **{'parallel': False}})
 def pair_indices(i, idx, is_first_in_pair):
+    """ given permutation array `idx` and `is_first_in_pair` flag array,
+    returns indices `j` and `k` of droplets within pair `i`
+    such that `j` points to the droplet with higher (or equal) multiplicity
+    """
     offset = 1 - is_first_in_pair[2 * i]
     j = idx[2 * i + offset]
    k = idx[2 * i + 1 + offset]
     return j, k
 
 
+@numba.njit(**{**conf.JIT_FLAGS, **{'parallel': False}})
+def flag_zero_multiplicity(j, k, multiplicity, healthy):
+    if multiplicity[k] == 0 or multiplicity[j] == 0:
+        healthy[0] = 0
+
+
+@numba.njit(**{**conf.JIT_FLAGS, **{'parallel': False}})
+def coalesce(i, j, k, cid, multiplicity, gamma, attributes, coalescence_rate):
+    atomic_add(coalescence_rate, cid, gamma[i] * multiplicity[k])
+    new_n = multiplicity[j] - gamma[i] * multiplicity[k]
+    if new_n > 0:
+        multiplicity[j] = new_n
+        for a in range(0, len(attributes)):
+            attributes[a, k] += gamma[i] * attributes[a, j]
+    else: # new_n == 0
+        multiplicity[j] = multiplicity[k] // 2
+        multiplicity[k] = multiplicity[k] - multiplicity[j]
+        for a in range(0, len(attributes)):
+            attributes[a, j] = gamma[i] * attributes[a, j] + attributes[a, k]
+            attributes[a, k] = attributes[a, j]
+
+
+@numba.njit(**{**conf.JIT_FLAGS, **{'parallel': False}})
+def break_up(i, j, k, cid, multiplicity, gamma, attributes, n_fragment, max_multiplicity,
+             breakup_rate, success):
+    if n_fragment[i] ** gamma[i] > max_multiplicity:
+        success[0] = False
+        return
+    atomic_add(breakup_rate, cid, gamma[i] * multiplicity[k])
+    gamma_tmp = 0
+    gamma_deficit = gamma[i]
+    while gamma_deficit > 0:
+        if multiplicity[k] > multiplicity[j]:
+            j, k = k, j
+        tmp1 = 0
+        for m in range(int(gamma_deficit)):
+            tmp1 += n_fragment[i] ** m
+            new_n = multiplicity[j] - tmp1 * multiplicity[k]
+            gamma_tmp = m + 1
+            if new_n < 0:
+                gamma_tmp = m
+                tmp1 -= n_fragment[i] ** m
+                break
+        gamma_deficit -= gamma_tmp
+        tmp2 = n_fragment[i] ** gamma_tmp
+        new_n = multiplicity[j] - tmp1 * multiplicity[k]
+        if new_n > 0:
+            nj = new_n
+            nk = multiplicity[k] * tmp2
+            for a in range(0, len(attributes)):
+                attributes[a, k] += tmp1 * attributes[a, j]
+                attributes[a, k] /= tmp2
+        else: # new_n = 0
+            nj = tmp2 * multiplicity[k] / 2
+            nk = nj
+            for a in range(0, len(attributes)):
+                attributes[a, k] += tmp1 * attributes[a, j]
+                attributes[a, k] /= tmp2
+                attributes[a, j] = attributes[a, k]
+
+        multiplicity[j] = round(nj)
+        multiplicity[k] = round(nk)
+        factor_j = nj / multiplicity[j]
+        factor_k = nk / multiplicity[k]
+        for a in range(0, len(attributes)):
+            attributes[a, k] *= factor_k
+            attributes[a, j] *= factor_j
+
+
 class CollisionsMethods(BackendMethods):
     @staticmethod
     @numba.njit(**{**conf.JIT_FLAGS, **{'parallel': False}})
-    def adaptive_sdm_end_body(dt_left, n_cell, cell_start):
+    def __adaptive_sdm_end_body(dt_left, n_cell, cell_start):
         end = 0
         for i in range(n_cell - 1, -1, -1):
             if dt_left[i] == 0:
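
Note: pair_indices, flag_zero_multiplicity, coalesce and break_up factor out the per-pair bookkeeping that previously lived inline in coalescence_body, so that the new breakup path can reuse it. As a reading aid, the following standalone sketch (plain Python, hypothetical scalar names, not part of the PySDM API) mirrors what coalesce does to a single pair: each real droplet in k coalesces with gamma real droplets taken from j, and when j is exhausted its slot is reused by splitting k's multiplicity in half.

```python
def coalesce_pair(n_j, n_k, x_j, x_k, gamma):
    """n_* are multiplicities (n_j >= n_k), x_* are extensive attributes (e.g. water volume)."""
    new_n = n_j - gamma * n_k
    if new_n > 0:
        # each k-droplet absorbs gamma j-droplets; j keeps its attributes but fewer duplicates
        return new_n, n_k, x_j, x_k + gamma * x_j
    # j is exhausted: split k's multiplicity between the two super-droplets
    n_j_new = n_k // 2
    x_merged = gamma * x_j + x_k
    return n_j_new, n_k - n_j_new, x_merged, x_merged

print(coalesce_pair(n_j=10, n_k=4, x_j=1.0, x_k=2.0, gamma=2))  # (2, 4, 1.0, 4.0)
```

break_up follows the same pattern, except that each of the gamma collision events multiplies k's multiplicity by n_fragment: tmp1 accumulates the geometric series 1 + n_fragment + ... + n_fragment^(m-1) of j-droplets consumed, tmp2 = n_fragment ** gamma_tmp is the factor by which k's population grows, and the final round()/factor_* step restores integer multiplicities while keeping the extensive attribute totals unchanged.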
@@ -29,16 +104,15 @@ def adaptive_sdm_end_body(dt_left, n_cell, cell_start):
         return end
 
     def adaptive_sdm_end(self, dt_left, cell_start):
-        return self.adaptive_sdm_end_body(
+        return self.__adaptive_sdm_end_body(
             dt_left.data, len(dt_left), cell_start.data
         )
 
     @staticmethod
     @numba.njit(**conf.JIT_FLAGS)
     # pylint: disable=too-many-arguments,too-many-locals
-    def adaptive_sdm_gamma_body(gamma, idx, length, multiplicity, cell_id, dt_left, dt,
-                                dt_range, is_first_in_pair,
-                                stats_n_substep, stats_dt_min):
+    def __adaptive_sdm_gamma_body(gamma, idx, length, multiplicity, cell_id, dt_left, dt,
+                                  dt_range, is_first_in_pair, stats_n_substep, stats_dt_min):
         dt_todo = np.empty_like(dt_left)
         for cid in numba.prange(len(dt_todo)): # pylint: disable=not-an-iterable
             dt_todo[cid] = min(dt_left[cid], dt_range[1])
@@ -65,54 +139,139 @@ def adaptive_sdm_gamma_body(gamma, idx, length, multiplicity, cell_id, dt_left,
     # pylint: disable=too-many-arguments
     def adaptive_sdm_gamma(self, gamma, n, cell_id, dt_left, dt, dt_range, is_first_in_pair,
                            stats_n_substep, stats_dt_min):
-        return self.adaptive_sdm_gamma_body(
+        return self.__adaptive_sdm_gamma_body(
             gamma.data, n.idx.data, len(n), n.data, cell_id.data,
             dt_left.data, dt, dt_range, is_first_in_pair.indicator.data,
             stats_n_substep.data, stats_dt_min.data)
 
     @staticmethod
     # @numba.njit(**conf.JIT_FLAGS) # note: as of Numba 0.51, np.dot() does not support ints
-    def cell_id_body(cell_id, cell_origin, strides):
+    def __cell_id_body(cell_id, cell_origin, strides):
         cell_id[:] = np.dot(strides, cell_origin)
 
     def cell_id(self, cell_id, cell_origin, strides):
-        return self.cell_id_body(cell_id.data, cell_origin.data, strides.data)
+        return self.__cell_id_body(cell_id.data, cell_origin.data, strides.data)
 
     @staticmethod
     @numba.njit(**conf.JIT_FLAGS)
-    # pylint: disable=too-many-arguments
-    def coalescence_body(multiplicity, idx, length, attributes, gamma, healthy, is_first_in_pair):
-        for i in numba.prange(length // 2): # pylint: disable=not-an-iterable
+    def __collision_coalescence_body(
+        multiplicity, idx, length, attributes, gamma, healthy, cell_id, coalescence_rate,
+        is_first_in_pair,
+    ):
+        for i in numba.prange(length // 2): # pylint: disable=not-an-iterable,too-many-nested-blocks
             if gamma[i] == 0:
                 continue
             j, k = pair_indices(i, idx, is_first_in_pair)
+            coalesce(i, j, k, cell_id[i], multiplicity, gamma, attributes, coalescence_rate)
+            flag_zero_multiplicity(j, k, multiplicity, healthy)
 
-            new_n = multiplicity[j] - gamma[i] * multiplicity[k]
-            if new_n > 0:
-                multiplicity[j] = new_n
-                for a in range(0, len(attributes)):
-                    attributes[a, k] += gamma[i] * attributes[a, j]
-            else: # new_n == 0
-                multiplicity[j] = multiplicity[k] // 2
-                multiplicity[k] = multiplicity[k] - multiplicity[j]
-                for a in range(0, len(attributes)):
-                    attributes[a, j] = gamma[i] * attributes[a, j] + attributes[a, k]
-                    attributes[a, k] = attributes[a, j]
-            if multiplicity[k] == 0 or multiplicity[j] == 0:
-                healthy[0] = 0
+    def collision_coalescence(
+        self, multiplicity, idx, attributes, gamma, healthy, cell_id, coalescence_rate,
+        is_first_in_pair
+    ):
+        self.__collision_coalescence_body(
+            multiplicity.data, idx.data, len(idx), attributes.data, gamma.data, healthy.data,
+            cell_id.data, coalescence_rate.data, is_first_in_pair.indicator.data
+        )
 
-    # pylint: disable=too-many-arguments
-    def coalescence(self, multiplicity, idx, attributes, gamma, healthy, is_first_in_pair):
-        self.coalescence_body(multiplicity.data, idx.data, len(idx),
-                              attributes.data, gamma.data, healthy.data,
-                              is_first_in_pair.indicator.data)
+    @staticmethod
+    @numba.njit(**conf.JIT_FLAGS)
+    def __collision_coalescence_breakup_body(multiplicity, idx, length, attributes, gamma, rand,
+                                             Ec, Eb, n_fragment, healthy, cell_id, coalescence_rate,
+                                             breakup_rate, is_first_in_pair, success, max_multiplicity):
+        for i in numba.prange(length // 2): # pylint: disable=not-an-iterable,too-many-nested-blocks
+            if gamma[i] == 0:
+                continue
+            bouncing = rand[i] - Ec[i] - Eb[i] > 0
+            if bouncing:
+                continue
+            j, k = pair_indices(i, idx, is_first_in_pair)
+
+            if rand[i] - Ec[i] < 0:
+                coalesce(i, j, k, cell_id[i], multiplicity, gamma, attributes, coalescence_rate)
+            else:
+                break_up(i, j, k, cell_id[i], multiplicity, gamma, attributes, n_fragment,
+                         max_multiplicity, breakup_rate, success)
+            flag_zero_multiplicity(j, k, multiplicity, healthy)
+
+    def collision_coalescence_breakup(self, multiplicity, idx, attributes, gamma, rand, Ec, Eb,
+                                      n_fragment, healthy, cell_id, coalescence_rate, breakup_rate,
+                                      is_first_in_pair):
+        max_multiplicity = np.iinfo(multiplicity.data.dtype).max
+        success = np.ones(1, dtype=bool)
+        self.__collision_coalescence_breakup_body(
+            multiplicity.data, idx.data, len(idx), attributes.data, gamma.data, rand.data,
+            Ec.data, Eb.data, n_fragment.data, healthy.data, cell_id.data, coalescence_rate.data,
+            breakup_rate.data, is_first_in_pair.indicator.data, success, max_multiplicity
+        )
+        assert success
+
+    @staticmethod
+    @numba.njit(**{**conf.JIT_FLAGS})
+    def __slams_fragmentation_body(n_fragment, probs, rand):
+        for i in numba.prange(len(n_fragment)): # pylint: disable=not-an-iterable
+            probs[i] = 0.0
+            n_fragment[i] = 1
+            for n in range(22):
+                probs[i] += 0.91 * (n + 2)**(-1.56)
+                if rand[i] < probs[i]:
+                    n_fragment[i] = n + 2
+                    break
+
+    def slams_fragmentation(self, n_fragment, probs, rand):
+        self.__slams_fragmentation_body(n_fragment.data, probs.data, rand.data)
+
+    @staticmethod
+    @numba.njit(**{**conf.JIT_FLAGS})
+    def __exp_fragmentation_body(n_fragment, scale, frag_size, r_max, rand):
+        '''
+        Exponential PDF
+        '''
+        for i in numba.prange(len(n_fragment)): # pylint: disable=not-an-iterable
+            frag_size[i] = -scale * np.log(1-rand[i])
+            if frag_size[i] > r_max[i]:
+                n_fragment[i] = 1
+            else:
+                n_fragment[i] = r_max[i] / frag_size[i]
+
+    def exp_fragmentation(self, n_fragment, scale, frag_size, r_max, rand):
+        self.__exp_fragmentation_body(n_fragment.data, scale, frag_size.data, r_max.data, rand.data)
+
+    @staticmethod
+    @numba.njit(**{**conf.JIT_FLAGS})
+    def __gauss_fragmentation_body(n_fragment, mu, scale, frag_size, r_max, rand):
+        '''
+        Gaussian PDF
+        CDF = erf(x); approximate as erf(x) ~ tanh(ax) with a = 2/sqrt(pi) as in Vedder 1987
+        '''
+        for i in numba.prange(len(n_fragment)): # pylint: disable=not-an-iterable
+            frag_size[i] = mu + sqrt_pi * sqrt_two * scale / 4 * np.log((1 + rand[i])/(1 - rand[i]))
+            if frag_size[i] > r_max[i]:
+                n_fragment[i] = 1.0
+            else:
+                n_fragment[i] = r_max[i] / frag_size[i]
+
+    def gauss_fragmentation(self, n_fragment, mu, scale, frag_size, r_max, rand):
+        self.__gauss_fragmentation_body(n_fragment.data, mu, scale, frag_size.data, r_max.data,
+                                        rand.data)
+
+    @staticmethod
+    @numba.njit(**{**conf.JIT_FLAGS})
+    def __ll1982_fragmentation_body(n_fragment, probs, rand): # pylint: disable=unused-argument
+        for i in numba.prange(len(n_fragment)): # pylint: disable=not-an-iterable
+            probs[i] = 0.0
+            n_fragment[i] = 1
+
+            # first consider filament breakup
+
+    def ll1982_fragmentation(self, n_fragment, probs, rand):
+        self.__ll1982_fragmentation_body(n_fragment.data, probs.data, rand.data)
 
     @staticmethod
     @numba.njit(**conf.JIT_FLAGS)
     # pylint: disable=too-many-arguments
-    def compute_gamma_body(gamma, rand, idx, length, multiplicity, cell_id,
+    def __compute_gamma_body(gamma, rand, idx, length, multiplicity, cell_id,
                            collision_rate_deficit, collision_rate, is_first_in_pair):
-
         """
         return in "gamma" array gamma (see: http://doi.org/10.1002/qj.441, section 5)
         formula:
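
Note on the fragmentation samplers added above: both __exp_fragmentation_body and __gauss_fragmentation_body draw a fragment size by inverse-CDF sampling and then set n_fragment = r_max / frag_size, clamped to 1 when the sampled size exceeds r_max. As a reading of the code (not text from the commit), the closed forms follow from

```latex
F_{\mathrm{exp}}(x) = 1 - e^{-x/\mathrm{scale}}
  \;\Rightarrow\; x = -\mathrm{scale}\,\ln(1 - \mathrm{rand})

\operatorname{erf}(y) \approx \tanh\!\left(\tfrac{2y}{\sqrt{\pi}}\right)
  \;\Rightarrow\;
F_{\mathrm{gauss}}(x) \approx \tfrac{1}{2}\left[1 + \tanh\!\left(\tfrac{\sqrt{2}\,(x-\mu)}{\sqrt{\pi}\,\mathrm{scale}}\right)\right]
  \;\Rightarrow\;
x = \mu + \frac{\sqrt{\pi}\,\sqrt{2}\,\mathrm{scale}}{4}\,\ln\frac{1 + z}{1 - z}
```

using artanh(z) = (1/2) ln((1+z)/(1-z)); the last expression is exactly what the loop in __gauss_fragmentation_body evaluates with z = rand[i] and the Vedder (1987) tanh approximation quoted in its docstring.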
@@ -122,21 +281,22 @@ def compute_gamma_body(gamma, rand, idx, length, multiplicity, cell_id,
         for i in numba.prange(length // 2): # pylint: disable=not-an-iterable
             gamma[i] = np.ceil(gamma[i] - rand[i])
 
-            if gamma[i] == 0:
+            no_collision = gamma[i] == 0
+            if no_collision:
                 continue
 
             j, k = pair_indices(i, idx, is_first_in_pair)
             prop = multiplicity[j] // multiplicity[k]
             g = min(int(gamma[i]), prop)
             cid = cell_id[j]
-            collision_rate[cid] += g * multiplicity[k]
-            collision_rate_deficit[cid] += (int(gamma[i]) - g) * multiplicity[k]
+            atomic_add(collision_rate, cid, g * multiplicity[k])
+            atomic_add(collision_rate_deficit, cid, (int(gamma[i]) - g) * multiplicity[k])
             gamma[i] = g
 
     # pylint: disable=too-many-arguments
     def compute_gamma(self, gamma, rand, multiplicity, cell_id,
                       collision_rate_deficit, collision_rate, is_first_in_pair):
-        return self.compute_gamma_body(
+        return self.__compute_gamma_body(
             gamma.data, rand.data, multiplicity.idx.data, len(multiplicity), multiplicity.data,
             cell_id.data,
             collision_rate_deficit.data, collision_rate.data, is_first_in_pair.indicator.data)
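
The compute_gamma changes keep the probabilistic rounding of the expected collision count (see the linked reference, doi:10.1002/qj.441) but route the per-cell counters through atomic_add, since the surrounding numba.prange loop over pairs may update the same cell's counter from different threads. A tiny numeric illustration of the rounding and deficit bookkeeping (hypothetical values, plain Python, not PySDM code):

```python
import math

gamma, rand = 4.3, 0.2           # expected collisions for this pair, uniform random number
gamma = math.ceil(gamma - rand)  # -> 5 (i.e. 5 with probability 0.3, 4 with probability 0.7)

n_j, n_k = 10, 3                 # multiplicities; pair_indices guarantees n_j >= n_k
prop = n_j // n_k                # -> 3: at most 3 collision events can be represented
g = min(int(gamma), prop)        # -> 3: events actually performed
deficit = int(gamma) - g         # -> 2: dropped events, tallied in collision_rate_deficit

print(g * n_k, deficit * n_k)    # contributions added to collision_rate / _deficit for the cell
```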
@@ -176,7 +336,7 @@ def __call__(self, cell_id, cell_idx, cell_start, idx):
     @staticmethod
     @numba.njit(**{**conf.JIT_FLAGS, **{'parallel': False}})
     # pylint: disable=too-many-arguments
-    def normalize_body(prob, cell_id, cell_idx, cell_start, norm_factor, timestep, dv):
+    def __normalize_body(prob, cell_id, cell_idx, cell_start, norm_factor, timestep, dv):
         n_cell = cell_start.shape[0] - 1
         for i in range(n_cell):
             sd_num = cell_start[i + 1] - cell_start[i]
@@ -189,10 +349,11 @@ def normalize_body(prob, cell_id, cell_idx, cell_start, norm_factor, timestep, dv):
 
     # pylint: disable=too-many-arguments
     def normalize(self, prob, cell_id, cell_idx, cell_start, norm_factor, timestep, dv):
-        return self.normalize_body(
+        return self.__normalize_body(
             prob.data, cell_id.data, cell_idx.data, cell_start.data,
             norm_factor.data, timestep, dv)
 
+
     @staticmethod
     @numba.njit(**{**conf.JIT_FLAGS, **{'parallel': False}})
     def remove_zero_n_or_flagged(multiplicity, idx, length) -> int:

PySDM/backends/impl_numba/methods/pair_methods.py

Lines changed: 13 additions & 0 deletions
@@ -93,3 +93,16 @@ def sum_pair_body(data_out, data_in, is_first_in_pair, idx, length):
     def sum_pair(data_out, data_in, is_first_in_pair, idx):
         return PairMethods.sum_pair_body(
             data_out.data, data_in.data, is_first_in_pair.indicator.data, idx.data, len(idx))
+
+    @staticmethod
+    @numba.njit(**conf.JIT_FLAGS)
+    def multiply_pair_body(data_out, data_in, is_first_in_pair, idx, length):
+        data_out[:] = 0
+        for i in numba.prange(length - 1): # pylint: disable=not-an-iterable
+            if is_first_in_pair[i]:
+                data_out[i//2] = (data_in[idx[i]] * data_in[idx[i + 1]])
+
+    @staticmethod
+    def multiply_pair(data_out, data_in, is_first_in_pair, idx):
+        return PairMethods.multiply_pair_body(
+            data_out.data, data_in.data, is_first_in_pair.indicator.data, idx.data, len(idx))
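
multiply_pair complements the existing sum_pair reduction: for every pair it writes the product of the two pair members into element i // 2 of the output, and PairwiseStorage.multiply (see pairwise_storage.py above) dispatches to it. A minimal NumPy sketch of the same indexing, with hypothetical inputs outside of PySDM's Storage wrappers:

```python
import numpy as np

data_in = np.asarray([3.0, 5.0, 2.0, 7.0])                 # per-droplet values
idx = np.asarray([0, 1, 2, 3])                             # permutation array (here: identity)
is_first_in_pair = np.asarray([True, False, True, False])  # marks the first member of each pair

data_out = np.zeros(len(data_in) // 2)
for i in range(len(data_in) - 1):
    if is_first_in_pair[i]:
        data_out[i // 2] = data_in[idx[i]] * data_in[idx[i + 1]]

print(data_out)  # [15. 14.] - one product per pair
```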
