Skip to content

Commit 5d32fda

Browse files
committed
[ENH][TEST] Add custom grid setups and profiling for performance
Introduce custom grid configurations to improve control over model evaluations. Add profiling in test_benchmark to measure and analyze computation speeds and iterations. Update tests to track interpolation counts and backend changes.
1 parent 270f3d0 commit 5d32fda

File tree

4 files changed

+51
-3
lines changed

4 files changed

+51
-3
lines changed

gempy_probability/modules/model_definition/model_examples.py

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -43,6 +43,8 @@ def model(geo_model: gempy.core.data.GeoModel, normal, y_obs_list):
4343
# endregion
4444

4545
# region Forward model computation
46+
47+
geo_model.counter +=1
4648

4749
# * Compute the geological model
4850
geo_model.solutions = gempy_engine.compute_model(
@@ -51,7 +53,9 @@ def model(geo_model: gempy.core.data.GeoModel, normal, y_obs_list):
5153
data_descriptor=geo_model.input_data_descriptor,
5254
geophysics_input=geo_model.geophysics_input,
5355
)
54-
56+
# if i does not exist init
57+
58+
5559
# Compute and observe the thickness of the geological layer
5660
model_solutions: gp.data.Solutions = geo_model.solutions
5761
thickness = apparent_thickness_likelihood(model_solutions)

tests/test_benchmark/notes.md

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
- Running 250 iterations took 28 sec. This is 8.81 it/s
2+
- The number of gempy evaluations is:
3+
- 53 evals/s or 18 ms per eval
4+
- 6 evals per iteration
5+
- More than 53 gempy evaluations per second seem hard to improve
6+
- Wait till I have a likelihood with a bigger model and a likelihood function that needs
7+
big grids (e.g. gravity)

tests/test_benchmark/test_speed_I.py

Lines changed: 33 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,17 +1,49 @@
1+
import pytest
2+
13
import gempy as gp
24
import gempy_viewer as gpv
5+
import numpy as np
6+
37
from gempy_probability.plugins.examples_generator import generate_example_model, ExampleProbModel
48

59

610
def test_speed_I():
11+
712
two_wells: gp.data.GeoModel = generate_example_model(
813
example_model=ExampleProbModel.TWO_WELLS,
914
compute_model=False
1015
)
1116

1217
assert two_wells.interpolation_options.number_octree_levels == 4, "Number of octrees should be 4"
1318

14-
gp.compute_model(two_wells)
19+
# region Minimal grid for the specific likelihood function
20+
x_loc = 6000
21+
y_loc = 0
22+
z_loc = np.linspace(0, 4000, 100)
23+
xyz_coord = np.array([[x_loc, y_loc, z] for z in z_loc])
24+
gp.set_custom_grid(two_wells.grid, xyz_coord=xyz_coord)
25+
# endregion
26+
27+
two_wells.grid.active_grids = gp.data.Grid.GridTypes.CUSTOM
28+
29+
profiler = cProfile.Profile()
30+
profiler.enable()
31+
iterations = 100
32+
for _ in range(iterations):
33+
gp.compute_model(
34+
gempy_model=two_wells,
35+
engine_config=gp.data.GemPyEngineConfig(
36+
backend=gp.data.AvailableBackends.numpy
37+
)
38+
)
39+
profiler.disable()
40+
stats = pstats.Stats(profiler).sort_stats('cumtime')
41+
stats.print_stats(10)
1542

1643
if PLOT := False:
1744
gpv.plot_2d(two_wells, show_scalar=False)
45+
46+
47+
@pytest.mark.skip(reason="Not implemented yet")
48+
def test_speed_on_gravity_likelihood():
49+
pass

tests/test_prob_model/test_prob_I.py

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,8 +38,9 @@ def test_basic_gempy_I() -> None:
3838

3939
geo_model.grid.active_grids = gp.data.Grid.GridTypes.CUSTOM
4040
assert geo_model.grid.values.shape[0] == 100, "Custom grid should have 100 cells"
41+
geo_model.counter = 0
4142
gp.compute_model(gempy_model=geo_model)
42-
43+
BackendTensor.change_backend_gempy(engine_backend=gp.data.AvailableBackends.PYTORCH)
4344

4445
normal = dist.Normal(
4546
loc=(geo_model.surface_points_copy_transformed.xyz[0, 2]),
@@ -73,6 +74,7 @@ def _prob_run(geo_model: gp.data.GeoModel, prob_model: callable,
7374
num_samples=50
7475
)
7576
prior = predictive(geo_model, normal, y_obs_list)
77+
print("Number of interpolations: ", geo_model.counter)
7678

7779
data = az.from_pyro(prior=prior)
7880
az.plot_trace(data.prior)
@@ -103,9 +105,12 @@ def _prob_run(geo_model: gp.data.GeoModel, prob_model: callable,
103105
posterior_samples=posterior_samples
104106
)
105107
posterior_predictive = posterior_predictive_fn(geo_model, normal, y_obs_list)
108+
106109
data = az.from_pyro(posterior=mcmc, prior=prior, posterior_predictive=posterior_predictive)
107110
# endregion
108111

112+
print("Number of interpolations: ", geo_model.counter)
113+
109114
if True: # * Save the arviz data
110115
data.to_netcdf("arviz_data.nc")
111116

0 commit comments

Comments
 (0)