Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions Readme
Original file line number Diff line number Diff line change
Expand Up @@ -24,8 +24,8 @@ Installation
Pippi requires almost no installation.

1. Make sure you have the following already installed:
* Either Python v2.7 or later or Python v3
* NumPy and SciPy libraries for python (NumPy v0.9.0 or
* Either Python v2.7 or later or Python v3.8 or later
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does the use of packaging for versioning break compatibility with Python 2.7? If so, we should just remove 2.7 here, there's no real need to support it any longer.

* NumPy, SciPy, and packaging libraries for Python (NumPy v0.9.0 or
greater is required for full functionality)
* ctioga2 v0.8 or later
* bash
Expand Down
28 changes: 18 additions & 10 deletions pippi
Original file line number Diff line number Diff line change
Expand Up @@ -43,11 +43,15 @@ def main(arguments):
try:
command(arguments[2:])
except BaseException as err:
print()
print('Running pippi failed in '+command.__name__+' operation, due to error:')
print(err)
print()
sys.exit()
clean_exit = False
if isinstance(err, SystemExit):
clean_exit = True if err.code == 0 else clean_exit
Comment on lines +46 to +48
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I find this construction kind of convoluted and therefore a bit difficult to follow. How about something like just

clean_exit = isinstance(err, SystemExit) and err.code == 0

?

if not clean_exit:
print()
print('Running pippi failed in '+command.__name__+' operation, due to error:')
print(err)
print()
sys.exit()
if not command in [merge, pare]:
print()
print('Completed sucessfully.')
Expand All @@ -61,11 +65,15 @@ def main(arguments):
try:
command(arguments[1:])
except BaseException as err:
print()
print('Running pippi failed in '+command.__name__+' operation.')
print(err)
print()
sys.exit()
clean_exit = False
if isinstance(err, SystemExit):
clean_exit = True if err.code == 0 else clean_exit
Comment on lines +68 to +70
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same comment

if not clean_exit:
print()
print('Running pippi failed in '+command.__name__+' operation.')
print(err)
print()
sys.exit()
print()
print('Completed sucessfully.')
print()
Expand Down
6 changes: 3 additions & 3 deletions pippi_parse.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,8 +17,8 @@
from scipy.interpolate import InterpolatedUnivariateSpline as oneDspline
from scipy.interpolate import RectBivariateSpline as twoDbilinear

from distutils.version import StrictVersion
if StrictVersion(scipyCurrent.version) >= StrictVersion("0.9.0"):
from packaging.version import Version
if Version(scipyCurrent.version) >= Version("0.9.0"):
from scipy.interpolate import CloughTocher2DInterpolator as twoDspline

# Define parse-specific pip file entries
Expand Down Expand Up @@ -69,7 +69,7 @@ def parse(filename):
if twoDplots.value is not None:
if intMethod.value is None: intMethod.value = allowedIntMethods[0]
if intMethod.value not in allowedIntMethods: sys.exit('Error: unrecognised interpolation_method.')
if intMethod.value == 'spline' and StrictVersion(scipyCurrent.version) < StrictVersion("0.9.0"):
if intMethod.value == 'spline' and Version(scipyCurrent.version) < Version("0.9.0"):
sys.exit('Sorry, Clough-Tocher 2D interpolation is not supported in SciPy \n'+
'v0.8 or lower; please upgrade your installation to use this option.')

Expand Down
8 changes: 4 additions & 4 deletions pippi_read.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,7 +278,7 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment
for i, column_name in enumerate(column_names):
if column_name != '': print(" ", i, ":", column_name)
print()
quit()
sys.exit(0)

# Identify any likelihood or multiplicity indicated by the labels.
if labels:
Expand Down Expand Up @@ -313,7 +313,7 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment
data_isvalid.append(np.array(entries[column_names[index]+"_isvalid"], dtype=np.float64))
lookup_key[index] = index_count
index_count += 1
non_functional_cols = [i for i, elem in enumerate(data) if data[i] != 'functional']
non_functional_cols = [i for i, elem in enumerate(data) if np.all(data[i] != 'functional')] # Numpy now requires the user cast to a scalar when performing boolean checks on arrays
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This seems confusing, or at least not the optimal way to check. What's being checked for here is just whether data[i] is a string with value 'functional', or a numpy array. Better would probably be to just check if data[i] is a numpy array. Same for the following changes using np.all.

if not non_functional_cols:
print("ERROR: At least one non-function assignment is needed in")
print("assign_to_pippi_datastream, or a multiplicity or likelihood")
Expand All @@ -325,13 +325,13 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment
# Fill in the functional columns with zeros. Note that this uses more memory than doing it after validity
# cuts, but should actually be faster (I think; haven't actually tested that). It makes the code simpler too.
for i, elem in enumerate(data):
if elem == 'functional':
if np.any(elem == 'functional'): # Numpy cast to a scalar boolean check
data[i] = np.zeros(total_samples, dtype=np.float64)
else:
# Do some pruning to deal with cases where the some datasets have extra entries (although this arguably indicates a bug in the sampler)
data[i] = elem[:total_samples]
for i, elem in enumerate(data_isvalid):
if elem == 'functional':
if np.any(elem == 'functional'): # Numpy cast to a scalar boolean check
data_isvalid[i] = np.ones(total_samples, dtype=np.float64)
else:
# Do some pruning to deal with cases where the some datasets have extra entries (although this arguably indicates a bug in the sampler)
Expand Down