diff --git a/example/example.pip b/example/example.pip index 9681c5d..fbef51f 100644 --- a/example/example.pip +++ b/example/example.pip @@ -40,8 +40,8 @@ assign_to_pippi_datastream = 'np.log10($2)-np.log10($3)':64 \ ;Assign a python function or named hdf5 data stream to a particular pippi datastream index (use pippi probe to test; EFN) quantity_labels = 0:'multiplicity' \ 1:'-lnlike' \ - 2:'$\log_{10}\left(m_0/\mathrm{TeV}\right)$' \ - 3:'$\log_{10}\left(m_\frac12/\mathrm{TeV}\right)$' \ + 2:'$m_0$ (TeV)' \ + 3:'$m_\frac12$ (TeV)' \ 4:'$A_0$ (TeV)' \ 5:'$\tan\beta$' \ 64:'$\log_{10}\left(m_0/m_\frac12\right)$' \ @@ -67,7 +67,8 @@ plot_colourbar_2D = {3,2} {5,4} ;2D plots that should include colour plot_comparison = T ;Overplot distributions from comparisonFilename in 1D plots and contours in 2D plots extra_legend_lines = 'Flat priors' 'CMSSM $\mu>0$';Additional text lines to be included in any legends (EFN) -blame = 'pippi v2.2' ;Credit line to be placed in top corner of all plots +blame_text = 'pippi v2.2' ;Credit line to be placed in top corner of all plots +blame_scale = 0.5 ;Scale of credit line, default is 0.5 yaxis_number_angle = -90 ;Angle relative to vertical for y-axis numerical labels plot_posterior_mean_on_posterior_pdf = T ;Indicate the posterior mean in all posterior pdf plots @@ -77,6 +78,8 @@ plot_best_fit_on_profile_like = T ;Indicate the best fit in all profil axis_ranges = 2:{2.0,3.5} 3:{2.05,3.6} 4:{-4,4} ;Axis ranges over which to plot parameters/observables (defaults to data_ranges if absent) +custom_ticks = ;Parameters/observables that use custom ticks + reference_point = 2:3.3 3:2.5 ;Coordinates of reference point (only plotted where one of the listed axes is present) reference_text = 'True value' ;Key string to be printed for reference point diff --git a/example/example_diver.pip b/example/example_diver.pip index aef2493..7a466a8 100644 --- a/example/example_diver.pip +++ b/example/example_diver.pip @@ -59,7 +59,8 @@ plot_colourbar_2D = ;2D 
plots that should include colour plot_comparison = F ;Overplot distributions from comparisonFilename in 1D plots and contours in 2D plots extra_legend_lines = ;Additional text lines to be included in any legends (EFN) -blame = 'pippi v2.2' ;Credit line to be placed in top corner of all plots +blame_text = 'pippi v2.2' ;Credit line to be placed in top corner of all plots +blame_scale = 0.5 ;Scale of credit line, default is 0.5 yaxis_number_angle = -90 ;Angle relative to vertical for y-axis numerical labels plot_posterior_mean_on_posterior_pdf = F ;Indicate the posterior mean in all posterior pdf plots @@ -69,6 +70,8 @@ plot_best_fit_on_profile_like = T ;Indicate the best fit in all profil axis_ranges = ;Axis ranges over which to plot parameters/observables (defaults to data_ranges if absent) +custom_ticks = ;Parameters/observables that use custom ticks + reference_point = ;Coordinates of reference point (only plotted where one of the listed axes is present) reference_text = ;Key string to be printed for reference point diff --git a/example/pippi.pdf b/example/pippi.pdf deleted file mode 120000 index 5565466..0000000 --- a/example/pippi.pdf +++ /dev/null @@ -1 +0,0 @@ -../pippi.pdf \ No newline at end of file diff --git a/pippi b/pippi index 5152af7..f7c638a 100755 --- a/pippi +++ b/pippi @@ -15,6 +15,7 @@ # Originally developed: March 2012 ############################################################# +from __future__ import print_function from pippi_probe import * from pippi_merge import * from pippi_pare import * @@ -37,41 +38,41 @@ def main(arguments): #Check if pippi has been invoked with one of the five known specific commands command = commandLineOptions[arguments[1]] if not command in [merge, pare]: - print - print 'Beginning pippi '+arguments[1]+' operation...' 
+ print() + print('Beginning pippi '+arguments[1]+' operation...') try: command(arguments[2:]) except BaseException as err: - print - print 'Running pippi failed in '+command.__name__+' operation, due to error:' - print err - print + print() + print('Running pippi failed in '+command.__name__+' operation, due to error:') + print(err) + print() sys.exit() if not command in [merge, pare]: - print - print 'Completed sucessfully.' - print + print() + print('Completed successfully.') + print() except KeyError: #Otherise check if it has been invoked with just a filename if os.path.isfile(arguments[1]): - print - print 'Beginning pippi parse-to-plot operation...' + print() + print('Beginning pippi parse-to-plot operation...') for command in [parse, script, plot]: try: command(arguments[1:]) except BaseException as err: - print - print 'Running pippi failed in '+command.__name__+' operation.' - print err - print + print() + print('Running pippi failed in '+command.__name__+' operation.') + print(err) + print() sys.exit() - print - print 'Completed sucessfully.' 
- print + print() + print('Completed successfully.') + print() else: #Otherwise crack it and tell the user to get their shit in order - print - print 'Can\'t find file '+arguments[1] + print() + print('Can\'t find file '+arguments[1]) usage() sys.exit() diff --git a/pippi.pdf b/pippi.pdf deleted file mode 100755 index bc351ce..0000000 Binary files a/pippi.pdf and /dev/null differ diff --git a/pippi_colours.py b/pippi_colours.py index 1621879..9381005 100644 --- a/pippi_colours.py +++ b/pippi_colours.py @@ -9,6 +9,7 @@ # Originally developed: March 2012 ############################################################# +from __future__ import print_function import re import copy @@ -16,6 +17,7 @@ def Blockshading(colour,line_code, fill_code): scheme = colourScheme('Blockshading_'+colour) + scheme.backgroundColour = '#fff' scheme.baseProfColourMap = '#fff--#fff(contour1)--#'+fill_code+'(contour1)--#'+fill_code scheme.basePostColourMap = '#fff--#fff(contour1)--#'+fill_code+'(contour1)--#'+fill_code scheme.mainPostContourColour2D = '\'#'+line_code+'\'' @@ -43,6 +45,7 @@ class colourScheme: comparisonProfColour1D = 'Grey' comparisonPostContourColour2D = 'Grey' comparisonProfContourColour2D = 'Grey' + backgroundColour = '#fff' baseProfColourMap = '#fff--#fff(contour2)--#f45(contour1)--#612' basePostColourMap = '#fff--#fff(contour2)--#88f(contour1)--#229' @@ -122,6 +125,7 @@ def colourMap(self,contours,kind): # iceCube colour scheme iceCube = colourScheme('iceCube') +iceCube.backgroundColour = '#fff' iceCube.baseProfColourMap = '#fff--#fff(contour2)--#292(contour1)--#f55(contour1)--#000' iceCube.basePostColourMap = '#fff--#fff(contour2)--#29d(contour1)--#f55(contour1)--#000' iceCube.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)' @@ -132,6 +136,7 @@ def colourMap(self,contours,kind): # iceCube79 colour scheme iceCube79 = colourScheme('iceCube79') +iceCube79.backgroundColour = '#fff' iceCube79.baseProfColourMap = 
'#fff--#fff(contour2)--#fab(contour1)--#f45' iceCube79.basePostColourMap = '#fff--#fff(contour2)--#ddf(contour1)--#88f' iceCube79.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)' @@ -145,6 +150,7 @@ def colourMap(self,contours,kind): # iceCube3sig colour scheme iceCube3sig = colourScheme('iceCube3sig') +iceCube3sig.backgroundColour = '#fff' iceCube3sig.baseProfColourMap = '#fff--#fff(contour3)--#292(contour2)--#fff(contour2)--#929(contour1)--#f55(contour1)--#000' iceCube3sig.basePostColourMap = '#fff--#fff(contour3)--#29d(contour2)--#fff(contour2)--#929(contour1)--#f55(contour1)--#000' iceCube3sig.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)' @@ -155,6 +161,7 @@ def colourMap(self,contours,kind): # SBClassic colour scheme SBClassic = colourScheme('SBClassic') +SBClassic.backgroundColour = '#fff' SBClassic.baseProfColourMap = '#fff--#fff(contour2)--#2f2(contour1)--#f33(0.5)--#000' SBClassic.basePostColourMap = '#fff--#fff(contour2)--#95d(contour1)--#f33(0.5)--#000' SBClassic.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)' @@ -165,6 +172,7 @@ def colourMap(self,contours,kind): # BlueGold colour scheme BlueGold = colourScheme('BlueGold') +BlueGold.backgroundColour = '#fff' BlueGold.baseProfColourMap = '#fff--#fff(contour2)--#f44(contour2)--#f44(contour1)--#ece(contour1)--#ece' BlueGold.basePostColourMap = '#fff--#fff(contour2)--#44f(contour2)--#44f(contour1)--#fc0(contour1)--#fc0' BlueGold.baseObsColourMap = 'hls:White(contour1)--Red(contour2)--Green(contour3)' @@ -178,6 +186,7 @@ def colourMap(self,contours,kind): # nightOfTheAllanachs colour scheme nightOfTheAllanachs = colourScheme('nightOfTheAllanachs') +nightOfTheAllanachs.backgroundColour = '#000' nightOfTheAllanachs.basePostColourMap = '#000--#000(contour2)--#808(contour1)--#f33(0.5)--#ff0' nightOfTheAllanachs.baseProfColourMap = '#000--#000(contour2)--#33f(contour1)--#0ff(0.5)--#ff0' nightOfTheAllanachs.baseObsColourMap = 
'Black(contour1)--Red(contour2)--Green(contour3)' @@ -195,6 +204,7 @@ def colourMap(self,contours,kind): # nightOfTheAllanachs2 colour scheme nightOfTheAllanachs2 = colourScheme('nightOfTheAllanachs2') +nightOfTheAllanachs2.backgroundColour = '#000' nightOfTheAllanachs2.basePostColourMap = '#000--#000(contour2)--#808(contour1)--#f33(0.5)--#ff0' nightOfTheAllanachs2.baseProfColourMap = '#000--#000(contour2)--#33f(contour1)--#0ff(0.5)--#ff0' nightOfTheAllanachs2.baseObsColourMap = 'Black(contour1)--Red(contour2)--#00FFFF(contour3)' @@ -214,6 +224,7 @@ def colourMap(self,contours,kind): # nightOfTheAllanachs3 colour scheme nightOfTheAllanachs3 = colourScheme('nightOfTheAllanachs3') +nightOfTheAllanachs3.backgroundColour = '#000' nightOfTheAllanachs3.basePostColourMap = '#000--#000(contour2)--#808(contour1)--#f33(0.5)--#ff0' nightOfTheAllanachs3.baseProfColourMap = '#000--#000(contour2)--#33f(contour1)--#0ff(0.5)--#ff0' nightOfTheAllanachs3.baseObsColourMap = 'Black(contour1)--Blue(contour2)--Orange(contour3)' @@ -240,3 +251,211 @@ def colourMap(self,contours,kind): Blockshading_orange = Blockshading("orange", "840", "f90") Blockshading_yellow = Blockshading("yellow", "870", "fe0") Blockshading_cyan = Blockshading("cyan", "088", "3ee") + +# Extended Basic scheme - red +ExtendedBasic_red = colourScheme('extendedBasic_red') +ExtendedBasic_red.basePostColourMap = '#fff--#fab(contour2)--#f45(contour1)--#612' +ExtendedBasic_red.baseProfColourMap = '#fff--#fab(contour2)--#f45(contour1)--#612' +ExtendedBasic_red.baseObsColourMap = '#fff(contour1)--#00f(contour2)--#0f0(contour3)' +ExtendedBasic_red.mainBestFitColour1D = '#300' +ExtendedBasic_red.mainBestFitColour2D = '#300' +ExtendedBasic_red.mainPostContourColour2D = 'Black' +ExtendedBasic_red.mainProfContourColour2D = 'Black' +ExtendedBasic_red.axisColour2D = 'Black' +ExtendedBasic_red.mainBestFitColour1D = 'Red' +ExtendedBasic_red.mainPostMeanColour1D = 'Red' +ExtendedBasic_red.mainBestFitColour2D = 'White' 
+ExtendedBasic_red.mainBestFitColourOutline2D = 'Black' +ExtendedBasic_red.mainPostMeanColour2D = 'White' +ExtendedBasic_red.mainPostMeanColourOutline2D = 'Black' +ExtendedBasic_red.legendTextColour2D = 'Black' +ExtendedBasic_red.keyTextColour2D = 'Black' +ExtendedBasic_red.comparisonContourStyle = 'Dashes' +ExtendedBasic_red.comparison1DLineStyle = 'Dashes' + +# Extended Basic scheme - green +ExtendedBasic_green = colourScheme('extendedBasic_green') +ExtendedBasic_green.basePostColourMap = '#fff--#ad9(contour2)--#3a5(contour1)--#162' +ExtendedBasic_green.baseProfColourMap = '#fff--#ad9(contour2)--#3a5(contour1)--#162' +ExtendedBasic_green.baseObsColourMap = '#fff(contour1)--#00f(contour2)--#f00(contour3)' +ExtendedBasic_green.mainBestFitColour1D = '#030' +ExtendedBasic_green.mainBestFitColour2D = '#030' +ExtendedBasic_green.mainPostContourColour2D = 'Black' +ExtendedBasic_green.mainProfContourColour2D = 'Black' +ExtendedBasic_green.axisColour2D = 'Black' +ExtendedBasic_green.mainBestFitColour1D = 'Green' +ExtendedBasic_green.mainPostMeanColour1D = 'Green' +ExtendedBasic_green.mainBestFitColour2D = 'White' +ExtendedBasic_green.mainBestFitColourOutline2D = 'Black' +ExtendedBasic_green.mainPostMeanColour2D = 'White' +ExtendedBasic_green.mainPostMeanColourOutline2D = 'Black' +ExtendedBasic_green.legendTextColour2D = 'Black' +ExtendedBasic_green.keyTextColour2D = 'Black' +ExtendedBasic_green.comparisonContourStyle = 'Dashes' +ExtendedBasic_green.comparison1DLineStyle = 'Dashes' + +# Extended Basic scheme - blue +ExtendedBasic_blue = colourScheme('extendedBasic_blue') +ExtendedBasic_blue.basePostColourMap = '#fff--#9ce(contour2)--#38b(contour1)--#126' +ExtendedBasic_blue.baseProfColourMap = '#fff--#9ce(contour2)--#38b(contour1)--#126' +ExtendedBasic_blue.baseObsColourMap = '#fff(contour1)--#f00(contour2)--#0f0(contour3)' +ExtendedBasic_blue.mainBestFitColour1D = '#003' +ExtendedBasic_blue.mainBestFitColour2D = '#003' +ExtendedBasic_blue.mainPostContourColour2D = 'Black' 
+ExtendedBasic_blue.mainProfContourColour2D = 'Black' +ExtendedBasic_blue.axisColour2D = 'Black' +ExtendedBasic_blue.mainBestFitColour1D = 'Blue' +ExtendedBasic_blue.mainPostMeanColour1D = 'Blue' +ExtendedBasic_blue.mainBestFitColour2D = 'White' +ExtendedBasic_blue.mainBestFitColourOutline2D = 'Black' +ExtendedBasic_blue.mainPostMeanColour2D = 'White' +ExtendedBasic_blue.mainPostMeanColourOutline2D = 'Black' +ExtendedBasic_blue.legendTextColour2D = 'Black' +ExtendedBasic_blue.keyTextColour2D = 'Black' +ExtendedBasic_blue.comparisonContourStyle = 'Dashes' +ExtendedBasic_blue.comparison1DLineStyle = 'Dashes' + +# Extended Basic scheme - grey +ExtendedBasic_grey = colourScheme('extendedBasic_grey') +ExtendedBasic_grey.basePostColourMap = '#fff--#aaa(contour2)--#555(contour1)--#222' +ExtendedBasic_grey.baseProfColourMap = '#fff--#aaa(contour2)--#555(contour1)--#222' +ExtendedBasic_grey.baseObsColourMap = '#fff(contour1)--#999(contour2)--#333(contour3)' +ExtendedBasic_grey.mainBestFitColour1D = '#000' +ExtendedBasic_grey.mainBestFitColour2D = '#000' +ExtendedBasic_grey.mainPostContourColour2D = 'Black' +ExtendedBasic_grey.mainProfContourColour2D = 'Black' +ExtendedBasic_grey.axisColour2D = 'Black' +ExtendedBasic_grey.mainBestFitColour1D = 'Black' +ExtendedBasic_grey.mainPostMeanColour1D = 'Black' +ExtendedBasic_grey.mainBestFitColour2D = 'White' +ExtendedBasic_grey.mainBestFitColourOutline2D = 'Black' +ExtendedBasic_grey.mainPostMeanColour2D = 'White' +ExtendedBasic_grey.mainPostMeanColourOutline2D = 'Black' +ExtendedBasic_grey.legendTextColour2D = 'Black' +ExtendedBasic_grey.keyTextColour2D = 'Black' +ExtendedBasic_grey.comparisonContourStyle = 'Dashes' +ExtendedBasic_grey.comparison1DLineStyle = 'Dashes' + +# Sharp Basic scheme - red +SharpBasic_red = colourScheme('sharpBasic_red') +SharpBasic_red.basePostColourMap = '#fff--#fab(contour2)--#f89(contour2)--#d45(contour1)--#b34(contour1)--#612' +SharpBasic_red.baseProfColourMap = 
'#fff--#fab(contour2)--#f89(contour2)--#d45(contour1)--#b34(contour1)--#612' +SharpBasic_red.baseObsColourMap = '#fff(contour1)--#7a9(contour2)--#365(contour3)' +SharpBasic_red.mainBestFitColour1D = '#300' +SharpBasic_red.mainBestFitColour2D = '#300' +SharpBasic_red.mainPostContourColour2D = 'Black' +SharpBasic_red.mainProfContourColour2D = 'Black' +SharpBasic_red.axisColour2D = 'Black' +SharpBasic_red.mainBestFitColour1D = 'Red' +SharpBasic_red.mainPostMeanColour1D = 'Red' +SharpBasic_red.mainBestFitColour2D = 'White' +SharpBasic_red.mainBestFitColourOutline2D = 'Black' +SharpBasic_red.mainPostMeanColour2D = 'White' +SharpBasic_red.mainPostMeanColourOutline2D = 'Black' +SharpBasic_red.legendTextColour2D = 'Black' +SharpBasic_red.keyTextColour2D = 'Black' +SharpBasic_red.comparisonContourStyle = 'Dashes' +SharpBasic_red.comparison1DLineStyle = 'Dashes' +SharpBasic_red.comparisonBestFitColour = 'Green' +SharpBasic_red.comparisonPostMeanColour = 'Green' +SharpBasic_red.comparisonPostContourColour2D = 'Green' +SharpBasic_red.comparisonProfContourColour2D = 'Green' + + +# Sharp Basic scheme - green +SharpBasic_green = colourScheme('sharpBasic_green') +SharpBasic_green.basePostColourMap = '#fff--#ad9(contour2)--#7a9(contour2)--#385(contour1)--#365(contour1)--#142' +SharpBasic_green.baseProfColourMap = '#fff--#ad9(contour2)--#7a9(contour2)--#385(contour1)--#365(contour1)--#142' +SharpBasic_green.baseObsColourMap = '#fff(contour1)--#f89(contour2)--#b34(contour3)' +SharpBasic_green.mainBestFitColour1D = '#030' +SharpBasic_green.mainBestFitColour2D = '#030' +SharpBasic_green.mainPostContourColour2D = 'Black' +SharpBasic_green.mainProfContourColour2D = 'Black' +SharpBasic_green.axisColour2D = 'Black' +SharpBasic_green.mainBestFitColour1D = 'Green' +SharpBasic_green.mainPostMeanColour1D = 'Green' +SharpBasic_green.mainBestFitColour2D = 'White' +SharpBasic_green.mainBestFitColourOutline2D = 'Black' +SharpBasic_green.mainPostMeanColour2D = 'White' 
+SharpBasic_green.mainPostMeanColourOutline2D = 'Black' +SharpBasic_green.legendTextColour2D = 'Black' +SharpBasic_green.keyTextColour2D = 'Black' +SharpBasic_green.comparisonContourStyle = 'Dashes' +SharpBasic_green.comparison1DLineStyle = 'Dashes' +SharpBasic_green.comparisonBestFitColour = 'Red' +SharpBasic_green.comparisonPostMeanColour = 'Red' +SharpBasic_green.comparisonPostContourColour2D = 'Red' +SharpBasic_green.comparisonProfContourColour2D = 'Red' + +# Sharp Basic scheme - blue +SharpBasic_blue = colourScheme('sharpBasic_blue') +SharpBasic_blue.basePostColourMap = '#fff--#9ce(contour2)--#6be(contour2)--#38b(contour1)--#169(contour1)--#126' +SharpBasic_blue.baseProfColourMap = '#fff--#9ce(contour2)--#6be(contour2)--#38b(contour1)--#169(contour1)--#126' +SharpBasic_blue.baseObsColourMap = '#fff(contour1)--#f92(contour2)--#930(contour3)' +SharpBasic_blue.mainBestFitColour1D = '#003' +SharpBasic_blue.mainBestFitColour2D = '#003' +SharpBasic_blue.mainBestFitColour2D = 'White' +SharpBasic_blue.mainPostContourColour2D = 'Black' +SharpBasic_blue.mainProfContourColour2D = 'Black' +SharpBasic_blue.axisColour2D = 'Black' +SharpBasic_blue.mainBestFitColour1D = 'Blue' +SharpBasic_blue.mainPostMeanColour1D = 'Blue' +SharpBasic_blue.mainBestFitColour2D = 'White' +SharpBasic_blue.mainBestFitColourOutline2D = 'Black' +SharpBasic_blue.mainPostMeanColour2D = 'White' +SharpBasic_blue.mainPostMeanColourOutline2D = 'Black' +SharpBasic_blue.legendTextColour2D = 'Black' +SharpBasic_blue.keyTextColour2D = 'Black' +SharpBasic_blue.comparisonContourStyle = 'Solid' +SharpBasic_blue.comparison1DLineStyle = 'Solid' +#SharpBasic_blue.comparisonBestFitColour2D = 'Chocolate' +#SharpBasic_blue.comparisonBestFitcolourOutline2D = 'DarkChocolate' +#SharpBasic_blue.comparisonPostMeanColour2D = 'Chocolate' +#SharpBasic_blue.comparisonPostMeanColourOutline2D = 'DarkChocolate' +#SharpBasic_blue.comparisonPostContourColour2D = 'DarkChocolate' +#SharpBasic_blue.comparisonProfContourColour2D = 
'DarkChocolate' + +# Sharp Basic scheme - grey +SharpBasic_grey = colourScheme('sharpBasic_grey') +SharpBasic_grey.basePostColourMap = '#fff--#ccc(contour2)--#aaa(contour2)--#777(contour1)--#555(contour1)--#222' +SharpBasic_grey.baseProfColourMap = '#fff--#ccc(contour2)--#aaa(contour2)--#777(contour1)--#555(contour1)--#222' +SharpBasic_grey.baseObsColourMap = '#fff--#fff(contour1)--#ccc(contour2)--#aaa(contour2)--#777(contour3)--#555(contour3)--#222' +SharpBasic_grey.mainBestFitColour1D = '#000' +SharpBasic_grey.mainBestFitColour2D = '#000' +SharpBasic_grey.mainPostContourColour2D = 'Black' +SharpBasic_grey.mainProfContourColour2D = 'Black' +SharpBasic_grey.axisColour2D = 'Black' +SharpBasic_grey.mainBestFitColour1D = 'Black' +SharpBasic_grey.mainPostMeanColour1D = 'Black' +SharpBasic_grey.mainBestFitColour2D = 'White' +SharpBasic_grey.mainBestFitColourOutline2D = 'Black' +SharpBasic_grey.mainPostMeanColour2D = 'White' +SharpBasic_grey.mainPostMeanColourOutline2D = 'Black' +SharpBasic_grey.legendTextColour2D = 'Black' +SharpBasic_grey.keyTextColour2D = 'Black' +SharpBasic_grey.comparisonContourStyle = 'Dashes' +SharpBasic_grey.comparison1DLineStyle = 'Dashes' + +# Orange-Red scheme +OrangeRed = colourScheme('orangeRed') +OrangeRed.baseProfColourMap = '#fff--#fff(contour3)--#dcd0b7(contour2)--#fdcc8a(contour2)--#da6b37(contour1)--#f9523f(contour1)--#612' +OrangeRed.mainBestFitColour1D = '#300' +OrangeRed.mainBestFitColour2D = '#300' + +# Blue-Purple scheme +BluePurple = colourScheme('bluePurple') +BluePurple.baseProfColourMap = '#fff--#cbd6d9(contour2)--#b3cde3(contour2)--#6a74a4(contour1)--#aa63bf(contour1)--#424' +BluePurple.mainBestFitColour1D = '#303' +BluePurple.mainBestFitColour2D = '#303' + +# Green-Blue scheme +GreenBlue = colourScheme('greenBlue') +GreenBlue.baseProfColourMap = '#fff--#d0d7c6(contour2)--#bae4bc(contour2)--#59aaa2(contour1)--#4daedf(contour1)--#126' +GreenBlue.mainBestFitColour1D = '#003' +GreenBlue.mainBestFitColour2D = '#003' + +# 
Yellow-Green scheme +YellowGreen = colourScheme('yellowGreen') +YellowGreen.baseProfColourMap = '#fff--#ddddaa(contour2)--#c2e699(contour2)--#56a457(contour1)--#45a665(contour1)--#142' +YellowGreen.mainBestFitColour1D = '#030' +YellowGreen.mainBestFitColour2D = '#030' diff --git a/pippi_merge.py b/pippi_merge.py index a4054ce..60eab74 100644 --- a/pippi_merge.py +++ b/pippi_merge.py @@ -30,11 +30,11 @@ def merge(filenames): import h5py f = h5py.File(filenames[0],'r') h5merge = True - print - print "Files identified as hdf5. Interpreting final argument as output filename." - print - print "Concatenating common datasets and outputting to {0}...".format(filenames[-1]) - print + print() + print("Files identified as hdf5. Interpreting final argument as output filename.") + print() + print("Concatenating common datasets and outputting to {0}...".format(filenames[-1])) + print() except: h5merge = False @@ -49,19 +49,19 @@ def merge(filenames): try: fout = h5py.File(filenames[-1],'w-') except: - print "Could not create output file {0}!".format(filenames[-1]) - print "Please make sure it does not exist already." - print + print("Could not create output file {0}!".format(filenames[-1])) + print("Please make sure it does not exist already.") + print() return - print " Determining common datasets..." 
+ print(" Determining common datasets...") for fname in filenames[0:-1]: - print " Opening: {0}".format(fname) + print(" Opening: {0}".format(fname)) try: f = h5py.File(fname,'r') except: - print "Could not open file {0}!".format(fname) - print + print("Could not open file {0}!".format(fname)) + print() return files[fname] = f datasets = {} @@ -77,13 +77,13 @@ def merge(filenames): datashape = dataset_collection[0][x].shape if all(f[x].dtype == datatype and f[x].shape[1:] == datashape[1:] for f in dataset_collection): common_datasets.add(x) - print - print " Common datasets: " - for x in common_datasets: print " {0}".format(x) - print + print() + print(" Common datasets: ") + for x in common_datasets: print(" {0}".format(x)) + print() #Find the length of each dataset and create it (empty) in the new file - print " Creating empty datasets of required lengths in {0}...".format(filenames[-1]) + print(" Creating empty datasets of required lengths in {0}...".format(filenames[-1])) out_dsets = {} for ds in common_datasets: length = 0 @@ -91,21 +91,21 @@ def merge(filenames): datatype = dataset_collection[0][ds].dtype datashape = (length,) + dataset_collection[0][ds].shape[1:] out_dsets[ds] = fout.create_dataset(ds, datashape, dtype=datatype) - print + print() #Copy the data over to the new file - print " Adding data to empty datasets in {0}...".format(filenames[-1]) + print(" Adding data to empty datasets in {0}...".format(filenames[-1])) for ds in common_datasets: - print " Populating {0}".format(ds) + print(" Populating {0}".format(ds)) index_low = 0 for f in dataset_collection: index_high = index_low + f[ds].len() out_dsets[ds][index_low:index_high,...] = f[ds][...] index_low = index_high - print - print "Done." 
- print + print() + print("Done.") + print() else: # We are doing an ASCII merge @@ -137,7 +137,7 @@ def merge(filenames): #Crash if a later chain or line has a different number of columns to the first one sys.exit('Error: chains do not match (number of columns differ). Quitting...') #Output the current line to stdout and get the next one - print line.rstrip('\n') + print(line.rstrip('\n')) #Read the next line line = infile.readline() #Work out the number of columns in the next line @@ -150,6 +150,6 @@ def merge(filenames): def get_datasets(g,datasets): import h5py - for name, item in g.iteritems(): + for name, item in g.items(): if isinstance(item,h5py.Group): get_datasets(item,datasets) if isinstance(item,h5py.Dataset): datasets[item.name] = item diff --git a/pippi_pare.py b/pippi_pare.py index 2484352..42f46e5 100644 --- a/pippi_pare.py +++ b/pippi_pare.py @@ -45,6 +45,6 @@ def pare(argstring): # Pump it through the user-supplied function, printing each new point to stdout for i in range(chainArray.shape[0]): - print '\t'.join([str(x) for x in pareFunc(chainArray[i,:])]) + print('\t'.join([str(x) for x in pareFunc(chainArray[i,:])])) diff --git a/pippi_parse.py b/pippi_parse.py index 7753172..61697ac 100644 --- a/pippi_parse.py +++ b/pippi_parse.py @@ -8,6 +8,7 @@ # Originally developed: March 2012 ############################################################# +from __future__ import print_function import subprocess from pippi_utils import * from pippi_read import * @@ -77,13 +78,13 @@ def parse(filename): #Check that flags match up for profile likelihood if all(x not in labels.value for x in permittedLikes) and doProfile.value: - print ' Warning: no likelihood in chain labels.\n Skipping profile likelihood...' 
+ print(' Warning: no likelihood in chain labels.\n Skipping profile likelihood...') doProfile.value = False #Work out whether to do posterior mean and check that flags match up for posterior pdf doPosteriorMean = any(x in labels.value for x in permittedMults) if doPosterior.value and not doPosteriorMean: - print ' Warning: do_posterior_pdf = T but no multiplicity in chain labels.\n Skipping posterior PDF...' + print(' Warning: do_posterior_pdf = T but no multiplicity in chain labels.\n Skipping posterior PDF...') doPosterior.value = False #Check that flags match up for evidence @@ -92,10 +93,10 @@ def parse(filename): if all(x not in labels.value for x in permittedLikes) or \ all(x not in labels.value for x in permittedMults) or \ all(x not in labels.value for x in permittedPriors): - print ' The evidence cannot be calculated without multiplicity, prior and likelihood.\n Skipping evidence...' + print(' The evidence cannot be calculated without multiplicity, prior and likelihood.\n Skipping evidence...') doEvidence.value = False else: - print ' The evidence can only be calculated from an MCMC chain.\n Skipping evidence...' + print(' The evidence can only be calculated from an MCMC chain.\n Skipping evidence...') doEvidence.value = False #Check that flags and match up for quantities selected for plotting @@ -147,8 +148,8 @@ def parse(filename): # Parse comparison chain doParse(mainArray,lookupKey,outputBaseFilename,setOfRequestedColumns,hdf5_names,dataRanges,all_best_fit_data,nBins,iBins,alt_best_fit) else: - print ' Chain '+secChain.value+' has less columns than required to do all requested plots.' - print ' Skipping parsing of this chain...' 
+ print(' Chain '+secChain.value+' has less columns than required to do all requested plots.') + print(' Skipping parsing of this chain...') def doParse(dataArray,lk,outputBaseFilename,setOfRequestedColumns,column_names,dataRanges,all_best_fit_data,nBins,iBins,alt_best_fit): @@ -171,6 +172,8 @@ def doParse(dataArray,lk,outputBaseFilename,setOfRequestedColumns,column_names,d [lnZMain,lnZMainError] = getEvidence(dataArray,lk,bestFit,totalMult,outputBaseFilename) # Save data minima and maxima saveExtrema(dataArray,lk,outputBaseFilename,setOfRequestedColumns,dataRanges) + # Save variables to plot in log scale + saveLogVars(lk,outputBaseFilename,logPlots) # Save lookup keys for parameters saveLookupKeys(lk,outputBaseFilename) # Do binning for 1D plots @@ -182,7 +185,7 @@ def doParse(dataArray,lk,outputBaseFilename,setOfRequestedColumns,column_names,d def standardise(dataArray,lk): global firstLikeKey # Standardise likelihood, prior and multiplicity labels, rescale likelihood if necessary, - for key, entry in labels.value.copy().iteritems(): + for key, entry in labels.value.copy().items(): if any(key == mult for mult in permittedMults): labels.value[refMult] = labels.value[key] if key != refMult: del labels.value[key] @@ -203,21 +206,21 @@ def standardise(dataArray,lk): #if any(entry == obs for obs in permittedObs): labels.value[key] = refObs # Rescale columns if requested if rescalings.value is not None: - for key, entry in rescalings.value.iteritems(): dataArray[:,lk[key]] *= entry + for key, entry in rescalings.value.items(): dataArray[:,lk[key]] *= entry # Convert columns to log if requested if logPlots.value is not None: for column in logPlots.value: if column in lk: if any(dataArray[:,lk[column]] <= 0.0): - print "Error: column {0} requested for log plotting has non-positive values!".format(column) + print("Error: column {0} requested for log plotting has non-positive values!".format(column)) bad_indices = np.where(dataArray[:,lk[column]] <= 0.0)[0] - print "Here 
is the first point with bad values, for example: " + print("Here is the first point with bad values, for example: ") for i,val in enumerate(dataArray[bad_indices[0],:]): index = i for x in lk: if lk[x] == i: index = x - print " col {0}: {1}".format(index,val) + print(" col {0}: {1}".format(index,val)) sys.exit('\nPlease fix log settings (or your data) and rerun pippi.') dataArray[:,lk[column]] = np.log10(dataArray[:,lk[column]]) @@ -234,7 +237,7 @@ def getBestFit(dataArray,lk,outputBaseFilename,column_names,all_best_fit_data,al bestFitIndex = dataArray[:,lk[labels.value[refLike]]].argmin() bestFit = dataArray[bestFitIndex,lk[labels.value[refLike]]] worstFit = dataArray[:,lk[labels.value[refLike]]].max() - print ' Best fit -lnlike: ',bestFit + print(' Best fit -lnlike: ',bestFit) outfile = smart_open(outputBaseFilename+'.best','w') outfile.write('# This best-fit/posterior mean file created by pippi ' +pippiVersion+' on '+datetime.datetime.now().strftime('%c')+'\n') @@ -267,7 +270,7 @@ def getBestFit(dataArray,lk,outputBaseFilename,column_names,all_best_fit_data,al outfile2.write('# This best-fit file created in GAMBIT yaml format by pippi ' +pippiVersion+' on '+datetime.datetime.now().strftime('%c')+'\n') outfile2.write('# Best-fit log-likelihood: '+str(-bestFit)+'\n\n') - for model, parameters in parameter_sets.iteritems(): + for model, parameters in parameter_sets.items(): outfile2.write(' ' + model + ':\n') for parval in parameters: outfile2.write(' ' + parval + '\n') outfile2.close @@ -275,7 +278,7 @@ def getBestFit(dataArray,lk,outputBaseFilename,column_names,all_best_fit_data,al if alt_best_fit.value is not None: halt = (min_contour is not None) and (bestFit+alt_best_fit.value > min_contour) bestFit = -alt_best_fit.value - print ' Best fit -lnlike to be used to define profile likelihood ratio: ',bestFit + print(' Best fit -lnlike to be used to define profile likelihood ratio: ',bestFit) if halt: sys.exit('\n The highest CL likelihood likelihood contour you 
have requested contains no samples! No more pippi for you.\n') return [bestFit,worstFit,bestFitIndex] @@ -309,7 +312,7 @@ def getEvidence(dataArray,lk,bestFit,totalMult,outputBaseFilename): np.exp(bestFit-dataArray[:,lk[labels.value[refLike]]]))) \ - bestFit - np.log(totalMult) lnZError = np.log(1.0 - pow(totalMult,-0.5)) - print ' ln(evidence): ',lnZ,'+/-',lnZError + print(' ln(evidence): ',lnZ,'+/-',lnZError) else: sys.exit('Error: evidence calculation only possible for MCMC (should never get here).') outfile = smart_open(outputBaseFilename+'.lnZ','w') @@ -334,13 +337,23 @@ def saveExtrema(dataArray,lk,outputBaseFilename,setOfRequestedColumns,dataRanges outfile.write('\n') outfile.close +def saveLogVars(lk,outputBaseFilename,logPlots): + # Save the variables requested to be plot in log scale + outfile = smart_open(outputBaseFilename+'_savedkeys.pip','a') + outfile.write('use_log_scale =') + if logPlots.value is not None: + for column in logPlots.value: + if column in lk: + outfile.write(' '+str(column)) + outfile.write('\n') + outfile.close def saveLookupKeys(lk,outputBaseFilename): # Save the lookup keys for all the requested parameters outfile = smart_open(outputBaseFilename+'_savedkeys.pip','a') outfile.write('lookup_keys =') if type(lk) == dict: - for key, value in lk.iteritems(): outfile.write(' '+str(key)+':'+str(value)) + for key, value in lk.items(): outfile.write(' '+str(key)+':'+str(value)) else: for i in lk: outfile.write(' '+str(i)+':'+str(i)) outfile.write('\n') @@ -363,7 +376,7 @@ def oneDsampler(dataArray,lk,bestFit,worstFit,outputBaseFilename,dataRanges,nAll for plot in oneDplots.value: - print ' Parsing data for 1D plots of quantity ',plot + print(' Parsing data for 1D plots of quantity ',plot) nBins = nAllBins[plot] resolution = rAllBins[plot] @@ -497,7 +510,7 @@ def twoDsampler(dataArray,lk,bestFit,worstFit,outputBaseFilename,dataRanges,nAll for plot in twoDplots.value: - print ' Parsing data for 2D plots of quantities ',plot + print(' Parsing 
data for 2D plots of quantities ',plot) nBins = [nAllBins[plot[j]] for j in range(2)] resolution = [rAllBins[plot[j]] for j in range(2)] diff --git a/pippi_plot.py b/pippi_plot.py index 60cef47..3779e16 100644 --- a/pippi_plot.py +++ b/pippi_plot.py @@ -8,6 +8,7 @@ # Originally developed: March 2012 ############################################################# +from __future__ import print_function import subprocess from pippi_utils import * from pippi_read import * @@ -27,7 +28,7 @@ def plot(filename): - print + print() # Parse pip file getIniData(filename,keys) @@ -60,7 +61,7 @@ def plot(filename): #Work out whether to do posteriors check that flags match up for posterior pdf if doPosterior.value and not any(x in labels.value for x in permittedMults): - print ' Warning: do_posterior_pdf = T but no multiplicity in chain labels.\n Skipping posterior PDF...' + print(' Warning: do_posterior_pdf = T but no multiplicity in chain labels.\n Skipping posterior PDF...') doPosterior.value = False # Set defaults for prepend and append string @@ -72,7 +73,7 @@ def plot(filename): if oneDplots.value is not None: # Work through 1D plotting scripts for plot in oneDplots.value: - print ' Running plotting scripts for 1D plots of quantity ',plot + print(' Running plotting scripts for 1D plots of quantity ',plot) # Set up filenames currentBase = baseFilename+'_'+str(plot) # Make profile likelihood plots @@ -95,7 +96,7 @@ def plot(filename): if twoDplots.value is not None: # Loop over requested plots for plot in twoDplots.value: - print ' Running plotting scripts for 2D plots of quantity ',plot + print(' Running plotting scripts for 2D plots of quantity ',plot) # Set up filenames currentBase = baseFilename+'_'+'_'.join([str(x) for x in plot]) # Make profile likelihood plots @@ -108,7 +109,7 @@ def plot(filename): subprocess.check_call('cd '+baseFiledir+'; ./'+currentBase+'_post2D.bsh', shell=True) subprocess.check_call('mv '+baseFiledir+currentBase+'_post2D.pdf '+ 
outdirectory+'/'+prestring+currentBase+'_post2D'+appstring+'.pdf', shell=True) - + #if doObservable.value: if obsPlots.value is not None: for column in obsPlots.value: diff --git a/pippi_read.py b/pippi_read.py index 117f5d3..f8aa05d 100644 --- a/pippi_read.py +++ b/pippi_read.py @@ -9,6 +9,7 @@ # Pat Scott, 2016 ############################################################# +from __future__ import print_function import datetime import sys from pippi_utils import * @@ -44,7 +45,7 @@ def getIniData(filename,keys,savekeys=None,savedir=None): #Find relevant bits in pipfile for i,key in enumerate(keys): - lines = filter(key.seek,parse_options) + lines = list(filter(key.seek,parse_options)) if (len(lines) == 0): sys.exit('Error: field '+key.pipFileKey+' required for requested operation not found in '+filename+'. Quitting...\n') if (len(lines) > 1): @@ -57,7 +58,7 @@ def getIniData(filename,keys,savekeys=None,savedir=None): # Make sure saving is actually possible if not mainChain in keys: - print '\n Warning: saving of keys not possible because mainChain is undefined.\n Skipping save...' 
+ print('\n Warning: saving of keys not possible because mainChain is undefined.\n Skipping save...') return # Open the file keys will be saved to @@ -72,7 +73,7 @@ def getIniData(filename,keys,savekeys=None,savedir=None): # Parse the pip file again and save the requested keys for key in savekeys: - lines = filter(key.seek,parse_options) + lines = list(filter(key.seek,parse_options)) # Save keys verbatim to the savedkeys pip file if savekeys is not None and key in savekeys: outfile.write(lines[0]) @@ -109,11 +110,11 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment for index in assignments.value: if castable_to_int(index): if ncols+n_extra_cols not in assignments.value: - print 'ERROR: When working with ASCII chains and trying to assign function' - print 'results to datastreams, all functional datastreams must be assigned indices' - print 'in continuous ascending order starting from the index of the last' - print 'column in the ASCII file. In this case, that means you must start with' - print str(ncols)+' and go up by one for each subsequent functional stream.' + print('ERROR: When working with ASCII chains and trying to assign function') + print('results to datastreams, all functional datastreams must be assigned indices') + print('in continuous ascending order starting from the index of the last') + print('column in the ASCII file. 
In this case, that means you must start with') + print(str(ncols)+' and go up by one for each subsequent functional stream.') sys.exit("") if not is_functional_assignment(assignments.value[ncols+n_extra_cols]): sys.exit('ERROR: When working with ASCII chains, all entries in assign_to_pippi_datastream\nmust be functional assignments.') @@ -131,9 +132,9 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment #Close the chainfile and indicate success chainfile.close if not silent: - print - print ' Read chain '+filename - print + print() + print(' Read chain '+filename) + print() #Turn the whole lot into a numpy array of doubles data = np.array(data, dtype=np.float64) @@ -148,23 +149,23 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment # Read in any python preamble specified in the pip file. if preamble is not None: exec(preamble) exec('data[:,'+str(i)+'] = '+expression) - except KeyError, e: - print 'ERROR: Datastream '+str(e)+', which you have tried to define a function of' - print 'in assign_to_pippi_datastream, is not itself defined as a datastream.' - print 'This usually happens because it does not exist in the chain you are trying' - print 'to parse. Please fix assignment "'+assignments.value[i]+'".' + except KeyError as e: + print('ERROR: Datastream '+str(e)+', which you have tried to define a function of') + print('in assign_to_pippi_datastream, is not itself defined as a datastream.') + print('This usually happens because it does not exist in the chain you are trying') + print('to parse. Please fix assignment "'+assignments.value[i]+'".') sys.exit("") except: - print 'ERROR: something in one of the functions of datastreams you defined in ' - print 'assign_to_pippi_datastream is buggy. Please fix the expression: ' - print assignments.value[i] - print 'Now raising the original error, so you can see the stacktrace for yourself...' 
+ print('ERROR: something in one of the functions of datastreams you defined in ') + print('assign_to_pippi_datastream is buggy. Please fix the expression: ') + print(assignments.value[i]) + print('Now raising the original error, so you can see the stacktrace for yourself...') raise # Filter out points inside the requested data ranges cut = None if data_ranges is not None and data_ranges.value: - for key, value in data_ranges.value.iteritems(): + for key, value in data_ranges.value.items(): lowercut = value[0] uppercut = value[1] if log_plots.value is not None and key in log_plots.value: @@ -179,24 +180,24 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment cut = np.logical_and(cut, np.logical_and(data[:,key] >= lowercut, data[:,key] <= uppercut)) # Print the details of the cuts rescaling = 1.0 - print " Total samples: ", data.shape[0] + print(" Total samples: ", data.shape[0]) if cut is not None: data = data[cut,:] sumcut = sum(cut) - print " Total samples within requested data ranges: ", sumcut + print(" Total samples within requested data ranges: ", sumcut) if sumcut <= 0.0: sys.exit('Requested data cuts leave no remaining samples!') rescaling = 1.0*sumcut/len(cut) - print " Fraction of samples within requested data ranges: %.4f"%(rescaling) + print(" Fraction of samples within requested data ranges: %.4f"%(rescaling)) # HDF5 file else: filename, groupname = filename.split(":") if not silent: - print - print " Reading HDF5 chain file" - print " filename:", filename - print " group:", groupname - print + print() + print(" Reading HDF5 chain file") + print(" filename:", filename) + print(" group:", groupname) + print() # Parse group entry groups = groupname.split('/') @@ -220,7 +221,7 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment entries = entries[key] except KeyError: sys.exit("ERROR: requested group \""+key+"\" does not exist in hdf5 file.") - column_names = filter(lambda x: x[-8:] != 
"_isvalid", list(entries)) + column_names = list(filter(lambda x: x[-8:] != "_isvalid", list(entries))) # Reorganize MPIrank, pointID and other requested entries for convenience. indices = [] @@ -257,7 +258,7 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment column_names = np.array(column_names)[all_indices] # Pick up all the datastreams with indices larger than the largest index in the hdf5 file if assignments.value is not None: - for index, assignment in assignments.value.iteritems(): + for index, assignment in assignments.value.items(): if castable_to_int(index) and index >= index_count: if is_functional_assignment(assignment): functional_assignment_indices.append(index) @@ -275,19 +276,19 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment # Print probed contents and split if probe_only: for i, column_name in enumerate(column_names): - if column_name != '': print " ", i, ":", column_name - print + if column_name != '': print(" ", i, ":", column_name) + print() quit() # Identify any likelihood or multiplicity indicated by the labels. 
if labels: - likelihood_index = [value for key, value in labels.value.iteritems() if key in permittedLikes] - if not likelihood_index: likelihood_index = [key for key, value in labels.value.iteritems() if value in permittedLikes] + likelihood_index = [value for key, value in labels.value.items() if key in permittedLikes] + if not likelihood_index: likelihood_index = [key for key, value in labels.value.items() if value in permittedLikes] if likelihood_index: likelihood_index = likelihood_index[0] if likelihood_index not in requested_cols: requested_cols.add(likelihood_index) - multiplicity_index = [value for key, value in labels.value.iteritems() if key in permittedMults] - if not multiplicity_index: multiplicity_index = [key for key, value in labels.value.iteritems() if value in permittedMults] + multiplicity_index = [value for key, value in labels.value.items() if key in permittedMults] + if not multiplicity_index: multiplicity_index = [key for key, value in labels.value.items() if value in permittedMults] if multiplicity_index: multiplicity_index = multiplicity_index[0] if multiplicity_index not in requested_cols: requested_cols.add(multiplicity_index) @@ -312,25 +313,25 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment data_isvalid.append(np.array(entries[column_names[index]+"_isvalid"], dtype=np.float64)) lookup_key[index] = index_count index_count += 1 - non_functional_cols = [i for i, elem in enumerate(data) if data[i] is not 'functional'] + non_functional_cols = [i for i, elem in enumerate(data) if data[i] != 'functional'] if not non_functional_cols: - print "ERROR: At least one non-function assignment is needed in" - print "assign_to_pippi_datastream, or a multiplicity or likelihood" - print "identification in quantity_labels." 
+ print("ERROR: At least one non-function assignment is needed in") + print("assign_to_pippi_datastream, or a multiplicity or likelihood") + print("identification in quantity_labels.") sys.exit("") # Print the raw number of samples in the hdf5 file total_samples = data[non_functional_cols[0]].size - print " Total samples: ", total_samples + print(" Total samples: ", total_samples) # Fill in the functional columns with zeros. Note that this uses more memory than doing it after validity # cuts, but should actually be faster (I think; haven't actually tested that). It makes the code simpler too. for i, elem in enumerate(data): - if elem is 'functional': + if elem == 'functional': data[i] = np.zeros(total_samples, dtype=np.float64) else: # Do some pruning to deal with cases where the some datasets have extra entries (although this arguably indicates a bug in the sampler) data[i] = elem[:total_samples] for i, elem in enumerate(data_isvalid): - if elem is 'functional': + if elem == 'functional': data_isvalid[i] = np.ones(total_samples, dtype=np.float64) else: # Do some pruning to deal with cases where the some datasets have extra entries (although this arguably indicates a bug in the sampler) @@ -352,7 +353,7 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment # Based on the likelihood entry only if likelihood_index is not None: cut = (data_isvalid[lookup_key[likelihood_index]] == 1) - print " Total valid samples: ", sum(cut) + print(" Total valid samples: ", sum(cut)) # Fill in the derived quantities specified via functional assignments for i in sorted(functional_assignment_indices, key=int): @@ -361,27 +362,27 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment # Read in any python preamble specified in the pip file. 
if preamble is not None: exec(preamble) exec('data[lookup_key['+str(i)+']] = '+expression) - except KeyError, e: - print 'ERROR: Datastream '+str(e)+', which you have tried to define a function of' - print 'in assign_to_pippi_datastream, is not itself defined as a datastream.' - print 'This usually happens because you have not requested it for plotting in' - print 'either oneD_plot_quantities or twoD_plot_quantities, so it has not been' - print 'extracted from the hdf5 file. Please add it to one of these lists if you' - print 'really want to do the calculation "'+assignments.value[i]+'"' + except KeyError as e: + print('ERROR: Datastream '+str(e)+', which you have tried to define a function of') + print('in assign_to_pippi_datastream, is not itself defined as a datastream.') + print('This usually happens because you have not requested it for plotting in') + print('either oneD_plot_quantities or twoD_plot_quantities, so it has not been') + print('extracted from the hdf5 file. Please add it to one of these lists if you') + print('really want to do the calculation "'+assignments.value[i]+'"') sys.exit("") except: - print 'ERROR: something in one of the functions of datastreams you defined in ' - print 'assign_to_pippi_datastream is buggy. Please fix the expression: ' - print assignments.value[i] - print 'Now raising the original error, so you can see the stacktrace for yourself...' + print('ERROR: something in one of the functions of datastreams you defined in ') + print('assign_to_pippi_datastream is buggy. Please fix the expression: ') + print(assignments.value[i]) + print('Now raising the original error, so you can see the stacktrace for yourself...') raise # Filter out points inside the requested data ranges if data_ranges.value: - for key, value in data_ranges.value.iteritems(): + for key, value in data_ranges.value.items(): if key not in requested_cols: - print 'ERROR: '+str(key)+' mentioned in data_ranges does not' - print 'appear in requested_cols! 
Please report this as a pippi bug.' + print('ERROR: '+str(key)+' mentioned in data_ranges does not') + print('appear in requested_cols! Please report this as a pippi bug.') lowercut = value[0] uppercut = value[1] if log_plots.value is not None and key in log_plots.value: @@ -401,7 +402,7 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment data = data[:,cut] data_isvalid = data_isvalid[:,cut] sumcut = sum(cut) - print " Total valid samples within requested data ranges: ", sumcut + print(" Total valid samples within requested data ranges: ", sumcut) if sumcut == 0: sys.exit(' You cut out all your samples! Pippi can\'t do much more from here.\n') rescaling = 1.0*sumcut/len(cut) # Find the full details of the best-fit point @@ -409,7 +410,7 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment if likelihood_index in labels.value: findMin = labels.value[likelihood_index] in permittedLikes_samesign else: - findMin = [value for key, value in labels.value.iteritems() if key in permittedLikes_samesign] + findMin = [value for key, value in labels.value.items() if key in permittedLikes_samesign] if findMin: bestfit_any_index = np.ma.array(old_likelihood_column, mask=~cut).argmin() bestfit_index = data[lookup_key[likelihood_index]].argmin() @@ -421,15 +422,15 @@ def getChainData(filename, cut_all_invalid=None, requested_cols=None, assignment all_best_fit_data.append(str(data[lookup_key[i]][bestfit_index])) else: if column_name != '': all_best_fit_data.append(str(entries[column_name][bestfit_any_index])) - print " Fraction of samples deemed valid and within requested data ranges: %.4f"%(rescaling) + print(" Fraction of samples deemed valid and within requested data ranges: %.4f"%(rescaling)) # Print list of contents for convenience if not silent: - for key, value in sorted(lookup_key.iteritems()): - print " ",key, ":", column_names[key] - print " mean: %.2e min: %.2e max %.2e"%(np.mean(data[value]), 
np.min(data[value]), np.max(data[value])) - print " Fraction of valid points where this is invalid: %.4f"%(1.0-data_isvalid[value].mean()) - print + for key, value in sorted(lookup_key.items()): + print(" ",key, ":", column_names[key]) + print(" mean: %.2e min: %.2e max %.2e"%(np.mean(data[value]), np.min(data[value]), np.max(data[value]))) + print(" Fraction of valid points where this is invalid: %.4f"%(1.0-data_isvalid[value].mean())) + print() # Flip 'em. data = np.array(data.T, dtype=np.float64) diff --git a/pippi_script.py b/pippi_script.py index ff3b182..8d0ddb4 100644 --- a/pippi_script.py +++ b/pippi_script.py @@ -8,6 +8,8 @@ # Originally developed: March 2012 ############################################################# +from __future__ import print_function + left_margin = 0.16 right_margin = 0.03 top_margin = 0.05 @@ -41,28 +43,31 @@ doHistograms = dataObject('plot_as_histograms_1D',boolean) legendLines = dataObject('extra_legend_lines',string_list) plotSize = dataObject('plot_size',string) -blame = dataObject('blame',string) +blame = dataObject('blame_text',string) +blameScale = dataObject('blame_scale', floater) logoFile = dataObject('logo_file',string) logoLoc = dataObject('logo_loc',floatuple_list) logoWidth = dataObject('logo_width',floater) colours = dataObject('colour_scheme',internal) axisRanges = dataObject('axis_ranges',floatuple_dictionary) yAxisAngle = dataObject('yaxis_number_angle',floater) +customTicks = dataObject('custom_ticks', int_list) refPoint = dataObject('reference_point',float_dictionary) refKey = dataObject('reference_text',string) keys = keys+[scriptdir,doComparison,postMeanOnPost,postMeanOnProf,bestFitOnPost, bestFitOnProf,doColourbar,doLegend1D,doLegend2D,legendLoc1D,legendLoc2D, - doHistograms,legendLines,blame,colours,axisRanges,yAxisAngle,refPoint, + doHistograms,legendLines,blame,blameScale,colours,axisRanges,yAxisAngle,customTicks,refPoint, refKey,doKey1D,doKey2D,keyLoc1D,keyLoc2D,parsedir,logoFile,logoLoc,logoWidth] # 
Define pip file entries to be read from savedkeys file labels = dataObject('quantity_labels',string_dictionary) +logVars = dataObject('use_log_scale', int_list) dataRanges = dataObject('data_ranges',floatuple_dictionary) lookupKeys = dataObject('lookup_keys',int_dictionary) # Constants blameFractionalVerticalOffset = 1.2e-2 PosteriorIsMainInComboPlot = True -likeColourbarString = 'Profile likelihood ratio $\Lambda=\mathcal{L}/\mathcal{L}_\mathrm{max}$' +likeColourbarString = 'Profile likelihood ratio $\mathcal{L}/\mathcal{L}_\mathrm{max}$' postColourbarString = 'Relative probability $P/P_\mathrm{max}$' defaultLegendLocation = 'bl' defaultKeyLocation = 'tr' @@ -75,14 +80,14 @@ def script(filename): # input: filename = the name of the pip file - print + print() # Parse pip file getIniData(filename,keys) # Make sure that comparison is turned off if comparison filename is missing if doComparison.value and secChain.value is None: - print ' Warning: comparison curves requested but no comparison file specified.\n Skipping comparison...\n' + print(' Warning: comparison curves requested but no comparison file specified.\n Skipping comparison...\n') doComparison.value = False # Work out where the parse output is located @@ -137,11 +142,11 @@ def script(filename): secParseFilenameFromScriptFiledir = parseFiledirFromScriptFiledir + re.sub(r'.*/|\..?.?.?$', '', secChain.value) + '_comparison' # Retrieve labels and data ranges saved in earlier parsing run - getIniData([parseFilename+'_savedkeys.pip'],[labels,dataRanges,lookupKeys]) + getIniData([parseFilename+'_savedkeys.pip'],[labels,logVars,dataRanges,lookupKeys]) #Work out whether to do posteriors and check that flags match up if doPosterior.value and not any(x in labels.value for x in permittedMults): - print ' Warning: do_posterior_pdf = T but no multiplicity in chain labels.\n Skipping posterior PDF...' 
+ print(' Warning: do_posterior_pdf = T but no multiplicity in chain labels.\n Skipping posterior PDF...') doPosterior.value = False # set colour scheme if it is undefined @@ -156,7 +161,7 @@ def script(filename): # Loop over requested plots for plot in oneDplots.value: - print ' Writing scripts for 1D plots of quantity ',plot + print(' Writing scripts for 1D plots of quantity ',plot) # Set up filenames currentBase = baseFilename+'_'+str(plot) @@ -170,6 +175,17 @@ def script(filename): ytrema = [0.0,1.0] yRange = 1.0 + # Determine whether to use log scale + xlog = False + if logVars.value is not None and plot in logVars.value: + xlog = True + + # Find the optimal ticks + xCustomTicks = False + if customTicks.value is not None and plot in customTicks.value: + xCustomTicks = True + ticks_major, ticks_minor, ticks_labels, x_tick_label_scale = getOptimalTicks(xtrema, log=xlog) + # Locate and scale logo (if any) if logoFile.value is not None: logoCoords = [xtrema[0]+logoLoc.value[0][0]*xRange,logoLoc.value[0][1]] @@ -188,7 +204,7 @@ def script(filename): plotRef = False # Determine plot size - if plotSize.value is None or plotSize.value is '': + if plotSize.value is None or plotSize.value == '': plotSizeInternal = '11cm x 4in' else: plotSizeInternal = plotSize.value @@ -286,6 +302,8 @@ def script(filename): outfile.write(' --plot '+currentParse+'_like1D'+histString+'.ct2@1:2 /fill xaxis /fill-transparency '+colours.value.fillTransparency1D+ ' /fill-color '+colours.value.mainProfColour1D+' /color '+colours.value.mainProfColour1D+ ' /line-style '+colours.value.main1DLineStyle+' /line-width '+colours.value.lineWidth1D+'\\\n') + if xlog: + outfile.write(' --xlog\\\n') if doLegend1D.value is not None and plot in doLegend1D.value: # Write legend try: @@ -309,6 +327,8 @@ def script(filename): outfile.write(' --draw-marker '+str(postMean)+','+str(yRange*colours.value.mainPostMeanMarkerScale/40.0)+' '+ colours.value.mainPostMeanMarker+' /color 
\''+colours.value.mainPostMeanColour1D+ '\' /scale '+str(colours.value.mainPostMeanMarkerScale)+' \\\n') + # Fill the background colour + outfile.write(' --background \'' + colours.value.backgroundColour + '\'\\\n') # Plot reference point if plotRef: outfile.write(refString) # Draw key @@ -316,13 +336,26 @@ def script(filename): # Write credits if blame.value is not None: blameYCoordinate = str(blameFractionalVerticalOffset * yRange + ytrema[1]) - outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\' /scale 0.5 /justification right\\\n') + outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\'') + if blameScale.value is not None: + outfile.write(' /scale '+str(blameScale.value)) + outfile.write(' /justification right\\\n') # Add logo if logoFile.value is not None: outfile.write(' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n') - # Set axis colours - for x in ['top', 'bottom', 'left', 'right']: - outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour1D+'\'\\\n') + # Set axis colours and ticks for x axes + for x in ['top', 'bottom']: + outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour1D+'\'') + if xCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in ticks_major])) + outfile.write(" /ticks-labels="+ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in ticks_minor])) + if x_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(x_tick_label_scale)) + outfile.write('\\\n') + # Set axis colours for y axes + for y in ['left', 'right']: + outfile.write(' --axis-style '+y+' /stroke_color \''+colours.value.axisColour1D+'\'\\\n') outfile.close subprocess.call('chmod +x '+currentBase+'_like1D.bsh', shell=True) @@ -426,6 +459,8 @@ def script(filename): outfile.write(' --plot '+currentParse+'_post1D'+histString+'.ct2@1:2 /fill xaxis /fill-transparency 
'+colours.value.fillTransparency1D+ ' /fill-color '+colours.value.mainPostColour1D+' /color '+colours.value.mainPostColour1D+ ' /line-style '+colours.value.main1DLineStyle+' /line-width '+colours.value.lineWidth1D+'\\\n') + if xlog: + outfile.write(' --xlog\\\n') if doLegend1D.value is not None and plot in doLegend1D.value: # Write legend try: @@ -456,13 +491,26 @@ def script(filename): # Write credits if blame.value is not None: blameYCoordinate = str(blameFractionalVerticalOffset * yRange + ytrema[1]) - outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\' /scale 0.5 /justification right\\\n') + outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\'') + if blameScale.value is not None: + outfile.write(' /scale '+str(blameScale.value)) + outfile.write(' /justification right\\\n') # Add logo if logoFile.value is not None: outfile.write(' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n') - # Set axis colours - for x in ['top', 'bottom', 'left', 'right']: - outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour1D+'\'\\\n') + # Set axis colours and ticks for x axes + for x in ['top', 'bottom']: + outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour1D+'\'') + if xCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in ticks_major])) + outfile.write(" /ticks-labels="+ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in ticks_minor])) + if x_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(x_tick_label_scale)) + outfile.write('\\\n') + # Set axis colours for y axes + for y in ['left', 'right']: + outfile.write(' --axis-style '+y+' /stroke_color \''+colours.value.axisColour1D+'\'\\\n') outfile.close subprocess.call('chmod +x '+currentBase+'_post1D.bsh', shell=True) @@ -561,6 +609,8 @@ def script(filename): outfile.write(' --plot 
'+currentParse+'_'+main+'1D'+histString+'.ct2@1:2 /fill xaxis /fill-transparency '+colours.value.fillTransparency1D+ ' /fill-color '+mainData[3]+' /color '+mainData[3]+ ' /line-style '+colours.value.main1DLineStyle+' /line-width '+colours.value.lineWidth1D+'\\\n') + if xlog: + outfile.write(' --xlog\\\n') if doLegend1D.value is not None and plot in doLegend1D.value: # Write legend try: @@ -587,13 +637,26 @@ def script(filename): # Write credits if blame.value is not None: blameYCoordinate = str(blameFractionalVerticalOffset * yRange + ytrema[1]) - outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\' /scale 0.5 /justification right\\\n') + outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\'') + if blameScale.value is not None: + outfile.write(' /scale '+str(blameScale.value)) + outfile.write(' /justification right\\\n') # Add logo if logoFile.value is not None: outfile.write(' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n') - # Set axis colours - for x in ['top', 'bottom', 'left', 'right']: - outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour1D+'\'\\\n') + # Set axis colours and ticks for x axes + for x in ['top', 'bottom']: + outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour1D+'\'') + if xCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in ticks_major])) + outfile.write(" /ticks-labels="+ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in ticks_minor])) + if x_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(x_tick_label_scale)) + outfile.write('\\\n') + # Set axis colours for y axes + for y in ['left', 'right']: + outfile.write(' --axis-style '+y+' /stroke_color \''+colours.value.axisColour1D+'\'\\\n') outfile.close subprocess.call('chmod +x '+currentBase+'_combo1D.bsh', shell=True) @@ -604,7 +667,7 @@ def script(filename): # Loop over 
requested plots for plot in twoDplots.value: - print ' Writing scripts for 2D plots of quantities ',plot + print(' Writing scripts for 2D plots of quantities ',plot) # Set up filenames currentBase = baseFilename+'_'+'_'.join([str(x) for x in plot]) @@ -618,6 +681,24 @@ def script(filename): xRange = xtrema[1] - xtrema[0] yRange = ytrema[1] - ytrema[0] + # Determine whether to use log scale + xlog = False + ylog = False + if logVars.value is not None and plot[0] in logVars.value: + xlog = True + if logVars.value is not None and plot[1] in logVars.value: + ylog = True + + # Find the optimal ticks + xCustomTicks = False + yCustomTicks = False + if customTicks.value is not None and plot[0] in customTicks.value: + xCustomTicks = True + x_ticks_major, x_ticks_minor, x_ticks_labels, x_tick_label_scale = getOptimalTicks(xtrema, log=xlog) + if customTicks.value is not None and plot[1] in customTicks.value: + yCustomTicks = True + y_ticks_major, y_ticks_minor, y_ticks_labels, y_tick_label_scale = getOptimalTicks(ytrema, log=ylog) + # Locate and scale logo (if any) if logoFile.value is not None: logoCoords = [xtrema[0]+logoLoc.value[0][0]*xRange,ytrema[0]+logoLoc.value[0][1]*yRange] @@ -636,7 +717,7 @@ def script(filename): plotRef = False # Determine plot size - if plotSize.value is None or plotSize.value is '': + if plotSize.value is None or plotSize.value == '': if doColourbar.value is not None and plot in doColourbar.value: plotSizeInternal = '12.5cm x 4in' else: @@ -755,6 +836,10 @@ def script(filename): colours.value.comparisonPostMeanMarker+' /color \''+colours.value.comparisonPostMeanColour+ '\' /scale '+str(colours.value.comparisonPostMeanMarkerScale)+' \\\n') outfile.write(' --plot '+currentParse+'_like2D.ct2@1:2:3 /fill-transparency 1\\\n') + if xlog: + outfile.write(' --xlog\\\n') + if ylog: + outfile.write(' --ylog\\\n') if contours2D.value is not None: # Plot contours for contour in contourLevels: @@ -783,6 +868,8 @@ def script(filename): outfile.write(' 
--draw-marker '+str(postMean[0])+','+str(postMean[1])+' '+ colours.value.mainPostMeanMarker+' /fill-color \''+str(colours.value.mainPostMeanColour2D)+'\' /stroke-color \''+str(colours.value.mainPostMeanColourOutline2D)+ '\' /scale '+str(colours.value.mainPostMeanMarkerScale)+' \\\n') + # Fill the background colour + outfile.write(' --background \'' + colours.value.backgroundColour + '\'\\\n') # Plot reference point if plotRef: outfile.write(refString) # Draw key @@ -790,13 +877,33 @@ def script(filename): # Write credits if blame.value is not None: blameYCoordinate = str(blameFractionalVerticalOffset * yRange + ytrema[1]) - outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\' /scale 0.5 /justification right\\\n') + outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\'') + if blameScale.value is not None: + outfile.write(' /scale '+str(blameScale.value)) + outfile.write(' /justification right\\\n') # Add logo if logoFile.value is not None: outfile.write(' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n') - # Set axis colours - for x in ['top', 'bottom', 'left', 'right']: - outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour2D+'\'\\\n') + # Set axis colours and ticks for x axes + for x in ['top', 'bottom']: + outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour2D+'\'') + if xCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in x_ticks_major])) + outfile.write(" /ticks-labels="+x_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in x_ticks_minor])) + if x_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(x_tick_label_scale)) + outfile.write('\\\n') + # Set axis colours and ticks for y axes + for y in ['left', 'right']: + outfile.write(' --axis-style '+y+' /stroke_color \''+colours.value.axisColour2D+'\'') + if yCustomTicks: + outfile.write(" 
/ticks-major="+','.join([str(tick) for tick in y_ticks_major])) + outfile.write(" /ticks-labels="+y_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in y_ticks_minor])) + if y_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(y_tick_label_scale)) + outfile.write('\\\n') if doColourbar.value is not None and plot in doColourbar.value: # Do labelling for colourbar outfile.write(' --y2 --plot '+currentParse+'_like2D.ct2@1:2:3 /fill-transparency 1\\\n') @@ -916,6 +1023,10 @@ def script(filename): colours.value.comparisonPostMeanMarker+' /color \''+colours.value.comparisonPostMeanColour+ '\' /scale '+str(colours.value.comparisonPostMeanMarkerScale)+' \\\n') outfile.write(' --plot '+currentParse+'_post2D.ct2@1:2:3 /fill-transparency 1\\\n') + if xlog: + outfile.write(' --xlog\\\n') + if ylog: + outfile.write(' --ylog\\\n') if contours2D.value is not None: # Plot contours for contour in mainContourLevels: @@ -943,6 +1054,8 @@ def script(filename): outfile.write(' --draw-marker '+str(postMean[0])+','+str(postMean[1])+' '+ colours.value.mainPostMeanMarker+' /fill-color \''+str(colours.value.mainPostMeanColour2D)+'\' /stroke-color \''+str(colours.value.mainPostMeanColourOutline2D)+ '\' /scale '+str(colours.value.mainPostMeanMarkerScale)+' \\\n') + # Fill the background colour + outfile.write(' --background \'' + colours.value.backgroundColour + '\'\\\n') # Plot reference point if plotRef: outfile.write(refString) # Draw key @@ -950,13 +1063,33 @@ def script(filename): # Write credits if blame.value is not None: blameYCoordinate = str(blameFractionalVerticalOffset * yRange + ytrema[1]) - outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\' /scale 0.5 /justification right\\\n') + outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\'') + if blameScale.value is not None: + outfile.write(' /scale '+str(blameScale.value)) + outfile.write(' /justification right\\\n') # Add 
logo if logoFile.value is not None: outfile.write(' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n') - # Set axis colours - for x in ['top', 'bottom', 'left', 'right']: - outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour2D+'\'\\\n') + # Set axis colours and ticks for x axes + for x in ['top', 'bottom']: + outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour2D+'\'') + if xCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in x_ticks_major])) + outfile.write(" /ticks-labels="+x_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in x_ticks_minor])) + if x_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(x_tick_label_scale)) + outfile.write('\\\n') + # Set axis colours and ticks for y axes + for y in ['left', 'right']: + outfile.write(' --axis-style '+y+' /stroke_color \''+colours.value.axisColour2D+'\'') + if yCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in y_ticks_major])) + outfile.write(" /ticks-labels="+y_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in y_ticks_minor])) + if y_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(y_tick_label_scale)) + outfile.write('\\\n') if doColourbar.value is not None and plot in doColourbar.value: # Do labelling for colourbar outfile.write(' --y2 --plot '+currentParse+'_post2D.ct2@1:2:3 /fill-transparency 1\\\n') @@ -970,7 +1103,7 @@ def script(filename): #if doObservable.value: if obsPlots.value is not None: for column in obsPlots.value: - + # Get contours if contours2D.value is not None: contourLevelsLike = getContours(parseFilename,plot,'like') @@ -1104,6 +1237,8 @@ def script(filename): outfile.write(' --draw-marker '+str(postMean[0])+','+str(postMean[1])+' '+ colours.value.mainPostMeanMarker+' /fill-color \''+str(colours.value.mainPostMeanColour2D)+'\' /stroke-color 
\''+str(colours.value.mainPostMeanColourOutline2D)+ '\' /scale '+str(colours.value.mainPostMeanMarkerScale)+' \\\n') + # Fill the background colour + outfile.write(' --background \'' + colours.value.backgroundColour + '\'\\\n') # Plot reference point if plotRef: outfile.write(refString) # Draw key @@ -1111,13 +1246,33 @@ def script(filename): # Write credits if blame.value is not None: blameYCoordinate = str(blameFractionalVerticalOffset * yRange + ytrema[1]) - outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\' /scale 0.5 /justification right\\\n') + outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\'') + if blameScale.value is not None: + outfile.write(' /scale '+str(blameScale.value)) + outfile.write(' /justification right\\\n') # Add logo if logoFile.value is not None: outfile.write(' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n') - # Set axis colours - for x in ['top', 'bottom', 'left', 'right']: - outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour2D+'\'\\\n') + # Set axis colours and ticks for x axes + for x in ['top', 'bottom']: + outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour2D+'\'') + if xCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in x_ticks_major])) + outfile.write(" /ticks-labels="+x_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in x_ticks_minor])) + if x_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(x_tick_label_scale)) + outfile.write('\\\n') + # Set axis colours and ticks for y axes + for y in ['left', 'right']: + outfile.write(' --axis-style '+y+' /stroke_color \''+colours.value.axisColour2D+'\'') + if yCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in y_ticks_major])) + outfile.write(" /ticks-labels="+y_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in 
y_ticks_minor])) + if y_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(y_tick_label_scale)) + outfile.write('\\\n') if doColourbar.value is not None and plot in doColourbar.value: # Do colourbar outfile.write(' --xyz-map\\\n') @@ -1234,6 +1389,10 @@ def script(filename): outfile.write(' --draw-contour '+contour+' /color '+colours.value.comparisonPostContourColour2D+ ' /style '+colours.value.comparisonContourStyle+' /width '+colours.value.lineWidth2D+'\\\n') outfile.write(' --plot '+currentParse+'_'+main+'2D.ct2@1:2:3 /fill-transparency 1\\\n') + if xlog: + outfile.write(' --xlog\\\n') + if ylog: + outfile.write(' --ylog\\\n') if contours2D.value is not None: # Plot contours for contour in mainContourLevels: @@ -1264,6 +1423,8 @@ def script(filename): '\' /scale '+str(bestFitData[3])+' \\\n') if postMean: outfile.write(' --draw-marker '+str(postMean[0])+','+str(postMean[1])+' '+postMeanData[0]+' /fill-color \''+str(postMeanData[1])+'\' /stroke-color \''+str(postMeanData[2])+ '\' /scale '+str(postMeanData[3])+' \\\n') + # Fill the background colour + outfile.write(' --background \'' + colours.value.backgroundColour + '\'\\\n') # Plot reference point if plotRef: outfile.write(refString) # Draw key @@ -1271,13 +1432,33 @@ def script(filename): # Write credits if blame.value is not None: blameYCoordinate = str(blameFractionalVerticalOffset * yRange + ytrema[1]) - outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\' /scale 0.5 /justification right\\\n') + outfile.write(' --draw-text '+str(xtrema[1])+','+blameYCoordinate+' \''+blame.value+'\'') + if blameScale.value is not None: + outfile.write(' /scale '+str(blameScale.value)) + outfile.write(' /justification right\\\n') # Add logo if logoFile.value is not None: outfile.write(' --draw-text '+str(logoCoords[0])+','+str(logoCoords[1])+' '+logoString+'\\\n') - # Set axis colours - for x in ['top', 'bottom', 'left', 'right']: - outfile.write(' --axis-style '+x+' 
/stroke_color \''+colours.value.axisColour2D+'\'\\\n') + # Set axis colours and ticks for x axes + for x in ['top', 'bottom']: + outfile.write(' --axis-style '+x+' /stroke_color \''+colours.value.axisColour2D+'\'') + if xCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in x_ticks_major])) + outfile.write(" /ticks-labels="+x_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in x_ticks_minor])) + if x_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(x_tick_label_scale)) + outfile.write('\\\n') + # Set axis colours and ticks for y axes + for y in ['left', 'right']: + outfile.write(' --axis-style '+y+' /stroke_color \''+colours.value.axisColour2D+'\'') + if yCustomTicks: + outfile.write(" /ticks-major="+','.join([str(tick) for tick in y_ticks_major])) + outfile.write(" /ticks-labels="+y_ticks_labels) + outfile.write(" /ticks-minor="+','.join([str(tick) for tick in y_ticks_minor])) + if y_tick_label_scale < 1: + outfile.write(" /tick-label-scale="+str(y_tick_label_scale)) + outfile.write('\\\n') if doColourbar.value is not None and plot in doColourbar.value: # Do labelling for colourbar outfile.write(' --y2 --plot '+currentParse+'_'+main+'2D.ct2@1:2:3 /fill-transparency 1\\\n') @@ -1345,6 +1526,75 @@ def getCentralVal(parseFilename,plot,statistic,lk): coordinates = point[lk.value[plot]] return coordinates +def getOptimalTicks(xtrema, log=False): + # Find the optimal ticks for the axes, in either linear or log scale + # Stick with between 5 and 10 major ticks, and up to 50 total ticks + + minnticks = 5 + maxnticks = 8 + maxminorticks = 30 + + xRange = xtrema[1] - xtrema[0] + + tick_label_scale = 1 + + if not log: + # Nearest order with enough ticks + order = int(np.log10(xRange)) + # Optimal number of ticks for that order + nticks = int(xRange/10**order) + if nticks < minnticks: + order -= 1 + nticks = int(xRange/10**order) + tick_step = 10**order * (int(nticks/maxnticks)+1) + nticks = nticks if nticks < 
maxnticks else maxnticks + # First tick + firsttick = (int(xtrema[0]/tick_step)+1)*tick_step + # Build major ticks + ticks_major = [firsttick + i*tick_step for i in range(nticks) if firsttick + i*tick_step < xtrema[1]] + # Optimal number of minor ticks is a power of 2 + 1, so that there are no more than 50 ticks in total + nminorticks = int(maxminorticks / nticks) + nminorticks = 2**int(np.log2(nminorticks-1)) + minor_tick_step = float(tick_step) / nminorticks + # First minor tick + firstminortick = (int(xtrema[0]/minor_tick_step) + 1)*minor_tick_step + # Build minor ticks + ticks_minor = [firstminortick + i*minor_tick_step for i in range(nminorticks*(nticks+1)) if firstminortick + i*minor_tick_step < xtrema[1]] + # Labels + ticks_labels = ",".join([str(tick) for tick in ticks_major]) + + else: + nticks = int(xRange)+1 + + if nticks < 2: + # If the range doesn't span more than one order of magnitude, you shouldn't be using log scale + sys.exit('Error: Cannot use log scale for variable, change to linear scale.\nQuitting...') + + tick_step = int( (xRange + 1) / nticks) + # Take full powers of 10 as major ticks + ticks_major = [int(xtrema[0])+i*tick_step for i in range(0,nticks+1)] + + # Redo minor ticks on log scale, 10 minor ticks for 1-5 major ticks and 5 minor ticks for 5-10 major ticks + if nticks <= 10: + nminorticks = 10 if nticks <= 5 else 5 + ticks_minor = sorted(list({tick + np.log10(1+float(i)/(nminorticks-1)*(10**tick_step-1)) for tick in ticks_major for i in range(nminorticks)})) + else: + # For more than 10 ticks just show a selection of 10 major ticks and the rest as minor ticks + ticks_minor = ticks_major + nticks = int(nticks / (int(nticks/10) + 1)) + tick_step = int( (xRange + 1) / nticks) + ticks_major = [int(xtrema[0])+i*tick_step for i in range(0,nticks+1)] + + # Trim to within range + ticks_major = [tick for tick in ticks_major if tick > xtrema[0] and tick < xtrema[1]] + ticks_minor = [tick for tick in ticks_minor if tick > xtrema[0] and tick < 
xtrema[1]] + # Labels + ticks_labels = ",".join(['\'$10^{'+str(int(i))+'}$\'' for i in ticks_major]) + # Tick label scale, reduce size if there's 10 or more ticks + if nticks >= 10: tick_label_scale = 0.7 + + return ticks_major, ticks_minor, ticks_labels, tick_label_scale + def dictFallback(risky,safe,key): # Try to extract entry corresponding to key from risky dataObject, otherwise use safe dataObject try: diff --git a/pippi_utils.py b/pippi_utils.py index 8ce78dc..cd6763f 100644 --- a/pippi_utils.py +++ b/pippi_utils.py @@ -7,6 +7,7 @@ # Originally developed: March 2012 ############################################################# +from __future__ import print_function import sys import os.path import re @@ -37,30 +38,30 @@ def castable_to_int(x): def usage(): #Print pippi usage information - print - print 'You must be new here (or have fat fingers). You can use pippi to' - print - print ' merge two or more chains:' - print ' pippi merge ... ' - print - print ' post-process a chain:' - print ' pippi pare ' - print - print ' parse a chain using options in iniFile.pip:' - print ' pippi parse iniFile.pip' - print - print ' write plotting scipts for a chain using options in iniFile.pip:' - print ' pippi script iniFile.pip' - print - print ' run plotting scipts for a chain using options in iniFile.pip:' - print ' pippi plot iniFile.pip' - print - print ' print an hdf5 file\'s computed column indices, using options in iniFile.pip:' - print ' pippi probe iniFile.pip' - print - print ' parse, script and plot in one go:' - print ' pippi iniFile.pip' - print + print() + print('You must be new here (or have fat fingers). You can use pippi to') + print() + print(' merge two or more chains:') + print(' pippi merge ... 
') + print() + print(' post-process a chain:') + print(' pippi pare ') + print() + print(' parse a chain using options in iniFile.pip:') + print(' pippi parse iniFile.pip') + print() + print(' write plotting scripts for a chain using options in iniFile.pip:') + print(' pippi script iniFile.pip') + print() + print(' run plotting scripts for a chain using options in iniFile.pip:') + print(' pippi plot iniFile.pip') + print() + print(' print an hdf5 file\'s computed column indices, using options in iniFile.pip:') + print(' pippi probe iniFile.pip') + print() + print(' parse, script and plot in one go:') + print(' pippi iniFile.pip') + print() def safe_open(filename): #Try to open input file @@ -111,8 +112,8 @@ def convert(self,string): else: self.value = self.conversion(string) except: - print "Failed to convert string:" - print string + print("Failed to convert string:") + print(string) sys.exit('Error: invalid data format in field '+self.pipFileKey+'. Quitting...\n') #Conversion functions for parsing pip file entries