@@ -10,7 +10,7 @@ class DBS(Player):
10
10
Desired Belief Strategy as described in [Au2006]_
11
11
http://www.cs.utexas.edu/%7Echiu/papers/Au06NoisyIPD.pdf
12
12
13
- A strategy that learns the opponent's strategy, and use symbolic
13
+ A strategy that learns the opponent's strategy, and uses symbolic
14
14
noise detection for detecting whether anomalies in player’s behavior
15
15
are deliberate or accidental, hence increasing performance in noisy
16
16
tournaments.
@@ -111,8 +111,8 @@ def should_promote(self, r_plus, promotion_threshold=3):
111
111
opposite_action = 1
112
112
k = 1
113
113
count = 0
114
- # We iterates on the history, while we do not encounter
115
- # counter-exemples of r_plus, i.e. while we do not encounter
114
+ # We iterate on the history, while we do not encounter
115
+ # counter-examples of r_plus, i.e. while we do not encounter
116
116
# r_minus
117
117
while (
118
118
k < len (self .history_by_cond [r_plus [0 ]][0 ])
@@ -129,9 +129,7 @@ def should_promote(self, r_plus, promotion_threshold=3):
129
129
return False
130
130
131
131
def should_demote(self, r_minus, violation_threshold=4):
    """Decide whether the rule r_minus should be demoted.

    A rule's condition (``r_minus[0]``) is demoted once its recorded
    violation count reaches ``violation_threshold``.

    Parameters
    ----------
    r_minus : tuple
        Rule whose first element is the two-move condition key used to
        index ``self.violation_counts``.
    violation_threshold : int, optional
        Number of violations at which demotion is triggered (default 4).

    Returns
    -------
    bool
        True when the condition's violations reach the threshold.
    """
    violations = self.violation_counts[r_minus[0]]
    return violations >= violation_threshold
135
133
136
134
def update_history_by_cond (self , opponent_history ):
137
135
two_moves_ago = (self .history [- 2 ], opponent_history [- 2 ])
@@ -306,14 +304,13 @@ def is_stochastic(self):
306
304
return False
307
305
308
306
def get_value(self):
    """Return the payoff for this node's joint action pair.

    Looks up the standard Prisoner's Dilemma payoffs for the pair
    ``(self.action1, self.action2)``: (C, C) -> 3, (C, D) -> 0,
    (D, C) -> 5, (D, D) -> 1.

    Returns
    -------
    int
        The payoff associated with the two stored actions.
    """
    payoffs = {
        (C, C): 3,
        (C, D): 0,
        (D, C): 5,
        (D, D): 1,
    }
    joint_action = (self.action1, self.action2)
    return payoffs[joint_action]
317
314
318
315
319
316
def create_policy (pCC , pCD , pDC , pDD ):
0 commit comments