Commit 988b849

margaret committed
Incorporate comments
Set memory_depth to float('inf'). Remove redundant history length check. Remove redundant else statements. Remove Prober4.cooperation_pool and replace with history length check. Test Prober4.turned_defector value. Revert the update of strategies counter in doc tests.
1 parent 8e9126f commit 988b849
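The most visible change is the classifier: Prober4 conditions on the whole game history, so it now advertises unbounded memory. A minimal check of the new values, assuming the package is installed and importable as axelrod (the assertions only restate what the diff below sets, nothing extra):

import axelrod as axl

player = axl.Prober4()

# The classifier now reports unbounded memory, matching the change below.
assert player.classifier['memory_depth'] == float('inf')

# turned_defector is initialised to False and is only set once the
# 20-move probing sequence has been played out.
assert player.turned_defector is False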

3 files changed: 32 additions, 38 deletions

axelrod/strategies/prober.py

Lines changed: 18 additions & 24 deletions
@@ -118,7 +118,7 @@ class Prober4(Player):
     name = 'Prober 4'
     classifier = {
         'stochastic': False,
-        'memory_depth': 1,
+        'memory_depth': float('inf'),
         'makes_use_of': set(),
         'long_run_time': False,
         'inspects_source': False,
@@ -134,39 +134,33 @@ def __init__(self):
         ]
         self.just_Ds = 0
         self.unjust_Ds = 0
-        self.cooperation_pool = [C] * 5
         self.turned_defector = False
 
     def strategy(self, opponent):
-        if len(opponent.history) == 0:
-            return self.init_sequence[0]
         if len(self.history) == 0:
             return self.init_sequence[0]
-        else:
-            turn = len(self.history)
-            if turn < len(self.init_sequence):
-                if opponent.history[-1] == D:
-                    if self.history[-1] == D:
-                        self.just_Ds += 1
-                    if self.history[-1] == C:
-                        self.unjust_Ds += 1
-                return self.init_sequence[turn]
-            if turn == len(self.init_sequence):
-                diff_in_Ds = abs(self.just_Ds - self.unjust_Ds)
-                self.turned_defector = (diff_in_Ds <= 2)
-            if self.turned_defector:
-                return D
-            if not self.turned_defector:
-                if self.cooperation_pool:
-                    return self.cooperation_pool.pop()
-                else:
-                    return D if opponent.history[-1] == D else C
+        turn = len(self.history)
+        if turn < len(self.init_sequence):
+            if opponent.history[-1] == D:
+                if self.history[-1] == D:
+                    self.just_Ds += 1
+                if self.history[-1] == C:
+                    self.unjust_Ds += 1
+            return self.init_sequence[turn]
+        if turn == len(self.init_sequence):
+            diff_in_Ds = abs(self.just_Ds - self.unjust_Ds)
+            self.turned_defector = (diff_in_Ds <= 2)
+        if self.turned_defector:
+            return D
+        if not self.turned_defector:
+            if turn < len(self.init_sequence) + 5:
+                return C
+            return D if opponent.history[-1] == D else C
 
     def reset(self):
         Player.reset(self)
         self.just_Ds = 0
         self.unjust_Ds = 0
-        self.cooperation_pool = [C] * 5
         self.turned_defector = False
 
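The five queued cooperations that used to live in cooperation_pool are now expressed as a history-length check, which is what the simplified reset() relies on. A rough way to exercise that branch, assuming the axelrod Match API and a release containing this change (the printed Action representation varies by version):

import axelrod as axl

# Tit For Tat retaliates consistently, so |just_Ds - unjust_Ds| ends up
# greater than 2 and turned_defector stays False: after the 20-move probing
# sequence Prober4 should cooperate for 5 rounds, then mirror the opponent.
match = axl.Match((axl.Prober4(), axl.TitForTat()), turns=30)
moves = match.play()

prober_moves = [pair[0] for pair in moves]
print(prober_moves[20:25])               # expected: five cooperations
print(match.players[0].turned_defector)  # expected: False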

axelrod/tests/unit/test_prober.py

Lines changed: 12 additions & 12 deletions
@@ -105,7 +105,7 @@ class TestProber4(TestPlayer):
     player = axelrod.Prober4
     expected_classifier = {
         'stochastic': False,
-        'memory_depth': 1,
+        'memory_depth': float('inf'),
         'makes_use_of': set(),
         'long_run_time': False,
         'inspects_source': False,
@@ -115,7 +115,6 @@ class TestProber4(TestPlayer):
     initial_sequence = [
         C, C, D, C, D, D, D, C, C, D, C, D, C, C, D, C, D, D, C, D
     ]
-    cooperation_pool = [C] * 5
 
     def test_initial_strategy(self):
         """Starts by playing CCDCDDDCCDCDCCDCDDCD."""
@@ -137,8 +136,9 @@ def test_strategy(self):
 
         history1 = self.initial_sequence
         responses = [D] * 10
+        attrs = {'turned_defector': True}
         for history2 in provocative_histories:
-            self.responses_test(history1, history2, responses)
+            self.responses_test(history1, history2, responses, attrs=attrs)
 
         # Otherwise cooperates for 5 rounds
         unprovocative_histories = [
@@ -149,27 +149,27 @@ def test_strategy(self):
             [C, C, C, C, D, D, C, C, D, C, C, D, D, C, D, C, D, C, C, C],
         ]
 
-        history1 = self.initial_sequence
-        responses = self.cooperation_pool
+        responses = [C] * 5
+        attrs = {'turned_defector': False}
         for history2 in unprovocative_histories:
-            self.responses_test(history1, history2, responses)
+            self.responses_test(history1, history2, responses, attrs=attrs)
 
         # and plays like TFT afterwards
-        history1 += self.cooperation_pool
-        history2 += self.cooperation_pool
-        self.responses_test(history1, history2, [C])
+        history1 += responses
+        history2 += responses
+        self.responses_test(history1, history2, [C], attrs=attrs)
 
         history1 += [C]
         history2 += [D]
-        self.responses_test(history1, history2, [D])
+        self.responses_test(history1, history2, [D], attrs=attrs)
 
         history1 += [D]
         history2 += [C]
-        self.responses_test(history1, history2, [C])
+        self.responses_test(history1, history2, [C], attrs=attrs)
 
         history1 += [C]
         history2 += [D]
-        self.responses_test(history1, history2, [D])
+        self.responses_test(history1, history2, [D], attrs=attrs)
 
 
 class TestHardProber(TestPlayer):
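
The attrs keyword passed to responses_test lets the test assert player attributes after the simulated plays, so the suite now checks turned_defector directly instead of relying on the removed cooperation_pool. Roughly, the extra check amounts to the following sketch of the idea, not the harness's actual code:

# Hypothetical standalone version of the attribute check; the real assertion
# lives inside TestPlayer.responses_test in axelrod's test suite.
def check_attrs(player, attrs):
    for name, expected in attrs.items():
        assert getattr(player, name) == expected, name

# e.g. after replaying one of the provocative histories:
# check_attrs(player, {'turned_defector': True})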

docs/tutorials/advanced/classification_of_strategies.rst

Lines changed: 2 additions & 2 deletions
@@ -58,7 +58,7 @@ make a decision::
     ... }
     >>> strategies = axl.filtered_strategies(filterset)
     >>> len(strategies)
-    25
+    24
 
 Multiple filters can be specified within the filterset dictionary. To specify a
 range of memory_depth values, we can use the 'min_memory_depth' and
@@ -70,7 +70,7 @@ range of memory_depth values, we can use the 'min_memory_depth' and
     ... }
     >>> strategies = axl.filtered_strategies(filterset)
     >>> len(strategies)
-    42
+    41
 
 We can also identify strategies that make use of particular properties of the
 tournament. For example, here is the number of strategies that make use of the
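
The doctest counts drop by one because Prober4 no longer satisfies finite-memory filters. A small illustration of the same effect, assuming axelrod is installed; the absolute count depends on the release, which is why only the membership check carries an expectation here:

import axelrod as axl

# Filter for strategies whose memory_depth is exactly 1; with Prober4 now
# reporting float('inf'), it no longer appears in the result.
filterset = {'min_memory_depth': 1, 'max_memory_depth': 1}
strategies = axl.filtered_strategies(filterset)

print(len(strategies))             # release-dependent count
print(axl.Prober4 in strategies)   # expected: False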
