Skip to content

Commit 97f78e3

Browse files
authored
Merge pull request #1032 from Axelrod-Python/884-testmemoryone
Refactor tests for memory one
2 parents 0fdac5f + b170ca1 commit 97f78e3

File tree

3 files changed

+147
-40
lines changed

3 files changed

+147
-40
lines changed

axelrod/strategies/memoryone.py

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -11,10 +11,16 @@
1111

1212

1313
class MemoryOnePlayer(Player):
14-
"""Uses a four-vector for strategies based on the last round of play,
14+
"""
15+
Uses a four-vector for strategies based on the last round of play,
1516
(P(C|CC), P(C|CD), P(C|DC), P(C|DD)), defaults to Win-Stay Lose-Shift.
1617
Intended to be used as an abstract base class or to at least be supplied
17-
with a initializing four_vector."""
18+
with an initializing four_vector.
19+
20+
Names
21+
22+
- Memory One: [Nowak1990]_
23+
"""
1824

1925
name = 'Generic Memory One Player'
2026
classifier = {
@@ -191,7 +197,12 @@ def __init__(self) -> None:
191197

192198

193199
class StochasticCooperator(MemoryOnePlayer):
194-
"""Stochastic Cooperator, http://www.nature.com/ncomms/2013/130801/ncomms3193/full/ncomms3193.html."""
200+
"""Stochastic Cooperator.
201+
202+
Names:
203+
204+
- Stochastic Cooperator: [Adami2013]_
205+
"""
195206

196207
name = 'Stochastic Cooperator'
197208

axelrod/tests/strategies/test_memoryone.py

Lines changed: 130 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -125,11 +125,18 @@ def test_four_vector(self):
125125

126126
def test_strategy(self):
127127
self.first_play_test(C)
128-
self.responses_test([C], [C], [C])
129-
self.responses_test([D], [C], [D])
130-
self.responses_test([C], [D], [C])
131-
self.responses_test([C], [D], [D], seed=1)
132-
self.responses_test([D], [D], [D], seed=2)
128+
129+
actions = [(C, C), (C, D), (D, C), (C, D), (D, C)]
130+
self.versus_test(opponent=axelrod.Alternator(),
131+
expected_actions=actions)
132+
133+
actions = [(C, D), (D, D), (D, D), (D, D), (C, D)]
134+
self.versus_test(opponent=axelrod.Defector(),
135+
expected_actions=actions, seed=0)
136+
137+
actions = [(C, D), (D, D), (C, D), (D, D), (D, D)]
138+
self.versus_test(opponent=axelrod.Defector(),
139+
expected_actions=actions, seed=1)
133140

134141

135142
class TestStochasticCooperator(TestPlayer):
@@ -153,14 +160,22 @@ def test_four_vector(self):
153160

154161
def test_strategy(self):
155162
self.first_play_test(C)
156-
# With probability 0.065 will defect
157-
self.responses_test([D, C, C, C], [C], [C], seed=15)
158-
# With probability 0.266 will cooperate
159-
self.responses_test([C], [C], [D], seed=1)
160-
# With probability 0.42 will cooperate
161-
self.responses_test([C], [D], [C], seed=3)
162-
# With probability 0.229 will cooperate
163-
self.responses_test([C], [D], [D], seed=13)
163+
164+
actions = [(C, C), (D, D), (C, C), (C, D), (C, C), (D, D)]
165+
self.versus_test(opponent=axelrod.Alternator(),
166+
expected_actions=actions, seed=15)
167+
168+
actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)]
169+
self.versus_test(opponent=axelrod.Alternator(),
170+
expected_actions=actions, seed=1)
171+
172+
actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (D, D)]
173+
self.versus_test(opponent=axelrod.Alternator(),
174+
expected_actions=actions, seed=3)
175+
176+
actions = [(C, C), (C, D), (D, C), (D, D), (D, C), (C, D)]
177+
self.versus_test(opponent=axelrod.Alternator(),
178+
expected_actions=actions, seed=13)
164179

165180

166181
class TestStochasticWSLS(TestPlayer):
@@ -179,14 +194,22 @@ class TestStochasticWSLS(TestPlayer):
179194

180195
def test_strategy(self):
181196
self.first_play_test(C)
182-
# With probability 0.05 will defect
183-
self.responses_test([D], [C], [C], seed=2)
184-
# With probability 0.05 will cooperate
185-
self.responses_test([C], [C], [D], seed=31)
186-
# With probability 0.05 will cooperate
187-
self.responses_test([C], [D], [C], seed=31)
188-
# With probability 0.05 will defect
189-
self.responses_test([D], [D], [D], seed=2)
197+
198+
actions = [(C, C), (D, D), (C, C), (C, D), (D, C), (D, D)]
199+
self.versus_test(opponent=axelrod.Alternator(),
200+
expected_actions=actions, seed=2)
201+
202+
actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)]
203+
self.versus_test(opponent=axelrod.Alternator(),
204+
expected_actions=actions, seed=31)
205+
206+
actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (D, C)]
207+
self.versus_test(opponent=axelrod.CyclerDC(),
208+
expected_actions=actions, seed=2)
209+
210+
actions = [(C, D), (C, C), (C, D), (D, C), (D, D), (C, C)]
211+
self.versus_test(opponent=axelrod.CyclerDC(),
212+
expected_actions=actions, seed=31)
190213

191214
def test_four_vector(self):
192215
player = self.player()
@@ -240,10 +263,22 @@ def test_four_vector(self):
240263

241264
def test_strategy(self):
242265
self.first_play_test(C)
243-
self.responses_test([D, D, C, C], [C], [C], seed=2)
244-
self.responses_test([D, D, C, C], [C], [D], seed=2)
245-
self.responses_test([D, D, C, C], [D], [C], seed=2)
246-
self.responses_test([D, D, C, C], [C], [D], seed=2)
266+
267+
actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)]
268+
self.versus_test(opponent=axelrod.Alternator(),
269+
expected_actions=actions, seed=2)
270+
271+
actions = [(C, C), (C, D), (C, C), (C, D), (D, C), (C, D)]
272+
self.versus_test(opponent=axelrod.Alternator(),
273+
expected_actions=actions, seed=31)
274+
275+
actions = [(C, D), (D, C), (D, D), (D, C), (C, D), (C, C)]
276+
self.versus_test(opponent=axelrod.CyclerDC(),
277+
expected_actions=actions, seed=2)
278+
279+
actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)]
280+
self.versus_test(opponent=axelrod.CyclerDC(),
281+
expected_actions=actions, seed=31)
247282

248283

249284
class TestZDExtort2v2(TestPlayer):
@@ -268,6 +303,14 @@ def test_four_vector(self):
268303
def test_strategy(self):
269304
self.first_play_test(C)
270305

306+
actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)]
307+
self.versus_test(opponent=axelrod.Alternator(),
308+
expected_actions=actions, seed=2)
309+
310+
actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)]
311+
self.versus_test(opponent=axelrod.CyclerDC(),
312+
expected_actions=actions, seed=5)
313+
271314

272315
class TestZDExtort4(TestPlayer):
273316

@@ -291,6 +334,14 @@ def test_four_vector(self):
291334
def test_strategy(self):
292335
self.first_play_test(C)
293336

337+
actions = [(C, C), (D, D), (D, C), (D, D), (D, C), (C, D)]
338+
self.versus_test(opponent=axelrod.Alternator(),
339+
expected_actions=actions, seed=2)
340+
341+
actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)]
342+
self.versus_test(opponent=axelrod.CyclerDC(),
343+
expected_actions=actions, seed=5)
344+
294345

295346
class TestZDGen2(TestPlayer):
296347

@@ -314,6 +365,22 @@ def test_four_vector(self):
314365
def test_strategy(self):
315366
self.first_play_test(C)
316367

368+
actions = [(C, C), (C, D), (D, C), (D, D), (C, C), (C, D)]
369+
self.versus_test(opponent=axelrod.Alternator(),
370+
expected_actions=actions, seed=2)
371+
372+
actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D)]
373+
self.versus_test(opponent=axelrod.Alternator(),
374+
expected_actions=actions, seed=31)
375+
376+
actions = [(C, D), (D, C), (D, D), (C, C), (C, D), (C, C)]
377+
self.versus_test(opponent=axelrod.CyclerDC(),
378+
expected_actions=actions, seed=2)
379+
380+
actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (C, C)]
381+
self.versus_test(opponent=axelrod.CyclerDC(),
382+
expected_actions=actions, seed=31)
383+
317384

318385
class TestZDGTFT2(TestPlayer):
319386

@@ -335,10 +402,22 @@ def test_four_vector(self):
335402

336403
def test_strategy(self):
337404
self.first_play_test(C)
338-
self.responses_test([C, C, C, C], [C], [C], seed=2)
339-
self.responses_test([D], [C], [D], seed=2)
340-
self.responses_test([C, C, C, C], [D], [C], seed=2)
341-
self.responses_test([D], [D], [D], seed=2)
405+
406+
actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)]
407+
self.versus_test(opponent=axelrod.Alternator(),
408+
expected_actions=actions, seed=2)
409+
410+
actions = [(C, C), (C, D), (C, C), (C, D), (C, C), (C, D)]
411+
self.versus_test(opponent=axelrod.Alternator(),
412+
expected_actions=actions, seed=31)
413+
414+
actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (C, C)]
415+
self.versus_test(opponent=axelrod.CyclerDC(),
416+
expected_actions=actions, seed=2)
417+
418+
actions = [(C, D), (C, C), (C, D), (C, C), (C, D), (D, C)]
419+
self.versus_test(opponent=axelrod.CyclerDC(),
420+
expected_actions=actions, seed=31)
342421

343422

344423
class TestZDSet2(TestPlayer):
@@ -363,6 +442,14 @@ def test_four_vector(self):
363442
def test_strategy(self):
364443
self.first_play_test(C)
365444

445+
actions = [(C, C), (D, D), (D, C), (C, D), (C, C), (D, D)]
446+
self.versus_test(opponent=axelrod.Alternator(),
447+
expected_actions=actions, seed=2)
448+
449+
actions = [(C, D), (D, C), (D, D), (D, C), (D, D), (D, C)]
450+
self.versus_test(opponent=axelrod.CyclerDC(),
451+
expected_actions=actions, seed=5)
452+
366453

367454
class TestSoftJoss(TestPlayer):
368455

@@ -383,8 +470,13 @@ def test_four_vector(self):
383470
test_four_vector(self, expected_dictionary)
384471

385472
def test_strategy(self):
386-
self.responses_test([C], [C], [C], seed=2)
387-
self.responses_test([D], [C], [D], seed=5)
473+
actions = [(C, C), (C, D), (D, C), (C, D), (D, C), (C, D)]
474+
self.versus_test(opponent=axelrod.Alternator(),
475+
expected_actions=actions, seed=2)
476+
477+
actions = [(C, D), (D, C), (C, D), (D, C), (C, D), (D, C)]
478+
self.versus_test(opponent=axelrod.CyclerDC(),
479+
expected_actions=actions, seed=5)
388480

389481

390482
class TestALLCorALLD(TestPlayer):
@@ -402,8 +494,10 @@ class TestALLCorALLD(TestPlayer):
402494
}
403495

404496
def test_strategy(self):
405-
self.responses_test([D] * 10, seed=2)
406-
self.responses_test([C] * 10, seed=3)
407-
self.responses_test([C] * 10, seed=4)
408-
self.responses_test([D] * 10, seed=5)
409-
self.responses_test([D] * 10, seed=6)
497+
498+
actions = [(D, C)] * 10
499+
self.versus_test(opponent=axelrod.Cooperator(),
500+
expected_actions=actions, seed=0)
501+
actions = [(C, C)] * 10
502+
self.versus_test(opponent=axelrod.Cooperator(),
503+
expected_actions=actions, seed=1)

docs/reference/bibliography.rst

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,7 @@ Bibliography
66
This is a collection of various bibliographic items referenced in the
77
documentation.
88

9+
.. [Adami2013] Adami C and Hintze A. (2013) Evolutionary instability of zero-determinant strategies demonstrates that winning is not everything. Nature Communications. https://www.nature.com/articles/ncomms3193
910
.. [Andre2013] Andre L. C., Honovan P., Felipe T. and Frederico G. (2013). Iterated Prisoner’s Dilemma - An extended analysis, http://abricom.org.br/wp-content/uploads/2016/03/bricsccicbic2013_submission_202.pdf
1011
.. [Ashlock2006] Ashlock, D., & Kim E. Y, & Leahy, N. (2006). Understanding Representational Sensitivity in the Iterated Prisoner’s Dilemma with Fingerprints. IEEE Transactions On Systems, Man, And Cybernetics, Part C: Applications And Reviews, 36 (4)
1112
.. [Ashlock2006b] Ashlock, W. & Ashlock, D. (2006). Changes in Prisoner's Dilemma Strategies Over Evolutionary Time With Different Population Sizes 2006 IEEE International Conference on Evolutionary Computation. http://DOI.org/10.1109/CEC.2006.1688322
@@ -33,7 +34,8 @@ documentation.
3334
for the Iterated Prisoner's Dilemma. Proceedings of the 2015
3435
International Conference on Autonomous Agents and Multiagent Systems.
3536
.. [Nachbar1992] Nachbar J., Evolution in the finitely repeated prisoner’s dilemma, Journal of Economic Behavior & Organization, 19(3): 307-326, 1992.
36-
.. [Nowak1992] Nowak, M. a., & May, R. M. (1992). Evolutionary games and spatial chaos. Nature. http://doi.org/10.1038/359826a0
37+
.. [Nowak1990] Nowak, M., & Sigmund, K. (1990). The evolution of stochastic strategies in the Prisoner's Dilemma. Acta Applicandae Mathematica. https://link.springer.com/article/10.1007/BF00049570
38+
.. [Nowak1992] Nowak, M., & May, R. M. (1992). Evolutionary games and spatial chaos. Nature. http://doi.org/10.1038/359826a0
3739
.. [Nowak1993] Nowak, M., & Sigmund, K. (1993). A strategy of win-stay, lose-shift that outperforms tit-for-tat in the Prisoner’s Dilemma game. Nature, 364(6432), 56–58. http://doi.org/10.1038/364056a0
3840
.. [Press2012] Press, W. H., & Dyson, F. J. (2012). Iterated Prisoner’s Dilemma contains strategies that dominate any evolutionary opponent. Proceedings of the National Academy of Sciences, 109(26), 10409–10413. http://doi.org/10.1073/pnas.1206569109
3941
.. [Prison1998] LIFL (1998) PRISON. Available at: http://www.lifl.fr/IPD/ipd.frame.html (Accessed: 19 September 2016).

0 commit comments

Comments
 (0)