
Commit 3806862

Merge pull request #990 from eric-s-s/884-gbm
#884 gobymajority
2 parents f683a59 + 86d856c commit 3806862

File tree: 2 files changed (+97, -54 lines)

axelrod/strategies/gobymajority.py

Lines changed: 8 additions & 7 deletions
@@ -1,7 +1,7 @@
 from axelrod.actions import Actions, Action
 from axelrod.player import Player
 
-from typing import Dict, Any
+from typing import Dict, Any, Union
 
 import copy
 
@@ -30,23 +30,24 @@ class GoByMajority(Player):
         'manipulates_source': False,
         'manipulates_state': False,
         'memory_depth': float('inf')
-    } # type: Dict[str, Any]
+    }  # type: Dict[str, Any]
 
-    def __init__(self, memory_depth: int = float('inf'), soft: bool = True) -> None:
+    def __init__(self, memory_depth: Union[int, float] = float('inf'),
+                 soft: bool = True) -> None:
         """
         Parameters
         ----------
-        memory_depth, int >= 0
+        memory_depth: int >= 0
            The number of rounds to use for the calculation of the cooperation
            and defection probabilities of the opponent.
-        soft, bool
+        soft: bool
            Indicates whether to cooperate or not in the case that the
            cooperation and defection probabilities are equal.
         """
 
         super().__init__()
         self.soft = soft
-        self.classifier['memory_depth'] = memory_depth
+        self.classifier['memory_depth'] = memory_depth
         if self.classifier['memory_depth'] < float('inf'):
             self.memory = self.classifier['memory_depth']
         else:
@@ -140,7 +141,7 @@ class HardGoByMajority(GoByMajority):
     """
     name = 'Hard Go By Majority'
 
-    def __init__(self, memory_depth: int = float('inf')) -> None:
+    def __init__(self, memory_depth: Union[int, float] = float('inf')) -> None:
         super().__init__(memory_depth=memory_depth, soft=False)
 
 
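The strategy change is confined to the constructor annotations: memory_depth keeps its float('inf') default, so the hint is widened from int to Union[int, float]. A minimal usage sketch of the two call patterns the new hint covers (assuming the axelrod API at this revision; all names are taken from the diff above):

import axelrod

# Default: unbounded window. The classifier keeps float('inf') and,
# per test_default_values in the tests below, player.memory is 0.
player = axelrod.GoByMajority()

# Finite window: an int argument is equally valid under Union[int, float]
# and shows up in the name, e.g. "Soft Go By Majority: 5".
bounded = axelrod.GoByMajority(memory_depth=5)

# HardGoByMajority forwards memory_depth and fixes soft=False.
hard = axelrod.HardGoByMajority(memory_depth=5)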
axelrod/tests/strategies/test_gobymajority.py

Lines changed: 89 additions & 47 deletions
@@ -2,6 +2,7 @@
 
 import axelrod
 from .test_player import TestPlayer
+from axelrod import MockPlayer
 
 C, D = axelrod.Actions.C, axelrod.Actions.D
 
@@ -11,7 +12,6 @@ class TestHardGoByMajority(TestPlayer):
     name = "Hard Go By Majority"
     player = axelrod.HardGoByMajority
     default_soft = False
-    eq_play = D
 
     expected_classifier = {
         'stochastic': False,
@@ -23,62 +23,99 @@ class TestHardGoByMajority(TestPlayer):
         'manipulates_state': False
     }
 
-    def test_strategy(self):
-        # Starts by defecting.
-        self.first_play_test(self.eq_play)
-        # If opponent cooperates at least as often as they defect then the
-        # player defects.
-        self.responses_test([self.eq_play], [C, D, D, D], [D, D, C, C])
-        # If opponent defects strictly more often than they defect then the
-        # player defects.
-        self.responses_test([D], [C, C, D, D, C], [D, D, C, C, D])
-        # If opponent cooperates strictly more often than they defect then the
-        # player cooperates.
-        self.responses_test([C], [C, C, D, D, C], [D, C, C, C, D])
-
-    def test_default_soft(self):
+    def test_first_play(self):
+        self.first_play_test(D)
+
+    def test_memory_depth_infinite_soft_is_false(self):
+        init_kwargs = {}
+        if self.default_soft:
+            init_kwargs['soft'] = False
+
+        opponent_actions = [C] * 50 + [D] * 100 + [C] * 52
+        actions = ([(D, C)] + [(C, C)] * 49 + [(C, D)] * 50 + [(D, D)] * 50 +
+                   [(D, C)] * 51 + [(C, C)])
+        opponent = MockPlayer(actions=opponent_actions)
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
+
+    def test_memory_depth_even_soft_is_false(self):
+        memory_depth = 4
+        init_kwargs = {'memory_depth': memory_depth}
+        if self.default_soft:
+            init_kwargs['soft'] = False
+
+        opponent = MockPlayer(actions=[C] * memory_depth + [D] * memory_depth)
+        actions = ([(D, C)] + [(C, C)] * 3 + [(C, D)] * 2 + [(D, D)] * 2 +
+                   [(D, C)] * 3 + [(C, C)])
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
+
+    def test_memory_depth_odd(self):
+        memory_depth = 5
+        init_kwargs = {'memory_depth': memory_depth}
+        if self.default_soft:
+            first_action = [(C, C)]
+        else:
+            first_action = [(D, C)]
+        opponent = MockPlayer(actions=[C] * memory_depth + [D] * memory_depth)
+        actions = (first_action + [(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] * 2 +
+                   [(D, C)] * 3 + [(C, C)] * 2)
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
+
+    def test_default_values(self):
         player = self.player()
         self.assertEqual(player.soft, self.default_soft)
+        self.assertEqual(player.memory, 0)
 
 
 class TestGoByMajority(TestHardGoByMajority):
 
     name = "Soft Go By Majority"
     player = axelrod.GoByMajority
     default_soft = True
-    eq_play = C
 
-    def test_strategy(self):
-        # In case of equality (including first play), cooperates.
-        super().test_strategy()
+    def test_first_play(self):
+        self.first_play_test(C)
 
-        # Test tie break rule for soft=False
-        player = self.player(soft=False)
-        opponent = axelrod.Cooperator()
-        self.assertEqual('D', player.strategy(opponent))
+    def test_memory_depth_infinite_soft_is_true(self):
+        opponent_actions = [C] * 50 + [D] * 100 + [C] * 52
+        actions = ([(C, C)] * 50 + [(C, D)] * 51 + [(D, D)] * 49 +
+                   [(D, C)] * 50 + [(C, C)] * 2)
+        opponent = MockPlayer(actions=opponent_actions)
+        self.versus_test(opponent, expected_actions=actions)
 
-    def test_soft(self):
-        player = self.player(soft=True)
-        self.assertTrue(player.soft)
-        player = self.player(soft=False)
-        self.assertFalse(player.soft)
+    def test_memory_depth_even_soft_is_true(self):
+        memory_depth = 4
+        init_kwargs = {'memory_depth': memory_depth}
+
+        opponent = MockPlayer([C] * memory_depth + [D] * memory_depth)
+        actions = ([(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] +
+                   [(D, C)] * 2 + [(C, C)] * 2)
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
 
     def test_name(self):
         player = self.player(soft=True)
         self.assertEqual(player.name, "Soft Go By Majority")
         player = self.player(soft=False)
         self.assertEqual(player.name, "Hard Go By Majority")
+        player = self.player(memory_depth=5)
+        self.assertEqual(player.name, "Soft Go By Majority: 5")
 
-    def test_repr(self):
+    def test_str(self):
         player = self.player(soft=True)
         name = str(player)
         self.assertEqual(name, "Soft Go By Majority")
         player = self.player(soft=False)
         name = str(player)
         self.assertEqual(name, "Hard Go By Majority")
+        player = self.player(memory_depth=5)
+        name = str(player)
+        self.assertEqual(name, "Soft Go By Majority: 5")
 
 
-def factory_TestGoByRecentMajority(L, soft=True):
+def factory_TestGoByRecentMajority(memory_depth, soft=True):
 
     prefix = "Hard"
     prefix2 = "Hard"
@@ -88,12 +125,13 @@ def factory_TestGoByRecentMajority(L, soft=True):
 
     class TestGoByRecentMajority(TestPlayer):
 
-        name = "{} Go By Majority: {}".format(prefix, L)
-        player = getattr(axelrod, "{}GoByMajority{}".format(prefix2, L))
+        name = "{} Go By Majority: {}".format(prefix, memory_depth)
+        player = getattr(axelrod, "{}GoByMajority{}".format(prefix2,
+                                                            memory_depth))
 
         expected_classifier = {
             'stochastic': False,
-            'memory_depth': L,
+            'memory_depth': memory_depth,
             'makes_use_of': set(),
             'long_run_time': False,
             'inspects_source': False,
@@ -102,27 +140,31 @@ class TestGoByRecentMajority(TestPlayer):
         }
 
         def test_strategy(self):
-            # Test initial play.
+
            if soft:
                 self.first_play_test(C)
             else:
                 self.first_play_test(D)
 
-            self.responses_test([C], [C] * L,
-                                [C] * (L // 2 + 1) + [D] * (L // 2 - 1))
-            self.responses_test([D], [C] * L,
-                                [D] * (L // 2 + 1) + [C] * (L // 2 - 1))
-
-            # Test 50:50 play difference with soft
-            k = L
-            if L % 2 == 1:
-                k -= 1
+            # for example memory_depth=2 plays against [C, C, D, D]
+            # soft actions = [(C, C), (C, C), (C, D), (C, D)]
+            # hard actions = [(D, C), (C, C), (C, D), (D, D)]
+            opponent_actions = [C] * memory_depth + [D] * memory_depth
+            opponent = MockPlayer(actions=opponent_actions)
             if soft:
-                self.responses_test([C], [C] * k,
-                                    [C] * (k // 2) + [D] * (k // 2))
+                first_player_action = [C]
             else:
-                self.responses_test([D], [C] * k,
-                                    [C] * (k // 2) + [D] * (k // 2))
+                first_player_action = [D]
+            if memory_depth % 2 == 1 or soft:
+                cooperations = int(memory_depth * 1.5)
+            else:
+                cooperations = int(memory_depth * 1.5) - 1
+            defections = len(opponent_actions) - cooperations - 1
+            player_actions = (first_player_action + [C] * cooperations +
+                              [D] * defections)
+
+            actions = list(zip(player_actions, opponent_actions))
+            self.versus_test(opponent, expected_actions=actions)
 
     return TestGoByRecentMajority
 
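The rewritten factory test no longer hard-codes response sequences; it derives the expected plays from memory_depth and soft and feeds them to versus_test against a MockPlayer. A standalone sketch of that derivation (plain Python, with C and D standing in for axelrod.Actions.C and D; it mirrors the arithmetic added in test_strategy and reproduces the memory_depth=2 example from the in-test comment):

C, D = 'C', 'D'

def expected_actions(memory_depth, soft):
    # Opponent cooperates for memory_depth rounds, then defects for memory_depth rounds.
    opponent = [C] * memory_depth + [D] * memory_depth
    first = [C] if soft else [D]
    # An odd window or a soft tie-break gives one extra cooperation before the switch.
    if memory_depth % 2 == 1 or soft:
        cooperations = int(memory_depth * 1.5)
    else:
        cooperations = int(memory_depth * 1.5) - 1
    defections = len(opponent) - cooperations - 1
    player = first + [C] * cooperations + [D] * defections
    return list(zip(player, opponent))

# Matches the worked example in the diff comment for memory_depth=2.
assert expected_actions(2, soft=True) == [(C, C), (C, C), (C, D), (C, D)]
assert expected_actions(2, soft=False) == [(D, C), (C, C), (C, D), (D, D)]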
