@@ -11,8 +11,7 @@ class TestHardGoByMajority(TestPlayer):
 
     name = "Hard Go By Majority"
     player = axelrod.HardGoByMajority
-    eq_play = D
-    soft = False
+    default_soft = False
 
     expected_classifier = {
         'stochastic': False,
@@ -24,60 +23,77 @@ class TestHardGoByMajority(TestPlayer):
         'manipulates_state': False
     }
 
-    def test_strategy(self):
-        self.first_play_test(self.eq_play)
+    def test_first_play(self):
+        self.first_play_test(D)
 
-        expected, opponent_actions = self.get_infinite_memory_depth_actions()
-        self.versus_test(MockPlayer(actions=opponent_actions),
-                         expected_actions=expected)
+    def test_memory_depth_infinite_soft_is_false(self):
+        init_kwargs = {}
+        if self.default_soft:
+            init_kwargs['soft'] = False
 
-    def get_infinite_memory_depth_actions(self):
-        opponent_actions = [C, D, D]
-        first_three = [(self.eq_play, C), (C, D), (self.eq_play, D)]
-        second_three = [(D, C), (self.eq_play, D), (D, D)]
-        subsequent = [(D, C), (D, D), (D, D)]
-        expected = first_three + second_three + subsequent * 10
-        return expected, opponent_actions
+        opponent_actions = [C] * 50 + [D] * 100 + [C] * 52
+        actions = ([(D, C)] + [(C, C)] * 49 + [(C, D)] * 50 + [(D, D)] * 50 +
+                   [(D, C)] * 51 + [(C, C)])
+        opponent = MockPlayer(actions=opponent_actions)
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
 
-    def test_memory_depth(self):
+    def test_memory_depth_even_soft_is_false(self):
         memory_depth = 4
-        opponent_actions = [C, C, C, D, D, D]
-        first_six = [(self.eq_play, C), (C, C), (C, C),
-                     (C, D), (C, D), (self.eq_play, D)]
-        subsequent = [(D, C), (D, C), (self.eq_play, C),
-                      (C, D), (C, D), (self.eq_play, D)]
-
-        expected = first_six + subsequent * 10
-        self.versus_test(MockPlayer(actions=opponent_actions),
-                         expected_actions=expected,
-                         init_kwargs={'memory_depth': memory_depth})
-
-    def test_soft_value(self):
+        init_kwargs = {'memory_depth': memory_depth}
+        if self.default_soft:
+            init_kwargs['soft'] = False
+
+        opponent = MockPlayer(actions=[C] * memory_depth + [D] * memory_depth)
+        actions = ([(D, C)] + [(C, C)] * 3 + [(C, D)] * 2 + [(D, D)] * 2 +
+                   [(D, C)] * 3 + [(C, C)])
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
+
+    def test_memory_depth_odd(self):
+        memory_depth = 5
+        init_kwargs = {'memory_depth': memory_depth}
+        if self.default_soft:
+            first_action = [(C, C)]
+        else:
+            first_action = [(D, C)]
+        opponent = MockPlayer(actions=[C] * memory_depth + [D] * memory_depth)
+        actions = (first_action + [(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] * 2 +
+                   [(D, C)] * 3 + [(C, C)] * 2)
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
+
+    def test_default_values(self):
         player = self.player()
-        self.assertFalse(player.soft)
+        self.assertEqual(player.soft, self.default_soft)
+        self.assertEqual(player.memory, 0)
 
 
 class TestGoByMajority(TestHardGoByMajority):
 
     name = "Soft Go By Majority"
     player = axelrod.GoByMajority
-    eq_play = C
-    soft = True
-
-    def test_set_soft_to_false(self):
-        self.eq_play = D
-        expected, opponent_actions = self.get_infinite_memory_depth_actions()
-        self.versus_test(MockPlayer(actions=opponent_actions),
-                         expected_actions=expected, init_kwargs={'soft': False})
-        self.eq_play = C
-
-    def test_soft_value(self):
-        default = self.player()
-        self.assertTrue(default.soft)
-        player = self.player(soft=True)
-        self.assertTrue(player.soft)
-        player = self.player(soft=False)
-        self.assertFalse(player.soft)
+    default_soft = True
+
+    def test_first_play(self):
+        self.first_play_test(C)
+
+    def test_memory_depth_infinite_soft_is_true(self):
+        opponent_actions = [C] * 50 + [D] * 100 + [C] * 52
+        actions = ([(C, C)] * 50 + [(C, D)] * 51 + [(D, D)] * 49 +
+                   [(D, C)] * 50 + [(C, C)] * 2)
+        opponent = MockPlayer(actions=opponent_actions)
+        self.versus_test(opponent, expected_actions=actions)
+
+    def test_memory_depth_even_soft_is_true(self):
+        memory_depth = 4
+        init_kwargs = {'memory_depth': memory_depth}
+
+        opponent = MockPlayer([C] * memory_depth + [D] * memory_depth)
+        actions = ([(C, C)] * 4 + [(C, D)] * 3 + [(D, D)] +
+                   [(D, C)] * 2 + [(C, C)] * 2)
+        self.versus_test(opponent, expected_actions=actions,
+                         init_kwargs=init_kwargs)
 
     def test_name(self):
         player = self.player(soft=True)
@@ -124,38 +140,31 @@ class TestGoByRecentMajority(TestPlayer):
         }
 
         def test_strategy(self):
-            """
-            with memory_depth=3 always switches after
-            opponent_history=[C, C, C, D, D] (int(3*1.5) + 1 = 5)
-            with memory_depth=4 soft switches after
-            op_history=[C, C, C, C, D, D, D] (int(4*1.5) + 1 = 7)
-            and hard switches after
-            op_history=[C, C, C, C, D, D] (int(4 * 1.5) = 6)
-            """
 
             if soft:
                 self.first_play_test(C)
             else:
                 self.first_play_test(D)
 
+            # for example memory_depth=2 plays against [C, C, D, D]
+            # soft actions = [(C, C), (C, C), (C, D), (C, D)]
+            # hard actions = [(D, C), (C, C), (C, D), (D, D)]
             opponent_actions = [C] * memory_depth + [D] * memory_depth
-
-            if memory_depth % 2 == 1 or soft:
-                cooperation_len = int(memory_depth * 1.5) + 1
-            else:
-                cooperation_len = int(memory_depth * 1.5)
-            defect_len = 2 * memory_depth - cooperation_len
-
+            opponent = MockPlayer(actions=opponent_actions)
             if soft:
-                first_move = [C]
+                first_player_action = [C]
+            else:
+                first_player_action = [D]
+            if memory_depth % 2 == 1 or soft:
+                cooperations = int(memory_depth * 1.5)
             else:
-                first_move = [D]
+                cooperations = int(memory_depth * 1.5) - 1
+            defections = len(opponent_actions) - cooperations - 1
+            player_actions = (first_player_action + [C] * cooperations +
+                              [D] * defections)
 
-            player_actions = (first_move + [C] * (cooperation_len - 1) +
-                              [D] * defect_len)
-            expected = list(zip(player_actions, opponent_actions))
-            self.versus_test(MockPlayer(actions=opponent_actions),
-                             expected_actions=expected)
+            actions = list(zip(player_actions, opponent_actions))
+            self.versus_test(opponent, expected_actions=actions)
 
     return TestGoByRecentMajority