 // Copyright(c) 2020 Intel Corporation.

 #include <linux/bits.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/errno.h>
+#include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/regmap.h>
 #include <linux/soundwire/sdw.h>
 #include <linux/soundwire/sdw_registers.h>
+#include <sound/sdca_function.h>
 #include "internal.h"

 struct regmap_mbq_context {
@@ -16,6 +19,7 @@ struct regmap_mbq_context {
 	struct regmap_sdw_mbq_cfg cfg;

 	int val_size;
+	bool (*readable_reg)(struct device *dev, unsigned int reg);
 };

 static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
@@ -31,18 +35,48 @@ static int regmap_sdw_mbq_size(struct regmap_mbq_context *ctx, unsigned int reg)
 	return size;
 }

-static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
+static bool regmap_sdw_mbq_deferrable(struct regmap_mbq_context *ctx, unsigned int reg)
+{
+	if (ctx->cfg.deferrable)
+		return ctx->cfg.deferrable(ctx->dev, reg);
+
+	return false;
+}
+
+static int regmap_sdw_mbq_poll_busy(struct sdw_slave *slave, unsigned int reg,
+				    struct regmap_mbq_context *ctx)
+{
+	struct device *dev = &slave->dev;
+	int val, ret = 0;
+
+	dev_dbg(dev, "Deferring transaction for 0x%x\n", reg);
+
+	reg = SDW_SDCA_CTL(SDW_SDCA_CTL_FUNC(reg), 0,
+			   SDCA_CTL_ENTITY_0_FUNCTION_STATUS, 0);
+
+	if (ctx->readable_reg(dev, reg)) {
+		ret = read_poll_timeout(sdw_read_no_pm, val,
+					val < 0 || !(val & SDCA_CTL_ENTITY_0_FUNCTION_BUSY),
+					ctx->cfg.retry_us, ctx->cfg.timeout_us,
+					false, slave, reg);
+		if (val < 0)
+			return val;
+		if (ret)
+			dev_err(dev, "Function busy timed out 0x%x: %d\n", reg, val);
+	} else {
+		fsleep(ctx->cfg.timeout_us);
+	}
+
+	return ret;
+}
+
+static int regmap_sdw_mbq_write_impl(struct sdw_slave *slave,
+				     unsigned int reg, unsigned int val,
+				     int mbq_size, bool deferrable)
 {
-	struct regmap_mbq_context *ctx = context;
-	struct device *dev = ctx->dev;
-	struct sdw_slave *slave = dev_to_sdw_dev(dev);
-	int mbq_size = regmap_sdw_mbq_size(ctx, reg);
 	int shift = mbq_size * BITS_PER_BYTE;
 	int ret;

-	if (mbq_size < 0)
-		return mbq_size;
-
 	while (--mbq_size > 0) {
 		shift -= BITS_PER_BYTE;

@@ -52,24 +86,58 @@ static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
 			return ret;
 	}

-	return sdw_write_no_pm(slave, reg, val & 0xff);
+	ret = sdw_write_no_pm(slave, reg, val & 0xff);
+	if (deferrable && ret == -ENODATA)
+		return -EAGAIN;
+
+	return ret;
 }

-static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
+static int regmap_sdw_mbq_write(void *context, unsigned int reg, unsigned int val)
 {
 	struct regmap_mbq_context *ctx = context;
 	struct device *dev = ctx->dev;
 	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
 	int mbq_size = regmap_sdw_mbq_size(ctx, reg);
-	int shift = BITS_PER_BYTE;
-	int read;
+	int ret;

 	if (mbq_size < 0)
 		return mbq_size;

+	/*
+	 * Technically the spec does allow a device to set itself to busy for
+	 * internal reasons, but since it doesn't provide any information on
+	 * how to handle timeouts in that case, for now the code will only
+	 * process a single wait/timeout on function busy and a single retry
+	 * of the transaction.
+	 */
+	ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, deferrable);
+	if (ret == -EAGAIN) {
+		ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+		if (ret)
+			return ret;
+
+		ret = regmap_sdw_mbq_write_impl(slave, reg, val, mbq_size, false);
+	}
+
+	return ret;
+}
+
+static int regmap_sdw_mbq_read_impl(struct sdw_slave *slave,
+				    unsigned int reg, unsigned int *val,
+				    int mbq_size, bool deferrable)
+{
+	int shift = BITS_PER_BYTE;
+	int read;
+
 	read = sdw_read_no_pm(slave, reg);
-	if (read < 0)
+	if (read < 0) {
+		if (deferrable && read == -ENODATA)
+			return -EAGAIN;
+
 		return read;
+	}

 	*val = read;

@@ -85,6 +153,37 @@ static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
 	return 0;
 }

+static int regmap_sdw_mbq_read(void *context, unsigned int reg, unsigned int *val)
+{
+	struct regmap_mbq_context *ctx = context;
+	struct device *dev = ctx->dev;
+	struct sdw_slave *slave = dev_to_sdw_dev(dev);
+	bool deferrable = regmap_sdw_mbq_deferrable(ctx, reg);
+	int mbq_size = regmap_sdw_mbq_size(ctx, reg);
+	int ret;
+
+	if (mbq_size < 0)
+		return mbq_size;
+
+	/*
+	 * Technically the spec does allow a device to set itself to busy for
+	 * internal reasons, but since it doesn't provide any information on
+	 * how to handle timeouts in that case, for now the code will only
+	 * process a single wait/timeout on function busy and a single retry
+	 * of the transaction.
+	 */
+	ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, deferrable);
+	if (ret == -EAGAIN) {
+		ret = regmap_sdw_mbq_poll_busy(slave, reg, ctx);
+		if (ret)
+			return ret;
+
+		ret = regmap_sdw_mbq_read_impl(slave, reg, val, mbq_size, false);
+	}
+
+	return ret;
+}
+
 static const struct regmap_bus regmap_sdw_mbq = {
 	.reg_read = regmap_sdw_mbq_read,
 	.reg_write = regmap_sdw_mbq_write,
@@ -119,11 +218,13 @@ regmap_sdw_mbq_gen_context(struct device *dev,
 		return ERR_PTR(-ENOMEM);

 	ctx->dev = dev;
-	ctx->val_size = config->val_bits / BITS_PER_BYTE;

 	if (mbq_config)
 		ctx->cfg = *mbq_config;

+	ctx->val_size = config->val_bits / BITS_PER_BYTE;
+	ctx->readable_reg = config->readable_reg;
+
 	return ctx;
 }

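For reference, a minimal usage sketch of the new deferral support, not taken from this commit: the driver, callback, and register-map names (my_sdca_*, my_sdw_probe), the register widths, and the timeout/retry values are illustrative assumptions, and devm_regmap_init_sdw_mbq_cfg() is assumed to be the init variant that accepts the struct regmap_sdw_mbq_cfg consumed above. Note that the regmap_config should supply readable_reg covering the Entity 0 FunctionStatus control, since regmap_sdw_mbq_poll_busy() only polls the busy flag when that control is readable and otherwise just sleeps for timeout_us.

/* Illustrative sketch, not from this commit: assumed driver-side wiring. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/mod_devicetable.h>
#include <linux/regmap.h>
#include <linux/soundwire/sdw.h>

static bool my_sdca_readable(struct device *dev, unsigned int reg)
{
	/* Illustrative: report every register, including FunctionStatus, as readable. */
	return true;
}

static bool my_sdca_deferrable(struct device *dev, unsigned int reg)
{
	/* Illustrative: let the core defer and retry any register that returns -ENODATA. */
	return true;
}

static const struct regmap_config my_regmap_config = {
	.reg_bits = 32,
	.val_bits = 16,				/* 16-bit values, so 2-byte MBQ accesses */
	.readable_reg = my_sdca_readable,	/* needed for the Function Busy poll */
};

static const struct regmap_sdw_mbq_cfg my_mbq_cfg = {
	.deferrable = my_sdca_deferrable,	/* which registers may be deferred */
	.timeout_us = 10000,			/* total wait for Function Busy to clear */
	.retry_us = 100,			/* polling interval while waiting */
};

static int my_sdw_probe(struct sdw_slave *slave, const struct sdw_device_id *id)
{
	struct regmap *regmap;

	/* Assumed helper name: the _cfg init variant taking a regmap_sdw_mbq_cfg. */
	regmap = devm_regmap_init_sdw_mbq_cfg(slave, &my_regmap_config, &my_mbq_cfg);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return 0;
}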