@@ -104,7 +104,7 @@ static int eth_nxp_enet_qos_tx(const struct device *dev, struct net_pkt *pkt)
104
104
105
105
/* Setting up the descriptors */
106
106
fragment = pkt -> frags ;
107
- tx_desc_ptr -> read .control2 |= FIRST_TX_DESCRIPTOR_FLAG ;
107
+ tx_desc_ptr -> read .control2 |= FIRST_DESCRIPTOR_FLAG ;
108
108
for (int i = 0 ; i < frags_count ; i ++ ) {
109
109
net_pkt_frag_ref (fragment );
110
110
@@ -116,7 +116,7 @@ static int eth_nxp_enet_qos_tx(const struct device *dev, struct net_pkt *pkt)
116
116
tx_desc_ptr ++ ;
117
117
}
118
118
last_desc_ptr = tx_desc_ptr - 1 ;
119
- last_desc_ptr -> read .control2 |= LAST_TX_DESCRIPTOR_FLAG ;
119
+ last_desc_ptr -> read .control2 |= LAST_DESCRIPTOR_FLAG ;
120
120
last_desc_ptr -> read .control1 |= TX_INTERRUPT_ON_COMPLETE_FLAG ;
121
121
122
122
LOG_DBG ("Starting TX DMA on packet %p" , pkt );
@@ -177,10 +177,11 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
177
177
volatile union nxp_enet_qos_rx_desc * desc_arr = data -> rx .descriptors ;
178
178
volatile union nxp_enet_qos_rx_desc * desc ;
179
179
uint32_t desc_idx ;
180
- struct net_pkt * pkt ;
180
+ struct net_pkt * pkt = NULL ;
181
181
struct net_buf * new_buf ;
182
182
struct net_buf * buf ;
183
183
size_t pkt_len ;
184
+ size_t processed_len ;
184
185
185
186
LOG_DBG ("iteration work:%p, rx_data:%p, data:%p" , work , rx_data , data );
186
187
/* We are going to find all of the descriptors we own and update them */
@@ -189,33 +190,57 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
189
190
desc = & desc_arr [desc_idx ];
190
191
191
192
if (desc -> write .control3 & OWN_FLAG ) {
192
- /* The DMA owns the descriptor, we cannot touch it */
193
+ /* The DMA owns the descriptor, we have processed all */
193
194
break ;
194
195
}
195
196
196
197
rx_data -> next_desc_idx = (desc_idx + 1U ) % NUM_RX_BUFDESC ;
197
198
198
- if ((desc -> write .control3 & (FIRST_TX_DESCRIPTOR_FLAG | LAST_TX_DESCRIPTOR_FLAG )) !=
199
- (FIRST_TX_DESCRIPTOR_FLAG | LAST_TX_DESCRIPTOR_FLAG )) {
200
- LOG_DBG ("receive packet mask %X " , (desc -> write .control3 >> 28 ) & 0x0f );
201
- LOG_ERR ("Rx pkt spans over multiple DMA bufs, not implemented, drop here" );
202
- desc -> read .control = rx_desc_refresh_flags ;
203
- continue ;
204
- }
199
+ if (pkt == NULL ) {
200
+ if ((desc -> write .control3 & FIRST_DESCRIPTOR_FLAG ) !=
201
+ FIRST_DESCRIPTOR_FLAG ) {
202
+ LOG_DBG ("receive packet mask %X " ,
203
+ (desc -> write .control3 >> 28 ) & 0x0f );
204
+ LOG_ERR ("Rx descriptor does not have first descriptor flag, drop" );
205
+ desc -> read .control = rx_desc_refresh_flags ;
206
+ /* Error statistics for this packet already updated earlier */
207
+ continue ;
208
+ }
205
209
206
- /* Otherwise, we found a packet that we need to process */
207
- pkt = net_pkt_rx_alloc (K_NO_WAIT );
210
+ /* Otherwise, we found a packet that we need to process */
211
+ pkt = net_pkt_rx_alloc (K_NO_WAIT );
208
212
209
- if (!pkt ) {
210
- LOG_ERR ("Could not alloc new RX pkt" );
211
- /* error: no new buffer, reuse previous immediately
212
- */
213
+ if (!pkt ) {
214
+ LOG_ERR ("Could not alloc new RX pkt" );
215
+ /* error: no new buffer, reuse previous immediately */
216
+ desc -> read .control = rx_desc_refresh_flags ;
217
+ eth_stats_update_errors_rx (data -> iface );
218
+ continue ;
219
+ }
220
+
221
+ processed_len = 0U ;
222
+
223
+ LOG_DBG ("Created new RX pkt %u of %d: %p" ,
224
+ desc_idx + 1U , NUM_RX_BUFDESC , pkt );
225
+ }
226
+
227
+ /* Read the cumulative length of data in this buffer and previous buffers (if any).
228
+ * The complete length is in a descriptor with the last descriptor flag set
229
+ * (note that it includes four byte FCS as well). This length will be validated
230
+ * against processed_len to ensure it's within expected bounds.
231
+ */
232
+ pkt_len = desc -> write .control3 & DESC_RX_PKT_LEN ;
233
+ if ((pkt_len < processed_len ) ||
234
+ ((pkt_len - processed_len ) > ENET_QOS_RX_BUFFER_SIZE )) {
235
+ LOG_ERR ("Invalid packet length in descriptor: pkt_len=%u, processed_len=%u" ,
236
+ pkt_len , processed_len );
237
+ net_pkt_unref (pkt );
238
+ pkt = NULL ;
213
239
desc -> read .control = rx_desc_refresh_flags ;
214
240
eth_stats_update_errors_rx (data -> iface );
215
241
continue ;
216
242
}
217
243
218
- LOG_DBG ("Created new RX pkt %u of %d: %p" , desc_idx + 1U , NUM_RX_BUFDESC , pkt );
219
244
/* We need to know if we can replace the reserved fragment in advance.
220
245
* At no point can we allow the driver to have less than the amount of reserved
221
246
* buffers it needs to function, so we will not give up our previous buffer
@@ -230,47 +255,55 @@ static void eth_nxp_enet_qos_rx(struct k_work *work)
230
255
*/
231
256
LOG_ERR ("No new RX buf available" );
232
257
net_pkt_unref (pkt );
258
+ pkt = NULL ;
233
259
desc -> read .control = rx_desc_refresh_flags ;
234
260
eth_stats_update_errors_rx (data -> iface );
235
261
continue ;
236
262
}
237
263
264
+ /* Append buffer to a packet */
238
265
buf = data -> rx .reserved_bufs [desc_idx ];
239
- pkt_len = desc -> write .control3 & DESC_RX_PKT_LEN ;
240
-
241
- LOG_DBG ("Receiving RX packet" );
242
- /* Finally, we have decided that it is time to wrap the buffer nicely
243
- * up within a packet, and try to send it. It's only one buffer,
244
- * thanks to ENET QOS hardware handing the fragmentation,
245
- * so the construction of the packet is very simple.
246
- */
247
- net_buf_add (buf , pkt_len );
248
- net_pkt_frag_insert (pkt , buf );
249
- if (net_recv_data (data -> iface , pkt )) {
250
- LOG_ERR ("RECV failed" );
251
- /* Quite a shame. */
252
- /* error during processing, we continue with new allocated */
253
- net_pkt_unref (pkt );
254
- eth_stats_update_errors_rx (data -> iface );
266
+ net_buf_add (buf , pkt_len - processed_len );
267
+ net_pkt_frag_add (pkt , buf );
268
+ processed_len = pkt_len ;
269
+
270
+ if ((desc -> write .control3 & LAST_DESCRIPTOR_FLAG ) == LAST_DESCRIPTOR_FLAG ) {
271
+ /* Propagate completed packet to network stack */
272
+ LOG_DBG ("Receiving RX packet" );
273
+ if (net_recv_data (data -> iface , pkt )) {
274
+ LOG_ERR ("RECV failed" );
275
+ /* Error during processing, we continue with new buffer */
276
+ net_pkt_unref (pkt );
277
+ eth_stats_update_errors_rx (data -> iface );
278
+ } else {
279
+ /* Record successfully received packet */
280
+ eth_stats_update_pkts_rx (data -> iface );
281
+ }
282
+ pkt = NULL ;
255
283
}
256
284
257
285
LOG_DBG ("Swap RX buf" );
258
- /* Fresh meat */
286
+ /* Allow receive into a new buffer */
259
287
data -> rx .reserved_bufs [desc_idx ] = new_buf ;
260
288
desc -> read .buf1_addr = (uint32_t )new_buf -> data ;
261
289
desc -> read .control = rx_desc_refresh_flags ;
290
+ }
262
291
263
- /* Record our glorious victory */
264
- eth_stats_update_pkts_rx (data -> iface );
292
+ if (pkt != NULL ) {
293
+ /* Looped through descriptors without reaching the final
294
+ * fragment of the packet, deallocate the incomplete one
295
+ */
296
+ LOG_DBG ("Incomplete packet received, cleaning up" );
297
+ net_pkt_unref (pkt );
298
+ pkt = NULL ;
299
+ eth_stats_update_errors_rx (data -> iface );
265
300
}
266
301
267
302
/* try to restart if halted */
268
303
const struct device * dev = net_if_get_device (data -> iface );
269
- atomic_val_t rbu_flag = atomic_get (& rx_data -> rbu_flag );
270
304
271
- if (rbu_flag ) {
305
+ if (atomic_cas ( & rx_data -> rbu_flag , 1 , 0 ) ) {
272
306
LOG_DBG ("handle RECEIVE BUFFER UNDERRUN in worker" );
273
- atomic_clear (& rx_data -> rbu_flag );
274
307
275
308
/* When the DMA reaches the tail pointer, it suspends. Set to last descriptor */
276
309
const struct nxp_enet_qos_mac_config * config = dev -> config ;
@@ -551,7 +584,7 @@ static inline int enet_qos_rx_desc_init(enet_qos_t *base, struct nxp_enet_qos_rx
551
584
ENET_QOS_REG_PREP (DMA_CH_DMA_CHX_RX_CONTROL2 , RDRL , NUM_RX_BUFDESC - 1 );
552
585
base -> DMA_CH [0 ].DMA_CHX_RX_CTRL |=
553
586
/* Set DMA receive buffer size. The low 2 bits are not entered to this field. */
554
- ENET_QOS_REG_PREP (DMA_CH_DMA_CHX_RX_CTRL , RBSZ_13_Y , CONFIG_NET_BUF_DATA_SIZE >> 2 );
587
+ ENET_QOS_REG_PREP (DMA_CH_DMA_CHX_RX_CTRL , RBSZ_13_Y , ENET_QOS_RX_BUFFER_SIZE >> 2 );
555
588
556
589
return 0 ;
557
590
}
0 commit comments