@@ -157,13 +157,42 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
 	rcu_read_lock();
 	opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
					op_entry);
-	if (opinfo && !atomic_inc_not_zero(&opinfo->refcount))
-		opinfo = NULL;
+	if (opinfo) {
+		if (!atomic_inc_not_zero(&opinfo->refcount))
+			opinfo = NULL;
+		else {
+			atomic_inc(&opinfo->conn->r_count);
+			if (ksmbd_conn_releasing(opinfo->conn)) {
+				atomic_dec(&opinfo->conn->r_count);
+				atomic_dec(&opinfo->refcount);
+				opinfo = NULL;
+			}
+		}
+	}
+
 	rcu_read_unlock();
 
 	return opinfo;
 }
 
+static void opinfo_conn_put(struct oplock_info *opinfo)
+{
+	struct ksmbd_conn *conn;
+
+	if (!opinfo)
+		return;
+
+	conn = opinfo->conn;
+	/*
+	 * Checking waitqueue to dropping pending requests on
+	 * disconnection. waitqueue_active is safe because it
+	 * uses atomic operation for condition.
+	 */
+	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
+		wake_up(&conn->r_count_q);
+	opinfo_put(opinfo);
+}
+
 void opinfo_put(struct oplock_info *opinfo)
 {
 	if (!atomic_dec_and_test(&opinfo->refcount))
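
The hunk above folds the per-connection in-flight counter into the lookup/release pair itself: opinfo_get_list() now hands back an oplock only after pinning both opinfo->refcount and conn->r_count, backing out if the connection is already being torn down, and the new opinfo_conn_put() undoes both in one place. The following is a minimal userspace sketch of that pattern, not ksmbd code: struct conn, struct obj, ref_get_not_zero(), obj_get_pinned() and obj_put_pinned() are invented names, C11 atomics stand in for the kernel's atomic_t, a plain flag stands in for ksmbd_conn_releasing(), and the r_count_q wake-up is reduced to a comment here (the waiter's side is sketched after the lease-break hunks below).

/*
 * Minimal userspace analogue of the opinfo_get_list()/opinfo_conn_put()
 * pairing above. All names are invented for illustration.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct conn {
	atomic_int r_count;        /* requests in flight on behalf of this connection */
	atomic_bool releasing;     /* set once the connection starts tearing down */
};

struct obj {
	atomic_int refcount;       /* object lifetime, like opinfo->refcount */
	struct conn *conn;         /* owning connection, like opinfo->conn */
};

/* Take a reference only if the object is still live (atomic_inc_not_zero). */
static bool ref_get_not_zero(atomic_int *ref)
{
	int old = atomic_load(ref);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(ref, &old, old + 1));
	return true;
}

/* Pin the object and its connection; back out if the connection is going away. */
static struct obj *obj_get_pinned(struct obj *o)
{
	if (!o || !ref_get_not_zero(&o->refcount))
		return NULL;

	atomic_fetch_add(&o->conn->r_count, 1);
	if (atomic_load(&o->conn->releasing)) {
		atomic_fetch_sub(&o->conn->r_count, 1);
		atomic_fetch_sub(&o->refcount, 1);
		return NULL;
	}
	return o;
}

/* Drop the connection pin first, then the object reference. */
static void obj_put_pinned(struct obj *o)
{
	if (!o)
		return;
	atomic_fetch_sub(&o->conn->r_count, 1);  /* real code wakes r_count_q when this hits zero */
	atomic_fetch_sub(&o->refcount, 1);       /* real code frees the object when this hits zero */
}

The ordering is the interesting part: the getter bumps r_count before it tests the releasing flag, so a teardown path that sets the flag and then waits for r_count to drain either turns the getter away or sees its count, never neither.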
@@ -666,13 +695,6 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
 
 out:
 	ksmbd_free_work_struct(work);
-	/*
-	 * Checking waitqueue to dropping pending requests on
-	 * disconnection. waitqueue_active is safe because it
-	 * uses atomic operation for condition.
-	 */
-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-		wake_up(&conn->r_count_q);
 }
 
 /**
@@ -706,7 +728,6 @@ static int smb2_oplock_break_noti(struct oplock_info *opinfo)
 	work->conn = conn;
 	work->sess = opinfo->sess;
 
-	atomic_inc(&conn->r_count);
 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
 		INIT_WORK(&work->work, __smb2_oplock_break_noti);
 		ksmbd_queue_work(work);
@@ -776,13 +797,6 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
 
 out:
 	ksmbd_free_work_struct(work);
-	/*
-	 * Checking waitqueue to dropping pending requests on
-	 * disconnection. waitqueue_active is safe because it
-	 * uses atomic operation for condition.
-	 */
-	if (!atomic_dec_return(&conn->r_count) && waitqueue_active(&conn->r_count_q))
-		wake_up(&conn->r_count_q);
 }
 
 /**
@@ -822,7 +836,6 @@ static int smb2_lease_break_noti(struct oplock_info *opinfo)
 	work->conn = conn;
 	work->sess = opinfo->sess;
 
-	atomic_inc(&conn->r_count);
 	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
 		list_for_each_safe(tmp, t, &opinfo->interim_list) {
 			struct ksmbd_work *in_work;
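
The four hunks above remove both the atomic_inc(&conn->r_count) from smb2_oplock_break_noti()/smb2_lease_break_noti() and the matching decrement-and-wake from the two workers; with the opinfo_get_list()/opinfo_conn_put() change, the connection is now pinned for as long as a looked-up oplock is held rather than only while break work is queued. The counter matters because connection teardown waits for it to reach zero. Below is a userspace stand-in for that drain handshake, assuming nothing about ksmbd's actual teardown code: a pthread mutex and condition variable play the role of wait_event()/wake_up() on conn->r_count_q, and every name is invented.

/*
 * Userspace stand-in for the r_count drain handshake: a condition variable
 * plays the role of conn->r_count_q. Names are invented.
 */
#include <pthread.h>
#include <stdbool.h>

struct conn {
	pthread_mutex_t lock;
	pthread_cond_t r_count_q;   /* signalled when r_count drops to zero */
	int r_count;                /* requests still in flight */
	bool releasing;             /* teardown has started; refuse new pins */
};

/* Called by request paths when they stop using the connection. */
static void conn_put_request(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	if (--c->r_count == 0)
		pthread_cond_broadcast(&c->r_count_q);   /* like wake_up(&conn->r_count_q) */
	pthread_mutex_unlock(&c->lock);
}

/* Called once on teardown: stop new pins, then wait for in-flight ones to drain. */
static void conn_wait_for_drain(struct conn *c)
{
	pthread_mutex_lock(&c->lock);
	c->releasing = true;                 /* what ksmbd_conn_releasing() would observe */
	while (c->r_count > 0)               /* like waiting on r_count_q until r_count == 0 */
		pthread_cond_wait(&c->r_count_q, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

waitqueue_active() in the removed kernel lines is only an optimisation that skips the wake-up when nobody is sleeping; the condition-variable version above simply broadcasts whenever the count reaches zero.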
@@ -1144,28 +1157,30 @@ int smb_grant_oplock(struct ksmbd_work *work, int req_op_level, u64 pid,
 	}
 	prev_opinfo = opinfo_get_list(ci);
 	if (!prev_opinfo ||
-	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx))
+	    (prev_opinfo->level == SMB2_OPLOCK_LEVEL_NONE && lctx)) {
+		opinfo_conn_put(prev_opinfo);
 		goto set_lev;
+	}
 	prev_op_has_lease = prev_opinfo->is_lease;
 	if (prev_op_has_lease)
 		prev_op_state = prev_opinfo->o_lease->state;
 
 	if (share_ret < 0 &&
 	    prev_opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
 		err = share_ret;
-		opinfo_put(prev_opinfo);
+		opinfo_conn_put(prev_opinfo);
 		goto err_out;
 	}
 
 	if (prev_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
 	    prev_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-		opinfo_put(prev_opinfo);
+		opinfo_conn_put(prev_opinfo);
 		goto op_break_not_needed;
 	}
 
 	list_add(&work->interim_entry, &prev_opinfo->interim_list);
 	err = oplock_break(prev_opinfo, SMB2_OPLOCK_LEVEL_II);
-	opinfo_put(prev_opinfo);
+	opinfo_conn_put(prev_opinfo);
 	if (err == -ENOENT)
 		goto set_lev;
 	/* Check all oplock was freed by close */
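
In smb_grant_oplock() the conversion is mostly a one-for-one opinfo_put() to opinfo_conn_put() swap, but the early branch is worth a second look: opinfo_conn_put() tolerates a NULL pointer (see the first hunk), so the combined !prev_opinfo || level-is-NONE test can release unconditionally before jumping to set_lev. A tiny sketch of that NULL-tolerant, single-release-per-path discipline, with invented names:

/* NULL-tolerant release lets every early exit share one cleanup call (invented names). */
#include <stddef.h>
#include <stdlib.h>

struct pinned { int dummy; };

static struct pinned *pin_lookup(int key)
{
	return key ? malloc(sizeof(struct pinned)) : NULL;   /* stand-in for opinfo_get_list() */
}

static void pin_release(struct pinned *p)
{
	if (!p)               /* like opinfo_conn_put(): a NULL pin is simply ignored */
		return;
	free(p);
}

static int grant_something(int key, int want_lease)
{
	struct pinned *prev = pin_lookup(key);

	if (!prev || want_lease) {
		pin_release(prev);    /* safe even when prev is NULL; like the new "goto set_lev" branch */
		return 0;
	}

	/* ... work that needs prev ... */
	pin_release(prev);
	return 1;
}

Funnelling every exit through one NULL-safe release keeps the reference and r_count accounting balanced even on paths that may not own anything.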
@@ -1228,14 +1243,14 @@ static void smb_break_all_write_oplock(struct ksmbd_work *work,
 		return;
 	if (brk_opinfo->level != SMB2_OPLOCK_LEVEL_BATCH &&
 	    brk_opinfo->level != SMB2_OPLOCK_LEVEL_EXCLUSIVE) {
-		opinfo_put(brk_opinfo);
+		opinfo_conn_put(brk_opinfo);
 		return;
 	}
 
 	brk_opinfo->open_trunc = is_trunc;
 	list_add(&work->interim_entry, &brk_opinfo->interim_list);
 	oplock_break(brk_opinfo, SMB2_OPLOCK_LEVEL_II);
-	opinfo_put(brk_opinfo);
+	opinfo_conn_put(brk_opinfo);
 }
 
 /**
@@ -1263,6 +1278,13 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
 	list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
 		if (!atomic_inc_not_zero(&brk_op->refcount))
 			continue;
+
+		atomic_inc(&brk_op->conn->r_count);
+		if (ksmbd_conn_releasing(brk_op->conn)) {
+			atomic_dec(&brk_op->conn->r_count);
+			continue;
+		}
+
 		rcu_read_unlock();
 		if (brk_op->is_lease && (brk_op->o_lease->state &
 		    (~(SMB2_LEASE_READ_CACHING_LE |
@@ -1292,7 +1314,7 @@ void smb_break_all_levII_oplock(struct ksmbd_work *work, struct ksmbd_file *fp,
 		brk_op->open_trunc = is_trunc;
 		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE);
next:
-		opinfo_put(brk_op);
+		opinfo_conn_put(brk_op);
 		rcu_read_lock();
 	}
 	rcu_read_unlock();