15
15
#include <linux/cdev.h>
16
16
#include <linux/slab.h>
17
17
#include <linux/module.h>
18
+ #include <linux/kobject.h>
18
19
19
20
#include <linux/uaccess.h>
20
21
#include <asm/cio.h>
21
22
#include <asm/ccwdev.h>
22
23
#include <asm/debug.h>
23
24
#include <asm/diag.h>
25
+ #include <asm/scsw.h>
24
26
25
27
#include "vmur.h"
26
28
@@ -78,6 +80,8 @@ static struct ccw_driver ur_driver = {
78
80
79
81
static DEFINE_MUTEX (vmur_mutex );
80
82
83
+ static void ur_uevent (struct work_struct * ws );
84
+
81
85
/*
82
86
* Allocation, freeing, getting and putting of urdev structures
83
87
*
@@ -108,6 +112,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev)
108
112
ccw_device_get_id (cdev , & urd -> dev_id );
109
113
mutex_init (& urd -> io_mutex );
110
114
init_waitqueue_head (& urd -> wait );
115
+ INIT_WORK (& urd -> uevent_work , ur_uevent );
111
116
spin_lock_init (& urd -> open_lock );
112
117
refcount_set (& urd -> ref_count , 1 );
113
118
urd -> cdev = cdev ;
@@ -275,6 +280,18 @@ static int do_ur_io(struct urdev *urd, struct ccw1 *cpa)
275
280
return rc ;
276
281
}
277
282
283
/*
 * Workqueue handler: notify userspace of an unsolicited device-end
 * interrupt by emitting a KOBJ_CHANGE uevent on the ccw device.
 *
 * Scheduled from the interrupt handler (which takes a urdev reference
 * via urdev_get() before schedule_work()); this function drops that
 * reference when done. If the work is cancelled instead (see the
 * cancel_work_sync() path in ur_set_offline_force()), the canceller
 * releases the reference.
 */
static void ur_uevent(struct work_struct *ws)
{
	/* Recover the owning urdev from the embedded work item. */
	struct urdev *urd = container_of(ws, struct urdev, uevent_work);
	char *envp[] = {
		"EVENT=unsol_de",	/* Unsolicited device-end interrupt */
		NULL
	};

	/* Emit the uevent so userspace (e.g. udev rules) can react to
	 * the device becoming ready. */
	kobject_uevent_env(&urd->cdev->dev.kobj, KOBJ_CHANGE, envp);
	/* Drop the reference taken when this work was scheduled. */
	urdev_put(urd);
}
294
+
278
295
/*
279
296
* ur interrupt handler, called from the ccw_device layer
280
297
*/
@@ -288,12 +305,21 @@ static void ur_int_handler(struct ccw_device *cdev, unsigned long intparm,
288
305
intparm , irb -> scsw .cmd .cstat , irb -> scsw .cmd .dstat ,
289
306
irb -> scsw .cmd .count );
290
307
}
308
+ urd = dev_get_drvdata (& cdev -> dev );
291
309
if (!intparm ) {
292
310
TRACE ("ur_int_handler: unsolicited interrupt\n" );
311
+
312
+ if (scsw_dstat (& irb -> scsw ) & DEV_STAT_DEV_END ) {
313
+ /*
314
+ * Userspace might be interested in a transition to
315
+ * device-ready state.
316
+ */
317
+ urdev_get (urd );
318
+ schedule_work (& urd -> uevent_work );
319
+ }
320
+
293
321
return ;
294
322
}
295
- urd = dev_get_drvdata (& cdev -> dev );
296
- BUG_ON (!urd );
297
323
/* On special conditions irb is an error pointer */
298
324
if (IS_ERR (irb ))
299
325
urd -> io_request_rc = PTR_ERR (irb );
@@ -809,7 +835,6 @@ static int ur_probe(struct ccw_device *cdev)
809
835
rc = - ENOMEM ;
810
836
goto fail_urdev_put ;
811
837
}
812
- cdev -> handler = ur_int_handler ;
813
838
814
839
/* validate virtual unit record device */
815
840
urd -> class = get_urd_class (urd );
@@ -823,6 +848,7 @@ static int ur_probe(struct ccw_device *cdev)
823
848
}
824
849
spin_lock_irq (get_ccwdev_lock (cdev ));
825
850
dev_set_drvdata (& cdev -> dev , urd );
851
+ cdev -> handler = ur_int_handler ;
826
852
spin_unlock_irq (get_ccwdev_lock (cdev ));
827
853
828
854
mutex_unlock (& vmur_mutex );
@@ -928,6 +954,10 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
928
954
rc = - EBUSY ;
929
955
goto fail_urdev_put ;
930
956
}
957
+ if (cancel_work_sync (& urd -> uevent_work )) {
958
+ /* Work not run yet - need to release reference here */
959
+ urdev_put (urd );
960
+ }
931
961
device_destroy (vmur_class , urd -> char_device -> dev );
932
962
cdev_del (urd -> char_device );
933
963
urd -> char_device = NULL ;
@@ -963,6 +993,7 @@ static void ur_remove(struct ccw_device *cdev)
963
993
spin_lock_irqsave (get_ccwdev_lock (cdev ), flags );
964
994
urdev_put (dev_get_drvdata (& cdev -> dev ));
965
995
dev_set_drvdata (& cdev -> dev , NULL );
996
+ cdev -> handler = NULL ;
966
997
spin_unlock_irqrestore (get_ccwdev_lock (cdev ), flags );
967
998
968
999
mutex_unlock (& vmur_mutex );
0 commit comments