 #include <linux/fs.h>
 #include "dax-private.h"
 
+/**
+ * struct dax_device - anchor object for dax services
+ * @inode: core vfs
+ * @cdev: optional character interface for "device dax"
+ * @host: optional name for lookups where the device path is not available
+ * @private: dax driver private data
+ * @flags: state and boolean properties
+ */
+struct dax_device {
+	struct hlist_node list;
+	struct inode inode;
+	struct cdev cdev;
+	const char *host;
+	void *private;
+	unsigned long flags;
+	const struct dax_operations *ops;
+};
+
 static dev_t dax_devt;
 DEFINE_STATIC_SRCU(dax_srcu);
 static struct vfsmount *dax_mnt;
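For context, the struct moved above is populated by DAX drivers through alloc_dax(). A minimal illustrative sketch (not part of this patch), assuming the alloc_dax(private, host, ops, flags) signature from this era of the tree; my_dax_ops, my_driver_data and "my-dax-host" are hypothetical placeholders:

#include <linux/dax.h>

/* Hypothetical, empty ops table; a real driver fills in dax_operations. */
static const struct dax_operations my_dax_ops;

static int my_driver_probe(void *my_driver_data)
{
	struct dax_device *dax_dev;

	/* ->private, ->host, ->ops and ->flags of struct dax_device come from here */
	dax_dev = alloc_dax(my_driver_data, "my-dax-host", &my_dax_ops, 0);
	if (!dax_dev)
		return -ENOMEM;

	/* ... register and use the device ..., then drop the reference on teardown */
	put_dax(dax_dev);
	return 0;
}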
@@ -40,6 +58,42 @@ void dax_read_unlock(int id)
 }
 EXPORT_SYMBOL_GPL(dax_read_unlock);
 
+static int dax_host_hash(const char *host)
+{
+	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
+}
+
+/**
+ * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
+ * @host: alternate name for the device registered by a dax driver
+ */
+static struct dax_device *dax_get_by_host(const char *host)
+{
+	struct dax_device *dax_dev, *found = NULL;
+	int hash, id;
+
+	if (!host)
+		return NULL;
+
+	hash = dax_host_hash(host);
+
+	id = dax_read_lock();
+	spin_lock(&dax_host_lock);
+	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
+		if (!dax_alive(dax_dev)
+				|| strcmp(host, dax_dev->host) != 0)
+			continue;
+
+		if (igrab(&dax_dev->inode))
+			found = dax_dev;
+		break;
+	}
+	spin_unlock(&dax_host_lock);
+	dax_read_unlock(id);
+
+	return found;
+}
+
 #ifdef CONFIG_BLOCK
 #include <linux/blkdev.h>
 
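Note that dax_get_by_host() takes a reference on a hit via igrab(), so every successful lookup must be balanced with put_dax(). Since the function is now static, outside callers go through fs_dax_get_by_bdev(); a minimal usage sketch under that assumption (bdev_has_dax() is a hypothetical helper, not an API in this file):

#include <linux/blkdev.h>
#include <linux/dax.h>

static bool bdev_has_dax(struct block_device *bdev)
{
	/* fs_dax_get_by_bdev() resolves by disk name and grabs a reference */
	struct dax_device *dax_dev = fs_dax_get_by_bdev(bdev);

	if (!dax_dev)
		return false;

	put_dax(dax_dev);	/* balance the reference taken by the lookup */
	return true;
}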
@@ -65,45 +119,39 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 	return dax_get_by_host(bdev->bd_disk->disk_name);
 }
 EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
-#endif
 
-bool __generic_fsdax_supported(struct dax_device *dax_dev,
+bool generic_fsdax_supported(struct dax_device *dax_dev,
 		struct block_device *bdev, int blocksize, sector_t start,
 		sector_t sectors)
 {
 	bool dax_enabled = false;
 	pgoff_t pgoff, pgoff_end;
-	char buf[BDEVNAME_SIZE];
 	void *kaddr, *end_kaddr;
 	pfn_t pfn, end_pfn;
 	sector_t last_page;
 	long len, len2;
 	int err, id;
 
 	if (blocksize != PAGE_SIZE) {
-		pr_info("%s: error: unsupported blocksize for dax\n",
-				bdevname(bdev, buf));
+		pr_info("%pg: error: unsupported blocksize for dax\n", bdev);
 		return false;
 	}
 
 	if (!dax_dev) {
-		pr_debug("%s: error: dax unsupported by block device\n",
-				bdevname(bdev, buf));
+		pr_debug("%pg: error: dax unsupported by block device\n", bdev);
 		return false;
 	}
 
 	err = bdev_dax_pgoff(bdev, start, PAGE_SIZE, &pgoff);
 	if (err) {
-		pr_info("%s: error: unaligned partition for dax\n",
-				bdevname(bdev, buf));
+		pr_info("%pg: error: unaligned partition for dax\n", bdev);
 		return false;
 	}
 
 	last_page = PFN_DOWN((start + sectors - 1) * 512) * PAGE_SIZE / 512;
 	err = bdev_dax_pgoff(bdev, last_page, PAGE_SIZE, &pgoff_end);
 	if (err) {
-		pr_info("%s: error: unaligned partition for dax\n",
-				bdevname(bdev, buf));
+		pr_info("%pg: error: unaligned partition for dax\n", bdev);
 		return false;
 	}
 
@@ -112,8 +160,8 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
 	len2 = dax_direct_access(dax_dev, pgoff_end, 1, &end_kaddr, &end_pfn);
 
 	if (len < 1 || len2 < 1) {
-		pr_info("%s: error: dax access failed (%ld)\n",
-				bdevname(bdev, buf), len < 1 ? len : len2);
+		pr_info("%pg: error: dax access failed (%ld)\n",
+				bdev, len < 1 ? len : len2);
 		dax_read_unlock(id);
 		return false;
 	}
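The probe above relies on dax_direct_access(), which returns the number of contiguous pages mappable at the given pgoff (or a negative errno), fills in the kernel address and pfn, and must run inside a dax_read_lock() section. A short sketch of the same call pattern; probe_one_page() is a hypothetical wrapper, not something this patch adds:

#include <linux/dax.h>
#include <linux/pfn_t.h>

static long probe_one_page(struct dax_device *dax_dev, pgoff_t pgoff)
{
	void *kaddr;
	pfn_t pfn;
	long nr;
	int id;

	id = dax_read_lock();	/* keep dax_dev alive across the call */
	nr = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	return nr;		/* >= 1 on success, negative errno on failure */
}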
@@ -147,57 +195,32 @@ bool __generic_fsdax_supported(struct dax_device *dax_dev,
 	dax_read_unlock(id);
 
 	if (!dax_enabled) {
-		pr_info("%s: error: dax support not enabled\n",
-				bdevname(bdev, buf));
+		pr_info("%pg: error: dax support not enabled\n", bdev);
 		return false;
 	}
 	return true;
 }
-EXPORT_SYMBOL_GPL(__generic_fsdax_supported);
+EXPORT_SYMBOL_GPL(generic_fsdax_supported);
 
-/**
- * __bdev_dax_supported() - Check if the device supports dax for filesystem
- * @bdev: block device to check
- * @blocksize: The block size of the device
- *
- * This is a library function for filesystems to check if the block device
- * can be mounted with dax option.
- *
- * Return: true if supported, false if unsupported
- */
-bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
+bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
+		int blocksize, sector_t start, sector_t len)
 {
-	struct dax_device *dax_dev;
-	struct request_queue *q;
-	char buf[BDEVNAME_SIZE];
-	bool ret;
+	bool ret = false;
 	int id;
 
-	q = bdev_get_queue(bdev);
-	if (!q || !blk_queue_dax(q)) {
-		pr_debug("%s: error: request queue doesn't support dax\n",
-				bdevname(bdev, buf));
-		return false;
-	}
-
-	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
-	if (!dax_dev) {
-		pr_debug("%s: error: device does not support dax\n",
-				bdevname(bdev, buf));
+	if (!dax_dev)
 		return false;
-	}
 
 	id = dax_read_lock();
-	ret = dax_supported(dax_dev, bdev, blocksize, 0,
-			i_size_read(bdev->bd_inode) / 512);
+	if (dax_alive(dax_dev) && dax_dev->ops->dax_supported)
+		ret = dax_dev->ops->dax_supported(dax_dev, bdev, blocksize,
+				start, len);
 	dax_read_unlock(id);
-
-	put_dax(dax_dev);
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(__bdev_dax_supported);
-#endif
+EXPORT_SYMBOL_GPL(dax_supported);
+#endif /* CONFIG_FS_DAX */
+#endif /* CONFIG_BLOCK */
 
 enum dax_device_flags {
 	/* !alive + rcu grace period == no new operations / mappings */
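With __bdev_dax_supported() removed, a filesystem-side check now pairs the lookup with the reworked dax_supported() entry point itself. A sketch that mirrors the removed helper (whole-device range in 512-byte sectors, as the old code did); my_bdev_dax_supported() is a hypothetical caller, not something this patch adds:

#include <linux/blkdev.h>
#include <linux/dax.h>
#include <linux/fs.h>

static bool my_bdev_dax_supported(struct block_device *bdev, int blocksize)
{
	struct dax_device *dax_dev = fs_dax_get_by_bdev(bdev);
	bool ret;

	if (!dax_dev)
		return false;

	/* check the whole device, expressed in 512-byte sectors */
	ret = dax_supported(dax_dev, bdev, blocksize, 0,
			i_size_read(bdev->bd_inode) / 512);
	put_dax(dax_dev);
	return ret;
}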
@@ -208,24 +231,6 @@ enum dax_device_flags {
 	DAXDEV_SYNC,
 };
 
-/**
- * struct dax_device - anchor object for dax services
- * @inode: core vfs
- * @cdev: optional character interface for "device dax"
- * @host: optional name for lookups where the device path is not available
- * @private: dax driver private data
- * @flags: state and boolean properties
- */
-struct dax_device {
-	struct hlist_node list;
-	struct inode inode;
-	struct cdev cdev;
-	const char *host;
-	void *private;
-	unsigned long flags;
-	const struct dax_operations *ops;
-};
-
 static ssize_t write_cache_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
@@ -323,19 +328,6 @@ long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 }
 EXPORT_SYMBOL_GPL(dax_direct_access);
 
-bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
-		int blocksize, sector_t start, sector_t len)
-{
-	if (!dax_dev)
-		return false;
-
-	if (!dax_alive(dax_dev))
-		return false;
-
-	return dax_dev->ops->dax_supported(dax_dev, bdev, blocksize, start, len);
-}
-EXPORT_SYMBOL_GPL(dax_supported);
-
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i)
 {
@@ -423,11 +415,6 @@ bool dax_alive(struct dax_device *dax_dev)
 }
 EXPORT_SYMBOL_GPL(dax_alive);
 
-static int dax_host_hash(const char *host)
-{
-	return hashlen_hash(hashlen_string("DAX", host)) % DAX_HASH_SIZE;
-}
-
 /*
  * Note, rcu is not protecting the liveness of dax_dev, rcu is ensuring
  * that any fault handlers or operations that might have seen
@@ -624,38 +611,6 @@ void put_dax(struct dax_device *dax_dev)
 }
 EXPORT_SYMBOL_GPL(put_dax);
 
-/**
- * dax_get_by_host() - temporary lookup mechanism for filesystem-dax
- * @host: alternate name for the device registered by a dax driver
- */
-struct dax_device *dax_get_by_host(const char *host)
-{
-	struct dax_device *dax_dev, *found = NULL;
-	int hash, id;
-
-	if (!host)
-		return NULL;
-
-	hash = dax_host_hash(host);
-
-	id = dax_read_lock();
-	spin_lock(&dax_host_lock);
-	hlist_for_each_entry(dax_dev, &dax_host_list[hash], list) {
-		if (!dax_alive(dax_dev)
-				|| strcmp(host, dax_dev->host) != 0)
-			continue;
-
-		if (igrab(&dax_dev->inode))
-			found = dax_dev;
-		break;
-	}
-	spin_unlock(&dax_host_lock);
-	dax_read_unlock(id);
-
-	return found;
-}
-EXPORT_SYMBOL_GPL(dax_get_by_host);
-
 /**
  * inode_dax: convert a public inode into its dax_dev
  * @inode: An inode with i_cdev pointing to a dax_dev