@@ -222,7 +222,8 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
-	struct page *page, *tail;
+	struct page *page;
+	struct folio *folio;
 	struct address_space *mapping;
 	int err, ro = 0;
 
@@ -273,54 +274,52 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
 	err = 0;
 
 	/*
-	 * The treatment of mapping from this point on is critical. The page
-	 * lock protects many things but in this context the page lock
+	 * The treatment of mapping from this point on is critical. The folio
+	 * lock protects many things but in this context the folio lock
 	 * stabilizes mapping, prevents inode freeing in the shared
 	 * file-backed region case and guards against movement to swap cache.
 	 *
-	 * Strictly speaking the page lock is not needed in all cases being
-	 * considered here and page lock forces unnecessarily serialization
+	 * Strictly speaking the folio lock is not needed in all cases being
+	 * considered here and folio lock forces unnecessary serialization.
 	 * From this point on, mapping will be re-verified if necessary and
-	 * page lock will be acquired only if it is unavoidable
+	 * folio lock will be acquired only if it is unavoidable.
 	 *
-	 * Mapping checks require the head page for any compound page so the
-	 * head page and mapping is looked up now. For anonymous pages, it
-	 * does not matter if the page splits in the future as the key is
-	 * based on the address. For filesystem-backed pages, the tail is
-	 * required as the index of the page determines the key. For
-	 * base pages, there is no tail page and tail == page.
+	 * Mapping checks require the folio so it is looked up now. For
+	 * anonymous pages, it does not matter if the folio is split
+	 * in the future as the key is based on the address. For
+	 * filesystem-backed pages, the precise page is required as the
+	 * index of the page determines the key.
 	 */
-	tail = page;
-	page = compound_head(page);
-	mapping = READ_ONCE(page->mapping);
+	folio = page_folio(page);
+	mapping = READ_ONCE(folio->mapping);
 
 	/*
-	 * If page->mapping is NULL, then it cannot be a PageAnon
+	 * If folio->mapping is NULL, then it cannot be an anonymous
 	 * page; but it might be the ZERO_PAGE or in the gate area or
 	 * in a special mapping (all cases which we are happy to fail);
 	 * or it may have been a good file page when get_user_pages_fast
 	 * found it, but truncated or holepunched or subjected to
-	 * invalidate_complete_page2 before we got the page lock (also
+	 * invalidate_complete_page2 before we got the folio lock (also
 	 * cases which we are happy to fail). And we hold a reference,
 	 * so refcount care in invalidate_inode_page's remove_mapping
 	 * prevents drop_caches from setting mapping to NULL beneath us.
 	 *
 	 * The case we do have to guard against is when memory pressure made
 	 * shmem_writepage move it from filecache to swapcache beneath us:
-	 * an unlikely race, but we do need to retry for page->mapping.
+	 * an unlikely race, but we do need to retry for folio->mapping.
 	 */
 	if (unlikely(!mapping)) {
 		int shmem_swizzled;
 
 		/*
-		 * Page lock is required to identify which special case above
-		 * applies. If this is really a shmem page then the page lock
+		 * Folio lock is required to identify which special case above
+		 * applies. If this is really a shmem page then the folio lock
 		 * will prevent unexpected transitions.
 		 */
-		lock_page(page);
-		shmem_swizzled = PageSwapCache(page) || page->mapping;
-		unlock_page(page);
-		put_page(page);
+		folio_lock(folio);
+		shmem_swizzled = folio_test_swapcache(folio) || folio->mapping;
+		folio_unlock(folio);
+		folio_put(folio);
 
 		if (shmem_swizzled)
 			goto again;
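
The heart of this hunk is the switch from the compound_head() lookup to page_folio(). The sketch below is illustrative only, not part of the patch: it contrasts the old and new lookup shape, assuming a page pinned by get_user_pages_fast() as in this function. page_folio() resolves head and tail pages alike to the containing folio, which is why the separate "tail" copy can be dropped.

	/*
	 * Before: normalise to the head page for the mapping check, but
	 * keep the original (possibly tail) page, since its index later
	 * picks the futex key.
	 */
	struct page *tail = page;
	page = compound_head(page);
	mapping = READ_ONCE(page->mapping);

	/*
	 * After: one lookup that works for any page in the folio. The
	 * original page pointer is simply left untouched for the key
	 * computation later on.
	 */
	struct folio *folio = page_folio(page);
	mapping = READ_ONCE(folio->mapping);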
@@ -331,14 +330,14 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
 	/*
 	 * Private mappings are handled in a simple way.
 	 *
-	 * If the futex key is stored on an anonymous page, then the associated
+	 * If the futex key is stored in anonymous memory, then the associated
 	 * object is the mm which is implicitly pinned by the calling process.
 	 *
 	 * NOTE: When userspace waits on a MAP_SHARED mapping, even if
 	 * it's a read-only handle, it's expected that futexes attach to
 	 * the object not the particular process.
 	 */
-	if (PageAnon(page)) {
+	if (folio_test_anon(folio)) {
 		/*
 		 * A RO anonymous page will never change and thus doesn't make
 		 * sense for futex operations.
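
For context (not part of this diff): in the anonymous branch the key is built from the mm and the userspace address, not from the folio, which is why only the folio_test_anon() test itself needed converting. The body of that branch in the kernel of this era looks roughly like the following; treat the exact lines as a from-memory sketch rather than a quotation of the patch:

		/* Private futex: key off the mm and the user address. */
		key->both.offset |= FUT_OFF_MMSHARED; /* ref taken on mm */
		key->private.mm = mm;
		key->private.address = address;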
@@ -357,40 +356,40 @@ int get_futex_key(u32 __user *uaddr, bool fshared, union futex_key *key,
 
 		/*
 		 * The associated futex object in this case is the inode and
-		 * the page->mapping must be traversed. Ordinarily this should
-		 * be stabilised under page lock but it's not strictly
+		 * the folio->mapping must be traversed. Ordinarily this should
+		 * be stabilised under folio lock but it's not strictly
 		 * necessary in this case as we just want to pin the inode, not
-		 * update the radix tree or anything like that.
+		 * update i_pages or anything like that.
 		 *
 		 * The RCU read lock is taken as the inode is finally freed
 		 * under RCU. If the mapping still matches expectations then the
 		 * mapping->host can be safely accessed as being a valid inode.
 		 */
 		rcu_read_lock();
 
-		if (READ_ONCE(page->mapping) != mapping) {
+		if (READ_ONCE(folio->mapping) != mapping) {
 			rcu_read_unlock();
-			put_page(page);
+			folio_put(folio);
 
 			goto again;
 		}
 
 		inode = READ_ONCE(mapping->host);
 		if (!inode) {
 			rcu_read_unlock();
-			put_page(page);
+			folio_put(folio);
 
 			goto again;
 		}
 
 		key->both.offset |= FUT_OFF_INODE; /* inode-based key */
 		key->shared.i_seq = get_inode_sequence_number(inode);
-		key->shared.pgoff = page_to_pgoff(tail);
+		key->shared.pgoff = folio->index + folio_page_idx(folio, page);
 		rcu_read_unlock();
 	}
 
 out:
-	put_page(page);
+	folio_put(folio);
 	return err;
 }
 
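
The one subtle change above is the pgoff computation: page_to_pgoff(tail) used the precise (possibly tail) page, and the folio form must reproduce that. folio->index is the file offset, in pages, of the folio's first page, and folio_page_idx() is the page's position within the folio, so their sum is the page's own file offset. A worked example, wrapped in a hypothetical helper that exists only to name the expression used for key->shared.pgoff:

	/*
	 * Hypothetical helper, not in the kernel; it just names the
	 * expression assigned to key->shared.pgoff above.
	 *
	 * Example: an order-2 (4-page) folio caching file pages 64..67.
	 * For the third page of that folio:
	 *   folio->index                == 64
	 *   folio_page_idx(folio, page) == 2
	 *   returned pgoff              == 64 + 2 == 66
	 * which matches what page_to_pgoff() returned for that tail page.
	 */
	static inline pgoff_t futex_folio_pgoff(struct folio *folio,
						struct page *page)
	{
		return folio->index + folio_page_idx(folio, page);
	}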