diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst
index 719510247292..805c073fd5e6 100644
--- a/Documentation/arm64/silicon-errata.rst
+++ b/Documentation/arm64/silicon-errata.rst
@@ -92,12 +92,18 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A77      | #1508412        | ARM64_ERRATUM_1508412       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
++----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1349291        | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1542419        | ARM64_ERRATUM_1542419       |
 +----------------+-----------------+-----------------+-----------------------------+
+| ARM            | Neoverse-N2     | #2067961        | ARM64_ERRATUM_2067961       |
++----------------+-----------------+-----------------+-----------------------------+
 | ARM            | MMU-500         | #841119,826419  | N/A                         |
 +----------------+-----------------+-----------------+-----------------------------+
 +----------------+-----------------+-----------------+-----------------------------+
diff --git a/Documentation/filesystems/incfs.rst b/Documentation/filesystems/incfs.rst
index 03ae39ec72dc..19db303b8327 100644
--- a/Documentation/filesystems/incfs.rst
+++ b/Documentation/filesystems/incfs.rst
@@ -7,7 +7,7 @@ incfs: A stacked incremental filesystem for Linux
 /sys/fs interface
 =================
 
-Please update Documentation/ABI/testing/sys-fs-incfs if you update this
+Please update Documentation/ABI/testing/sysfs-fs-incfs if you update this
 section.
 
 incfs creates the following files in /sys/fs.
diff --git a/android/abi_gki_aarch64.xml b/android/abi_gki_aarch64.xml
index beeb9ae2d41f..91c6054b792a 100755
--- a/android/abi_gki_aarch64.xml
+++ b/android/abi_gki_aarch64.xml
-142402,16 +142435,16 @@ - - + + - - + + @@ -142434,9 +142467,9 @@ - - - + + + @@ -142455,8 +142488,8 @@ - - + + @@ -142506,12 +142539,12 @@ - - + + - - + + @@ -142526,16 +142559,16 @@ - - + + - - + + @@ -142558,8 +142591,8 @@ - - + + @@ -142596,17 +142629,17 @@ - - + + - - + + - - - + + + @@ -142650,16 +142683,16 @@ - - + + - - + + @@ -143691,10 +143724,10 @@ - - - - + + + + @@ -143712,8 +143745,8 @@ - - + + @@ -143894,106 +143927,106 @@ - - - - - - + + + + + + - - - - - - - + + + + + + + - - - - - - + + + + + + - - - - - - - + + + + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - - + + + - - - - + + + + - - + + - - + + - - + + - - + + - - + + - - + + - - + + - - - + + + @@ -144116,43 +144149,43 @@ - - - - - - - - - - - + + + + + + + + + + + - - + + - - - + + + - - - - - - - - - - - + + + + + + + + + + + - - + + @@ -144344,20 +144377,20 @@ - - + + - - - - + + + + - - - - + + + + @@ -144461,12 +144494,12 @@ - - - - - - + + + + + + diff --git a/android/abi_gki_aarch64_mtk b/android/abi_gki_aarch64_mtk index f49b88bc41c0..137ee9d40680 100644 --- a/android/abi_gki_aarch64_mtk +++ b/android/abi_gki_aarch64_mtk @@ -22,12 +22,14 @@ __alloc_percpu_gfp __alloc_skb alloc_workqueue + all_vm_events android_debug_symbol android_rvh_probe_register anon_inode_getfd anon_inode_getfile arc4_crypt arc4_setkey + __arch_clear_user __arch_copy_from_user __arch_copy_to_user arch_timer_read_counter @@ -51,10 +53,12 @@ bio_reset __bitmap_andnot __bitmap_clear + __bitmap_equal bitmap_find_free_region bitmap_find_next_zero_area_off bitmap_free bitmap_from_arr32 + __bitmap_or bitmap_print_to_pagebuf bitmap_release_region __bitmap_set @@ -96,6 +100,7 @@ bus_register bus_set_iommu bus_unregister + cache_line_size call_rcu cancel_delayed_work cancel_delayed_work_sync @@ -107,6 +112,8 @@ cdev_device_del cdev_init __cfi_slowpath + cgroup_taskset_first + cgroup_taskset_next __check_object_size check_preempt_curr __class_create @@ -115,6 +122,7 @@ class_for_each_device __class_register class_unregister + clear_page __ClearPageMovable clk_bulk_disable clk_bulk_enable @@ -172,7 +180,6 @@ config_group_init_type_name console_drivers console_suspend_enabled - console_unlock __const_udelay consume_skb contig_page_data @@ -203,6 +210,8 @@ cpufreq_this_cpu_can_update cpufreq_unregister_driver cpufreq_unregister_governor + cpufreq_update_limits + cpufreq_update_util_data cpu_hotplug_disable cpu_hotplug_enable __cpuhp_remove_state @@ -223,6 +232,8 @@ cpumask_next_and cpu_number __cpu_online_mask + cpu_pm_register_notifier + cpu_pm_unregister_notifier __cpu_possible_mask __cpu_present_mask cpu_scale @@ -289,10 +300,10 @@ del_gendisk del_timer del_timer_sync + desc_to_gpio destroy_workqueue dev_alloc_name dev_base_lock - dev_change_flags dev_change_net_namespace dev_close dev_driver_string @@ -300,21 +311,26 @@ _dev_err dev_err_probe dev_fetch_sw_netstats + devfreq_add_device devfreq_add_governor + devfreq_cooling_unregister devfreq_get_devfreq_by_phandle devfreq_monitor_resume devfreq_monitor_start devfreq_monitor_stop devfreq_monitor_suspend devfreq_recommended_opp + devfreq_register_opp_notifier devfreq_remove_device devfreq_remove_governor devfreq_resume_device devfreq_suspend_device + devfreq_unregister_opp_notifier devfreq_update_interval dev_fwnode __dev_get_by_index dev_get_by_index + dev_get_by_name dev_get_regmap dev_get_stats device_add @@ -341,6 +357,7 @@ device_remove_file device_rename device_set_of_node_from_dev + device_set_wakeup_capable device_show_bool device_store_bool device_unregister @@ -380,6 +397,7 
@@ devm_ioremap devm_ioremap_resource devm_ioremap_wc + devm_iounmap devm_kasprintf devm_kfree devm_kmalloc @@ -404,6 +422,8 @@ devm_platform_ioremap_resource_byname devm_power_supply_get_by_phandle devm_power_supply_register + devm_rc_allocate_device + devm_rc_register_device devm_regmap_add_irq_chip devm_regmap_field_alloc __devm_regmap_init @@ -424,6 +444,7 @@ devm_snd_soc_register_component devm_spi_register_controller devm_thermal_zone_of_sensor_register + devm_usb_get_phy devm_watchdog_register_device _dev_notice dev_pm_domain_attach_by_id @@ -434,6 +455,7 @@ dev_pm_opp_add dev_pm_opp_find_freq_ceil dev_pm_opp_find_freq_ceil_by_volt + dev_pm_opp_find_freq_exact dev_pm_opp_find_freq_floor dev_pm_opp_get_freq dev_pm_opp_get_level @@ -445,7 +467,9 @@ dev_pm_opp_of_remove_table dev_pm_opp_put dev_pm_opp_put_opp_table + dev_pm_opp_put_regulators dev_pm_opp_remove_all_dynamic + dev_pm_opp_set_regulators dev_pm_qos_update_request dev_printk dev_queue_xmit @@ -473,6 +497,7 @@ dma_buf_fd dma_buf_get dma_buf_map_attachment + dma_buf_mmap dmabuf_page_pool_alloc dmabuf_page_pool_create dmabuf_page_pool_destroy @@ -482,11 +507,15 @@ dma_buf_vmap dma_buf_vunmap dma_fence_add_callback + dma_fence_array_ops dma_fence_context_alloc dma_fence_default_wait + dma_fence_enable_sw_signaling dma_fence_free + dma_fence_get_status dma_fence_init dma_fence_release + dma_fence_remove_callback dma_fence_signal dma_fence_signal_locked dma_fence_signal_timestamp_locked @@ -496,6 +525,7 @@ dma_heap_add dma_heap_buffer_alloc dma_heap_bufferfd_alloc + dma_heap_buffer_free dma_heap_find dma_heap_get_dev dma_heap_get_drvdata @@ -518,14 +548,17 @@ dma_sync_sg_for_cpu dma_sync_sg_for_device dma_sync_single_for_cpu + dma_sync_single_for_device dma_unmap_page_attrs dma_unmap_resource dma_unmap_sg_attrs do_exit do_wait_intr_irq down + downgrade_write down_interruptible down_read + down_read_trylock down_timeout down_trylock down_write @@ -587,6 +620,7 @@ drm_dev_put drm_dev_register drm_dev_unregister + drm_display_mode_to_videomode drm_dp_aux_init drm_dp_aux_register drm_dp_channel_eq_ok @@ -621,6 +655,7 @@ drm_gem_prime_import drm_gem_vm_close drm_get_edid + drm_get_format_name drm_helper_hpd_irq_event drm_helper_mode_fill_fb_struct drm_helper_probe_single_connector_modes @@ -636,6 +671,7 @@ drm_mode_copy drm_mode_crtc_set_gamma_size drm_mode_duplicate + drm_mode_equal drm_mode_object_find drm_mode_object_put drm_mode_probed_add @@ -759,6 +795,7 @@ gen_pool_best_fit gen_pool_create gen_pool_destroy + gen_pool_dma_alloc_align gen_pool_free_owner gen_pool_has_addr gen_pool_set_algo @@ -784,6 +821,8 @@ get_task_exe_file get_task_mm get_unused_fd_flags + get_user_pages + get_user_pages_fast get_user_pages_remote get_vaddr_frames get_zeroed_page @@ -801,9 +840,13 @@ gpiod_direction_input gpiod_direction_output gpiod_direction_output_raw + gpiod_get_optional gpiod_get_raw_value + gpiod_get_value gpiod_get_value_cansleep gpiod_set_debounce + gpiod_set_raw_value + gpiod_set_raw_value_cansleep gpiod_set_value gpiod_set_value_cansleep gpiod_to_irq @@ -814,9 +857,9 @@ handle_level_irq handle_nested_irq handle_simple_irq - hashlen_string have_governor_per_policy hex_asc + hex_to_bin hrtimer_active hrtimer_cancel hrtimer_forward @@ -859,7 +902,9 @@ idr_find idr_for_each idr_get_next + idr_preload idr_remove + idr_replace iio_alloc_pollfunc iio_buffer_init iio_buffer_put @@ -897,6 +942,7 @@ input_set_abs_params input_set_capability input_unregister_device + iomem_resource iommu_alloc_resv_region iommu_device_register 
iommu_device_sysfs_add @@ -942,10 +988,12 @@ irq_set_parent irq_to_desc irq_work_queue + irq_work_run irq_work_sync is_dma_buf_file is_vmalloc_addr iterate_fd + jiffies_64_to_clock_t jiffies jiffies_to_msecs jiffies_to_usecs @@ -958,7 +1006,10 @@ kernel_power_off kernel_restart kernel_sigaction + kernfs_find_and_get_ns + kernfs_notify kernfs_path_from_node + kernfs_put kern_mount kern_unmount key_create_or_update @@ -967,6 +1018,7 @@ __kfifo_alloc __kfifo_free __kfifo_in + __kfifo_init __kfifo_out __kfifo_to_user kfree @@ -997,10 +1049,12 @@ kobject_uevent kobject_uevent_env krealloc + ksize kstrdup kstrdup_const kstrndup kstrtobool + kstrtobool_from_user kstrtoint kstrtoint_from_user kstrtol_from_user @@ -1033,6 +1087,7 @@ ktime_get ktime_get_coarse_with_offset ktime_get_mono_fast_ns + ktime_get_raw ktime_get_raw_ts64 ktime_get_real_ts64 ktime_get_seconds @@ -1063,12 +1118,11 @@ __log_read_mmio log_threaded_irq_wakeup_reason __log_write_mmio + loops_per_jiffy lzo1x_1_compress lzo1x_decompress_safe lzorle1x_1_compress - match_hex - match_int - match_token + match_string mbox_chan_received_data mbox_client_txdone mbox_controller_register @@ -1105,6 +1159,7 @@ memcpy __memcpy_fromio __memcpy_toio + memdup_user memmove memory_read_from_buffer memparse @@ -1138,24 +1193,33 @@ mmc_add_host mmc_alloc_host mmc_can_gpio_cd + mmc_cmdq_disable + mmc_cmdq_enable mmc_cqe_request_done mmc_detect_change mmc_free_host + mmc_get_card mmc_gpio_get_cd mmc_gpio_get_ro mmc_hw_reset mmc_of_parse + mmc_put_card mmc_regulator_get_supply mmc_regulator_set_ocr mmc_regulator_set_vqmmc mmc_remove_host mmc_request_done mmc_send_tuning + mmc_set_data_timeout + mmc_switch + mmc_wait_for_req + __mmdrop mmput mod_delayed_work_on mod_timer mod_timer_pending module_layout + module_put __msecs_to_jiffies msleep msleep_interruptible @@ -1172,6 +1236,8 @@ __napi_schedule napi_schedule_prep nd_tbl + neigh_destroy + neigh_lookup netdev_alloc_frag __netdev_alloc_skb netdev_err @@ -1189,6 +1255,7 @@ netif_tx_wake_queue netlink_broadcast __netlink_kernel_create + netlink_kernel_release netlink_register_notifier netlink_unicast netlink_unregister_notifier @@ -1210,6 +1277,7 @@ nr_cpu_ids nsecs_to_jiffies ns_to_timespec64 + n_tty_ioctl_helper __num_online_cpus nvmem_cell_get nvmem_cell_put @@ -1230,6 +1298,7 @@ of_clk_src_simple_get of_count_phandle_with_args of_cpu_node_to_id + of_devfreq_cooling_register_power of_device_get_match_data of_device_is_available of_device_is_compatible @@ -1242,6 +1311,7 @@ of_find_backlight_by_node of_find_compatible_node of_find_device_by_node + of_find_i2c_device_by_node of_find_matching_node_and_match of_find_node_by_name of_find_node_by_phandle @@ -1289,6 +1359,7 @@ of_property_read_string_helper of_property_read_u32_index of_property_read_u64 + of_property_read_u64_index of_property_read_variable_u16_array of_property_read_variable_u32_array of_property_read_variable_u64_array @@ -1298,6 +1369,7 @@ of_remove_property of_reserved_mem_device_init_by_idx of_reserved_mem_lookup + of_root of_thermal_get_trip_points of_translate_address on_each_cpu @@ -1314,11 +1386,15 @@ param_get_uint param_get_ulong param_ops_bool + param_ops_byte param_ops_charp param_ops_int + param_ops_long + param_ops_string param_ops_uint param_set_bool param_set_charp + param_set_int param_set_uint param_set_ulong pause_cpus @@ -1329,6 +1405,7 @@ perf_event_enable perf_event_release_kernel perf_event_update_userpage + perf_num_counters perf_pmu_migrate_context perf_pmu_register perf_pmu_unregister @@ -1370,12 +1447,14 @@ 
platform_bus_type platform_device_add platform_device_add_data + platform_device_add_resources platform_device_alloc platform_device_del platform_device_put platform_device_register platform_device_register_full platform_device_unregister + __platform_driver_probe __platform_driver_register platform_driver_unregister platform_find_device_by_driver @@ -1415,22 +1494,28 @@ power_supply_get_by_name power_supply_get_drvdata power_supply_get_property + power_supply_is_system_supplied power_supply_put power_supply_register power_supply_reg_notifier power_supply_set_property + power_supply_unreg_notifier prandom_bytes prandom_u32 preempt_schedule preempt_schedule_notrace + prepare_to_wait prepare_to_wait_event print_hex_dump printk printk_deferred proc_create proc_create_data + proc_create_seq_private proc_create_single_data + proc_dointvec_minmax proc_mkdir + proc_mkdir_data proc_remove proc_set_user pskb_expand_head @@ -1480,6 +1565,8 @@ rb_first rb_insert_color rb_next + rb_prev + rb_replace_node rcu_barrier rcu_idle_enter rcu_idle_exit @@ -1513,6 +1600,7 @@ __register_rpmsg_driver register_shrinker register_syscore_ops + register_sysctl_table register_virtio_device register_virtio_driver regmap_bulk_read @@ -1551,6 +1639,7 @@ regulator_set_active_discharge_regmap regulator_set_current_limit regulator_set_current_limit_regmap + regulator_set_load regulator_set_mode regulator_set_voltage regulator_set_voltage_sel_regmap @@ -1559,6 +1648,7 @@ regulator_sync_voltage release_firmware release_pages + __release_region remap_pfn_range remap_vmalloc_range remove_proc_entry @@ -1567,6 +1657,7 @@ request_firmware request_firmware_nowait __request_percpu_irq + __request_region request_threaded_irq reset_control_assert reset_control_deassert @@ -1588,7 +1679,6 @@ __rht_bucket_nested rht_bucket_nested rht_bucket_nested_insert - root_task_group round_jiffies round_jiffies_relative round_jiffies_up @@ -1626,10 +1716,13 @@ __sbitmap_queue_get sched_clock sched_feat_keys + sched_feat_names sched_setattr_nocheck + sched_set_fifo sched_set_normal sched_setscheduler sched_setscheduler_nocheck + sched_show_task sched_uclamp_used schedule schedule_timeout @@ -1649,7 +1742,26 @@ scsi_normalize_sense scsi_print_sense_hdr scsi_unblock_requests + sdio_claim_host + sdio_claim_irq + sdio_disable_func + sdio_enable_func + sdio_f0_readb + sdio_f0_writeb + sdio_get_host_pm_caps + sdio_readb + sdio_readl + sdio_readsb + sdio_register_driver + sdio_release_host + sdio_release_irq + sdio_set_block_size + sdio_set_host_pm_flags sdio_signal_irq + sdio_unregister_driver + sdio_writeb + sdio_writel + sdio_writesb send_sig seq_hex_dump seq_lseek @@ -1661,6 +1773,7 @@ seq_read seq_release seq_release_private + seq_vprintf seq_write serial8250_do_set_termios serial8250_do_shutdown @@ -1673,6 +1786,7 @@ serial8250_suspend_port serial8250_unregister_port set_cpus_allowed_ptr + set_freezable set_normalized_timespec64 set_page_dirty_lock __SetPageMovable @@ -1680,6 +1794,8 @@ set_user_nice sg_alloc_table sg_alloc_table_from_pages + sg_copy_from_buffer + sg_copy_to_buffer sg_free_table sg_init_one sg_init_table @@ -1689,13 +1805,16 @@ sg_next __sg_page_iter_next __sg_page_iter_start + shmem_file_setup si_mem_available + si_meminfo simple_attr_open simple_attr_read simple_attr_release simple_attr_write simple_open simple_read_from_buffer + simple_strtol simple_write_to_buffer single_open single_release @@ -1717,14 +1836,18 @@ skb_queue_head skb_queue_purge skb_queue_tail + skb_realloc_headroom skb_trim smp_call_function + 
smp_call_function_single snd_card_add_dev_attr snd_ctl_boolean_mono_info snd_jack_set_key snd_pcm_format_physical_width snd_pcm_format_width snd_pcm_hw_constraint_integer + snd_pcm_hw_constraint_list + snd_pcm_hw_constraint_mask64 snd_pcm_hw_constraint_minmax snd_pcm_hw_constraint_step snd_pcm_lib_free_pages @@ -1740,6 +1863,7 @@ snd_soc_component_exit_regmap snd_soc_component_init_regmap snd_soc_component_read + snd_soc_component_set_jack snd_soc_component_update_bits snd_soc_component_write snd_soc_dai_set_sysclk @@ -1756,6 +1880,7 @@ snd_soc_dapm_put_pin_switch snd_soc_dapm_put_volsw snd_soc_dapm_sync + snd_soc_find_dai snd_soc_get_volsw snd_soc_info_enum_double snd_soc_info_volsw @@ -1781,6 +1906,7 @@ __spi_register_driver spi_setup spi_sync + split_page spmi_controller_add spmi_controller_alloc spmi_controller_remove @@ -1793,6 +1919,8 @@ spmi_register_write spmi_register_zero_write sprintf + sprint_symbol + sprint_symbol_no_offset srcu_init_notifier_head srcu_notifier_call_chain srcu_notifier_chain_register @@ -1802,6 +1930,8 @@ __stack_chk_fail __stack_chk_guard stack_trace_save + static_key_disable_cpuslocked + static_key_enable_cpuslocked static_key_slow_dec static_key_slow_inc stop_one_cpu_nowait @@ -1841,11 +1971,14 @@ syscon_node_to_regmap syscon_regmap_lookup_by_compatible syscon_regmap_lookup_by_phandle + sysctl_sched_features sysfs_create_bin_file sysfs_create_file_ns sysfs_create_group sysfs_create_link __sysfs_match_string + sysfs_merge_group + sysfs_notify sysfs_remove_bin_file sysfs_remove_file_ns sysfs_remove_group @@ -1855,6 +1988,7 @@ system_freezable_wq system_freezing_cnt system_highpri_wq + system_long_wq system_power_efficient_wq system_state system_unbound_wq @@ -1866,12 +2000,13 @@ tasklet_kill __tasklet_schedule tasklet_setup - tasklist_lock + task_may_not_preempt __task_pid_nr_ns __task_rq_lock task_sched_runtime thermal_cooling_device_unregister thermal_of_cooling_device_register + thermal_zone_device_update thermal_zone_get_temp thermal_zone_get_zone_by_name tick_nohz_get_idle_calls_cpu @@ -1903,10 +2038,11 @@ __traceiter_android_rvh_rtmutex_prepare_setprio __traceiter_android_rvh_sched_newidle_balance __traceiter_android_rvh_select_task_rq_fair + __traceiter_android_rvh_select_task_rq_rt __traceiter_android_rvh_setscheduler __traceiter_android_rvh_set_user_nice __traceiter_android_rvh_tick_entry - __traceiter_android_rvh_uclamp_eff_get + __traceiter_android_rvh_update_cpu_capacity __traceiter_android_rvh_v4l2subdev_set_fmt __traceiter_android_rvh_v4l2subdev_set_frame_interval __traceiter_android_rvh_v4l2subdev_set_selection @@ -1916,6 +2052,7 @@ __traceiter_android_vh_binder_restore_priority __traceiter_android_vh_binder_set_priority __traceiter_android_vh_binder_transaction_init + __traceiter_android_vh_cgroup_attach __traceiter_android_vh_cgroup_set_task __traceiter_android_vh_check_bpf_syscall __traceiter_android_vh_check_file_open @@ -1923,18 +2060,15 @@ __traceiter_android_vh_clear_mask_adjust __traceiter_android_vh_clear_reserved_fmt_fields __traceiter_android_vh_commit_creds - __traceiter_android_vh_em_cpu_energy __traceiter_android_vh_exit_creds __traceiter_android_vh_fill_ext_fmtdesc __traceiter_android_vh_finish_update_load_avg_se __traceiter_android_vh_freq_qos_add_request __traceiter_android_vh_freq_qos_update_request - __traceiter_android_vh_freq_qos_remove_request - __traceiter_android_vh_iommu_alloc_iova __traceiter_android_vh_iommu_iovad_alloc_iova - __traceiter_android_vh_iommu_free_iova __traceiter_android_vh_iommu_iovad_free_iova 
__traceiter_android_vh_ipv6_gen_linklocal_addr + __traceiter_android_vh_is_fpsimd_save __traceiter_android_vh_logbuf __traceiter_android_vh_override_creds __traceiter_android_vh_prepare_update_load_avg_se @@ -1943,7 +2077,6 @@ __traceiter_android_vh_rwsem_wake __traceiter_android_vh_rwsem_write_finished __traceiter_android_vh_scheduler_tick - __traceiter_android_vh_scmi_timeout_sync __traceiter_android_vh_selinux_avc_insert __traceiter_android_vh_selinux_avc_lookup __traceiter_android_vh_selinux_avc_node_delete @@ -1956,11 +2089,15 @@ __traceiter_android_vh_set_module_permit_after_init __traceiter_android_vh_set_module_permit_before_init __traceiter_android_vh_set_wake_flags - __traceiter_android_vh_snd_soc_card_get_comp_chain + __traceiter_android_vh_show_resume_epoch_val + __traceiter_android_vh_show_suspend_epoch_val + __traceiter_android_vh_snd_compr_use_pause_in_drain + __traceiter_android_vh_sound_usb_support_cpu_suspend __traceiter_android_vh_syscall_prctl_finished __traceiter_android_vh_ufs_send_command __traceiter_android_vh_ufs_send_tm_command __traceiter_cpu_frequency + __traceiter_gpu_mem_total __traceiter_pelt_se_tp __traceiter_rwmmio_post_read __traceiter_rwmmio_read @@ -1968,6 +2105,7 @@ __traceiter_sched_update_nr_running_tp __traceiter_task_newtask __traceiter_xhci_urb_giveback + trace_output_call __tracepoint_android_rvh_after_enqueue_task __tracepoint_android_rvh_cpu_overutilized __tracepoint_android_rvh_dequeue_task @@ -1983,10 +2121,11 @@ __tracepoint_android_rvh_rtmutex_prepare_setprio __tracepoint_android_rvh_sched_newidle_balance __tracepoint_android_rvh_select_task_rq_fair + __tracepoint_android_rvh_select_task_rq_rt __tracepoint_android_rvh_setscheduler __tracepoint_android_rvh_set_user_nice __tracepoint_android_rvh_tick_entry - __tracepoint_android_rvh_uclamp_eff_get + __tracepoint_android_rvh_update_cpu_capacity __tracepoint_android_rvh_v4l2subdev_set_fmt __tracepoint_android_rvh_v4l2subdev_set_frame_interval __tracepoint_android_rvh_v4l2subdev_set_selection @@ -1996,6 +2135,7 @@ __tracepoint_android_vh_binder_restore_priority __tracepoint_android_vh_binder_set_priority __tracepoint_android_vh_binder_transaction_init + __tracepoint_android_vh_cgroup_attach __tracepoint_android_vh_cgroup_set_task __tracepoint_android_vh_check_bpf_syscall __tracepoint_android_vh_check_file_open @@ -2003,18 +2143,15 @@ __tracepoint_android_vh_clear_mask_adjust __tracepoint_android_vh_clear_reserved_fmt_fields __tracepoint_android_vh_commit_creds - __tracepoint_android_vh_em_cpu_energy __tracepoint_android_vh_exit_creds __tracepoint_android_vh_fill_ext_fmtdesc __tracepoint_android_vh_finish_update_load_avg_se __tracepoint_android_vh_freq_qos_add_request __tracepoint_android_vh_freq_qos_update_request - __tracepoint_android_vh_freq_qos_remove_request - __tracepoint_android_vh_iommu_alloc_iova __tracepoint_android_vh_iommu_iovad_alloc_iova - __tracepoint_android_vh_iommu_free_iova __tracepoint_android_vh_iommu_iovad_free_iova __tracepoint_android_vh_ipv6_gen_linklocal_addr + __tracepoint_android_vh_is_fpsimd_save __tracepoint_android_vh_logbuf __tracepoint_android_vh_override_creds __tracepoint_android_vh_prepare_update_load_avg_se @@ -2023,7 +2160,6 @@ __tracepoint_android_vh_rwsem_wake __tracepoint_android_vh_rwsem_write_finished __tracepoint_android_vh_scheduler_tick - __tracepoint_android_vh_scmi_timeout_sync __tracepoint_android_vh_selinux_avc_insert __tracepoint_android_vh_selinux_avc_lookup __tracepoint_android_vh_selinux_avc_node_delete @@ -2036,11 +2172,15 @@ 
__tracepoint_android_vh_set_module_permit_after_init __tracepoint_android_vh_set_module_permit_before_init __tracepoint_android_vh_set_wake_flags - __tracepoint_android_vh_snd_soc_card_get_comp_chain + __tracepoint_android_vh_show_resume_epoch_val + __tracepoint_android_vh_show_suspend_epoch_val + __tracepoint_android_vh_snd_compr_use_pause_in_drain + __tracepoint_android_vh_sound_usb_support_cpu_suspend __tracepoint_android_vh_syscall_prctl_finished __tracepoint_android_vh_ufs_send_command __tracepoint_android_vh_ufs_send_tm_command __tracepoint_cpu_frequency + __tracepoint_gpu_mem_total __tracepoint_pelt_se_tp tracepoint_probe_register tracepoint_probe_unregister @@ -2052,7 +2192,7 @@ __tracepoint_task_newtask __tracepoint_xhci_urb_giveback trace_print_array_seq - trace_print_hex_seq + trace_print_flags_seq trace_print_symbols_seq __trace_puts trace_raw_output_prep @@ -2060,12 +2200,15 @@ trace_seq_putc trace_set_clr_event tracing_off + try_module_get try_wait_for_completion + tty_driver_flush_buffer tty_flip_buffer_push tty_insert_flip_string_fixed_flag + tty_register_ldisc tty_termios_baud_rate tty_termios_encode_baud_rate - typec_get_drvdata + tty_unregister_ldisc typec_mux_get_drvdata typec_mux_register typec_mux_set @@ -2082,6 +2225,7 @@ typec_switch_register typec_switch_unregister typec_unregister_partner + typec_unregister_port uart_get_baud_rate uart_get_divisor uart_update_timeout @@ -2113,7 +2257,9 @@ ufshcd_release ufshcd_remove ufshcd_uic_hibern8_exit + ufshcd_update_evt_hist unlock_page + unmap_mapping_range unpin_user_page unpin_user_pages unregister_blkdev @@ -2135,6 +2281,7 @@ unregister_reboot_notifier unregister_rpmsg_driver unregister_shrinker + unregister_sysctl_table unregister_virtio_device unregister_virtio_driver up @@ -2146,6 +2293,8 @@ usb_add_function usb_add_gadget_udc usb_add_hcd + usb_add_phy_dev + usb_assign_descriptors usb_composite_probe usb_composite_unregister usb_copy_descriptors @@ -2164,6 +2313,7 @@ usb_ep_queue usb_ep_set_halt usb_ep_set_maxpacket_limit + usb_free_all_descriptors usb_function_register usb_function_unregister usb_gadget_connect @@ -2178,8 +2328,15 @@ usb_get_function usb_get_function_instance usb_get_maximum_speed + usb_gstrings_attach + usb_hcd_check_unlink_urb + usb_hcd_giveback_urb usb_hcd_is_primary_hcd + usb_hcd_link_urb_to_ep usb_hcd_poll_rh_status + usb_hcd_resume_root_hub + usb_hcd_unlink_urb_from_ep + usb_hcd_unmap_urb_for_dma usb_interface_id usbnet_change_mtu usbnet_disconnect @@ -2209,12 +2366,15 @@ usbnet_write_cmd_async usbnet_write_cmd_nopm usb_os_desc_prepare_interf_dir + usb_otg_state_string + usb_phy_set_charger_current usb_put_function usb_put_function_instance usb_put_hcd usb_register_driver usb_remove_function usb_remove_hcd + usb_remove_phy usb_role_switch_get usb_role_switch_get_drvdata usb_role_switch_register @@ -2354,7 +2514,6 @@ virtqueue_detach_unused_buf virtqueue_get_buf virtqueue_get_vring_size - virtqueue_kick virtqueue_kick_prepare virtqueue_notify vmalloc @@ -2363,10 +2522,15 @@ vmalloc_user vmap vm_event_states + vmf_insert_mixed + vmf_insert_pfn_prot + vm_get_page_prot + vm_insert_page vm_map_ram vm_node_stat vm_unmap_ram vm_zone_stat + vprintk vring_del_virtqueue vring_interrupt vring_new_virtqueue @@ -2381,6 +2545,7 @@ wait_for_completion_interruptible_timeout wait_for_completion_io_timeout wait_for_completion_killable + wait_for_completion_killable_timeout wait_for_completion_timeout wait_woken __wake_up @@ -2399,7 +2564,6 @@ woken_wake_function work_busy work_on_cpu - ww_mutex_lock 
ww_mutex_unlock xhci_add_endpoint xhci_check_bandwidth @@ -2409,143 +2573,77 @@ xhci_get_ep_ctx xhci_init_driver xhci_reset_bandwidth + zlib_deflate + zlib_deflateEnd + zlib_deflateInit2 + zlib_deflateReset + zlib_deflate_workspacesize # preserved by --additions-only - all_vm_events - __arch_clear_user - __bitmap_equal - __bitmap_or blk_insert_cloned_request - cache_line_size - cgroup_taskset_first - cgroup_taskset_next class_create_file_ns class_remove_file_ns - clear_page - cpufreq_update_util_data - cpu_pm_register_notifier - cpu_pm_unregister_notifier + console_unlock debug_locks_off - devfreq_add_device - devfreq_cooling_unregister - devfreq_register_opp_notifier - devfreq_unregister_opp_notifier - dev_get_by_name + dev_change_flags devm_of_pwm_get - devm_rc_allocate_device - devm_rc_register_device - dev_pm_opp_find_freq_exact - dev_pm_opp_put_regulators - dev_pm_opp_set_regulators - dma_buf_mmap - dma_fence_get_status - dma_fence_remove_callback - dma_heap_buffer_free - dma_sync_single_for_device - downgrade_write - down_read_trylock drm_gem_private_object_init - get_user_pages - get_user_pages_fast - gpiod_set_raw_value + hashlen_string hci_alloc_dev hci_free_dev hci_recv_frame hci_register_dev hci_unregister_dev hex_dump_to_buffer - iomem_resource - irq_work_run - jiffies_64_to_clock_t - __kfifo_init kset_find_obj - kstrtobool_from_user - ktime_get_raw led_classdev_unregister - memdup_user - __mmdrop - module_put - netlink_kernel_release + match_hex + match_int + match_token nla_put_nohdr - n_tty_ioctl_helper - of_devfreq_cooling_register_power - of_root - param_ops_byte - param_ops_string - perf_num_counters pin_user_pages_remote - rb_prev - rb_replace_node - __release_region - __request_region + root_task_group schedutil_cpu_util - sdio_claim_host - sdio_claim_irq - sdio_disable_func - sdio_enable_func - sdio_f0_readb - sdio_f0_writeb - sdio_get_host_pm_caps - sdio_readb - sdio_readl - sdio_readsb - sdio_register_driver - sdio_release_host - sdio_release_irq - sdio_set_block_size - sdio_set_host_pm_flags - sdio_unregister_driver - sdio_writeb - sdio_writel - sdio_writesb send_sig_info - shmem_file_setup - si_meminfo skb_pull_rcsum - skb_realloc_headroom - smp_call_function_single snd_soc_component_test_bits - sprint_symbol_no_offset strpbrk strspn syscore_resume syscore_suspend - system_long_wq - thermal_zone_device_update + tasklist_lock __traceiter_android_rvh_sched_rebalance_domains - __traceiter_android_vh_cgroup_attach - __traceiter_android_vh_is_fpsimd_save + __traceiter_android_rvh_uclamp_eff_get + __traceiter_android_vh_em_cpu_energy + __traceiter_android_vh_freq_qos_remove_request + __traceiter_android_vh_iommu_alloc_iova + __traceiter_android_vh_iommu_free_iova __traceiter_android_vh_media_device_setup_link + __traceiter_android_vh_scmi_timeout_sync + __traceiter_android_vh_snd_soc_card_get_comp_chain __traceiter_android_vh_v4l2subdev_set_fmt __traceiter_android_vh_v4l2subdev_set_frame_interval __traceiter_android_vh_v4l2subdev_set_selection - __traceiter_gpu_mem_total - trace_output_call __tracepoint_android_rvh_sched_rebalance_domains - __tracepoint_android_vh_cgroup_attach - __tracepoint_android_vh_is_fpsimd_save + __tracepoint_android_rvh_uclamp_eff_get + __tracepoint_android_vh_em_cpu_energy + __tracepoint_android_vh_freq_qos_remove_request + __tracepoint_android_vh_iommu_alloc_iova + __tracepoint_android_vh_iommu_free_iova __tracepoint_android_vh_media_device_setup_link + __tracepoint_android_vh_scmi_timeout_sync + 
__tracepoint_android_vh_snd_soc_card_get_comp_chain __tracepoint_android_vh_ufs_update_sdev __tracepoint_android_vh_v4l2subdev_set_fmt __tracepoint_android_vh_v4l2subdev_set_frame_interval __tracepoint_android_vh_v4l2subdev_set_selection - __tracepoint_gpu_mem_total - trace_print_flags_seq - try_module_get - tty_driver_flush_buffer - tty_register_ldisc - tty_unregister_ldisc + trace_print_hex_seq + typec_get_drvdata ufshcd_auto_hibern8_update ufshcd_shutdown - unmap_mapping_range unregister_syscore_ops v4l2_m2m_buf_remove_by_buf - vmf_insert_pfn_prot - wait_for_completion_killable_timeout + virtqueue_kick wireless_send_event + ww_mutex_lock ww_mutex_lock_interruptible - zlib_deflate - zlib_deflateEnd - zlib_deflateInit2 - zlib_deflateReset - zlib_deflate_workspacesize diff --git a/android/abi_gki_aarch64_qcom b/android/abi_gki_aarch64_qcom index 01c5fe8eea68..e26f08dfe1c4 100644 --- a/android/abi_gki_aarch64_qcom +++ b/android/abi_gki_aarch64_qcom @@ -549,6 +549,7 @@ divider_ro_round_rate_parent divider_round_rate_parent dma_alloc_attrs + dma_alloc_noncoherent dma_async_device_register dma_async_device_unregister dma_async_tx_descriptor_init @@ -584,6 +585,7 @@ dma_fence_signal_timestamp_locked dma_fence_wait_timeout dma_free_attrs + dma_free_noncoherent dma_get_sgtable_attrs dma_get_slave_channel dma_heap_add @@ -2512,6 +2514,7 @@ __traceiter_android_rvh_force_compatible_post __traceiter_android_rvh_force_compatible_pre __traceiter_android_rvh_gic_v3_set_affinity + __traceiter_android_rvh_iommu_setup_dma_ops __traceiter_android_rvh_irqs_disable __traceiter_android_rvh_irqs_enable __traceiter_android_rvh_migrate_queued_task @@ -2625,6 +2628,7 @@ __tracepoint_android_rvh_force_compatible_post __tracepoint_android_rvh_force_compatible_pre __tracepoint_android_rvh_gic_v3_set_affinity + __tracepoint_android_rvh_iommu_setup_dma_ops __tracepoint_android_rvh_irqs_disable __tracepoint_android_rvh_irqs_enable __tracepoint_android_rvh_migrate_queued_task diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 33b6734a8196..270c4471ce00 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -669,6 +669,49 @@ config ARM64_ERRATUM_1508412 If unsure, say Y. +config ARM64_ERRATUM_2051678 + bool "Cortex-A510: 2051678: disable Hardware Update of the page table's dirty bit" + help + This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678. + Affected Coretex-A510 might not respect the ordering rules for + hardware update of the page table's dirty bit. The workaround + is to not enable the feature on affected CPUs. + + If unsure, say Y. + +config ARM64_WORKAROUND_TSB_FLUSH_FAILURE + bool + +config ARM64_ERRATUM_2054223 + bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace" + default y + select ARM64_WORKAROUND_TSB_FLUSH_FAILURE + help + Enable workaround for ARM Cortex-A710 erratum 2054223 + + Affected cores may fail to flush the trace data on a TSB instruction, when + the PE is in trace prohibited state. This will cause losing a few bytes + of the trace cached. + + Workaround is to issue two TSB consecutively on affected cores. + + If unsure, say Y. + +config ARM64_ERRATUM_2067961 + bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace" + default y + select ARM64_WORKAROUND_TSB_FLUSH_FAILURE + help + Enable workaround for ARM Neoverse-N2 erratum 2067961 + + Affected cores may fail to flush the trace data on a TSB instruction, when + the PE is in trace prohibited state. 
This will cause losing a few bytes + of the trace cached. + + Workaround is to issue two TSB consecutively on affected cores. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig index 27133a411ce4..42faae4a74aa 100644 --- a/arch/arm64/configs/gki_defconfig +++ b/arch/arm64/configs/gki_defconfig @@ -8,6 +8,7 @@ CONFIG_TASK_IO_ACCOUNTING=y CONFIG_PSI=y CONFIG_RCU_EXPERT=y CONFIG_RCU_FAST_NO_HZ=y +CONFIG_RCU_BOOST=y CONFIG_RCU_NOCB_CPU=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index bffa8860af1c..e9eeeb14485b 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -23,7 +23,7 @@ #define dsb(opt) asm volatile("dsb " #opt : : : "memory") #define psb_csync() asm volatile("hint #17" : : : "memory") -#define tsb_csync() asm volatile("hint #18" : : : "memory") +#define __tsb_csync() asm volatile("hint #18" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") #define spec_bar() asm volatile(ALTERNATIVE("dsb nsh\nisb\n", \ @@ -50,6 +50,20 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) + +#define tsb_csync() \ + do { \ + /* \ + * CPUs affected by Arm Erratum 2054223 or 2067961 needs \ + * another TSB to ensure the trace is flushed. The barriers \ + * don't have to be strictly back to back, as long as the \ + * CPU is in trace prohibited state. \ + */ \ + if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \ + __tsb_csync(); \ + __tsb_csync(); \ + } while (0) + /* * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz * and 0 otherwise. diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h index 10a2d914d4ca..c0ff1d4bccab 100644 --- a/arch/arm64/include/asm/cpucaps.h +++ b/arch/arm64/include/asm/cpucaps.h @@ -69,6 +69,7 @@ #define ARM64_WORKAROUND_1508412 58 #define ARM64_HAS_LDAPR 59 #define ARM64_KVM_PROTECTED_MODE 60 +#define ARM64_WORKAROUND_TSB_FLUSH_FAILURE 61 /* kabi: reserve 62 - 76 for future cpu capabilities */ #define ARM64_NCAPS 76 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index ef5b040dee44..f9adcf962699 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -72,6 +72,9 @@ #define ARM_CPU_PART_CORTEX_A76 0xD0B #define ARM_CPU_PART_NEOVERSE_N1 0xD0C #define ARM_CPU_PART_CORTEX_A77 0xD0D +#define ARM_CPU_PART_CORTEX_A510 0xD46 +#define ARM_CPU_PART_CORTEX_A710 0xD47 +#define ARM_CPU_PART_NEOVERSE_N2 0xD49 #define APM_CPU_PART_POTENZA 0x000 @@ -109,6 +112,9 @@ #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) #define MIDR_NEOVERSE_N1 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N1) #define MIDR_CORTEX_A77 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A77) +#define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510) +#define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710) +#define MIDR_NEOVERSE_N2 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_NEOVERSE_N2) #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX) #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index 
a63428301f42..7c6d59c4f099 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -342,6 +342,18 @@ static const struct midr_range erratum_1463225[] = { }; #endif +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE +static const struct midr_range tsb_flush_fail_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2067961 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2054223 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -527,6 +539,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { 0, 0, 1, 0), }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE + { + .desc = "ARM erratum 2067961 or 2054223", + .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE, + ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus), + }, #endif { } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d9e5c16f2037..290bf2cba7ef 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -1599,6 +1599,9 @@ static bool cpu_has_broken_dbm(void) MIDR_ALL_VERSIONS(MIDR_CORTEX_A55), /* Kryo4xx Silver (rdpe => r1p0) */ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2051678 + MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2), #endif {}, }; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 6c39a61ec115..acc7e7647cbb 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -414,7 +414,6 @@ NOKPROBE_SYMBOL(do_undefinstr); void do_bti(struct pt_regs *regs) { - trace_android_rvh_do_bti(regs, user_mode(regs)); BUG_ON(!user_mode(regs)); force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0); } diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index c834a6445842..e75e5e75b192 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -53,6 +53,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, if (iommu) { iommu_setup_dma_ops(dev, dma_base, size); trace_android_vh_iommu_setup_dma_ops(dev, dma_base, size); + trace_android_rvh_iommu_setup_dma_ops(dev, dma_base, size); } #ifdef CONFIG_XEN diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c index f1e530388bd0..ef6b8e851608 100644 --- a/drivers/android/vendor_hooks.c +++ b/drivers/android/vendor_hooks.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include #include @@ -41,7 +40,6 @@ #include #include #include -#include #include #include #include @@ -60,7 +58,6 @@ #include #include #include -#include #include #include #include @@ -92,10 +89,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_prepare_prio_fork); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_alloc); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sk_free); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_nf_conn_alloc); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_nf_conn_free); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_arch_set_freq_scale); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_transaction_init); @@ -127,12 +120,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_group); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_resume); 
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_wq_lockup_pool); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_jiffies_update); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_gic_v3_set_affinity); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_v3_affinity_init); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_suspend_epoch_val); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_resume_epoch_val); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_max_freq); @@ -191,7 +182,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_map_util_freq); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_report_bug); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_em_cpu_energy); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_up); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_down); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_balance_rt); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_timer_calc_index); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_watchdog_timer_softlockup); @@ -202,6 +192,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_die_kernel_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sea); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_mem_abort); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sp_pc_abort); +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_setup_dma_ops); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_alloc_iova); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_iovad_alloc_iova); @@ -236,9 +227,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpufreq_transition); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_set_task); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cgroup_force_kthread_migration); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_create_worker); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_tick); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup_ignore); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_replace_next_task_fair); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sched_yield); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_binder_wait_for_work); @@ -248,7 +237,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alter_mutex_list_add); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mutex_unlock_slowpath); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rwsem_wake_finish); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_undefinstr); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_bti); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_ptrauth_fault); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_bad_mode); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_arm64_serror_panic); @@ -274,11 +262,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exit_mm); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_get_from_fragment_pool); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_exclude_reserved_zone); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_include_reserved_zone); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_pages_slowpath); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_mem); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_print_slabinfo_header); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_shrink_slab); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cache_show); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpci_override_toggling); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_typec_tcpci_chk_contaminant); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_typec_tcpci_get_vbus); @@ -316,7 +300,6 @@ 
EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_enqueue_task); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_dequeue_task); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_entity); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_entity); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_entity_tick); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task_fair); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sched_stat_runtime_rt); @@ -341,7 +324,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_vmalloc_stack); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_stack_hash); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_save_track_hash); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_vmpressure); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_task_comm); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_acct_update_power); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_typec_tcpm_log); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_media_device_setup_link); @@ -357,8 +339,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_v4l2subdev_set_frame_interval); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_v4l2subdev_set_frame_interval); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scmi_timeout_sync); EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_new_ilb); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_alloc_uid); -EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_user); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_add_request); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_update_request); EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_freq_qos_remove_request); diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 6fee02bf7b71..bfcf9e0415c2 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -39,6 +39,7 @@ #include "coresight-etm4x.h" #include "coresight-etm-perf.h" +#include "coresight-self-hosted-trace.h" static int boot_enable; module_param(boot_enable, int, 0444); @@ -236,6 +237,45 @@ struct etm4_enable_arg { int rc; }; +/* + * etm4x_prohibit_trace - Prohibit the CPU from tracing at all ELs. + * When the CPU supports FEAT_TRF, we could move the ETM to a trace + * prohibited state by filtering the Exception levels via TRFCR_EL1. + */ +static void etm4x_prohibit_trace(struct etmv4_drvdata *drvdata) +{ + /* If the CPU doesn't support FEAT_TRF, nothing to do */ + if (!drvdata->trfcr) + return; + cpu_prohibit_trace(); +} + +/* + * etm4x_allow_trace - Allow CPU tracing in the respective ELs, + * as configured by the drvdata->config.mode for the current + * session. Even though we have TRCVICTLR bits to filter the + * trace in the ELs, it doesn't prevent the ETM from generating + * a packet (e.g, TraceInfo) that might contain the addresses from + * the excluded levels. Thus we use the additional controls provided + * via the Trace Filtering controls (FEAT_TRF) to make sure no trace + * is generated for the excluded ELs. 
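 * Editor's note: in the body below, TRFCR_ELx_E0TRE enables trace at
 * EL0 (user) and TRFCR_ELx_ExTRE enables it at the kernel EL, so each
 * excluded mode simply has its enable bit cleared from the value that
 * cpu_detect_trace_filtering() computed at probe time.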
+ */ +static void etm4x_allow_trace(struct etmv4_drvdata *drvdata) +{ + u64 trfcr = drvdata->trfcr; + + /* If the CPU doesn't support FEAT_TRF, nothing to do */ + if (!trfcr) + return; + + if (drvdata->config.mode & ETM_MODE_EXCL_KERN) + trfcr &= ~TRFCR_ELx_ExTRE; + if (drvdata->config.mode & ETM_MODE_EXCL_USER) + trfcr &= ~TRFCR_ELx_E0TRE; + + write_trfcr(trfcr); +} + #ifdef CONFIG_ETM4X_IMPDEF_FEATURE #define HISI_HIP08_AMBA_ID 0x000b6d01 @@ -440,6 +480,7 @@ static int etm4_enable_hw(struct etmv4_drvdata *drvdata) if (etm4x_is_ete(drvdata)) etm4x_relaxed_write32(csa, TRCRSR_TA, TRCRSR); + etm4x_allow_trace(drvdata); /* Enable the trace unit */ etm4x_relaxed_write32(csa, 1, TRCPRGCTLR); @@ -723,7 +764,6 @@ static int etm4_enable(struct coresight_device *csdev, static void etm4_disable_hw(void *info) { u32 control; - u64 trfcr; struct etmv4_drvdata *drvdata = info; struct etmv4_config *config = &drvdata->config; struct coresight_device *csdev = drvdata->csdev; @@ -750,12 +790,7 @@ static void etm4_disable_hw(void *info) * If the CPU supports v8.4 Trace filter Control, * set the ETM to trace prohibited region. */ - if (drvdata->trfc) { - trfcr = read_sysreg_s(SYS_TRFCR_EL1); - write_sysreg_s(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE), - SYS_TRFCR_EL1); - isb(); - } + etm4x_prohibit_trace(drvdata); /* * Make sure everything completes before disabling, as recommended * by section 7.3.77 ("TRCVICTLR, ViewInst Main Control Register, @@ -771,9 +806,6 @@ static void etm4_disable_hw(void *info) if (coresight_timeout(csa, TRCSTATR, TRCSTATR_PMSTABLE_BIT, 1)) dev_err(etm_dev, "timeout while waiting for PM stable Trace Status\n"); - if (drvdata->trfc) - write_sysreg_s(trfcr, SYS_TRFCR_EL1); - /* read the status of the single shot comparators */ for (i = 0; i < drvdata->nr_ss_cmp; i++) { config->ss_status[i] = @@ -968,15 +1000,15 @@ static bool etm4_init_csdev_access(struct etmv4_drvdata *drvdata, return false; } -static void cpu_enable_tracing(struct etmv4_drvdata *drvdata) +static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata) { u64 dfr0 = read_sysreg(id_aa64dfr0_el1); u64 trfcr; + drvdata->trfcr = 0; if (!cpuid_feature_extract_unsigned_field(dfr0, ID_AA64DFR0_TRACE_FILT_SHIFT)) return; - drvdata->trfc = true; /* * If the CPU supports v8.4 SelfHosted Tracing, enable * tracing at the kernel EL and EL0, forcing to use the @@ -990,7 +1022,7 @@ static void cpu_enable_tracing(struct etmv4_drvdata *drvdata) if (is_kernel_in_hyp_mode()) trfcr |= TRFCR_EL2_CX; - write_sysreg_s(trfcr, SYS_TRFCR_EL1); + drvdata->trfcr = trfcr; } static void etm4_init_arch_data(void *info) @@ -1176,7 +1208,7 @@ static void etm4_init_arch_data(void *info) /* NUMCNTR, bits[30:28] number of counters available for tracing */ drvdata->nr_cntr = BMVAL(etmidr5, 28, 30); etm4_cs_lock(drvdata, csa); - cpu_enable_tracing(drvdata); + cpu_detect_trace_filtering(drvdata); } static inline u32 etm4_get_victlr_access_type(struct etmv4_config *config) @@ -1528,7 +1560,7 @@ static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) drvdata->trcid = coresight_get_trace_id(drvdata->cpu); } -static int etm4_cpu_save(struct etmv4_drvdata *drvdata) +static int __etm4_cpu_save(struct etmv4_drvdata *drvdata) { int i, ret = 0; struct etmv4_save_state *state; @@ -1667,7 +1699,23 @@ out: return ret; } -static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) +static int etm4_cpu_save(struct etmv4_drvdata *drvdata) +{ + int ret = 0; + + /* Save the TRFCR irrespective of whether the ETM is ON */ + if (drvdata->trfcr) + 
drvdata->save_trfcr = read_trfcr(); + /* + * Save and restore the ETM Trace registers only if + * the ETM is active. + */ + if (local_read(&drvdata->mode) && drvdata->save_state) + ret = __etm4_cpu_save(drvdata); + return ret; +} + +static void __etm4_cpu_restore(struct etmv4_drvdata *drvdata) { int i; struct etmv4_save_state *state = drvdata->save_state; @@ -1763,6 +1811,14 @@ static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) etm4_cs_lock(drvdata, csa); } +static void etm4_cpu_restore(struct etmv4_drvdata *drvdata) +{ + if (drvdata->trfcr) + write_trfcr(drvdata->save_trfcr); + if (drvdata->state_needs_restore) + __etm4_cpu_restore(drvdata); +} + static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, void *v) { @@ -1774,23 +1830,17 @@ static int etm4_cpu_pm_notify(struct notifier_block *nb, unsigned long cmd, drvdata = etmdrvdata[cpu]; - if (!drvdata->save_state) - return NOTIFY_OK; - if (WARN_ON_ONCE(drvdata->cpu != cpu)) return NOTIFY_BAD; switch (cmd) { case CPU_PM_ENTER: - /* save the state if self-hosted coresight is in use */ - if (local_read(&drvdata->mode)) - if (etm4_cpu_save(drvdata)) - return NOTIFY_BAD; + if (etm4_cpu_save(drvdata)) + return NOTIFY_BAD; break; case CPU_PM_EXIT: case CPU_PM_ENTER_FAILED: - if (drvdata->state_needs_restore) - etm4_cpu_restore(drvdata); + etm4_cpu_restore(drvdata); break; default: return NOTIFY_DONE; diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index e5b79bdb9851..3c4d69b096ca 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -919,8 +919,12 @@ struct etmv4_save_state { * @nooverflow: Indicate if overflow prevention is supported. * @atbtrig: If the implementation can support ATB triggers * @lpoverride: If the implementation can support low-power state over. - * @trfc: If the implementation supports Arm v8.4 trace filter controls. + * @trfcr: If the CPU supports FEAT_TRF, value of the TRFCR_ELx that + * allows tracing at all ELs. We don't want to compute this + * at runtime, due to the additional setting of TRFCR_CX when + * in EL2. Otherwise, 0. * @config: structure holding configuration parameters. + * @save_trfcr: Saved TRFCR_EL1 register during a CPU PM event. * @save_state: State to be preserved across power loss * @state_needs_restore: True when there is context to restore after PM exit * @skip_power_up: Indicates if an implementation can skip powering up @@ -971,8 +975,9 @@ struct etmv4_drvdata { bool nooverflow; bool atbtrig; bool lpoverride; - bool trfc; + u64 trfcr; struct etmv4_config config; + u64 save_trfcr; struct etmv4_save_state *save_state; bool state_needs_restore; bool skip_power_up; diff --git a/drivers/hwtracing/coresight/coresight-self-hosted-trace.h b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h new file mode 100644 index 000000000000..23f05df3f173 --- /dev/null +++ b/drivers/hwtracing/coresight/coresight-self-hosted-trace.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Arm v8 Self-Hosted trace support. + * + * Copyright (C) 2021 ARM Ltd. 
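 * Editor's note: this header collects the small TRFCR_EL1 helpers used
 * by the ETM4x changes in this patch (read_trfcr(), write_trfcr() and
 * cpu_prohibit_trace()); write_trfcr() includes an ISB so that a
 * filtering change takes effect before tracing continues.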
+ */ + +#ifndef __CORESIGHT_SELF_HOSTED_TRACE_H +#define __CORESIGHT_SELF_HOSTED_TRACE_H + +#include + +static inline u64 read_trfcr(void) +{ + return read_sysreg_s(SYS_TRFCR_EL1); +} + +static inline void write_trfcr(u64 val) +{ + write_sysreg_s(val, SYS_TRFCR_EL1); + isb(); +} + +static inline void cpu_prohibit_trace(void) +{ + u64 trfcr = read_trfcr(); + + /* Prohibit tracing at EL0 & the kernel EL */ + write_trfcr(trfcr & ~(TRFCR_ELx_ExTRE | TRFCR_ELx_E0TRE)); +} +#endif /* __CORESIGHT_SELF_HOSTED_TRACE_H */ diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index ea5027e397d0..ec092b605c63 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -609,8 +609,9 @@ static int tmc_etr_alloc_flat_buf(struct tmc_drvdata *drvdata, if (!flat_buf) return -ENOMEM; - flat_buf->vaddr = dma_alloc_coherent(real_dev, etr_buf->size, - &flat_buf->daddr, GFP_KERNEL); + flat_buf->vaddr = dma_alloc_noncoherent(real_dev, etr_buf->size, + &flat_buf->daddr, + DMA_FROM_DEVICE, GFP_KERNEL); if (!flat_buf->vaddr) { kfree(flat_buf); return -ENOMEM; @@ -631,14 +632,18 @@ static void tmc_etr_free_flat_buf(struct etr_buf *etr_buf) if (flat_buf && flat_buf->daddr) { struct device *real_dev = flat_buf->dev->parent; - dma_free_coherent(real_dev, flat_buf->size, - flat_buf->vaddr, flat_buf->daddr); + dma_free_noncoherent(real_dev, etr_buf->size, + flat_buf->vaddr, flat_buf->daddr, + DMA_FROM_DEVICE); } kfree(flat_buf); } static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp) { + struct etr_flat_buf *flat_buf = etr_buf->private; + struct device *real_dev = flat_buf->dev->parent; + /* * Adjust the buffer to point to the beginning of the trace data * and update the available trace data. @@ -648,6 +653,19 @@ static void tmc_etr_sync_flat_buf(struct etr_buf *etr_buf, u64 rrp, u64 rwp) etr_buf->len = etr_buf->size; else etr_buf->len = rwp - rrp; + + /* + * The driver always starts tracing at the beginning of the buffer, + * the only reason why we would get a wrap around is when the buffer + * is full. Sync the entire buffer in one go for this case. + */ + if (etr_buf->offset + etr_buf->len > etr_buf->size) + dma_sync_single_for_cpu(real_dev, flat_buf->daddr, + etr_buf->size, DMA_FROM_DEVICE); + else + dma_sync_single_for_cpu(real_dev, + flat_buf->daddr + etr_buf->offset, + etr_buf->len, DMA_FROM_DEVICE); } static ssize_t tmc_etr_get_data_flat_buf(struct etr_buf *etr_buf, @@ -1563,6 +1581,14 @@ tmc_update_etr_buffer(struct coresight_device *csdev, */ if (etr_perf->snapshot) handle->head += size; + + /* + * Ensure that the AUX trace data is visible before the aux_head + * is updated via perf_aux_output_end(), as expected by the + * perf ring buffer. 
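 * Editor's note: perf_aux_output_end() publishes the new aux_head that
 * the consumer reads to learn how much trace data is available, so the
 * smp_wmb() below orders the buffer contents (including the CPU syncs
 * done in tmc_etr_sync_flat_buf()) before that head update; the
 * consumer side is expected to pair this with a read barrier.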
+ */ + smp_wmb(); + out: /* * Don't set the TRUNCATED flag in snapshot mode because 1) the diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index 176868496879..7b8fed7b63cc 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -869,6 +869,10 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp if (WARN_ON(trbe_csdev)) return; + /* If the TRBE was not probed on the CPU, we shouldn't be here */ + if (WARN_ON(!cpudata->drvdata)) + return; + dev = &cpudata->drvdata->pdev->dev; desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu); if (!desc.name) @@ -950,7 +954,9 @@ static int arm_trbe_probe_coresight(struct trbe_drvdata *drvdata) return -ENOMEM; for_each_cpu(cpu, &drvdata->supported_cpus) { - smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1); + /* If we fail to probe the CPU, let us defer it to hotplug callbacks */ + if (smp_call_function_single(cpu, arm_trbe_probe_cpu, drvdata, 1)) + continue; if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) arm_trbe_register_coresight_cpu(drvdata, cpu); if (cpumask_test_cpu(cpu, &drvdata->supported_cpus)) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 3f1d2d9424cf..5b09e93f4ac5 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -812,15 +812,11 @@ static void __init gic_dist_init(void) * enabled. */ affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); - for (i = 32; i < GIC_LINE_NR; i++) { - trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTER, &affinity); + for (i = 32; i < GIC_LINE_NR; i++) gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); - } - for (i = 0; i < GIC_ESPI_NR; i++) { - trace_android_vh_gic_v3_affinity_init(i, GICD_IROUTERnE, &affinity); + for (i = 0; i < GIC_ESPI_NR; i++) gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); - } } static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c index 2fe31ea5ae53..eaf7781f29f9 100644 --- a/drivers/power/supply/power_supply_core.c +++ b/drivers/power/supply/power_supply_core.c @@ -132,6 +132,7 @@ void power_supply_changed(struct power_supply *psy) } EXPORT_SYMBOL_GPL(power_supply_changed); +static int psy_register_cooler(struct power_supply *psy); /* * Notify that power supply was registered after parent finished the probing. * @@ -139,6 +140,8 @@ EXPORT_SYMBOL_GPL(power_supply_changed); * calling power_supply_changed() directly from power_supply_register() * would lead to execution of get_property() function provided by the driver * too early - before the probe ends. + * Also, registering cooling device from the probe will execute the + * get_property() function. So register the cooling device after the probe. * * Avoid that by waiting on parent's mutex. 
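 * Editor's note: for the same reason the cooling-device registration is
 * moved out of __power_supply_register(); psy_register_cooler() is now
 * called from power_supply_deferred_register_work() below, once the
 * parent has finished probing.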
*/ @@ -156,6 +159,7 @@ static void power_supply_deferred_register_work(struct work_struct *work) } power_supply_changed(psy); + psy_register_cooler(psy); if (psy->dev.parent) mutex_unlock(&psy->dev.parent->mutex); @@ -1134,9 +1138,15 @@ static int psy_register_cooler(struct power_supply *psy) for (i = 0; i < psy->desc->num_properties; i++) { if (psy->desc->properties[i] == POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT) { - psy->tcd = thermal_cooling_device_register( - (char *)psy->desc->name, - psy, &psy_tcd_ops); + if (psy->dev.parent) + psy->tcd = thermal_of_cooling_device_register( + dev_of_node(psy->dev.parent), + (char *)psy->desc->name, + psy, &psy_tcd_ops); + else + psy->tcd = thermal_cooling_device_register( + (char *)psy->desc->name, + psy, &psy_tcd_ops); return PTR_ERR_OR_ZERO(psy->tcd); } } @@ -1242,10 +1252,6 @@ __power_supply_register(struct device *parent, if (rc) goto register_thermal_failed; - rc = psy_register_cooler(psy); - if (rc) - goto register_cooler_failed; - rc = power_supply_create_triggers(psy); if (rc) goto create_triggers_failed; @@ -1275,8 +1281,6 @@ __power_supply_register(struct device *parent, add_hwmon_sysfs_failed: power_supply_remove_triggers(psy); create_triggers_failed: - psy_unregister_cooler(psy); -register_cooler_failed: psy_unregister_thermal(psy); register_thermal_failed: device_del(dev); diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 8f01cde61d20..959f9e121cc6 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -55,8 +55,6 @@ #include #include -#include - /* Whether we react on sysrq keys or just ignore them */ static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE; static bool __read_mostly sysrq_always_enabled; @@ -153,8 +151,6 @@ static void sysrq_handle_crash(int key) /* release the RCU read lock before crashing */ rcu_read_unlock(); - trace_android_vh_sysrq_crash(current); - panic("sysrq triggered crash\n"); } static const struct sysrq_key_op sysrq_crash_op = { diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 401949e75426..93759e839267 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1269,6 +1269,19 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable) trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(stream_id); + /* + * As per data book 4.2.3.2TRB Control Bit Rules section + * + * The controller autonomously checks the HWO field of a TRB to determine if the + * entire TRB is valid. Therefore, software must ensure that the rest of the TRB + * is valid before setting the HWO field to '1'. In most systems, this means that + * software must update the fourth DWORD of a TRB last. + * + * However there is a possibility of CPU re-ordering here which can cause + * controller to observe the HWO bit set prematurely. + * Add a write memory barrier to prevent CPU re-ordering. 
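 * Editor's note: the wmb() added below therefore orders the earlier
 * stores that fill in this TRB (buffer address, size and the control
 * bits set above) before the store that sets DWC3_TRB_CTRL_HWO and
 * hands the TRB to the controller.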
+ */ + wmb(); trb->ctrl |= DWC3_TRB_CTRL_HWO; dwc3_ep_inc_enq(dep); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index d8652321e15e..bb0d92837f67 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1710,16 +1710,24 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); if (atomic_dec_and_test(&ffs->opened)) { if (ffs->no_disconnect) { ffs->state = FFS_DEACTIVATED; - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, - ffs->eps_count); - ffs->epfiles = NULL; - } + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, + flags); + + if (epfiles) + ffs_epfiles_destroy(epfiles, + ffs->eps_count); + if (ffs->setup_state == FFS_SETUP_PENDING) __ffs_ep0_stall(ffs); } else { @@ -1766,14 +1774,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name) static void ffs_data_clear(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, flags); + + /* + * potential race possible between ffs_func_eps_disable + * & ffs_epfile_release therefore maintaining a local + * copy of epfile will save us from use-after-free. + */ + if (epfiles) { + ffs_epfiles_destroy(epfiles, ffs->eps_count); ffs->epfiles = NULL; } @@ -1921,12 +1942,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) static void ffs_func_eps_disable(struct ffs_function *func) { - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = func->ffs->epfiles; - unsigned count = func->ffs->eps_count; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); + count = func->ffs->eps_count; + epfile = func->ffs->epfiles; + ep = func->eps; while (count--) { /* pending requests get nuked */ if (likely(ep->ep)) @@ -1944,14 +1968,18 @@ static void ffs_func_eps_disable(struct ffs_function *func) static int ffs_func_eps_enable(struct ffs_function *func) { - struct ffs_data *ffs = func->ffs; - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = ffs->epfiles; - unsigned count = ffs->eps_count; + struct ffs_data *ffs; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); + ffs = func->ffs; + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; while(count--) { ep->ep->driver_data = ep; diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 59ee18377a5c..7fc79c47798a 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -737,7 +737,7 @@ hitted: retry: err = z_erofs_attach_page(clt, page, page_type, clt->mode >= COLLECT_PRIMARY_FOLLOWED); - /* should allocate an additional short-lived page for pagevec */ + /* should allocate an additional staging page for pagevec */ if (err == -EAGAIN) { struct page *const newpage = alloc_page(GFP_NOFS | __GFP_NOFAIL); diff --git a/fs/exec.c b/fs/exec.c index ec5ef10ce2db..5ce144cf810f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -73,7 +73,6 @@ #include "internal.h" #include -#include EXPORT_TRACEPOINT_SYMBOL_GPL(task_rename); @@ -1230,7 
+1229,6 @@ void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec) strlcpy(tsk->comm, buf, sizeof(tsk->comm)); task_unlock(tsk); perf_event_comm(tsk, exec); - trace_android_vh_set_task_comm(tsk); } /* diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index d03c6fc37a00..3390b8dcd634 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -350,13 +350,13 @@ static int f2fs_write_meta_pages(struct address_space *mapping, goto skip_write; /* if locked failed, cp will flush dirty pages instead */ - if (!down_write_trylock(&sbi->cp_global_sem)) + if (!f2fs_down_write_trylock(&sbi->cp_global_sem)) goto skip_write; trace_f2fs_writepages(mapping->host, wbc, META); diff = nr_pages_to_write(sbi, META, wbc); written = f2fs_sync_meta_pages(sbi, META, wbc->nr_to_write, FS_META_IO); - up_write(&sbi->cp_global_sem); + f2fs_up_write(&sbi->cp_global_sem); wbc->nr_to_write = max((long)0, wbc->nr_to_write - written - diff); return 0; @@ -650,7 +650,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino) /* truncate all the data during iput */ iput(inode); - err = f2fs_get_node_info(sbi, ino, &ni); + err = f2fs_get_node_info(sbi, ino, &ni, false); if (err) goto err_out; @@ -1148,7 +1148,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi) if (!is_journalled_quota(sbi)) return false; - if (!down_write_trylock(&sbi->quota_sem)) + if (!f2fs_down_write_trylock(&sbi->quota_sem)) return true; if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) { ret = false; @@ -1160,7 +1160,7 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi) } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) { ret = true; } - up_write(&sbi->quota_sem); + f2fs_up_write(&sbi->quota_sem); return ret; } @@ -1217,10 +1217,10 @@ retry_flush_dents: * POR: we should ensure that there are no dirty node pages * until finishing nat/sit flush. inode->i_blocks can be updated. */ - down_write(&sbi->node_change); + f2fs_down_write(&sbi->node_change); if (get_pages(sbi, F2FS_DIRTY_IMETA)) { - up_write(&sbi->node_change); + f2fs_up_write(&sbi->node_change); f2fs_unlock_all(sbi); err = f2fs_sync_inode_meta(sbi); if (err) @@ -1230,15 +1230,15 @@ retry_flush_dents: } retry_flush_nodes: - down_write(&sbi->node_write); + f2fs_down_write(&sbi->node_write); if (get_pages(sbi, F2FS_DIRTY_NODES)) { - up_write(&sbi->node_write); + f2fs_up_write(&sbi->node_write); atomic_inc(&sbi->wb_sync_req[NODE]); err = f2fs_sync_node_pages(sbi, &wbc, false, FS_CP_NODE_IO); atomic_dec(&sbi->wb_sync_req[NODE]); if (err) { - up_write(&sbi->node_change); + f2fs_up_write(&sbi->node_change); f2fs_unlock_all(sbi); return err; } @@ -1251,13 +1251,13 @@ retry_flush_nodes: * dirty node blocks and some checkpoint values by block allocation. 
*/ __prepare_cp_block(sbi); - up_write(&sbi->node_change); + f2fs_up_write(&sbi->node_change); return err; } static void unblock_operations(struct f2fs_sb_info *sbi) { - up_write(&sbi->node_write); + f2fs_up_write(&sbi->node_write); f2fs_unlock_all(sbi); } @@ -1592,7 +1592,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) f2fs_warn(sbi, "Start checkpoint disabled!"); } if (cpc->reason != CP_RESIZE) - down_write(&sbi->cp_global_sem); + f2fs_down_write(&sbi->cp_global_sem); if (!is_sbi_flag_set(sbi, SBI_IS_DIRTY) && ((cpc->reason & CP_FASTBOOT) || (cpc->reason & CP_SYNC) || @@ -1667,7 +1667,7 @@ stop: trace_f2fs_write_checkpoint(sbi->sb, cpc->reason, "finish checkpoint"); out: if (cpc->reason != CP_RESIZE) - up_write(&sbi->cp_global_sem); + f2fs_up_write(&sbi->cp_global_sem); return err; } @@ -1715,9 +1715,9 @@ static int __write_checkpoint_sync(struct f2fs_sb_info *sbi) struct cp_control cpc = { .reason = CP_SYNC, }; int err; - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); err = f2fs_write_checkpoint(sbi, &cpc); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); return err; } @@ -1805,9 +1805,9 @@ int f2fs_issue_checkpoint(struct f2fs_sb_info *sbi) if (!test_opt(sbi, MERGE_CHECKPOINT) || cpc.reason != CP_SYNC) { int ret; - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); ret = f2fs_write_checkpoint(sbi, &cpc); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); return ret; } diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index c07f04366e24..da86e845fa73 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -1203,7 +1203,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, * checkpoint. This can only happen to quota writes which can cause * the below discard race condition. 
*/ - down_read(&sbi->node_write); + f2fs_down_read(&sbi->node_write); } else if (!f2fs_trylock_op(sbi)) { goto out_free; } @@ -1222,7 +1222,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, psize = (loff_t)(cc->rpages[last_index]->index + 1) << PAGE_SHIFT; - err = f2fs_get_node_info(fio.sbi, dn.nid, &ni); + err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false); if (err) goto out_put_dnode; @@ -1320,7 +1320,7 @@ unlock_continue: f2fs_put_dnode(&dn); if (IS_NOQUOTA(inode)) - up_read(&sbi->node_write); + f2fs_up_read(&sbi->node_write); else f2fs_unlock_op(sbi); @@ -1346,7 +1346,7 @@ out_put_dnode: f2fs_put_dnode(&dn); out_unlock_op: if (IS_NOQUOTA(inode)) - up_read(&sbi->node_write); + f2fs_up_read(&sbi->node_write); else f2fs_unlock_op(sbi); out_free: diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 4caf3e5653a7..57fb77e57330 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -593,7 +593,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi, enum page_type btype = PAGE_TYPE_OF_BIO(type); struct f2fs_bio_info *io = sbi->write_io[btype] + temp; - down_write(&io->io_rwsem); + f2fs_down_write(&io->io_rwsem); /* change META to META_FLUSH in the checkpoint procedure */ if (type >= META_FLUSH) { @@ -604,7 +604,7 @@ static void __f2fs_submit_merged_write(struct f2fs_sb_info *sbi, io->fio.op_flags |= REQ_PREFLUSH | REQ_FUA; } __submit_merged_bio(io); - up_write(&io->io_rwsem); + f2fs_up_write(&io->io_rwsem); } static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, @@ -619,9 +619,9 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi, enum page_type btype = PAGE_TYPE_OF_BIO(type); struct f2fs_bio_info *io = sbi->write_io[btype] + temp; - down_read(&io->io_rwsem); + f2fs_down_read(&io->io_rwsem); ret = __has_merged_page(io->bio, inode, page, ino); - up_read(&io->io_rwsem); + f2fs_up_read(&io->io_rwsem); } if (ret) __f2fs_submit_merged_write(sbi, type, temp); @@ -745,9 +745,9 @@ static void add_bio_entry(struct f2fs_sb_info *sbi, struct bio *bio, if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE) f2fs_bug_on(sbi, 1); - down_write(&io->bio_list_lock); + f2fs_down_write(&io->bio_list_lock); list_add_tail(&be->list, &io->bio_list); - up_write(&io->bio_list_lock); + f2fs_up_write(&io->bio_list_lock); } static void del_bio_entry(struct bio_entry *be) @@ -769,7 +769,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct list_head *head = &io->bio_list; struct bio_entry *be; - down_write(&io->bio_list_lock); + f2fs_down_write(&io->bio_list_lock); list_for_each_entry(be, head, list) { if (be->bio != *bio) continue; @@ -793,7 +793,7 @@ static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, __submit_bio(sbi, *bio, DATA); break; } - up_write(&io->bio_list_lock); + f2fs_up_write(&io->bio_list_lock); } if (ret) { @@ -819,7 +819,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, if (list_empty(head)) continue; - down_read(&io->bio_list_lock); + f2fs_down_read(&io->bio_list_lock); list_for_each_entry(be, head, list) { if (target) found = (target == be->bio); @@ -829,14 +829,14 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, if (found) break; } - up_read(&io->bio_list_lock); + f2fs_up_read(&io->bio_list_lock); if (!found) continue; found = false; - down_write(&io->bio_list_lock); + f2fs_down_write(&io->bio_list_lock); list_for_each_entry(be, head, list) { if (target) found = (target == be->bio); @@ -849,7 +849,7 @@ void f2fs_submit_merged_ipu_write(struct f2fs_sb_info *sbi, break; } } - 
up_write(&io->bio_list_lock); + f2fs_up_write(&io->bio_list_lock); } if (found) @@ -909,7 +909,7 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio) f2fs_bug_on(sbi, is_read_io(fio->op)); - down_write(&io->io_rwsem); + f2fs_down_write(&io->io_rwsem); next: if (fio->in_list) { spin_lock(&io->io_lock); @@ -976,7 +976,7 @@ out: if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) || !f2fs_is_checkpoint_ready(sbi)) __submit_merged_bio(io); - up_write(&io->io_rwsem); + f2fs_up_write(&io->io_rwsem); } static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, @@ -1356,7 +1356,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type) if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC))) return -EPERM; - err = f2fs_get_node_info(sbi, dn->nid, &ni); + err = f2fs_get_node_info(sbi, dn->nid, &ni, false); if (err) return err; @@ -1437,9 +1437,9 @@ void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock) { if (flag == F2FS_GET_BLOCK_PRE_AIO) { if (lock) - down_read(&sbi->node_change); + f2fs_down_read(&sbi->node_change); else - up_read(&sbi->node_change); + f2fs_up_read(&sbi->node_change); } else { if (lock) f2fs_lock_op(sbi); @@ -1791,7 +1791,7 @@ static int f2fs_xattr_fiemap(struct inode *inode, if (!page) return -ENOMEM; - err = f2fs_get_node_info(sbi, inode->i_ino, &ni); + err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false); if (err) { f2fs_put_page(page, 1); return err; @@ -1823,7 +1823,7 @@ static int f2fs_xattr_fiemap(struct inode *inode, if (!page) return -ENOMEM; - err = f2fs_get_node_info(sbi, xnid, &ni); + err = f2fs_get_node_info(sbi, xnid, &ni, false); if (err) { f2fs_put_page(page, 1); return err; @@ -2655,7 +2655,7 @@ got_it: fio->need_lock = LOCK_REQ; } - err = f2fs_get_node_info(fio->sbi, dn.nid, &ni); + err = f2fs_get_node_info(fio->sbi, dn.nid, &ni, false); if (err) goto out_writepage; @@ -2768,13 +2768,13 @@ write: * the below discard race condition. 
*/ if (IS_NOQUOTA(inode)) - down_read(&sbi->node_write); + f2fs_down_read(&sbi->node_write); fio.need_lock = LOCK_DONE; err = f2fs_do_write_data_page(&fio); if (IS_NOQUOTA(inode)) - up_read(&sbi->node_write); + f2fs_up_read(&sbi->node_write); goto done; } @@ -3232,14 +3232,14 @@ static void f2fs_write_failed(struct address_space *mapping, loff_t to) /* In the fs-verity case, f2fs_end_enable_verity() does the truncate */ if (to > i_size && !f2fs_verity_in_progress(inode)) { - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); truncate_pagecache(inode, i_size); f2fs_truncate_blocks(inode, i_size, true); - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); } } @@ -3646,21 +3646,21 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) iocb->ki_hint = WRITE_LIFE_NOT_SET; if (iocb->ki_flags & IOCB_NOWAIT) { - if (!down_read_trylock(&fi->i_gc_rwsem[rw])) { + if (!f2fs_down_read_trylock(&fi->i_gc_rwsem[rw])) { iocb->ki_hint = hint; err = -EAGAIN; goto out; } - if (do_opu && !down_read_trylock(&fi->i_gc_rwsem[READ])) { - up_read(&fi->i_gc_rwsem[rw]); + if (do_opu && !f2fs_down_read_trylock(&fi->i_gc_rwsem[READ])) { + f2fs_up_read(&fi->i_gc_rwsem[rw]); iocb->ki_hint = hint; err = -EAGAIN; goto out; } } else { - down_read(&fi->i_gc_rwsem[rw]); + f2fs_down_read(&fi->i_gc_rwsem[rw]); if (do_opu) - down_read(&fi->i_gc_rwsem[READ]); + f2fs_down_read(&fi->i_gc_rwsem[READ]); } err = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, @@ -3670,9 +3670,9 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) DIO_SKIP_HOLES); if (do_opu) - up_read(&fi->i_gc_rwsem[READ]); + f2fs_up_read(&fi->i_gc_rwsem[READ]); - up_read(&fi->i_gc_rwsem[rw]); + f2fs_up_read(&fi->i_gc_rwsem[rw]); if (rw == WRITE) { if (whint_mode == WHINT_MODE_OFF) @@ -3944,13 +3944,13 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, unsigned int end_sec = secidx + blkcnt / blk_per_sec; int ret = 0; - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); set_inode_flag(inode, FI_ALIGNED_WRITE); for (; secidx < end_sec; secidx++) { - down_write(&sbi->pin_sem); + f2fs_down_write(&sbi->pin_sem); f2fs_lock_op(sbi); f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); @@ -3964,7 +3964,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, page = f2fs_get_lock_data_page(inode, blkidx, true); if (IS_ERR(page)) { - up_write(&sbi->pin_sem); + f2fs_up_write(&sbi->pin_sem); ret = PTR_ERR(page); goto done; } @@ -3977,7 +3977,7 @@ static int f2fs_migrate_blocks(struct inode *inode, block_t start_blk, ret = filemap_fdatawrite(inode->i_mapping); - up_write(&sbi->pin_sem); + f2fs_up_write(&sbi->pin_sem); if (ret) break; @@ -3987,8 +3987,8 @@ done: clear_inode_flag(inode, FI_DO_DEFRAG); clear_inode_flag(inode, FI_ALIGNED_WRITE); - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); return ret; } diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 032ae4535ed7..9ee895af75e4 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -768,7 +768,7 @@ 
add_dentry: f2fs_wait_on_page_writeback(dentry_page, DATA, true, true); if (inode) { - down_write(&F2FS_I(inode)->i_sem); + f2fs_down_write(&F2FS_I(inode)->i_sem); page = f2fs_init_inode_metadata(inode, dir, fname, NULL); if (IS_ERR(page)) { err = PTR_ERR(page); @@ -795,7 +795,7 @@ add_dentry: f2fs_update_parent_metadata(dir, inode, current_depth); fail: if (inode) - up_write(&F2FS_I(inode)->i_sem); + f2fs_up_write(&F2FS_I(inode)->i_sem); f2fs_put_page(dentry_page, 1); @@ -860,7 +860,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir) struct page *page; int err = 0; - down_write(&F2FS_I(inode)->i_sem); + f2fs_down_write(&F2FS_I(inode)->i_sem); page = f2fs_init_inode_metadata(inode, dir, NULL, NULL); if (IS_ERR(page)) { err = PTR_ERR(page); @@ -871,7 +871,7 @@ int f2fs_do_tmpfile(struct inode *inode, struct inode *dir) clear_inode_flag(inode, FI_NEW_INODE); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); fail: - up_write(&F2FS_I(inode)->i_sem); + f2fs_up_write(&F2FS_I(inode)->i_sem); return err; } @@ -879,7 +879,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(dir); - down_write(&F2FS_I(inode)->i_sem); + f2fs_down_write(&F2FS_I(inode)->i_sem); if (S_ISDIR(inode->i_mode)) f2fs_i_links_write(dir, false); @@ -890,7 +890,7 @@ void f2fs_drop_nlink(struct inode *dir, struct inode *inode) f2fs_i_links_write(inode, false); f2fs_i_size_write(inode, 0); } - up_write(&F2FS_I(inode)->i_sem); + f2fs_up_write(&F2FS_I(inode)->i_sem); if (inode->i_nlink == 0) f2fs_add_orphan_inode(inode); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index b95a44c308da..72ed44ffc89f 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -117,6 +117,18 @@ typedef u32 nid_t; #define COMPRESS_EXT_NUM 16 +/* + * An implementation of an rwsem that is explicitly unfair to readers. This + * prevents priority inversion when a low-priority reader acquires the read lock + * while sleeping on the write lock but the write lock is needed by + * higher-priority clients. 
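 * Editor's note: readers are kept out of the rw_semaphore's internal
 * wait list here; f2fs_down_read() (defined further down in this
 * header) simply retries down_read_trylock() from a plain waitqueue
 * that f2fs_up_write() wakes, so queued readers can never delay a
 * waiting writer.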
+ */ + +struct f2fs_rwsem { + struct rw_semaphore internal_rwsem; + wait_queue_head_t read_waiters; +}; + struct f2fs_mount_info { unsigned int opt; int write_io_size_bits; /* Write IO size bits */ @@ -726,7 +738,7 @@ struct f2fs_inode_info { /* Use below internally in f2fs*/ unsigned long flags[BITS_TO_LONGS(FI_MAX)]; /* use to pass per-file flags */ - struct rw_semaphore i_sem; /* protect fi info */ + struct f2fs_rwsem i_sem; /* protect fi info */ atomic_t dirty_pages; /* # of dirty pages */ f2fs_hash_t chash; /* hash value of given file name */ unsigned int clevel; /* maximum level of given file name */ @@ -751,9 +763,9 @@ struct f2fs_inode_info { struct extent_tree *extent_tree; /* cached extent_tree entry */ /* avoid racing between foreground op and gc */ - struct rw_semaphore i_gc_rwsem[2]; - struct rw_semaphore i_mmap_sem; - struct rw_semaphore i_xattr_sem; /* avoid racing between reading and changing EAs */ + struct f2fs_rwsem i_gc_rwsem[2]; + struct f2fs_rwsem i_mmap_sem; + struct f2fs_rwsem i_xattr_sem; /* avoid racing between reading and changing EAs */ int i_extra_isize; /* size of extra space located in i_addr */ kprojid_t i_projid; /* id for project quota */ @@ -870,7 +882,7 @@ struct f2fs_nm_info { /* NAT cache management */ struct radix_tree_root nat_root;/* root of the nat entry cache */ struct radix_tree_root nat_set_root;/* root of the nat set cache */ - struct rw_semaphore nat_tree_lock; /* protect nat entry tree */ + struct f2fs_rwsem nat_tree_lock; /* protect nat entry tree */ struct list_head nat_entries; /* cached nat entry list (clean) */ spinlock_t nat_list_lock; /* protect clean nat entry list */ unsigned int nat_cnt[MAX_NAT_STATE]; /* the # of cached nat entries */ @@ -983,7 +995,7 @@ struct f2fs_sm_info { struct dirty_seglist_info *dirty_info; /* dirty segment information */ struct curseg_info *curseg_array; /* active segment information */ - struct rw_semaphore curseg_lock; /* for preventing curseg change */ + struct f2fs_rwsem curseg_lock; /* for preventing curseg change */ block_t seg0_blkaddr; /* block address of 0'th segment */ block_t main_blkaddr; /* start block address of main area */ @@ -1167,11 +1179,11 @@ struct f2fs_bio_info { struct bio *bio; /* bios to merge */ sector_t last_block_in_bio; /* last block number */ struct f2fs_io_info fio; /* store buffered io info. 
*/ - struct rw_semaphore io_rwsem; /* blocking op for bio */ + struct f2fs_rwsem io_rwsem; /* blocking op for bio */ spinlock_t io_lock; /* serialize DATA/NODE IOs */ struct list_head io_list; /* track fios */ struct list_head bio_list; /* bio entry list head */ - struct rw_semaphore bio_list_lock; /* lock to protect bio entry list */ + struct f2fs_rwsem bio_list_lock; /* lock to protect bio entry list */ }; #define FDEV(i) (sbi->devs[i]) @@ -1528,7 +1540,7 @@ struct f2fs_sb_info { struct super_block *sb; /* pointer to VFS super block */ struct proc_dir_entry *s_proc; /* proc entry */ struct f2fs_super_block *raw_super; /* raw super block pointer */ - struct rw_semaphore sb_lock; /* lock for raw super block */ + struct f2fs_rwsem sb_lock; /* lock for raw super block */ int valid_super_block; /* valid super block no */ unsigned long s_flag; /* flags for sbi */ struct mutex writepages; /* mutex for writepages() */ @@ -1548,7 +1560,7 @@ struct f2fs_sb_info { /* for bio operations */ struct f2fs_bio_info *write_io[NR_PAGE_TYPE]; /* for write bios */ /* keep migration IO order for LFS mode */ - struct rw_semaphore io_order_lock; + struct f2fs_rwsem io_order_lock; mempool_t *write_io_dummy; /* Dummy pages */ /* for checkpoint */ @@ -1556,10 +1568,10 @@ struct f2fs_sb_info { int cur_cp_pack; /* remain current cp pack */ spinlock_t cp_lock; /* for flag in ckpt */ struct inode *meta_inode; /* cache meta blocks */ - struct rw_semaphore cp_global_sem; /* checkpoint procedure lock */ - struct rw_semaphore cp_rwsem; /* blocking FS operations */ - struct rw_semaphore node_write; /* locking node writes */ - struct rw_semaphore node_change; /* locking node change */ + struct f2fs_rwsem cp_global_sem; /* checkpoint procedure lock */ + struct f2fs_rwsem cp_rwsem; /* blocking FS operations */ + struct f2fs_rwsem node_write; /* locking node writes */ + struct f2fs_rwsem node_change; /* locking node change */ wait_queue_head_t cp_wait; unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ long interval_time[MAX_TIME]; /* to store thresholds */ @@ -1619,7 +1631,7 @@ struct f2fs_sb_info { block_t unusable_block_count; /* # of blocks saved by last cp */ unsigned int nquota_files; /* # of quota sysfile */ - struct rw_semaphore quota_sem; /* blocking cp for flags */ + struct f2fs_rwsem quota_sem; /* blocking cp for flags */ /* # of pages, see count_type */ atomic_t nr_pages[NR_COUNT_TYPE]; @@ -1635,7 +1647,7 @@ struct f2fs_sb_info { struct f2fs_mount_info mount_opt; /* mount options */ /* for cleaning operations */ - struct rw_semaphore gc_lock; /* + struct f2fs_rwsem gc_lock; /* * semaphore for GC, avoid * race between GC and GC or CP */ @@ -1652,7 +1664,7 @@ struct f2fs_sb_info { /* threshold for gc trials on pinned files */ u64 gc_pin_file_threshold; - struct rw_semaphore pin_sem; + struct f2fs_rwsem pin_sem; /* maximum # of trials to find a victim segment for SSR and GC */ unsigned int max_victim_search; @@ -2069,29 +2081,85 @@ static inline bool enabled_nat_bits(struct f2fs_sb_info *sbi, return (cpc) ? 
(cpc->reason & CP_UMOUNT) && set : set; } +static inline void init_f2fs_rwsem(struct f2fs_rwsem *sem) +{ + init_rwsem(&sem->internal_rwsem); + init_waitqueue_head(&sem->read_waiters); +} + +static inline int f2fs_rwsem_is_locked(struct f2fs_rwsem *sem) +{ + return rwsem_is_locked(&sem->internal_rwsem); +} + +static inline int f2fs_rwsem_is_contended(struct f2fs_rwsem *sem) +{ + return rwsem_is_contended(&sem->internal_rwsem); +} + +static inline void f2fs_down_read(struct f2fs_rwsem *sem) +{ + wait_event(sem->read_waiters, down_read_trylock(&sem->internal_rwsem)); +} + +static inline int f2fs_down_read_trylock(struct f2fs_rwsem *sem) +{ + return down_read_trylock(&sem->internal_rwsem); +} + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +static inline void f2fs_down_read_nested(struct f2fs_rwsem *sem, int subclass) +{ + down_read_nested(&sem->internal_rwsem, subclass); +} +#else +#define f2fs_down_read_nested(sem, subclass) f2fs_down_read(sem) +#endif + +static inline void f2fs_up_read(struct f2fs_rwsem *sem) +{ + up_read(&sem->internal_rwsem); +} + +static inline void f2fs_down_write(struct f2fs_rwsem *sem) +{ + down_write(&sem->internal_rwsem); +} + +static inline int f2fs_down_write_trylock(struct f2fs_rwsem *sem) +{ + return down_write_trylock(&sem->internal_rwsem); +} + +static inline void f2fs_up_write(struct f2fs_rwsem *sem) +{ + up_write(&sem->internal_rwsem); + wake_up_all(&sem->read_waiters); +} + static inline void f2fs_lock_op(struct f2fs_sb_info *sbi) { - down_read(&sbi->cp_rwsem); + f2fs_down_read(&sbi->cp_rwsem); } static inline int f2fs_trylock_op(struct f2fs_sb_info *sbi) { - return down_read_trylock(&sbi->cp_rwsem); + return f2fs_down_read_trylock(&sbi->cp_rwsem); } static inline void f2fs_unlock_op(struct f2fs_sb_info *sbi) { - up_read(&sbi->cp_rwsem); + f2fs_up_read(&sbi->cp_rwsem); } static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) { - down_write(&sbi->cp_rwsem); + f2fs_down_write(&sbi->cp_rwsem); } static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) { - up_write(&sbi->cp_rwsem); + f2fs_up_write(&sbi->cp_rwsem); } static inline int __get_cp_reason(struct f2fs_sb_info *sbi) @@ -3421,7 +3489,7 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid); bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid); bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino); int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, - struct node_info *ni); + struct node_info *ni, bool checkpoint_context); pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs); int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode); int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index d183efa2c58e..19b568d61d62 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -37,9 +37,9 @@ static vm_fault_t f2fs_filemap_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); vm_fault_t ret; - down_read(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_read(&F2FS_I(inode)->i_mmap_sem); ret = filemap_fault(vmf); - up_read(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_read(&F2FS_I(inode)->i_mmap_sem); if (!ret) f2fs_update_iostat(F2FS_I_SB(inode), APP_MAPPED_READ_IO, @@ -100,7 +100,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) f2fs_bug_on(sbi, f2fs_has_inline_data(inode)); file_update_time(vmf->vma->vm_file); - down_read(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_read(&F2FS_I(inode)->i_mmap_sem); lock_page(page); if (unlikely(page->mapping != 
inode->i_mapping || page_offset(page) > i_size_read(inode) || @@ -158,7 +158,7 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) trace_f2fs_vm_page_mkwrite(page, DATA); out_sem: - up_read(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_read(&F2FS_I(inode)->i_mmap_sem); sb_end_pagefault(inode->i_sb); err: @@ -239,13 +239,13 @@ static void try_to_fix_pino(struct inode *inode) struct f2fs_inode_info *fi = F2FS_I(inode); nid_t pino; - down_write(&fi->i_sem); + f2fs_down_write(&fi->i_sem); if (file_wrong_pino(inode) && inode->i_nlink == 1 && get_parent_ino(inode, &pino)) { f2fs_i_pino_write(inode, pino); file_got_pino(inode); } - up_write(&fi->i_sem); + f2fs_up_write(&fi->i_sem); } static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, @@ -308,9 +308,9 @@ go_write: * Both of fdatasync() and fsync() are able to be recovered from * sudden-power-off. */ - down_read(&F2FS_I(inode)->i_sem); + f2fs_down_read(&F2FS_I(inode)->i_sem); cp_reason = need_do_checkpoint(inode); - up_read(&F2FS_I(inode)->i_sem); + f2fs_up_read(&F2FS_I(inode)->i_sem); if (cp_reason) { /* all the dirty node pages should be flushed for POR */ @@ -938,8 +938,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) return err; } - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); truncate_setsize(inode, attr->ia_size); @@ -949,8 +949,8 @@ int f2fs_setattr(struct dentry *dentry, struct iattr *attr) * do not trim all blocks after i_size if target size is * larger than i_size. */ - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); if (err) return err; @@ -1090,8 +1090,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) blk_start = (loff_t)pg_start << PAGE_SHIFT; blk_end = (loff_t)pg_end << PAGE_SHIFT; - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); truncate_pagecache_range(inode, blk_start, blk_end - 1); @@ -1099,8 +1099,8 @@ static int punch_hole(struct inode *inode, loff_t offset, loff_t len) ret = f2fs_truncate_hole(inode, pg_start, pg_end); f2fs_unlock_op(sbi); - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); } } @@ -1211,7 +1211,7 @@ static int __clone_blkaddrs(struct inode *src_inode, struct inode *dst_inode, if (ret) return ret; - ret = f2fs_get_node_info(sbi, dn.nid, &ni); + ret = f2fs_get_node_info(sbi, dn.nid, &ni, false); if (ret) { f2fs_put_dnode(&dn); return ret; @@ -1333,8 +1333,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) f2fs_balance_fs(sbi, true); /* avoid gc operation during block exchange */ - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); f2fs_lock_op(sbi); f2fs_drop_extent_tree(inode); @@ -1342,8 +1342,8 @@ static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true); f2fs_unlock_op(sbi); - up_write(&F2FS_I(inode)->i_mmap_sem); - 
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); return ret; } @@ -1373,13 +1373,13 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) return ret; /* write out all moved pages, if possible */ - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); truncate_pagecache(inode, offset); new_size = i_size_read(inode) - len; ret = f2fs_truncate_blocks(inode, new_size, true); - up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); if (!ret) f2fs_i_size_write(inode, new_size); return ret; @@ -1478,8 +1478,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, unsigned int end_offset; pgoff_t end; - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); truncate_pagecache_range(inode, (loff_t)index << PAGE_SHIFT, @@ -1491,8 +1491,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE); if (ret) { f2fs_unlock_op(sbi); - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); goto out; } @@ -1503,8 +1503,8 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, f2fs_put_dnode(&dn); f2fs_unlock_op(sbi); - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); f2fs_balance_fs(sbi, dn.node_changed); @@ -1560,9 +1560,9 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) f2fs_balance_fs(sbi, true); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); ret = f2fs_truncate_blocks(inode, i_size_read(inode), true); - up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); if (ret) return ret; @@ -1577,8 +1577,8 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); /* avoid gc operation during block exchange */ - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); truncate_pagecache(inode, offset); while (!ret && idx > pg_start) { @@ -1594,14 +1594,14 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len) idx + delta, nr, false); f2fs_unlock_op(sbi); } - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); /* write out all moved pages, if possible */ - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX); truncate_pagecache(inode, offset); - up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); if (!ret) f2fs_i_size_write(inode, new_size); @@ -1651,13 +1651,13 @@ static int expand_inode_data(struct inode *inode, loff_t offset, next_alloc: if (has_not_enough_free_secs(sbi, 0, 
GET_SEC_FROM_SEG(sbi, overprovision_segments(sbi)))) { - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); if (err && err != -ENODATA && err != -EAGAIN) goto out_err; } - down_write(&sbi->pin_sem); + f2fs_down_write(&sbi->pin_sem); f2fs_lock_op(sbi); f2fs_allocate_new_section(sbi, CURSEG_COLD_DATA_PINNED, false); @@ -1666,7 +1666,7 @@ next_alloc: map.m_seg_type = CURSEG_COLD_DATA_PINNED; err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); - up_write(&sbi->pin_sem); + f2fs_up_write(&sbi->pin_sem); expanded += map.m_len; sec_len -= map.m_len; @@ -2050,7 +2050,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) if (ret) goto out; - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); /* * Should wait end_io to count F2FS_WB_CP_DATA correctly by @@ -2061,7 +2061,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) inode->i_ino, get_dirty_pages(inode)); ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); if (ret) { - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); goto out; } @@ -2074,7 +2074,7 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) /* add inode in inmem_list first and set atomic_file */ set_inode_flag(inode, FI_ATOMIC_FILE); clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); F2FS_I(inode)->inmem_task = current; @@ -2381,7 +2381,7 @@ static int f2fs_ioc_get_encryption_pwsalt(struct file *filp, unsigned long arg) if (err) return err; - down_write(&sbi->sb_lock); + f2fs_down_write(&sbi->sb_lock); if (uuid_is_nonzero(sbi->raw_super->encrypt_pw_salt)) goto got_it; @@ -2400,7 +2400,7 @@ got_it: 16)) err = -EFAULT; out_err: - up_write(&sbi->sb_lock); + f2fs_up_write(&sbi->sb_lock); mnt_drop_write_file(filp); return err; } @@ -2477,12 +2477,12 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg) return ret; if (!sync) { - if (!down_write_trylock(&sbi->gc_lock)) { + if (!f2fs_down_write_trylock(&sbi->gc_lock)) { ret = -EBUSY; goto out; } } else { - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); } ret = f2fs_gc(sbi, sync, true, false, NULL_SEGNO); @@ -2513,12 +2513,12 @@ static int __f2fs_ioc_gc_range(struct file *filp, struct f2fs_gc_range *range) do_more: if (!range->sync) { - if (!down_write_trylock(&sbi->gc_lock)) { + if (!f2fs_down_write_trylock(&sbi->gc_lock)) { ret = -EBUSY; goto out; } } else { - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); } ret = f2fs_gc(sbi, range->sync, true, false, @@ -2850,10 +2850,10 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, f2fs_balance_fs(sbi, true); - down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); if (src != dst) { ret = -EBUSY; - if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) + if (!f2fs_down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) goto out_src; } @@ -2871,9 +2871,9 @@ static int f2fs_move_file_range(struct file *file_in, loff_t pos_in, f2fs_unlock_op(sbi); if (src != dst) - up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]); out_src: - up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]); out_unlock: if (src != dst) inode_unlock(dst); @@ -2968,7 +2968,7 @@ static int 
f2fs_ioc_flush_device(struct file *filp, unsigned long arg) end_segno = min(start_segno + range.segments, dev_end_segno); while (start_segno < end_segno) { - if (!down_write_trylock(&sbi->gc_lock)) { + if (!f2fs_down_write_trylock(&sbi->gc_lock)) { ret = -EBUSY; goto out; } @@ -3314,9 +3314,9 @@ int f2fs_precache_extents(struct inode *inode) while (map.m_lblk < end) { map.m_len = end - map.m_lblk; - down_write(&fi->i_gc_rwsem[WRITE]); + f2fs_down_write(&fi->i_gc_rwsem[WRITE]); err = f2fs_map_blocks(inode, &map, 0, F2FS_GET_BLOCK_PRECACHE); - up_write(&fi->i_gc_rwsem[WRITE]); + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); if (err) return err; @@ -3393,11 +3393,11 @@ static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg) if (!vbuf) return -ENOMEM; - down_read(&sbi->sb_lock); + f2fs_down_read(&sbi->sb_lock); count = utf16s_to_utf8s(sbi->raw_super->volume_name, ARRAY_SIZE(sbi->raw_super->volume_name), UTF16_LITTLE_ENDIAN, vbuf, MAX_VOLUME_NAME); - up_read(&sbi->sb_lock); + f2fs_up_read(&sbi->sb_lock); if (copy_to_user((char __user *)arg, vbuf, min(FSLABEL_MAX, count))) @@ -3425,7 +3425,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg) if (err) goto out; - down_write(&sbi->sb_lock); + f2fs_down_write(&sbi->sb_lock); memset(sbi->raw_super->volume_name, 0, sizeof(sbi->raw_super->volume_name)); @@ -3435,7 +3435,7 @@ static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg) err = f2fs_commit_super(sbi, false); - up_write(&sbi->sb_lock); + f2fs_up_write(&sbi->sb_lock); mnt_drop_write_file(filp); out: @@ -3561,8 +3561,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) if (!atomic_read(&F2FS_I(inode)->i_compr_blocks)) goto out; - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); @@ -3597,8 +3597,8 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) released_blocks += ret; } - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); out: inode_unlock(inode); @@ -3714,8 +3714,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) goto unlock_inode; } - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); last_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE); @@ -3750,8 +3750,8 @@ static int f2fs_reserve_compress_blocks(struct file *filp, unsigned long arg) reserved_blocks += ret; } - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); if (ret >= 0) { clear_inode_flag(inode, FI_COMPRESS_RELEASED); @@ -3869,8 +3869,8 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) if (ret) goto err; - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); ret = filemap_write_and_wait_range(mapping, range.start, to_end ? 
LLONG_MAX : end_addr - 1); @@ -3957,8 +3957,8 @@ static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) ret = f2fs_secure_erase(prev_bdev, inode, prev_index, prev_block, len, range.flags); out: - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); err: inode_unlock(inode); file_end_write(filp); @@ -4442,11 +4442,11 @@ write: /* if we couldn't write data, we should deallocate blocks. */ if (preallocated && i_size_read(inode) < target_size) { - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); - down_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_mmap_sem); f2fs_truncate(inode); - up_write(&F2FS_I(inode)->i_mmap_sem); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_mmap_sem); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); } if (ret > 0) diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 49e6edcae970..f528d11ab526 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -91,21 +91,21 @@ static int gc_thread_func(void *data) */ if (sbi->gc_mode == GC_URGENT_HIGH) { wait_ms = gc_th->urgent_sleep_time; - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); goto do_gc; } if (foreground) { - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); goto do_gc; - } else if (!down_write_trylock(&sbi->gc_lock)) { + } else if (!f2fs_down_write_trylock(&sbi->gc_lock)) { stat_other_skip_bggc_count(sbi); goto next; } if (!is_idle(sbi, GC_TIME)) { increase_sleep_time(gc_th, &wait_ms); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); stat_io_skip_bggc_count(sbi); goto next; } @@ -941,7 +941,7 @@ next_step: continue; } - if (f2fs_get_node_info(sbi, nid, &ni)) { + if (f2fs_get_node_info(sbi, nid, &ni, false)) { f2fs_put_page(node_page, 1); continue; } @@ -1009,7 +1009,7 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, if (IS_ERR(node_page)) return false; - if (f2fs_get_node_info(sbi, nid, dni)) { + if (f2fs_get_node_info(sbi, nid, dni, false)) { f2fs_put_page(node_page, 1); return false; } @@ -1203,7 +1203,7 @@ static int move_data_block(struct inode *inode, block_t bidx, f2fs_wait_on_block_writeback(inode, dn.data_blkaddr); - err = f2fs_get_node_info(fio.sbi, dn.nid, &ni); + err = f2fs_get_node_info(fio.sbi, dn.nid, &ni, false); if (err) goto put_out; @@ -1212,7 +1212,7 @@ static int move_data_block(struct inode *inode, block_t bidx, fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr; if (lfs_mode) - down_write(&fio.sbi->io_order_lock); + f2fs_down_write(&fio.sbi->io_order_lock); mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), fio.old_blkaddr, false); @@ -1298,7 +1298,7 @@ recover_block: true, true, true); up_out: if (lfs_mode) - up_write(&fio.sbi->io_order_lock); + f2fs_up_write(&fio.sbi->io_order_lock); put_out: f2fs_put_dnode(&dn); out: @@ -1457,7 +1457,7 @@ next_step: if (IS_ERR(inode) || is_bad_inode(inode)) continue; - if (!down_write_trylock( + if (!f2fs_down_write_trylock( &F2FS_I(inode)->i_gc_rwsem[WRITE])) { iput(inode); sbi->skipped_gc_rwsem++; @@ -1470,7 +1470,7 @@ next_step: if (f2fs_post_read_required(inode)) { int err = ra_data_block(inode, start_bidx); - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); if (err) { iput(inode); continue; @@ -1481,7 +1481,7 @@ next_step: data_page = f2fs_get_read_data_page(inode, start_bidx, REQ_RAHEAD, true); - 
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); if (IS_ERR(data_page)) { iput(inode); continue; @@ -1500,14 +1500,14 @@ next_step: int err; if (S_ISREG(inode->i_mode)) { - if (!down_write_trylock(&fi->i_gc_rwsem[READ])) { + if (!f2fs_down_write_trylock(&fi->i_gc_rwsem[READ])) { sbi->skipped_gc_rwsem++; continue; } - if (!down_write_trylock( + if (!f2fs_down_write_trylock( &fi->i_gc_rwsem[WRITE])) { sbi->skipped_gc_rwsem++; - up_write(&fi->i_gc_rwsem[READ]); + f2fs_up_write(&fi->i_gc_rwsem[READ]); continue; } locked = true; @@ -1530,8 +1530,8 @@ next_step: submitted++; if (locked) { - up_write(&fi->i_gc_rwsem[WRITE]); - up_write(&fi->i_gc_rwsem[READ]); + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); + f2fs_up_write(&fi->i_gc_rwsem[READ]); } stat_inc_data_blk_count(sbi, 1, gc_type); @@ -1789,7 +1789,7 @@ stop: reserved_segments(sbi), prefree_segments(sbi)); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); put_gc_inode(&gc_list); @@ -1918,7 +1918,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) long long block_count; int segs = secs * sbi->segs_per_sec; - down_write(&sbi->sb_lock); + f2fs_down_write(&sbi->sb_lock); section_count = le32_to_cpu(raw_sb->section_count); segment_count = le32_to_cpu(raw_sb->segment_count); @@ -1939,7 +1939,7 @@ static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs) cpu_to_le32(dev_segs + segs); } - up_write(&sbi->sb_lock); + f2fs_up_write(&sbi->sb_lock); } static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs) @@ -2013,7 +2013,7 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi)); /* stop other GC */ - if (!down_write_trylock(&sbi->gc_lock)) + if (!f2fs_down_write_trylock(&sbi->gc_lock)) return -EAGAIN; /* stop CP to protect MAIN_SEC in free_segment_range */ @@ -2033,15 +2033,15 @@ int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count) out_unlock: f2fs_unlock_op(sbi); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); if (err) return err; set_sbi_flag(sbi, SBI_IS_RESIZEFS); freeze_super(sbi->sb); - down_write(&sbi->gc_lock); - down_write(&sbi->cp_global_sem); + f2fs_down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->cp_global_sem); spin_lock(&sbi->stat_lock); if (shrunk_blocks + valid_user_blocks(sbi) + @@ -2086,8 +2086,8 @@ recover_out: spin_unlock(&sbi->stat_lock); } out_err: - up_write(&sbi->cp_global_sem); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->cp_global_sem); + f2fs_up_write(&sbi->gc_lock); thaw_super(sbi->sb); clear_sbi_flag(sbi, SBI_IS_RESIZEFS); return err; diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index 2311c76ffc37..bb50338a38c9 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -149,7 +149,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page) if (err) return err; - err = f2fs_get_node_info(fio.sbi, dn->nid, &ni); + err = f2fs_get_node_info(fio.sbi, dn->nid, &ni, false); if (err) { f2fs_truncate_data_blocks_range(dn, 1); f2fs_put_dnode(dn); @@ -647,7 +647,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, } if (inode) { - down_write(&F2FS_I(inode)->i_sem); + f2fs_down_write(&F2FS_I(inode)->i_sem); page = f2fs_init_inode_metadata(inode, dir, fname, ipage); if (IS_ERR(page)) { err = PTR_ERR(page); @@ -676,7 +676,7 @@ int f2fs_add_inline_entry(struct inode *dir, const struct f2fs_filename *fname, f2fs_update_parent_metadata(dir, inode, 0); fail: if (inode) - up_write(&F2FS_I(inode)->i_sem); + 
f2fs_up_write(&F2FS_I(inode)->i_sem); out: f2fs_put_page(ipage, 1); return err; @@ -804,7 +804,7 @@ int f2fs_inline_data_fiemap(struct inode *inode, ilen = start + len; ilen -= start; - err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni); + err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni, false); if (err) goto out; diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 1213f15ffd68..dbc4721ef121 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -868,7 +868,7 @@ void f2fs_handle_failed_inode(struct inode *inode) * so we can prevent losing this orphan when encoutering checkpoint * and following suddenly power-off. */ - err = f2fs_get_node_info(sbi, inode->i_ino, &ni); + err = f2fs_get_node_info(sbi, inode->i_ino, &ni, false); if (err) { set_sbi_flag(sbi, SBI_NEED_FSCK); f2fs_warn(sbi, "May loss orphan inode, run fsck to fix."); diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 8e7aa2b2973e..3048074618ff 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -196,7 +196,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode * __u8 (*extlist)[F2FS_EXTENSION_LEN] = sbi->raw_super->extension_list; int i, cold_count, hot_count; - down_read(&sbi->sb_lock); + f2fs_down_read(&sbi->sb_lock); cold_count = le32_to_cpu(sbi->raw_super->extension_count); hot_count = sbi->raw_super->hot_ext_count; @@ -206,7 +206,7 @@ static inline void set_file_temperature(struct f2fs_sb_info *sbi, struct inode * break; } - up_read(&sbi->sb_lock); + f2fs_up_read(&sbi->sb_lock); if (i == cold_count + hot_count) return; @@ -297,19 +297,19 @@ static void set_compress_inode(struct f2fs_sb_info *sbi, struct inode *inode, !f2fs_may_compress(inode)) return; - down_read(&sbi->sb_lock); + f2fs_down_read(&sbi->sb_lock); cold_count = le32_to_cpu(sbi->raw_super->extension_count); hot_count = sbi->raw_super->hot_ext_count; for (i = cold_count; i < cold_count + hot_count; i++) { if (is_extension_exist(name, extlist[i], false)) { - up_read(&sbi->sb_lock); + f2fs_up_read(&sbi->sb_lock); return; } } - up_read(&sbi->sb_lock); + f2fs_up_read(&sbi->sb_lock); ext = F2FS_OPTION(sbi).extensions; @@ -1012,11 +1012,11 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, new_page = NULL; new_inode->i_ctime = current_time(new_inode); - down_write(&F2FS_I(new_inode)->i_sem); + f2fs_down_write(&F2FS_I(new_inode)->i_sem); if (old_dir_entry) f2fs_i_links_write(new_inode, false); f2fs_i_links_write(new_inode, false); - up_write(&F2FS_I(new_inode)->i_sem); + f2fs_up_write(&F2FS_I(new_inode)->i_sem); if (!new_inode->i_nlink) f2fs_add_orphan_inode(new_inode); @@ -1037,13 +1037,13 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry, f2fs_i_links_write(new_dir, true); } - down_write(&F2FS_I(old_inode)->i_sem); + f2fs_down_write(&F2FS_I(old_inode)->i_sem); if (!old_dir_entry || whiteout) file_lost_pino(old_inode); else /* adjust dir's i_pino to pass fsck check */ f2fs_i_pino_write(old_inode, new_dir->i_ino); - up_write(&F2FS_I(old_inode)->i_sem); + f2fs_up_write(&F2FS_I(old_inode)->i_sem); old_inode->i_ctime = current_time(old_inode); f2fs_mark_inode_dirty_sync(old_inode, false); @@ -1203,38 +1203,38 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry, /* update directory entry info of old dir inode */ f2fs_set_link(old_dir, old_entry, old_page, new_inode); - down_write(&F2FS_I(old_inode)->i_sem); + f2fs_down_write(&F2FS_I(old_inode)->i_sem); if (!old_dir_entry) file_lost_pino(old_inode); else /* adjust dir's i_pino to pass fsck 
check */ f2fs_i_pino_write(old_inode, new_dir->i_ino); - up_write(&F2FS_I(old_inode)->i_sem); + f2fs_up_write(&F2FS_I(old_inode)->i_sem); old_dir->i_ctime = current_time(old_dir); if (old_nlink) { - down_write(&F2FS_I(old_dir)->i_sem); + f2fs_down_write(&F2FS_I(old_dir)->i_sem); f2fs_i_links_write(old_dir, old_nlink > 0); - up_write(&F2FS_I(old_dir)->i_sem); + f2fs_up_write(&F2FS_I(old_dir)->i_sem); } f2fs_mark_inode_dirty_sync(old_dir, false); /* update directory entry info of new dir inode */ f2fs_set_link(new_dir, new_entry, new_page, old_inode); - down_write(&F2FS_I(new_inode)->i_sem); + f2fs_down_write(&F2FS_I(new_inode)->i_sem); if (!new_dir_entry) file_lost_pino(new_inode); else /* adjust dir's i_pino to pass fsck check */ f2fs_i_pino_write(new_inode, old_dir->i_ino); - up_write(&F2FS_I(new_inode)->i_sem); + f2fs_up_write(&F2FS_I(new_inode)->i_sem); new_dir->i_ctime = current_time(new_dir); if (new_nlink) { - down_write(&F2FS_I(new_dir)->i_sem); + f2fs_down_write(&F2FS_I(new_dir)->i_sem); f2fs_i_links_write(new_dir, new_nlink > 0); - up_write(&F2FS_I(new_dir)->i_sem); + f2fs_up_write(&F2FS_I(new_dir)->i_sem); } f2fs_mark_inode_dirty_sync(new_dir, false); diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index eed7528a6560..7186055a8867 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -380,14 +380,14 @@ int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid) struct nat_entry *e; bool need = false; - down_read(&nm_i->nat_tree_lock); + f2fs_down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e) { if (!get_nat_flag(e, IS_CHECKPOINTED) && !get_nat_flag(e, HAS_FSYNCED_INODE)) need = true; } - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); return need; } @@ -397,11 +397,11 @@ bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) struct nat_entry *e; bool is_cp = true; - down_read(&nm_i->nat_tree_lock); + f2fs_down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e && !get_nat_flag(e, IS_CHECKPOINTED)) is_cp = false; - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); return is_cp; } @@ -411,13 +411,13 @@ bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) struct nat_entry *e; bool need_update = true; - down_read(&nm_i->nat_tree_lock); + f2fs_down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ino); if (e && get_nat_flag(e, HAS_LAST_FSYNC) && (get_nat_flag(e, IS_CHECKPOINTED) || get_nat_flag(e, HAS_FSYNCED_INODE))) need_update = false; - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); return need_update; } @@ -428,11 +428,15 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, struct f2fs_nm_info *nm_i = NM_I(sbi); struct nat_entry *new, *e; + /* Let's mitigate lock contention of nat_tree_lock during checkpoint */ + if (f2fs_rwsem_is_locked(&sbi->cp_global_sem)) + return; + new = __alloc_nat_entry(nid, false); if (!new) return; - down_write(&nm_i->nat_tree_lock); + f2fs_down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (!e) e = __init_nat_entry(nm_i, new, ne, false); @@ -441,7 +445,7 @@ static void cache_nat_entry(struct f2fs_sb_info *sbi, nid_t nid, nat_get_blkaddr(e) != le32_to_cpu(ne->block_addr) || nat_get_version(e) != ne->version); - up_write(&nm_i->nat_tree_lock); + f2fs_up_write(&nm_i->nat_tree_lock); if (e != new) __free_nat_entry(new); } @@ -453,7 +457,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, struct nat_entry *e; struct nat_entry *new = 
__alloc_nat_entry(ni->nid, true); - down_write(&nm_i->nat_tree_lock); + f2fs_down_write(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, ni->nid); if (!e) { e = __init_nat_entry(nm_i, new, NULL, true); @@ -502,7 +506,7 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, set_nat_flag(e, HAS_FSYNCED_INODE, true); set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); } - up_write(&nm_i->nat_tree_lock); + f2fs_up_write(&nm_i->nat_tree_lock); } int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) @@ -510,7 +514,7 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) struct f2fs_nm_info *nm_i = NM_I(sbi); int nr = nr_shrink; - if (!down_write_trylock(&nm_i->nat_tree_lock)) + if (!f2fs_down_write_trylock(&nm_i->nat_tree_lock)) return 0; spin_lock(&nm_i->nat_list_lock); @@ -532,12 +536,12 @@ int f2fs_try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) } spin_unlock(&nm_i->nat_list_lock); - up_write(&nm_i->nat_tree_lock); + f2fs_up_write(&nm_i->nat_tree_lock); return nr - nr_shrink; } int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, - struct node_info *ni) + struct node_info *ni, bool checkpoint_context) { struct f2fs_nm_info *nm_i = NM_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); @@ -554,13 +558,13 @@ int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid, ni->nid = nid; retry: /* Check nat cache */ - down_read(&nm_i->nat_tree_lock); + f2fs_down_read(&nm_i->nat_tree_lock); e = __lookup_nat_cache(nm_i, nid); if (e) { ni->ino = nat_get_ino(e); ni->blk_addr = nat_get_blkaddr(e); ni->version = nat_get_version(e); - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); return 0; } @@ -570,10 +574,11 @@ retry: * nat_tree_lock. Therefore, we should retry, if we failed to grab here * while not bothering checkpoint. 
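The hunk that closes this comment converts the fallback to the f2fs_rwsem helpers and adds two behaviours: blocking on journal_rwsem is still allowed when no checkpoint is running or when the caller is the checkpoint itself (the new checkpoint_context argument), and the reader now also backs off nat_tree_lock as soon as a checkpoint is seen contending for it. A condensed sketch of the resulting control flow, written against the stock rwsem API for readability (the real code goes through the f2fs_* wrappers, and the function name below is made up for illustration):

	static int nat_journal_lookup_sketch(struct f2fs_sb_info *sbi, nid_t nid,
					     bool checkpoint_context)
	{
		struct f2fs_nm_info *nm_i = NM_I(sbi);
		struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
	retry:
		down_read(&nm_i->nat_tree_lock);
		/* ... NAT cache hit path elided ... */

		if (!rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) {
			/* No checkpoint in flight (or we are it): blocking is fine. */
			down_read(&curseg->journal_rwsem);
		} else if (rwsem_is_contended(&nm_i->nat_tree_lock) ||
			   !down_read_trylock(&curseg->journal_rwsem)) {
			/* A checkpoint wants one of these locks: back off, retry. */
			up_read(&nm_i->nat_tree_lock);
			goto retry;
		}

		/* ... journal scan, unlocks and NAT page fallback elided ... */
		return 0;
	}

The extra rwsem_is_contended() test is what keeps a steady stream of node-info lookups from starving the checkpoint writer on nat_tree_lock.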
*/ - if (!rwsem_is_locked(&sbi->cp_global_sem)) { + if (!f2fs_rwsem_is_locked(&sbi->cp_global_sem) || checkpoint_context) { down_read(&curseg->journal_rwsem); - } else if (!down_read_trylock(&curseg->journal_rwsem)) { - up_read(&nm_i->nat_tree_lock); + } else if (f2fs_rwsem_is_contended(&nm_i->nat_tree_lock) || + !down_read_trylock(&curseg->journal_rwsem)) { + f2fs_up_read(&nm_i->nat_tree_lock); goto retry; } @@ -582,15 +587,15 @@ retry: ne = nat_in_journal(journal, i); node_info_from_raw_nat(ni, &ne); } - up_read(&curseg->journal_rwsem); + up_read(&curseg->journal_rwsem); if (i >= 0) { - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); goto cache; } /* Fill node_info from nat page */ index = current_nat_addr(sbi, nid); - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); page = f2fs_get_meta_page(sbi, index); if (IS_ERR(page)) @@ -865,7 +870,7 @@ static int truncate_node(struct dnode_of_data *dn) int err; pgoff_t index; - err = f2fs_get_node_info(sbi, dn->nid, &ni); + err = f2fs_get_node_info(sbi, dn->nid, &ni, false); if (err) return err; @@ -1264,7 +1269,7 @@ struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs) goto fail; #ifdef CONFIG_F2FS_CHECK_FS - err = f2fs_get_node_info(sbi, dn->nid, &new_ni); + err = f2fs_get_node_info(sbi, dn->nid, &new_ni, false); if (err) { dec_valid_node_count(sbi, dn->inode, !ofs); goto fail; @@ -1326,7 +1331,7 @@ static int read_node_page(struct page *page, int op_flags) return LOCKED_PAGE; } - err = f2fs_get_node_info(sbi, page->index, &ni); + err = f2fs_get_node_info(sbi, page->index, &ni, false); if (err) return err; @@ -1580,21 +1585,21 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, nid = nid_of_node(page); f2fs_bug_on(sbi, page->index != nid); - if (f2fs_get_node_info(sbi, nid, &ni)) + if (f2fs_get_node_info(sbi, nid, &ni, !do_balance)) goto redirty_out; if (wbc->for_reclaim) { - if (!down_read_trylock(&sbi->node_write)) + if (!f2fs_down_read_trylock(&sbi->node_write)) goto redirty_out; } else { - down_read(&sbi->node_write); + f2fs_down_read(&sbi->node_write); } /* This page is already truncated */ if (unlikely(ni.blk_addr == NULL_ADDR)) { ClearPageUptodate(page); dec_page_count(sbi, F2FS_DIRTY_NODES); - up_read(&sbi->node_write); + f2fs_up_read(&sbi->node_write); unlock_page(page); return 0; } @@ -1602,7 +1607,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, if (__is_valid_data_blkaddr(ni.blk_addr) && !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC_ENHANCE)) { - up_read(&sbi->node_write); + f2fs_up_read(&sbi->node_write); goto redirty_out; } @@ -1623,7 +1628,7 @@ static int __write_node_page(struct page *page, bool atomic, bool *submitted, f2fs_do_write_node_page(nid, &fio); set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page)); dec_page_count(sbi, F2FS_DIRTY_NODES); - up_read(&sbi->node_write); + f2fs_up_read(&sbi->node_write); if (wbc->for_reclaim) { f2fs_submit_merged_write_cond(sbi, NULL, page, 0, NODE); @@ -2372,7 +2377,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi) unsigned int i, idx; nid_t nid; - down_read(&nm_i->nat_tree_lock); + f2fs_down_read(&nm_i->nat_tree_lock); for (i = 0; i < nm_i->nat_blocks; i++) { if (!test_bit_le(i, nm_i->nat_block_bitmap)) @@ -2395,7 +2400,7 @@ static void scan_free_nid_bits(struct f2fs_sb_info *sbi) out: scan_curseg_cache(sbi); - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); } static int __f2fs_build_free_nids(struct f2fs_sb_info 
*sbi, @@ -2430,7 +2435,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nid), FREE_NID_PAGES, META_NAT, true); - down_read(&nm_i->nat_tree_lock); + f2fs_down_read(&nm_i->nat_tree_lock); while (1) { if (!test_bit_le(NAT_BLOCK_OFFSET(nid), @@ -2445,7 +2450,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, } if (ret) { - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); f2fs_err(sbi, "NAT is corrupt, run fsck to fix it"); return ret; } @@ -2465,7 +2470,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, /* find free nids from current sum_pages */ scan_curseg_cache(sbi); - up_read(&nm_i->nat_tree_lock); + f2fs_up_read(&nm_i->nat_tree_lock); f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid), nm_i->ra_nid_pages, META_NAT, false); @@ -2663,7 +2668,7 @@ int f2fs_recover_xattr_data(struct inode *inode, struct page *page) goto recover_xnid; /* 1: invalidate the previous xattr nid */ - err = f2fs_get_node_info(sbi, prev_xnid, &ni); + err = f2fs_get_node_info(sbi, prev_xnid, &ni, false); if (err) return err; @@ -2703,7 +2708,7 @@ int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) struct page *ipage; int err; - err = f2fs_get_node_info(sbi, ino, &old_ni); + err = f2fs_get_node_info(sbi, ino, &old_ni, false); if (err) return err; @@ -2993,15 +2998,15 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) * nat_cnt[DIRTY_NAT]. */ if (enabled_nat_bits(sbi, cpc)) { - down_write(&nm_i->nat_tree_lock); + f2fs_down_write(&nm_i->nat_tree_lock); remove_nats_in_journal(sbi); - up_write(&nm_i->nat_tree_lock); + f2fs_up_write(&nm_i->nat_tree_lock); } if (!nm_i->nat_cnt[DIRTY_NAT]) return 0; - down_write(&nm_i->nat_tree_lock); + f2fs_down_write(&nm_i->nat_tree_lock); /* * if there are no enough space in journal to store dirty nat @@ -3030,7 +3035,7 @@ int f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc) break; } - up_write(&nm_i->nat_tree_lock); + f2fs_up_write(&nm_i->nat_tree_lock); /* Allow dirty nats by node block allocation in write_begin */ return err; @@ -3148,7 +3153,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi) mutex_init(&nm_i->build_lock); spin_lock_init(&nm_i->nid_list_lock); - init_rwsem(&nm_i->nat_tree_lock); + init_f2fs_rwsem(&nm_i->nat_tree_lock); nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); @@ -3254,7 +3259,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) spin_unlock(&nm_i->nid_list_lock); /* destroy nat cache */ - down_write(&nm_i->nat_tree_lock); + f2fs_down_write(&nm_i->nat_tree_lock); while ((found = __gang_lookup_nat_cache(nm_i, nid, NATVEC_SIZE, natvec))) { unsigned idx; @@ -3284,7 +3289,7 @@ void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi) kmem_cache_free(nat_entry_set_slab, setvec[idx]); } } - up_write(&nm_i->nat_tree_lock); + f2fs_up_write(&nm_i->nat_tree_lock); kvfree(nm_i->nat_block_bitmap); if (nm_i->free_nid_bitmap) { diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 695eacfe776c..564cec003beb 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -594,7 +594,7 @@ retry_dn: f2fs_wait_on_page_writeback(dn.node_page, NODE, true, true); - err = f2fs_get_node_info(sbi, dn.nid, &ni); + err = f2fs_get_node_info(sbi, dn.nid, &ni, false); if (err) goto err; @@ -797,7 +797,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) INIT_LIST_HEAD(&dir_list); /* prevent checkpoint 
*/ - down_write(&sbi->cp_global_sem); + f2fs_down_write(&sbi->cp_global_sem); /* step #1: find fsynced inode numbers */ err = find_fsync_dnodes(sbi, &inode_list, check_only); @@ -848,7 +848,7 @@ skip: if (!err) clear_sbi_flag(sbi, SBI_POR_DOING); - up_write(&sbi->cp_global_sem); + f2fs_up_write(&sbi->cp_global_sem); /* let's drop all the directory inodes for clean checkpoint */ destroy_fsync_dnodes(&dir_list, err); diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 95bfcff5ce8c..f739bae3c443 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -251,7 +251,7 @@ retry: goto next; } - err = f2fs_get_node_info(sbi, dn.nid, &ni); + err = f2fs_get_node_info(sbi, dn.nid, &ni, false); if (err) { f2fs_put_dnode(&dn); return err; @@ -471,7 +471,7 @@ int f2fs_commit_inmem_pages(struct inode *inode) f2fs_balance_fs(sbi, true); - down_write(&fi->i_gc_rwsem[WRITE]); + f2fs_down_write(&fi->i_gc_rwsem[WRITE]); f2fs_lock_op(sbi); set_inode_flag(inode, FI_ATOMIC_COMMIT); @@ -483,7 +483,7 @@ int f2fs_commit_inmem_pages(struct inode *inode) clear_inode_flag(inode, FI_ATOMIC_COMMIT); f2fs_unlock_op(sbi); - up_write(&fi->i_gc_rwsem[WRITE]); + f2fs_up_write(&fi->i_gc_rwsem[WRITE]); return err; } @@ -521,7 +521,7 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need) io_schedule(); finish_wait(&sbi->gc_thread->fggc_wq, &wait); } else { - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); f2fs_gc(sbi, false, false, false, NULL_SEGNO); } } @@ -551,7 +551,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg) /* there is background inflight IO or foreground operation recently */ if (is_inflight_io(sbi, REQ_TIME) || - (!f2fs_time_over(sbi, REQ_TIME) && rwsem_is_locked(&sbi->cp_rwsem))) + (!f2fs_time_over(sbi, REQ_TIME) && f2fs_rwsem_is_locked(&sbi->cp_rwsem))) return; /* exceed periodical checkpoint timeout threshold */ @@ -2746,7 +2746,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi) if (!sbi->am.atgc_enabled) return; - down_read(&SM_I(sbi)->curseg_lock); + f2fs_down_read(&SM_I(sbi)->curseg_lock); mutex_lock(&curseg->curseg_mutex); down_write(&SIT_I(sbi)->sentry_lock); @@ -2756,7 +2756,7 @@ static void __f2fs_init_atgc_curseg(struct f2fs_sb_info *sbi) up_write(&SIT_I(sbi)->sentry_lock); mutex_unlock(&curseg->curseg_mutex); - up_read(&SM_I(sbi)->curseg_lock); + f2fs_up_read(&SM_I(sbi)->curseg_lock); } void f2fs_init_inmem_curseg(struct f2fs_sb_info *sbi) @@ -2907,7 +2907,7 @@ void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int segno; - down_read(&SM_I(sbi)->curseg_lock); + f2fs_down_read(&SM_I(sbi)->curseg_lock); mutex_lock(&curseg->curseg_mutex); down_write(&SIT_I(sbi)->sentry_lock); @@ -2931,7 +2931,7 @@ unlock: type, segno, curseg->segno); mutex_unlock(&curseg->curseg_mutex); - up_read(&SM_I(sbi)->curseg_lock); + f2fs_up_read(&SM_I(sbi)->curseg_lock); } static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type, @@ -2963,23 +2963,23 @@ static void __allocate_new_section(struct f2fs_sb_info *sbi, void f2fs_allocate_new_section(struct f2fs_sb_info *sbi, int type, bool force) { - down_read(&SM_I(sbi)->curseg_lock); + f2fs_down_read(&SM_I(sbi)->curseg_lock); down_write(&SIT_I(sbi)->sentry_lock); __allocate_new_section(sbi, type, force); up_write(&SIT_I(sbi)->sentry_lock); - up_read(&SM_I(sbi)->curseg_lock); + f2fs_up_read(&SM_I(sbi)->curseg_lock); } void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) { int i; - down_read(&SM_I(sbi)->curseg_lock); + 
f2fs_down_read(&SM_I(sbi)->curseg_lock); down_write(&SIT_I(sbi)->sentry_lock); for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) __allocate_new_segment(sbi, i, false, false); up_write(&SIT_I(sbi)->sentry_lock); - up_read(&SM_I(sbi)->curseg_lock); + f2fs_up_read(&SM_I(sbi)->curseg_lock); } static const struct segment_allocation default_salloc_ops = { @@ -3117,9 +3117,9 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) if (sbi->discard_blks == 0) goto out; - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); err = f2fs_write_checkpoint(sbi, &cpc); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); if (err) goto out; @@ -3356,7 +3356,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, bool from_gc = (type == CURSEG_ALL_DATA_ATGC); struct seg_entry *se = NULL; - down_read(&SM_I(sbi)->curseg_lock); + f2fs_down_read(&SM_I(sbi)->curseg_lock); mutex_lock(&curseg->curseg_mutex); down_write(&sit_i->sentry_lock); @@ -3439,7 +3439,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, mutex_unlock(&curseg->curseg_mutex); - up_read(&SM_I(sbi)->curseg_lock); + f2fs_up_read(&SM_I(sbi)->curseg_lock); } static void update_device_state(struct f2fs_io_info *fio) @@ -3469,7 +3469,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) bool keep_order = (f2fs_lfs_mode(fio->sbi) && type == CURSEG_COLD_DATA); if (keep_order) - down_read(&fio->sbi->io_order_lock); + f2fs_down_read(&fio->sbi->io_order_lock); reallocate: f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, &fio->new_blkaddr, sum, type, fio); @@ -3489,7 +3489,7 @@ reallocate: update_device_state(fio); if (keep_order) - up_read(&fio->sbi->io_order_lock); + f2fs_up_read(&fio->sbi->io_order_lock); } void f2fs_do_write_meta_page(struct f2fs_sb_info *sbi, struct page *page, @@ -3620,7 +3620,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, se = get_seg_entry(sbi, segno); type = se->type; - down_write(&SM_I(sbi)->curseg_lock); + f2fs_down_write(&SM_I(sbi)->curseg_lock); if (!recover_curseg) { /* for recovery flow */ @@ -3689,7 +3689,7 @@ void f2fs_do_replace_block(struct f2fs_sb_info *sbi, struct f2fs_summary *sum, up_write(&sit_i->sentry_lock); mutex_unlock(&curseg->curseg_mutex); - up_write(&SM_I(sbi)->curseg_lock); + f2fs_up_write(&SM_I(sbi)->curseg_lock); } void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, @@ -5165,7 +5165,7 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi) INIT_LIST_HEAD(&sm_info->sit_entry_set); - init_rwsem(&sm_info->curseg_lock); + init_f2fs_rwsem(&sm_info->curseg_lock); if (!f2fs_readonly(sbi->sb)) { err = f2fs_create_flush_cmd_control(sbi); diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index a8be1f1609b9..3f8c32a529d4 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -1248,17 +1248,17 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) /* Initialize f2fs-specific inode info */ atomic_set(&fi->dirty_pages, 0); atomic_set(&fi->i_compr_blocks, 0); - init_rwsem(&fi->i_sem); + init_f2fs_rwsem(&fi->i_sem); spin_lock_init(&fi->i_size_lock); INIT_LIST_HEAD(&fi->dirty_list); INIT_LIST_HEAD(&fi->gdirty_list); INIT_LIST_HEAD(&fi->inmem_ilist); INIT_LIST_HEAD(&fi->inmem_pages); mutex_init(&fi->inmem_lock); - init_rwsem(&fi->i_gc_rwsem[READ]); - init_rwsem(&fi->i_gc_rwsem[WRITE]); - init_rwsem(&fi->i_mmap_sem); - init_rwsem(&fi->i_xattr_sem); + init_f2fs_rwsem(&fi->i_gc_rwsem[READ]); + 
init_f2fs_rwsem(&fi->i_gc_rwsem[WRITE]); + init_f2fs_rwsem(&fi->i_mmap_sem); + init_f2fs_rwsem(&fi->i_xattr_sem); /* Will be used by directory only */ fi->i_dir_level = F2FS_SB(sb)->dir_level; @@ -1963,7 +1963,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) f2fs_update_time(sbi, DISABLE_TIME); while (!f2fs_time_over(sbi, DISABLE_TIME)) { - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); err = f2fs_gc(sbi, true, false, false, NULL_SEGNO); if (err == -ENODATA) { err = 0; @@ -1985,7 +1985,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) goto restore_flag; } - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); cpc.reason = CP_PAUSE; set_sbi_flag(sbi, SBI_CP_DISABLED); err = f2fs_write_checkpoint(sbi, &cpc); @@ -1997,7 +1997,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi) spin_unlock(&sbi->stat_lock); out_unlock: - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); restore_flag: sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */ return err; @@ -2017,12 +2017,12 @@ static void f2fs_enable_checkpoint(struct f2fs_sb_info *sbi) if (unlikely(retry < 0)) f2fs_warn(sbi, "checkpoint=enable has some unwritten data."); - down_write(&sbi->gc_lock); + f2fs_down_write(&sbi->gc_lock); f2fs_dirty_to_prefree(sbi); clear_sbi_flag(sbi, SBI_CP_DISABLED); set_sbi_flag(sbi, SBI_IS_DIRTY); - up_write(&sbi->gc_lock); + f2fs_up_write(&sbi->gc_lock); f2fs_sync_fs(sbi->sb, 1); } @@ -2544,18 +2544,18 @@ int f2fs_quota_sync(struct super_block *sb, int type) /* * do_quotactl * f2fs_quota_sync - * down_read(quota_sem) + * f2fs_down_read(quota_sem) * dquot_writeback_dquots() * f2fs_dquot_commit * block_operation - * down_read(quota_sem) + * f2fs_down_read(quota_sem) */ f2fs_lock_op(sbi); - down_read(&sbi->quota_sem); + f2fs_down_read(&sbi->quota_sem); ret = f2fs_quota_sync_file(sbi, cnt); - up_read(&sbi->quota_sem); + f2fs_up_read(&sbi->quota_sem); f2fs_unlock_op(sbi); inode_unlock(dqopt->files[cnt]); @@ -2680,11 +2680,11 @@ static int f2fs_dquot_commit(struct dquot *dquot) struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); int ret; - down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); + f2fs_down_read_nested(&sbi->quota_sem, SINGLE_DEPTH_NESTING); ret = dquot_commit(dquot); if (ret < 0) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); - up_read(&sbi->quota_sem); + f2fs_up_read(&sbi->quota_sem); return ret; } @@ -2693,11 +2693,11 @@ static int f2fs_dquot_acquire(struct dquot *dquot) struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb); int ret; - down_read(&sbi->quota_sem); + f2fs_down_read(&sbi->quota_sem); ret = dquot_acquire(dquot); if (ret < 0) set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR); - up_read(&sbi->quota_sem); + f2fs_up_read(&sbi->quota_sem); return ret; } @@ -3430,14 +3430,14 @@ static void init_sb_info(struct f2fs_sb_info *sbi) INIT_LIST_HEAD(&sbi->s_list); mutex_init(&sbi->umount_mutex); - init_rwsem(&sbi->io_order_lock); + init_f2fs_rwsem(&sbi->io_order_lock); spin_lock_init(&sbi->cp_lock); sbi->dirty_device = 0; spin_lock_init(&sbi->dev_lock); - init_rwsem(&sbi->sb_lock); - init_rwsem(&sbi->pin_sem); + init_f2fs_rwsem(&sbi->sb_lock); + init_f2fs_rwsem(&sbi->pin_sem); } static int init_percpu_info(struct f2fs_sb_info *sbi) @@ -3881,11 +3881,11 @@ try_onemore: /* init f2fs-specific super block info */ sbi->valid_super_block = valid_super_block; - init_rwsem(&sbi->gc_lock); + init_f2fs_rwsem(&sbi->gc_lock); mutex_init(&sbi->writepages); - init_rwsem(&sbi->cp_global_sem); - init_rwsem(&sbi->node_write); - init_rwsem(&sbi->node_change); 
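None of the hunks in this patch carries the definition of struct f2fs_rwsem; it lives in fs/f2fs/f2fs.h. A minimal forwarding shape that would satisfy every call site converted here looks roughly like the block below; the in-tree version may layer extra fairness handling (for example a reader wait queue) on top of the bare rw_semaphore, which is exactly the kind of change the wrapper makes possible without touching callers again:

	struct f2fs_rwsem {
		struct rw_semaphore internal_rwsem;
		/* the real wrapper may carry a wait queue here for fair readers */
	};

	/* kept as a macro so every call site gets its own lockdep class */
	#define init_f2fs_rwsem(sem)	init_rwsem(&(sem)->internal_rwsem)

	static inline void f2fs_down_read(struct f2fs_rwsem *sem)
	{
		down_read(&sem->internal_rwsem);
	}

	static inline void f2fs_up_read(struct f2fs_rwsem *sem)
	{
		up_read(&sem->internal_rwsem);
	}

	static inline void f2fs_down_write(struct f2fs_rwsem *sem)
	{
		down_write(&sem->internal_rwsem);
	}

	static inline void f2fs_up_write(struct f2fs_rwsem *sem)
	{
		up_write(&sem->internal_rwsem);
	}

	/* f2fs_down_read_trylock(), f2fs_down_write_trylock() and
	 * f2fs_down_read_nested() follow the same one-line forwarding pattern. */

Keeping callers on these names is the point of the churn in this series: the locking policy can later change in one header instead of in every file touched here.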
+ init_f2fs_rwsem(&sbi->cp_global_sem); + init_f2fs_rwsem(&sbi->node_write); + init_f2fs_rwsem(&sbi->node_change); /* disallow all the data/node/meta page writes */ set_sbi_flag(sbi, SBI_POR_DOING); @@ -3911,18 +3911,18 @@ try_onemore: } for (j = HOT; j < n; j++) { - init_rwsem(&sbi->write_io[i][j].io_rwsem); + init_f2fs_rwsem(&sbi->write_io[i][j].io_rwsem); sbi->write_io[i][j].sbi = sbi; sbi->write_io[i][j].bio = NULL; spin_lock_init(&sbi->write_io[i][j].io_lock); INIT_LIST_HEAD(&sbi->write_io[i][j].io_list); INIT_LIST_HEAD(&sbi->write_io[i][j].bio_list); - init_rwsem(&sbi->write_io[i][j].bio_list_lock); + init_f2fs_rwsem(&sbi->write_io[i][j].bio_list_lock); } } - init_rwsem(&sbi->cp_rwsem); - init_rwsem(&sbi->quota_sem); + init_f2fs_rwsem(&sbi->cp_rwsem); + init_f2fs_rwsem(&sbi->quota_sem); init_waitqueue_head(&sbi->cp_wait); init_sb_info(sbi); diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 2c9b6b26aa64..30f891efab4e 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -363,7 +363,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a, if (strlen(name) >= F2FS_EXTENSION_LEN) return -EINVAL; - down_write(&sbi->sb_lock); + f2fs_down_write(&sbi->sb_lock); ret = f2fs_update_extension_list(sbi, name, hot, set); if (ret) @@ -373,7 +373,7 @@ static ssize_t __sbi_store(struct f2fs_attr *a, if (ret) f2fs_update_extension_list(sbi, name, hot, !set); out: - up_write(&sbi->sb_lock); + f2fs_up_write(&sbi->sb_lock); return ret ? ret : count; } diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index 15ba36926fad..c700fb47e895 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -208,7 +208,7 @@ cleanup: * from re-instantiating cached pages we are truncating (since unlike * normal file accesses, garbage collection isn't limited by i_size). */ - down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); truncate_inode_pages(inode->i_mapping, inode->i_size); err2 = f2fs_truncate(inode); if (err2) { @@ -216,7 +216,7 @@ cleanup: err2); set_sbi_flag(sbi, SBI_NEED_FSCK); } - up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + f2fs_up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); clear_inode_flag(inode, FI_VERITY_IN_PROGRESS); return err ?: err2; } diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index f738993d3eb4..7be723118df0 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -529,10 +529,10 @@ int f2fs_getxattr(struct inode *inode, int index, const char *name, if (len > F2FS_NAME_LEN) return -ERANGE; - down_read(&F2FS_I(inode)->i_xattr_sem); + f2fs_down_read(&F2FS_I(inode)->i_xattr_sem); error = lookup_all_xattrs(inode, ipage, index, len, name, &entry, &base_addr, &base_size, &is_inline); - up_read(&F2FS_I(inode)->i_xattr_sem); + f2fs_up_read(&F2FS_I(inode)->i_xattr_sem); if (error) return error; @@ -566,9 +566,9 @@ ssize_t f2fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) int error; size_t rest = buffer_size; - down_read(&F2FS_I(inode)->i_xattr_sem); + f2fs_down_read(&F2FS_I(inode)->i_xattr_sem); error = read_all_xattrs(inode, NULL, &base_addr); - up_read(&F2FS_I(inode)->i_xattr_sem); + f2fs_up_read(&F2FS_I(inode)->i_xattr_sem); if (error) return error; @@ -790,9 +790,9 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name, f2fs_balance_fs(sbi, true); f2fs_lock_op(sbi); - down_write(&F2FS_I(inode)->i_xattr_sem); + f2fs_down_write(&F2FS_I(inode)->i_xattr_sem); err = __f2fs_setxattr(inode, index, name, value, size, ipage, flags); - up_write(&F2FS_I(inode)->i_xattr_sem); + f2fs_up_write(&F2FS_I(inode)->i_xattr_sem); 
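Two probe helpers also appear in the conversions above (f2fs_balance_fs_bg() in segment.c and cache_nat_entry() in node.c) without being defined in this excerpt. Assuming the same thin-wrapper layout, they reduce to the stock rwsem predicates, and the new early return in cache_nat_entry() shows why they are worth having; the sketch below renames the function to make clear it is an illustration, not the patched body:

	static inline bool f2fs_rwsem_is_locked(struct f2fs_rwsem *sem)
	{
		return rwsem_is_locked(&sem->internal_rwsem);
	}

	static inline bool f2fs_rwsem_is_contended(struct f2fs_rwsem *sem)
	{
		return rwsem_is_contended(&sem->internal_rwsem);
	}

	/*
	 * Caching a NAT entry is only an optimization, so while a checkpoint
	 * holds cp_global_sem it is cheaper to skip the insert than to queue
	 * up behind the checkpoint on nat_tree_lock.
	 */
	static void cache_nat_entry_sketch(struct f2fs_sb_info *sbi, nid_t nid)
	{
		if (f2fs_rwsem_is_locked(&sbi->cp_global_sem))
			return;

		/* ... __alloc_nat_entry() and insertion for @nid under
		 * f2fs_down_write(&NM_I(sbi)->nat_tree_lock), as before ... */
	}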
f2fs_unlock_op(sbi); f2fs_update_time(sbi, REQ_TIME); diff --git a/fs/incfs/data_mgmt.c b/fs/incfs/data_mgmt.c index fbab68a280d5..a383c5b5ad7f 100644 --- a/fs/incfs/data_mgmt.c +++ b/fs/incfs/data_mgmt.c @@ -175,6 +175,7 @@ void incfs_free_mount_info(struct mount_info *mi) kfree(mi->pseudo_file_xattr[i].data); kfree(mi->mi_per_uid_read_timeouts); incfs_free_sysfs_node(mi->mi_sysfs_node); + kfree(mi->mi_options.sysfs_name); kfree(mi); } diff --git a/fs/incfs/pseudo_files.c b/fs/incfs/pseudo_files.c index 1a664f1478d5..1b9bf000f598 100644 --- a/fs/incfs/pseudo_files.c +++ b/fs/incfs/pseudo_files.c @@ -147,8 +147,12 @@ static long ioctl_permit_fill(struct file *f, void __user *arg) return -EFAULT; file = fget(permit_fill.file_descriptor); - if (IS_ERR(file)) + if (IS_ERR_OR_NULL(file)) { + if (!file) + return -ENOENT; + return PTR_ERR(file); + } if (file->f_op != &incfs_file_ops) { error = -EPERM; diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c index ea7866fbfd6e..35ac6e3daf8e 100644 --- a/fs/incfs/vfs.c +++ b/fs/incfs/vfs.c @@ -393,7 +393,7 @@ static int iterate_incfs_dir(struct file *file, struct dir_context *ctx) struct mount_info *mi = get_mount_info(file_superblock(file)); bool root; - if (!dir) { + if (!dir || !mi) { error = -EBADF; goto out; } @@ -1336,6 +1336,9 @@ static int dir_rename(struct inode *old_dir, struct dentry *old_dentry, struct dentry *trap; int error = 0; + if (!mi) + return -EBADF; + error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex); if (error) return error; @@ -1664,6 +1667,9 @@ static ssize_t incfs_getxattr(struct dentry *d, const char *name, size_t stored_size; int i; + if (!mi) + return -EBADF; + if (di && di->backing_path.dentry) return vfs_getxattr(di->backing_path.dentry, name, value, size); @@ -1698,6 +1704,9 @@ static ssize_t incfs_setxattr(struct dentry *d, const char *name, size_t *stored_size; int i; + if (!mi) + return -EBADF; + if (di && di->backing_path.dentry) return vfs_setxattr(di->backing_path.dentry, name, value, size, flags); @@ -1736,6 +1745,11 @@ static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size) return vfs_listxattr(di->backing_path.dentry, list, size); } +static int incfs_test_super(struct super_block *s, void *p) +{ + return s->s_fs_info != NULL; +} + struct dentry *incfs_mount_fs(struct file_system_type *type, int flags, const char *dev_name, void *data) { @@ -1746,7 +1760,8 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags, struct dentry *incomplete_dir = NULL; struct super_block *src_fs_sb = NULL; struct inode *root_inode = NULL; - struct super_block *sb = sget(type, NULL, set_anon_super, flags, NULL); + struct super_block *sb = sget(type, incfs_test_super, set_anon_super, + flags, NULL); int error = 0; if (IS_ERR(sb)) @@ -1787,13 +1802,18 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags, src_fs_sb = backing_dir_path.dentry->d_sb; sb->s_maxbytes = src_fs_sb->s_maxbytes; - mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path); + if (!sb->s_fs_info) { + mi = incfs_alloc_mount_info(sb, &options, &backing_dir_path); - if (IS_ERR_OR_NULL(mi)) { - error = PTR_ERR(mi); - pr_err("incfs: Error allocating mount info. %d\n", error); - mi = NULL; - goto err; + if (IS_ERR_OR_NULL(mi)) { + error = PTR_ERR(mi); + pr_err("incfs: Error allocating mount info. 
%d\n", error); + mi = NULL; + goto err; + } + sb->s_fs_info = mi; + } else { + mi = sb->s_fs_info; } index_dir = open_or_create_special_dir(backing_dir_path.dentry, @@ -1818,21 +1838,22 @@ struct dentry *incfs_mount_fs(struct file_system_type *type, int flags, } mi->mi_incomplete_dir = incomplete_dir; - sb->s_fs_info = mi; root_inode = fetch_regular_inode(sb, backing_dir_path.dentry); if (IS_ERR(root_inode)) { error = PTR_ERR(root_inode); goto err; } - sb->s_root = d_make_root(root_inode); if (!sb->s_root) { - error = -ENOMEM; - goto err; + sb->s_root = d_make_root(root_inode); + if (!sb->s_root) { + error = -ENOMEM; + goto err; + } + error = incfs_init_dentry(sb->s_root, &backing_dir_path); + if (error) + goto err; } - error = incfs_init_dentry(sb->s_root, &backing_dir_path); - if (error) - goto err; path_put(&backing_dir_path); sb->s_flags |= SB_ACTIVE; @@ -1854,6 +1875,9 @@ static int incfs_remount_fs(struct super_block *sb, int *flags, char *data) struct mount_info *mi = get_mount_info(sb); int err = 0; + if (!mi) + return err; + sync_filesystem(sb); err = parse_options(&options, (char *)data); if (err) @@ -1883,12 +1907,16 @@ void incfs_kill_sb(struct super_block *sb) pr_debug("incfs: unmount\n"); generic_shutdown_super(sb); incfs_free_mount_info(mi); + sb->s_fs_info = NULL; } static int show_options(struct seq_file *m, struct dentry *root) { struct mount_info *mi = get_mount_info(root->d_sb); + if (!mi) + return -EBADF; + seq_printf(m, ",read_timeout_ms=%u", mi->mi_options.read_timeout_ms); seq_printf(m, ",readahead=%u", mi->mi_options.readahead_pages); if (mi->mi_options.read_log_pages != 0) { diff --git a/fs/incfs/vfs.h b/fs/incfs/vfs.h index 79fdf243733d..8876e63a8b0f 100644 --- a/fs/incfs/vfs.h +++ b/fs/incfs/vfs.h @@ -19,7 +19,6 @@ static inline struct mount_info *get_mount_info(struct super_block *sb) { struct mount_info *result = sb->s_fs_info; - WARN_ON(!result); return result; } diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index ade05887070d..5a3f09d7d821 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c @@ -504,8 +504,7 @@ static struct dentry *__create_dir(const char *name, struct dentry *parent, if (unlikely(!inode)) return failed_creating(dentry); - /* Do not set bits for OTH */ - inode->i_mode = S_IFDIR | S_IRWXU | S_IRUSR| S_IRGRP | S_IXUSR | S_IXGRP; + inode->i_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO; inode->i_op = ops; inode->i_fop = &simple_dir_operations; inode->i_uid = d_inode(dentry->d_parent)->i_uid; diff --git a/include/trace/hooks/cpu.h b/include/trace/hooks/cpu.h index fc3eaf318b25..8653e34064c3 100644 --- a/include/trace/hooks/cpu.h +++ b/include/trace/hooks/cpu.h @@ -14,10 +14,6 @@ DECLARE_HOOK(android_vh_cpu_up, TP_PROTO(unsigned int cpu), TP_ARGS(cpu)); -DECLARE_HOOK(android_vh_cpu_down, - TP_PROTO(unsigned int cpu), - TP_ARGS(cpu)); - /* macro versions of hooks are no longer required */ #endif /* _TRACE_HOOK_CPU_H */ diff --git a/include/trace/hooks/gic_v3.h b/include/trace/hooks/gic_v3.h index 3afb7379145d..42e0bb934570 100644 --- a/include/trace/hooks/gic_v3.h +++ b/include/trace/hooks/gic_v3.h @@ -12,9 +12,6 @@ */ struct irq_data; struct cpumask; -DECLARE_HOOK(android_vh_gic_v3_affinity_init, - TP_PROTO(int irq, u32 offset, u64 *affinity), - TP_ARGS(irq, offset, affinity)); DECLARE_RESTRICTED_HOOK(android_rvh_gic_v3_set_affinity, TP_PROTO(struct irq_data *d, const struct cpumask *mask_val, u64 *affinity, bool force, void __iomem *base), diff --git a/include/trace/hooks/iommu.h b/include/trace/hooks/iommu.h index 
dd54a50b3aa4..e818b90e482c 100644 --- a/include/trace/hooks/iommu.h +++ b/include/trace/hooks/iommu.h @@ -12,6 +12,10 @@ #include #include +DECLARE_RESTRICTED_HOOK(android_rvh_iommu_setup_dma_ops, + TP_PROTO(struct device *dev, u64 dma_base, u64 size), + TP_ARGS(dev, dma_base, size), 1); + DECLARE_HOOK(android_vh_iommu_setup_dma_ops, TP_PROTO(struct device *dev, u64 dma_base, u64 size), TP_ARGS(dev, dma_base, size)); diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h index 5821f6d74a07..1e7c0a0811de 100644 --- a/include/trace/hooks/mm.h +++ b/include/trace/hooks/mm.h @@ -68,16 +68,7 @@ DECLARE_HOOK(android_vh_include_reserved_zone, DECLARE_HOOK(android_vh_show_mem, TP_PROTO(unsigned int filter, nodemask_t *nodemask), TP_ARGS(filter, nodemask)); -DECLARE_HOOK(android_vh_alloc_pages_slowpath, - TP_PROTO(gfp_t gfp_mask, unsigned int order, unsigned long delta), - TP_ARGS(gfp_mask, order, delta)); -DECLARE_HOOK(android_vh_print_slabinfo_header, - TP_PROTO(struct seq_file *m), - TP_ARGS(m)); struct slabinfo; -DECLARE_HOOK(android_vh_cache_show, - TP_PROTO(struct seq_file *m, struct slabinfo *sinfo, struct kmem_cache *s), - TP_ARGS(m, sinfo, s)); struct dirty_throttle_control; DECLARE_HOOK(android_vh_mm_dirty_limits, TP_PROTO(struct dirty_throttle_control *const gdtc, bool strictlimit, diff --git a/include/trace/hooks/net.h b/include/trace/hooks/net.h index 6715aa4eb668..acb2fd2c7e07 100644 --- a/include/trace/hooks/net.h +++ b/include/trace/hooks/net.h @@ -18,16 +18,8 @@ DECLARE_HOOK(android_vh_ptype_head, DECLARE_HOOK(android_vh_kfree_skb, TP_PROTO(struct sk_buff *skb), TP_ARGS(skb)); -struct nf_conn; -struct sock; -DECLARE_RESTRICTED_HOOK(android_rvh_nf_conn_alloc, - TP_PROTO(struct nf_conn *nf_conn), TP_ARGS(nf_conn), 1); -DECLARE_RESTRICTED_HOOK(android_rvh_nf_conn_free, - TP_PROTO(struct nf_conn *nf_conn), TP_ARGS(nf_conn), 1); -DECLARE_RESTRICTED_HOOK(android_rvh_sk_alloc, - TP_PROTO(struct sock *sock), TP_ARGS(sock), 1); -DECLARE_RESTRICTED_HOOK(android_rvh_sk_free, - TP_PROTO(struct sock *sock), TP_ARGS(sock), 1); +struct nf_conn; /* needed for CRC preservation */ +struct sock; /* needed for CRC preservation */ /* macro versions of hooks are no longer required */ diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h index 0844244c9aff..55fc6c3a1a89 100644 --- a/include/trace/hooks/sched.h +++ b/include/trace/hooks/sched.h @@ -283,9 +283,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_tick, unsigned long delta_exec, struct cfs_rq *cfs_rq, struct sched_entity *curr, unsigned int granularity), TP_ARGS(p, ideal_runtime, skip_preempt, delta_exec, cfs_rq, curr, granularity), 1); -DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_wakeup_ignore, - TP_PROTO(struct task_struct *p, bool *ignore), - TP_ARGS(p, ignore), 1); DECLARE_RESTRICTED_HOOK(android_rvh_replace_next_task_fair, TP_PROTO(struct rq *rq, struct task_struct **p, struct sched_entity **se, bool *repick, bool simple, struct task_struct *prev), @@ -342,10 +339,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_entity, TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se), TP_ARGS(cfs, se), 1); -DECLARE_RESTRICTED_HOOK(android_rvh_entity_tick, - TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se), - TP_ARGS(cfs_rq, se), 1); - DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_task_fair, TP_PROTO(struct rq *rq, struct task_struct *p, int flags), TP_ARGS(rq, p, flags), 1); @@ -370,10 +363,6 @@ DECLARE_HOOK(android_vh_dup_task_struct, TP_PROTO(struct task_struct *tsk, struct task_struct *orig), 
TP_ARGS(tsk, orig)); -DECLARE_HOOK(android_vh_set_task_comm, - TP_PROTO(struct task_struct *p), - TP_ARGS(p)); - DECLARE_RESTRICTED_HOOK(android_rvh_find_new_ilb, TP_PROTO(struct cpumask *nohz_idle_cpus_mask, int *ilb), TP_ARGS(nohz_idle_cpus_mask, ilb), 1); diff --git a/include/trace/hooks/sysrqcrash.h b/include/trace/hooks/sysrqcrash.h deleted file mode 100644 index d163f898a9f6..000000000000 --- a/include/trace/hooks/sysrqcrash.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM sysrqcrash -#define TRACE_INCLUDE_PATH trace/hooks - -#if !defined(_TRACE_HOOK_SYSRQCRASH_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_HOOK_SYSRQCRASH_H -#include -#include -/* - * Following tracepoints are not exported in tracefs and provide a - * mechanism for vendor modules to hook and extend functionality - */ -DECLARE_HOOK(android_vh_sysrq_crash, - TP_PROTO(void *data), - TP_ARGS(data)); - -/* macro versions of hooks are no longer required */ - -#endif /* _TRACE_HOOK_SYSRQCRASH_H */ -/* This part must be outside protection */ -#include diff --git a/include/trace/hooks/traps.h b/include/trace/hooks/traps.h index f8d56a207afc..d1ceb632c1d5 100644 --- a/include/trace/hooks/traps.h +++ b/include/trace/hooks/traps.h @@ -17,11 +17,6 @@ DECLARE_RESTRICTED_HOOK(android_rvh_do_undefinstr, TP_ARGS(regs, user), TP_CONDITION(!user)); -DECLARE_RESTRICTED_HOOK(android_rvh_do_bti, - TP_PROTO(struct pt_regs *regs, bool user), - TP_ARGS(regs, user), - TP_CONDITION(!user)); - DECLARE_RESTRICTED_HOOK(android_rvh_do_ptrauth_fault, TP_PROTO(struct pt_regs *regs, unsigned int esr, bool user), TP_ARGS(regs, esr, user), diff --git a/include/trace/hooks/user.h b/include/trace/hooks/user.h deleted file mode 100644 index b5b507ccd9c2..000000000000 --- a/include/trace/hooks/user.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM user -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH trace/hooks -#if !defined(_TRACE_HOOK_USER_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_HOOK_USER_H -#include -#include - -struct user_struct; -DECLARE_HOOK(android_vh_alloc_uid, - TP_PROTO(struct user_struct *user), - TP_ARGS(user)); - -DECLARE_HOOK(android_vh_free_user, - TP_PROTO(struct user_struct *up), - TP_ARGS(up)); - -#endif /* _TRACE_HOOK_USER_H */ -/* This part must be outside protection */ -#include - diff --git a/include/trace/hooks/vmscan.h b/include/trace/hooks/vmscan.h index b6b77d9895cd..ecbff00093ee 100644 --- a/include/trace/hooks/vmscan.h +++ b/include/trace/hooks/vmscan.h @@ -22,9 +22,6 @@ DECLARE_HOOK(android_vh_shrink_slab_bypass, DECLARE_HOOK(android_vh_tune_inactive_ratio, TP_PROTO(unsigned long *inactive_ratio, int file), TP_ARGS(inactive_ratio, file)) -DECLARE_HOOK(android_vh_do_shrink_slab, - TP_PROTO(struct shrinker *shrinker, struct shrink_control *shrinkctl, int priority), - TP_ARGS(shrinker, shrinkctl, priority)); DECLARE_RESTRICTED_HOOK(android_rvh_set_balance_anon_file_reclaim, TP_PROTO(bool *balance_anon_file_reclaim), TP_ARGS(balance_anon_file_reclaim), 1); diff --git a/include/trace/hooks/workqueue.h b/include/trace/hooks/workqueue.h deleted file mode 100644 index 0ffe56433a4c..000000000000 --- a/include/trace/hooks/workqueue.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#undef TRACE_SYSTEM -#define TRACE_SYSTEM workqueue -#define TRACE_INCLUDE_PATH trace/hooks - -#if !defined(_TRACE_HOOK_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) 
-#define _TRACE_HOOK_WORKQUEUE_H -#include -#include -/* - * Following tracepoints are not exported in tracefs and provide a - * mechanism for vendor modules to hook and extend functionality - */ -struct worker; -DECLARE_HOOK(android_vh_create_worker, - TP_PROTO(struct worker *worker, struct workqueue_attrs *attrs), - TP_ARGS(worker, attrs)); -/* macro versions of hooks are no longer required */ - -#endif /* _TRACE_HOOK_WORKQUEUE_H */ -/* This part must be outside protection */ -#include diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index df41ab45ab85..510f341f1ddc 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -3307,7 +3307,6 @@ void cpuset_wait_for_hotplug(void) { flush_work(&cpuset_hotplug_work); } -EXPORT_SYMBOL_GPL(cpuset_wait_for_hotplug); /* * Keep top_cpuset.mems_allowed tracking node_states[N_MEMORY]. diff --git a/kernel/cpu.c b/kernel/cpu.c index 4648410fdb55..d49ba9df8f06 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1127,8 +1127,6 @@ static int cpu_down(unsigned int cpu, enum cpuhp_state target) { int err; - trace_android_vh_cpu_down(cpu); - cpu_maps_update_begin(); err = cpu_down_maps_locked(cpu, target); cpu_maps_update_done(); diff --git a/kernel/printk/printk_ringbuffer.c b/kernel/printk/printk_ringbuffer.c index 617dd6358965..b77c4154a1ee 100644 --- a/kernel/printk/printk_ringbuffer.c +++ b/kernel/printk/printk_ringbuffer.c @@ -474,8 +474,10 @@ static enum desc_state desc_read(struct prb_desc_ring *desc_ring, * state has been re-checked. A memcpy() for all of @desc * cannot be used because of the atomic_t @state_var field. */ - memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos, - sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */ + if (desc_out) { + memcpy(&desc_out->text_blk_lpos, &desc->text_blk_lpos, + sizeof(desc_out->text_blk_lpos)); /* LMM(desc_read:C) */ + } if (seq_out) *seq_out = info->seq; /* also part of desc_read:C */ if (caller_id_out) @@ -528,7 +530,8 @@ static enum desc_state desc_read(struct prb_desc_ring *desc_ring, state_val = atomic_long_read(state_var); /* LMM(desc_read:E) */ d_state = get_desc_state(id, state_val); out: - atomic_long_set(&desc_out->state_var, state_val); + if (desc_out) + atomic_long_set(&desc_out->state_var, state_val); return d_state; } @@ -1450,6 +1453,9 @@ static void desc_make_final(struct prb_desc_ring *desc_ring, unsigned long id) atomic_long_cmpxchg_relaxed(&d->state_var, prev_state_val, DESC_SV(id, desc_finalized)); /* LMM(desc_make_final:A) */ + + /* Best effort to remember the last finalized @id. */ + atomic_long_set(&desc_ring->last_finalized_id, id); } /** @@ -1659,7 +1665,12 @@ void prb_commit(struct prb_reserved_entry *e) */ void prb_final_commit(struct prb_reserved_entry *e) { + struct prb_desc_ring *desc_ring = &e->rb->desc_ring; + _prb_commit(e, desc_finalized); + + /* Best effort to remember the last finalized @id. */ + atomic_long_set(&desc_ring->last_finalized_id, e->id); } /* @@ -2007,9 +2018,39 @@ u64 prb_first_valid_seq(struct printk_ringbuffer *rb) */ u64 prb_next_seq(struct printk_ringbuffer *rb) { - u64 seq = 0; + struct prb_desc_ring *desc_ring = &rb->desc_ring; + enum desc_state d_state; + unsigned long id; + u64 seq; - /* Search forward from the oldest descriptor. */ + /* Check if the cached @id still points to a valid @seq. */ + id = atomic_long_read(&desc_ring->last_finalized_id); + d_state = desc_read(desc_ring, id, NULL, &seq, NULL); + + if (d_state == desc_finalized || d_state == desc_reusable) { + /* + * Begin searching after the last finalized record. 
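Pulling the prb_next_seq() pieces of this hunk together: desc_read() is taught above to tolerate NULL output pointers precisely so this function can probe the cached id cheaply, and last_finalized_id is only a best-effort hint written from the commit paths. A consolidated view of the new lookup, with nothing added beyond what the diff introduces (the name is changed only to mark it as a sketch):

	u64 prb_next_seq_sketch(struct printk_ringbuffer *rb)
	{
		struct prb_desc_ring *desc_ring = &rb->desc_ring;
		unsigned long id = atomic_long_read(&desc_ring->last_finalized_id);
		enum desc_state d_state;
		u64 seq;

		/* NULL desc_out/caller_id_out: only the sequence number is needed. */
		d_state = desc_read(desc_ring, id, NULL, &seq, NULL);

		if (d_state == desc_finalized || d_state == desc_reusable) {
			/* Resume after the hint, except for the seq == 0 bootstrap case. */
			if (seq != 0)
				seq++;
		} else {
			/* Hint already recycled under heavy logging: scan from 0. */
			seq = 0;
		}

		/* The hint may lag behind; walk forward to the real next record. */
		while (_prb_read_valid(rb, &seq, NULL, NULL))
			seq++;

		return seq;
	}

The win is that readers polling for new records no longer walk the whole descriptor ring from sequence 0 on every call.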
+ * + * On 0, the search must begin at 0 because of hack#2 + * of the bootstrapping phase it is not known if a + * record at index 0 exists. + */ + if (seq != 0) + seq++; + } else { + /* + * The information about the last finalized sequence number + * has gone. It should happen only when there is a flood of + * new messages and the ringbuffer is rapidly recycled. + * Give up and start from the beginning. + */ + seq = 0; + } + + /* + * The information about the last finalized @seq might be inaccurate. + * Search forward to find the current one. + */ while (_prb_read_valid(rb, &seq, NULL, NULL)) seq++; @@ -2046,6 +2087,7 @@ void prb_init(struct printk_ringbuffer *rb, rb->desc_ring.infos = infos; atomic_long_set(&rb->desc_ring.head_id, DESC0_ID(descbits)); atomic_long_set(&rb->desc_ring.tail_id, DESC0_ID(descbits)); + atomic_long_set(&rb->desc_ring.last_finalized_id, DESC0_ID(descbits)); rb->text_data_ring.size_bits = textbits; rb->text_data_ring.data = text_buf; diff --git a/kernel/printk/printk_ringbuffer.h b/kernel/printk/printk_ringbuffer.h index 5dc9d022db07..e68a8cb277b1 100644 --- a/kernel/printk/printk_ringbuffer.h +++ b/kernel/printk/printk_ringbuffer.h @@ -75,6 +75,7 @@ struct prb_desc_ring { struct printk_info *infos; atomic_long_t head_id; atomic_long_t tail_id; + atomic_long_t last_finalized_id; }; /* @@ -258,6 +259,7 @@ static struct printk_ringbuffer name = { \ .infos = &_##name##_infos[0], \ .head_id = ATOMIC_INIT(DESC0_ID(descbits)), \ .tail_id = ATOMIC_INIT(DESC0_ID(descbits)), \ + .last_finalized_id = ATOMIC_INIT(DESC0_ID(descbits)), \ }, \ .text_data_ring = { \ .size_bits = (avgtextbits) + (descbits), \ diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h index 6aeb5f517ce2..e4b9fa72570e 100644 --- a/kernel/rcu/tree_exp.h +++ b/kernel/rcu/tree_exp.h @@ -813,7 +813,7 @@ static int rcu_print_task_exp_stall(struct rcu_node *rnp) */ void synchronize_rcu_expedited(void) { - bool boottime = (rcu_scheduler_active == RCU_SCHEDULER_INIT); + bool no_wq; struct rcu_exp_work rew; struct rcu_node *rnp; unsigned long s; @@ -838,9 +838,15 @@ void synchronize_rcu_expedited(void) if (exp_funnel_lock(s)) return; /* Someone else did our work for us. */ + /* Don't use workqueue during boot or from an incoming CPU. */ + preempt_disable(); + no_wq = rcu_scheduler_active == RCU_SCHEDULER_INIT || + !cpumask_test_cpu(smp_processor_id(), cpu_active_mask); + preempt_enable(); + /* Ensure that load happens before action based on it. */ - if (unlikely(boottime)) { - /* Direct call during scheduler init and early_initcalls(). */ + if (unlikely(no_wq)) { + /* Direct call for scheduler init, early_initcall()s, and incoming CPUs. */ rcu_exp_sel_wait_wake(s); } else { /* Marshall arguments & schedule the expedited grace period. */ @@ -858,7 +864,7 @@ void synchronize_rcu_expedited(void) /* Let the next expedited grace period start. */ mutex_unlock(&rcu_state.exp_mutex); - if (likely(!boottime)) + if (likely(!no_wq)) destroy_work_on_stack(&rew.rew_work); } EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 6ed153f226b3..33a725a03024 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -531,16 +531,16 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } - /* Unboost if we were boosted. 
*/ - if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) - rt_mutex_futex_unlock(&rnp->boost_mtx); - /* * If this was the last task on the expedited lists, * then we need to report up the rcu_node hierarchy. */ if (!empty_exp && empty_exp_now) rcu_report_exp_rnp(rnp, true); + + /* Unboost if we were boosted. */ + if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex) + rt_mutex_futex_unlock(&rnp->boost_mtx); } else { local_irq_restore(flags); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 31c804640564..1defb8dff43a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4617,7 +4617,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) if (cfs_rq->nr_running > 1) check_preempt_tick(cfs_rq, curr); - trace_android_rvh_entity_tick(cfs_rq, curr); } @@ -7076,13 +7075,9 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ int scale = cfs_rq->nr_running >= sched_nr_latency; int next_buddy_marked = 0; bool preempt = false, nopreempt = false; - bool ignore = false; if (unlikely(se == pse)) return; - trace_android_rvh_check_preempt_wakeup_ignore(curr, &ignore); - if (ignore) - return; /* * This is possible from callers such as attach_tasks(), in which we diff --git a/kernel/user.c b/kernel/user.c index 83adc37251fa..ab445fcc07aa 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -20,8 +20,6 @@ #include #include -#include - /* * userns count is 1 for root user, 1 for init_uts_ns, * and 1 for... ? @@ -141,7 +139,6 @@ static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent) static void free_user(struct user_struct *up, unsigned long flags) __releases(&uidhash_lock) { - trace_android_vh_free_user(up); uid_hash_remove(up); spin_unlock_irqrestore(&uidhash_lock, flags); kmem_cache_free(uid_cachep, up); @@ -193,7 +190,6 @@ struct user_struct *alloc_uid(kuid_t uid) new->uid = uid; refcount_set(&new->__count, 1); - trace_android_vh_alloc_uid(new); ratelimit_state_init(&new->ratelimit, HZ, 100); ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index dfab7aa912c3..2b9c0cdc5601 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -55,7 +55,6 @@ #include "workqueue_internal.h" #include -#include /* events/workqueue.h uses default TRACE_INCLUDE_PATH */ #undef TRACE_INCLUDE_PATH @@ -1959,7 +1958,6 @@ static struct worker *create_worker(struct worker_pool *pool) if (IS_ERR(worker->task)) goto fail; - trace_android_vh_create_worker(worker, pool->attrs); set_user_nice(worker->task, pool->attrs->nice); kthread_bind_mask(worker->task, pool->attrs->cpumask); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 251f06652801..9f4b74244b15 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4780,7 +4780,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, int no_progress_loops; unsigned int cpuset_mems_cookie; int reserve_flags; - unsigned long alloc_start = jiffies; + /* * We also sanity check to catch abuse of atomic reserves being used by * callers that are not in atomic context. 
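Side note on the kernel/ and mm/ hunks above: they are all instances of one revert pattern. An Android vendor hook is declared with DECLARE_HOOK() (or a restricted variant for the trace_android_rvh_*() hooks) in a trace/hooks header, the core kernel carries a single trace_android_vh_*()/trace_android_rvh_*() call, and both are dropped together. The sketch below shows that pattern for context only; it is not part of this patch. The hook name android_vh_example, its int argument, the probe, and the include path in the first fragment are hypothetical, and register_trace_android_vh_example() is the registration helper such a declaration is expected to generate, not something taken from these hunks.

	/* hypothetical hooks header, same shape as the one deleted above */
	#include <trace/hooks/vendor_hooks.h>	/* assumed: provides DECLARE_HOOK() */

	DECLARE_HOOK(android_vh_example,
		TP_PROTO(int cpu),
		TP_ARGS(cpu));

	/* core kernel call site -- the kind of line these hunks remove */
	void example_path(int cpu)
	{
		trace_android_vh_example(cpu);	/* no-op unless a vendor module attached a probe */
	}

	/* vendor module side -- how such a hook would be consumed */
	#include <linux/module.h>

	static void example_probe(void *data, int cpu)
	{
		/* probe signature is (void *data, <TP_PROTO args>), as with ordinary tracepoints */
		pr_info("android_vh_example fired on cpu %d\n", cpu);
	}

	static int __init example_vendor_init(void)
	{
		return register_trace_android_vh_example(example_probe, NULL);
	}
	module_init(example_vendor_init);

Deleting the call site is enough to detach any such module at build time, which is why the hunks above only remove the trace_android_*() lines together with their supporting includes and hook headers.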
@@ -5022,7 +5022,6 @@ fail: warn_alloc(gfp_mask, ac->nodemask, "page allocation failure: order:%u", order); got_pg: - trace_android_vh_alloc_pages_slowpath(gfp_mask, order, alloc_start); return page; } diff --git a/mm/slab_common.c b/mm/slab_common.c index 6a04bfa91df5..c751b18f7e60 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -950,7 +950,6 @@ static void print_slabinfo_header(struct seq_file *m) seq_puts(m, " : globalstat "); seq_puts(m, " : cpustat "); #endif - trace_android_vh_print_slabinfo_header(m); seq_putc(m, '\n'); } @@ -986,7 +985,6 @@ static void cache_show(struct kmem_cache *s, struct seq_file *m) seq_printf(m, " : slabdata %6lu %6lu %6lu", sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); slabinfo_show_stats(m, s); - trace_android_vh_cache_show(m, &sinfo, s); seq_putc(m, '\n'); } diff --git a/mm/vmscan.c b/mm/vmscan.c index d49cd9cb79e0..3f5a834dd764 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -451,8 +451,6 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl, : SHRINK_BATCH; long scanned = 0, next_deferred; - trace_android_vh_do_shrink_slab(shrinker, shrinkctl, priority); - if (!(shrinker->flags & SHRINKER_NUMA_AWARE)) nid = 0; diff --git a/net/core/sock.c b/net/core/sock.c index 70396a406761..7539b119d5ce 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -136,7 +136,6 @@ #include #include -#include #include #include @@ -1696,8 +1695,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, if (security_sk_alloc(sk, family, priority)) goto out_free; - trace_android_rvh_sk_alloc(sk); - if (!try_module_get(prot->owner)) goto out_free_sec; sk_tx_queue_clear(sk); @@ -1707,7 +1704,6 @@ static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority, out_free_sec: security_sk_free(sk); - trace_android_rvh_sk_free(sk); out_free: if (slab != NULL) kmem_cache_free(slab, sk); @@ -1727,7 +1723,6 @@ static void sk_prot_free(struct proto *prot, struct sock *sk) cgroup_sk_free(&sk->sk_cgrp_data); mem_cgroup_sk_free(sk); security_sk_free(sk); - trace_android_rvh_sk_free(sk); if (slab != NULL) kmem_cache_free(slab, sk); else diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 9b88e4c04af1..79a61929c390 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1496,8 +1496,6 @@ __nf_conntrack_alloc(struct net *net, nf_ct_zone_add(ct, zone); - trace_android_rvh_nf_conn_alloc(ct); - /* Because we use RCU lookups, we set ct_general.use to zero before * this is inserted in any list. 
*/ @@ -1530,7 +1528,6 @@ void nf_conntrack_free(struct nf_conn *ct) nf_ct_ext_destroy(ct); kmem_cache_free(nf_conntrack_cachep, ct); smp_mb__before_atomic(); - trace_android_rvh_nf_conn_free(ct); atomic_dec(&net->ct.count); } EXPORT_SYMBOL_GPL(nf_conntrack_free); diff --git a/tools/testing/selftests/filesystems/incfs/incfs_test.c b/tools/testing/selftests/filesystems/incfs/incfs_test.c index 4c30aec3f545..2d2bc933fefc 100644 --- a/tools/testing/selftests/filesystems/incfs/incfs_test.c +++ b/tools/testing/selftests/filesystems/incfs/incfs_test.c @@ -4409,6 +4409,7 @@ static int sysfs_test(const char *mount_dir) int fd = -1; int pid = -1; char buffer[32]; + char *null_buf = NULL; int status; struct incfs_per_uid_read_timeouts purt_set[] = { { @@ -4437,13 +4438,13 @@ static int sysfs_test(const char *mount_dir) TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1); TESTEQUAL(ioctl_test_last_error(cmd_fd, NULL, 0, 0), 0); TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 0), 0); - TEST(read(fd, NULL, 1), -1); + TESTEQUAL(read(fd, null_buf, 1), -1); TESTEQUAL(ioctl_test_last_error(cmd_fd, &file.id, 0, -ETIME), 0); TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 2), 0); TESTEQUAL(emit_test_file_data(mount_dir, &file), 0); TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 0), 0); - TESTEQUAL(read(fd, NULL, 1), -1); + TESTEQUAL(read(fd, null_buf, 1), -1); TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 1), 0); TESTSYSCALL(close(fd)); fd = -1;
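A note on the sysfs_test() change above: TEST() runs its first argument as a statement and then evaluates its second argument as a boolean condition, so TEST(read(fd, NULL, 1), -1) never actually checked the read result (the constant -1 is always true). TESTEQUAL() compares the expression's value against the expected result, turning the timed-out read into a real assertion, and null_buf keeps the buffer argument NULL without handing a bare NULL literal to read(). Below is a simplified sketch of the two macros to illustrate the difference; the real definitions in the incfs selftest sources carry their own failure reporting and cleanup, so treat these as reduced stand-ins.

	#include <stdio.h>

	/*
	 * Simplified stand-ins, illustration only. Both assume the enclosing
	 * test function provides an "out:" cleanup label, as the selftests do.
	 */
	#define TEST(statement, condition)				\
		do {							\
			statement;					\
			if (!(condition)) {				\
				fprintf(stderr, "%s:%d: TEST failed\n",	\
					__FILE__, __LINE__);		\
				goto out;				\
			}						\
		} while (0)

	#define TESTEQUAL(statement, res)				\
		do {							\
			if ((statement) != (res)) {			\
				fprintf(stderr, "%s:%d: TESTEQUAL failed\n", \
					__FILE__, __LINE__);		\
				goto out;				\
			}						\
		} while (0)

	/*
	 * Old:  TEST(read(fd, NULL, 1), -1)         -- "-1" is the condition and is
	 *                                              always true, so a read that
	 *                                              unexpectedly succeeded slipped through.
	 * New:  TESTEQUAL(read(fd, null_buf, 1), -1) -- the return value itself must be -1,
	 *                                              i.e. the timed-out read really failed.
	 */

Using a char * variable that happens to be NULL, rather than the NULL literal, most likely sidesteps a compile-time diagnostic about passing a null constant to read(); the patch itself does not state the motivation, so that reading is an inference.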