@@ -15,40 +15,133 @@ specific language governing permissions and limitations under the License.
An attention processor is a class for applying different types of attention mechanisms.
## AttnProcessor

[[autodoc]] models.attention_processor.AttnProcessor
[[autodoc]] models.attention_processor.AttnProcessor2_0
[[autodoc]] models.attention_processor.AttnAddedKVProcessor
[[autodoc]] models.attention_processor.AttnAddedKVProcessor2_0
[[autodoc]] models.attention_processor.AttnProcessorNPU

[[autodoc]] models.attention_processor.FusedAttnProcessor2_0
## Allegro

[[autodoc]] models.attention_processor.AllegroAttnProcessor2_0
## AuraFlow

[[autodoc]] models.attention_processor.AuraFlowAttnProcessor2_0

[[autodoc]] models.attention_processor.FusedAuraFlowAttnProcessor2_0
## CogVideoX

[[autodoc]] models.attention_processor.CogVideoXAttnProcessor2_0

[[autodoc]] models.attention_processor.FusedCogVideoXAttnProcessor2_0
## CrossFrameAttnProcessor

[[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.CrossFrameAttnProcessor
## Custom Diffusion

[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor
[[autodoc]] models.attention_processor.CustomDiffusionAttnProcessor2_0
[[autodoc]] models.attention_processor.CustomDiffusionXFormersAttnProcessor
## Flux

[[autodoc]] models.attention_processor.FluxAttnProcessor2_0

[[autodoc]] models.attention_processor.FusedFluxAttnProcessor2_0

[[autodoc]] models.attention_processor.FluxSingleAttnProcessor2_0
+
67
+ ## Hunyuan
68
+
69
+ [[ autodoc]] models.attention_processor.HunyuanAttnProcessor2_0
70
+
71
+ [[ autodoc]] models.attention_processor.FusedHunyuanAttnProcessor2_0
72
+
73
+ [[ autodoc]] models.attention_processor.PAGHunyuanAttnProcessor2_0
74
+
75
+ [[ autodoc]] models.attention_processor.PAGCFGHunyuanAttnProcessor2_0
76
+
77
+ ## IdentitySelfAttnProcessor2_0
78
+
79
+ [[ autodoc]] models.attention_processor.PAGIdentitySelfAttnProcessor2_0
80
+
81
+ [[ autodoc]] models.attention_processor.PAGCFGIdentitySelfAttnProcessor2_0
82
+
83
+ ## IP-Adapter
84
+
85
+ [[ autodoc]] models.attention_processor.IPAdapterAttnProcessor
86
+
87
+ [[ autodoc]] models.attention_processor.IPAdapterAttnProcessor2_0
88
+
89
+ ## JointAttnProcessor2_0
90
+
91
+ [[ autodoc]] models.attention_processor.JointAttnProcessor2_0
92
+
93
+ [[ autodoc]] models.attention_processor.PAGJointAttnProcessor2_0
94
+
95
+ [[ autodoc]] models.attention_processor.PAGCFGJointAttnProcessor2_0
96
+
97
+ [[ autodoc]] models.attention_processor.FusedJointAttnProcessor2_0
98
+
99
+ ## LoRA
100
+
101
+ [[ autodoc]] models.attention_processor.LoRAAttnProcessor
102
+
103
+ [[ autodoc]] models.attention_processor.LoRAAttnProcessor2_0
104
+
105
+ [[ autodoc]] models.attention_processor.LoRAAttnAddedKVProcessor
106
+
107
+ [[ autodoc]] models.attention_processor.LoRAXFormersAttnProcessor
108
+
109
+ ## Lumina-T2X
110
+
111
+ [[ autodoc]] models.attention_processor.LuminaAttnProcessor2_0
112
+
113
+ ## Mochi
114
+
115
+ [[ autodoc]] models.attention_processor.MochiAttnProcessor2_0
116
+
117
+ [[ autodoc]] models.attention_processor.MochiVaeAttnProcessor2_0
118
+
119
+ ## Sana
120
+
121
+ [[ autodoc]] models.attention_processor.SanaLinearAttnProcessor2_0
122
+
123
+ [[ autodoc]] models.attention_processor.SanaMultiscaleAttnProcessor2_0
124
+
125
+ [[ autodoc]] models.attention_processor.PAGCFGSanaLinearAttnProcessor2_0
126
+
127
+ [[ autodoc]] models.attention_processor.PAGIdentitySanaLinearAttnProcessor2_0
128
+
129
+ ## Stable Audio
130
+
131
+ [[ autodoc]] models.attention_processor.StableAudioAttnProcessor2_0
43
132
44
133
## SlicedAttnProcessor

[[autodoc]] models.attention_processor.SlicedAttnProcessor
47
- ## SlicedAttnAddedKVProcessor
48
137
[[ autodoc]] models.attention_processor.SlicedAttnAddedKVProcessor
49
138
50
139
## XFormersAttnProcessor

[[autodoc]] models.attention_processor.XFormersAttnProcessor
53
- ## AttnProcessorNPU
54
- [[ autodoc]] models.attention_processor.AttnProcessorNPU
143
+ [[ autodoc]] models.attention_processor.XFormersAttnAddedKVProcessor
144
+
145
+ ## XLAFlashAttnProcessor2_0
146
+
147
+ [[ autodoc]] models.attention_processor.XLAFlashAttnProcessor2_0
0 commit comments