1 #ifndef _ASM_X86_INTEL_RDT_H
2 #define _ASM_X86_INTEL_RDT_H
4 #ifdef CONFIG_INTEL_RDT_A
6 #include <linux/sched.h>
7 #include <linux/kernfs.h>
8 #include <linux/jump_label.h>
10 #include <asm/intel_rdt_common.h>
12 #define IA32_L3_QOS_CFG 0xc81
13 #define IA32_L3_CBM_BASE 0xc90
14 #define IA32_L2_CBM_BASE 0xd10
16 #define L3_QOS_CDP_ENABLE 0x01ULL
19 * struct rdtgroup - store rdtgroup's data in resctrl file system.
20 * @kn: kernfs node for this group's resctrl directory
21 * @rdtgroup_list: linked list for all rdtgroups
22 * @closid: closid for this rdtgroup
23 * @cpu_mask: CPUs assigned to this rdtgroup
25 * @waitcount: how many cpus expect to find this
26 * group when they acquire rdtgroup_mutex
29 struct kernfs_node *kn;
30 struct list_head rdtgroup_list;
32 struct cpumask cpu_mask;
41 #define RFTYPE_FLAGS_CPUS_LIST 1
43 /* List of all resource groups */
44 extern struct list_head rdt_all_groups;
46 extern int max_name_width, max_data_width;
48 int __init rdtgroup_init(void);
51 * struct rftype - describe each file in the resctrl file system
54 * @kf_ops: File operations
55 * @flags: File specific RFTYPE_FLAGS_* flags
56 * @seq_show: Show content of the file
57 * @write: Write to the file
62 struct kernfs_ops *kf_ops;
65 int (*seq_show)(struct kernfs_open_file *of,
66 struct seq_file *sf, void *v);
68 * write() is the generic write callback which maps directly to
69 * kernfs write operation and overrides all other operations.
70 * Maximum write size is determined by ->max_write_len.
72 ssize_t (*write)(struct kernfs_open_file *of,
73 char *buf, size_t nbytes, loff_t off);
77 * struct rdt_domain - group of cpus sharing an RDT resource
78 * @list: all instances of this resource
79 * @id: unique id for this instance
80 * @cpu_mask: which cpus share this resource
81 * @ctrl_val: array of cache or mem ctrl values (indexed by CLOSID)
82 * @new_ctrl: new ctrl value to be loaded
83 * @have_new_ctrl: did user provide new_ctrl for this domain
86 struct list_head list;
88 struct cpumask cpu_mask;
95 * struct msr_param - set a range of MSRs from a domain
96 * @res: The resource to use
97 * @low: Beginning index from base MSR
101 struct rdt_resource *res;
107 * struct rdt_cache - Cache allocation related data
108 * @cbm_len: Length of the cache bit mask
109 * @min_cbm_bits: Minimum number of consecutive bits to be set
110 * @cbm_idx_mult: Multiplier of CBM index
111 * @cbm_idx_offset: Offset of CBM index. CBM index is computed by:
112 * closid * cbm_idx_mult + cbm_idx_offset
113 * in a cache bit mask
116 unsigned int cbm_len;
117 unsigned int min_cbm_bits;
118 unsigned int cbm_idx_mult;
119 unsigned int cbm_idx_offset;
123 * struct rdt_resource - attributes of an RDT resource
124 * @enabled: Is this feature enabled on this machine
125 * @capable: Is this feature available on this machine
126 * @name: Name to use in "schemata" file
127 * @num_closid: Number of CLOSIDs available
128 * @cache_level: Which cache level defines scope of this resource
129 * @default_ctrl: Specifies default cache cbm or memory B/W percent.
130 * @msr_base: Base MSR address for CBMs
131 * @msr_update: Function pointer to update QOS MSRs
132 * @data_width: Character width of data when displaying
133 * @domains: All domains for this resource
134 * @cache: Cache allocation related data
136 struct rdt_resource {
143 unsigned int msr_base;
144 void (*msr_update) (struct rdt_domain *d, struct msr_param *m,
145 struct rdt_resource *r);
147 struct list_head domains;
148 struct rdt_cache cache;
151 extern struct mutex rdtgroup_mutex;
153 extern struct rdt_resource rdt_resources_all[];
154 extern struct rdtgroup rdtgroup_default;
155 DECLARE_STATIC_KEY_FALSE(rdt_enable_key);
157 int __init rdtgroup_init(void);
165 /* Must be the last */
169 #define for_each_capable_rdt_resource(r) \
170 for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
174 #define for_each_enabled_rdt_resource(r) \
175 for (r = rdt_resources_all; r < rdt_resources_all + RDT_NUM_RESOURCES;\
179 /* CPUID.(EAX=10H, ECX=ResID=1).EAX */
180 union cpuid_0x10_1_eax {
182 unsigned int cbm_len:5;
187 /* CPUID.(EAX=10H, ECX=ResID=3).EAX */
188 union cpuid_0x10_3_eax {
190 unsigned int max_delay:12;
195 /* CPUID.(EAX=10H, ECX=ResID).EDX */
196 union cpuid_0x10_x_edx {
198 unsigned int cos_max:16;
203 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_closid);
205 void rdt_ctrl_update(void *arg);
206 struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn);
207 void rdtgroup_kn_unlock(struct kernfs_node *kn);
208 ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
209 char *buf, size_t nbytes, loff_t off);
210 int rdtgroup_schemata_show(struct kernfs_open_file *of,
211 struct seq_file *s, void *v);
214 * intel_rdt_sched_in() - Writes the task's CLOSid to IA32_PQR_MSR
216 * Following considerations are made so that this has minimal impact
217 * on scheduler hot path:
218 * - This will stay as no-op unless we are running on an Intel SKU
219 * which supports resource control and we enable by mounting the
220 * resctrl file system.
221 * - Caches the per cpu CLOSid values and does the MSR write only
222 * when a task with a different CLOSid is scheduled in.
224 * Must be called with preemption disabled.
226 static inline void intel_rdt_sched_in(void)
228 if (static_branch_likely(&rdt_enable_key)) {
229 struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
233 * If this task has a closid assigned, use it.
234 * Else use the closid assigned to this cpu.
236 closid = current->closid;
238 closid = this_cpu_read(cpu_closid);
240 if (closid != state->closid) {
241 state->closid = closid;
242 wrmsr(MSR_IA32_PQR_ASSOC, state->rmid, closid);
249 static inline void intel_rdt_sched_in(void) {}
251 #endif /* CONFIG_INTEL_RDT_A */
252 #endif /* _ASM_X86_INTEL_RDT_H */