/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"

/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers.  This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 *
 * Returns: 0 on success, -EINVAL if the property is not found.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
                                    const struct drm_connector_state *state,
                                    struct drm_property *property,
                                    uint64_t *val)
{
        int i;

        /*
         * TODO: We only have atomic modeset for planes at the moment, so the
         * crtc/connector code isn't quite ready yet.  Until it's ready,
         * continue to look up all property values in the DRM's shadow copy
         * in obj->properties->values[].
         *
         * When the crtc/connector state work matures, this function should
         * be updated to read the values out of the state structure instead.
         */
        for (i = 0; i < connector->base.properties->count; i++) {
                if (connector->base.properties->properties[i] == property) {
                        *val = connector->base.properties->values[i];
                        return 0;
                }
        }

        return -EINVAL;
}
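
/*
 * Illustrative sketch only (not part of this file): a connector
 * implementation would typically wire this helper into its
 * drm_connector_funcs vtable as the .atomic_get_property hook; the
 * table name below is hypothetical.
 *
 *        static const struct drm_connector_funcs example_connector_funcs = {
 *                ...
 *                .atomic_get_property = intel_connector_atomic_get_property,
 *        };
 */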

/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
        struct intel_crtc_state *crtc_state;

        crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
        if (!crtc_state)
                return NULL;

        __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

        crtc_state->update_pipe = false;
        crtc_state->disable_lp_wm = false;
        crtc_state->disable_cxsr = false;
        crtc_state->wm_changed = false;

        return &crtc_state->base;
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
                          struct drm_crtc_state *state)
{
        drm_atomic_helper_crtc_destroy_state(crtc, state);
}
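
/*
 * Illustrative sketch only (not part of this file): these two hooks are
 * meant to be plugged into a crtc's drm_crtc_funcs vtable so the DRM
 * core can duplicate and free the driver-subclassed crtc state; the
 * table name below is hypothetical.
 *
 *        static const struct drm_crtc_funcs example_crtc_funcs = {
 *                ...
 *                .atomic_duplicate_state = intel_crtc_duplicate_state,
 *                .atomic_destroy_state = intel_crtc_destroy_state,
 *        };
 */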

/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on the staged scaling requests for
 * a crtc and its planes. It is called from the crtc-level check path. If the
 * request is supportable, it attaches scalers to the requested planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any planes
 * that are not part of this atomic state.
 *
 * Returns:
 *   0 - scalers were set up successfully
 *   error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
        struct intel_crtc *intel_crtc,
        struct intel_crtc_state *crtc_state)
{
        struct drm_plane *plane = NULL;
        struct intel_plane *intel_plane;
        struct intel_plane_state *plane_state = NULL;
        struct intel_crtc_scaler_state *scaler_state =
                &crtc_state->scaler_state;
        struct drm_atomic_state *drm_state = crtc_state->base.state;
        int num_scalers_need;
        int i, j;

        num_scalers_need = hweight32(scaler_state->scaler_users);

        /*
         * High level flow:
         * - staged scaler requests are already in scaler_state->scaler_users
         * - check whether staged scaling requests can be supported
         * - add planes using scalers that aren't in the current transaction
         * - assign scalers to requested users
         * - as part of plane commit, scalers will be committed
         *   (i.e., either attached or detached) to respective planes in hw
         * - as part of crtc_commit, the scaler will be either attached or
         *   detached to the crtc in hw
         */

        /* fail if required scalers > available scalers */
        if (num_scalers_need > intel_crtc->num_scalers) {
                DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
                        num_scalers_need, intel_crtc->num_scalers);
                return -EINVAL;
        }
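
        /*
         * Worked example (illustrative numbers only): on a pipe that
         * provides two scalers, a scaler_users mask with two plane bits
         * plus the SKL_CRTC_INDEX bit set yields num_scalers_need == 3
         * and is rejected above, while any combination of at most two
         * users proceeds to the assignment loop below.
         */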

        /* walk through scaler_users bits and start assigning scalers */
        for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
                int *scaler_id;
                const char *name;
                int idx;

                /* skip if scaler not required */
                if (!(scaler_state->scaler_users & (1 << i)))
                        continue;

                if (i == SKL_CRTC_INDEX) {
                        name = "CRTC";
                        idx = intel_crtc->base.base.id;

                        /* panel fitter case: assign as a crtc scaler */
                        scaler_id = &scaler_state->scaler_id;
                } else {
                        name = "PLANE";

                        /* plane scaler case: assign as a plane scaler */
                        /* find the plane that set the bit as scaler_user */
                        plane = drm_state->planes[i];

                        /*
                         * to enable/disable hq mode, add planes that are using
                         * a scaler into this transaction
                         */
                        if (!plane) {
                                struct drm_plane_state *state;
                                plane = drm_plane_from_index(dev, i);
                                state = drm_atomic_get_plane_state(drm_state, plane);
                                if (IS_ERR(state)) {
                                        DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
                                                plane->base.id);
                                        return PTR_ERR(state);
                                }

                                /*
                                 * the plane is added after plane checks are run,
                                 * but since this plane is unchanged just do the
                                 * minimum required validation.
                                 */
                                crtc_state->base.planes_changed = true;
                        }

                        intel_plane = to_intel_plane(plane);
                        idx = plane->base.id;

                        /* plane on different crtc cannot be a scaler user of this crtc */
                        if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
                                continue;

                        plane_state = to_intel_plane_state(drm_state->plane_states[i]);
                        scaler_id = &plane_state->scaler_id;
                }

                if (*scaler_id < 0) {
                        /* find a free scaler */
                        for (j = 0; j < intel_crtc->num_scalers; j++) {
                                if (!scaler_state->scalers[j].in_use) {
                                        scaler_state->scalers[j].in_use = 1;
                                        *scaler_id = j;
                                        DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
                                                intel_crtc->pipe, *scaler_id, name, idx);
                                        break;
                                }
                        }
                }

                if (WARN_ON(*scaler_id < 0)) {
                        DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
                        continue;
                }

                /* set scaler mode */
                if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
                        /*
                         * When only 1 scaler is in use on pipe A or B,
                         * scaler 0 operates in high quality (HQ) mode.
                         * In this case use scaler 0 to take advantage of HQ mode.
                         */
                        *scaler_id = 0;
                        scaler_state->scalers[0].in_use = 1;
                        scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
                        scaler_state->scalers[1].in_use = 0;
                } else {
                        scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
                }
        }

        return 0;
}
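
/*
 * Illustrative call-site sketch (assumed context, not taken from this
 * file): the crtc-level atomic check path fills in
 * scaler_state->scaler_users while validating planes and the panel
 * fitter, and then calls this helper before the state is committed;
 * the local variable names below are assumptions.
 *
 *        ret = intel_atomic_setup_scalers(dev, intel_crtc, pipe_config);
 *        if (ret)
 *                return ret;
 */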

static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
                                  struct intel_shared_dpll_config *shared_dpll)
{
        enum intel_dpll_id i;

        /* Copy shared dpll state */
        for (i = 0; i < dev_priv->num_shared_dpll; i++) {
                struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

                shared_dpll[i] = pll->config;
        }
}

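/**
 * intel_atomic_get_shared_dpll_state - get the shared DPLL state array
 * @s: atomic state
 *
 * Returns the atomic state's copy of the shared DPLL configuration,
 * duplicating the current configuration from the device the first time
 * it is requested.  The caller must hold connection_mutex.
 */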
struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);

        WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

        if (!state->dpll_set) {
                state->dpll_set = true;

                intel_atomic_duplicate_dpll_state(to_i915(s->dev),
                                                  state->shared_dpll);
        }

        return state->shared_dpll;
}

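/**
 * intel_atomic_state_alloc - allocate an i915 atomic state
 * @dev: DRM device
 *
 * Allocates an intel_atomic_state, which embeds the core drm_atomic_state,
 * and initializes the base state.
 *
 * Returns: the base drm_atomic_state pointer, or NULL on failure.
 */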
struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
        struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

        if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
                kfree(state);
                return NULL;
        }

        return &state->base;
}

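/**
 * intel_atomic_state_clear - clear an atomic state for reuse
 * @s: atomic state to clear
 *
 * Performs the core default clear and additionally resets the
 * driver-private dpll_set flag so the shared DPLL state is duplicated
 * again on next use.
 */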
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
        struct intel_atomic_state *state = to_intel_atomic_state(s);

        drm_atomic_state_default_clear(&state->base);
        state->dpll_set = false;
}