From: Dave Airlie
Date: Thu, 29 Jan 2015 01:45:31 +0000 (+1000)
Subject: Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux...
X-Git-Tag: v4.0-rc1~74^2~22
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=b3869b17fd63bacb53ac4db4ff4ba093701e17be;p=karo-tx-linux.git

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux into drm-next

This backmerges drm-fixes into drm-next mainly for the amdkfd stuff,
I'm not 100% confident, but it builds and the amdkfd folks can fix
anything up.

Signed-off-by: Dave Airlie

Conflicts:
	drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
	drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
---

b3869b17fd63bacb53ac4db4ff4ba093701e17be
diff --cc drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 1ba8332419fa,25bc47f3c1cf..5bc32c26b989
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@@ -183,29 -170,16 +183,28 @@@ bool kgd2kfd_device_init(struct kfd_de
  	kfd->shared_resources = *gpu_resources;

  	/* calculate max size of mqds needed for queues */
- 	size = max_num_of_processes *
- 			max_num_of_queues_per_process *
- 			kfd->device_info->mqd_size_aligned;
+ 	size = max_num_of_queues_per_device *
+ 			kfd->device_info->mqd_size_aligned;

- 	/* add another 512KB for all other allocations on gart */
+ 	/*
+ 	 * calculate max size of runlist packet.
+ 	 * There can be only 2 packets at once
+ 	 */
- 	size += (max_num_of_processes * sizeof(struct pm4_map_process) +
- 		max_num_of_processes * max_num_of_queues_per_process *
++ 	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_map_process) +
++ 		max_num_of_queues_per_device *
+ 		sizeof(struct pm4_map_queues) + sizeof(struct pm4_runlist)) * 2;
+
+ 	/* Add size of HIQ & DIQ */
+ 	size += KFD_KERNEL_QUEUE_SIZE * 2;
+
+ 	/* add another 512KB for all other allocations on gart (HPD, fences) */
  	size += 512 * 1024;

- 	if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
+ 	if (kfd2kgd->init_gtt_mem_allocation(kfd->kgd, size, &kfd->gtt_mem,
+ 			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
  		dev_err(kfd_device,
- 			"Error initializing sa manager for device (%x:%x)\n",
- 			kfd->pdev->vendor, kfd->pdev->device);
+ 			"Could not allocate %d bytes for device (%x:%x)\n",
+ 			size, kfd->pdev->vendor, kfd->pdev->device);
  		goto out;
  	}
diff --cc drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index b189f9791c90,0d8694f015c1..ecc78ece634c
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@@ -161,8 -213,15 +168,18 @@@ static int create_queue_nocpsch(struct
  	list_add(&q->list, &qpd->queues_list);
  	dqm->queue_count++;
+
+ 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ 		dqm->sdma_queue_count++;
++
+ 	/*
+ 	 * Unconditionally increment this counter, regardless of the queue's
+ 	 * type or whether the queue is active.
+ 	 */
+ 	dqm->total_queue_count++;
+ 	pr_debug("Total of %d queues are accountable so far\n",
+ 			dqm->total_queue_count);
+
  	mutex_unlock(&dqm->lock);
  	return 0;
  }
@@@ -488,8 -588,12 +518,7 @@@ static int init_scheduler(struct device
  	pr_debug("kfd: In %s\n", __func__);
- 	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
-
+ 	retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
- 	if (retval != 0)
- 		return retval;
-
- 	retval = init_memory(dqm);
-
  	return retval;
  }
@@@ -793,12 -843,14 +844,19 @@@ static int create_queue_cpsch(struct de
  	mutex_lock(&dqm->lock);
+ 	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ 		pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ 				dqm->total_queue_count);
+ 		retval = -EPERM;
+ 		goto out;
+ 	}
+
- 	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
+ 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ 		select_sdma_engine_id(q);
+
+ 	mqd = dqm->ops.get_mqd_manager(dqm,
+ 			get_mqd_type_from_queue_type(q->properties.type));
+
  	if (mqd == NULL) {
  		mutex_unlock(&dqm->lock);
  		return -ENOMEM;
@@@ -815,8 -867,14 +873,16 @@@
  		retval = execute_queues_cpsch(dqm, false);
  	}
+ 	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
+ 		dqm->sdma_queue_count++;
+ 	/*
+ 	 * Unconditionally increment this counter, regardless of the queue's
+ 	 * type or whether the queue is active.
+ 	 */
+ 	dqm->total_queue_count++;
+
+ 	pr_debug("Total of %d queues are accountable so far\n",
+ 			dqm->total_queue_count);
  out:
  	mutex_unlock(&dqm->lock);
diff --cc drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index e7b17b28330e,52035bf0c1cb..d64f86cda34f
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@@ -143,10 -130,9 +143,11 @@@ struct device_queue_manager
  	struct list_head	queues;
  	unsigned int		processes_count;
  	unsigned int		queue_count;
+ 	unsigned int		sdma_queue_count;
+ 	unsigned int		total_queue_count;
  	unsigned int		next_pipe_to_allocate;
  	unsigned int		*allocated_queues;
+ 	unsigned int		sdma_bitmap;
  	unsigned int		vmid_bitmap;
  	uint64_t		pipelines_addr;
  	struct kfd_mem_obj	*pipeline_mem;
diff --cc drivers/gpu/drm/amd/amdkfd/kfd_module.c
index ac5445415667,a8be6df85347..3c6221905bc4
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@@ -48,17 -48,12 +48,12 @@@ static const struct kgd2kfd_calls kgd2k
  int sched_policy = KFD_SCHED_POLICY_HWS;
  module_param(sched_policy, int, 0444);
  MODULE_PARM_DESC(sched_policy,
- 	"Kernel cmdline parameter that defines the amdkfd scheduling policy");
+ 	"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");

- int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
- module_param(max_num_of_processes, int, 0444);
- MODULE_PARM_DESC(max_num_of_processes,
- 	"Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
- int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
- module_param(max_num_of_queues_per_process, int, 0444);
- MODULE_PARM_DESC(max_num_of_queues_per_process,
- 	"Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+ int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+ module_param(max_num_of_queues_per_device, int, 0444);
+ MODULE_PARM_DESC(max_num_of_queues_per_device,
+ 	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");

  bool kgd2kfd_init(unsigned interface_version,
  		const struct kfd2kgd_calls *f2g,
diff --cc drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 513eeb6e402a,f37cf5efe642..ca93ab0449c8
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@@ -204,8 -201,9 +204,9 @@@ int pqm_create_queue(struct process_que
  			goto err_create_queue;
  		pqn->q = q;
  		pqn->kq = NULL;
- 		retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
+ 		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
  						&q->properties.vmid);
+ 		pr_debug("DQM returned %d for create_queue\n", retval);
  		print_queue(q);
  		break;
  	case KFD_QUEUE_TYPE_DIQ:
@@@ -245,7 -242,10 +246,10 @@@
  err_create_queue:
  	kfree(pqn);
  err_allocate_pqn:
+ 	/* check if queues list is empty unregister process from device */
  	clear_bit(*qid, pqm->queue_slot_bitmap);
+ 	if (list_empty(&pqm->queues))
- 		dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
++ 		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
  	return retval;
  }