}
}
- thread_pool__init(nr_online_cpus);
+ r = thread_pool__init(kvm);
+ if (r < 0) {
+ pr_err("thread_pool__init() failed with error %d\n", r);
+ goto fail;
+ }
+
fail:
return r;
}
if (r < 0)
pr_warning("pci__exit() failed with error %d\n", r);
+ r = thread_pool__exit(kvm);
+ if (r < 0)
+ pr_warning("thread_pool__exit() failed with error %d\n", r);
+
r = kvm__exit(kvm);
if (r < 0)
pr_warning("pci__exit() failed with error %d\n", r);
static pthread_t *threads;
static long threadcount;
+static bool running;
static struct thread_pool__job *thread_pool__job_pop_locked(void)
{
{
pthread_cleanup_push(thread_pool__threadfunc_cleanup, NULL);
- for (;;) {
+ while (running) {
struct thread_pool__job *curjob;
mutex_lock(&job_mutex);
- while ((curjob = thread_pool__job_pop_locked()) == NULL)
+ while (running && (curjob = thread_pool__job_pop_locked()) == NULL)
pthread_cond_wait(&job_cond, &job_mutex);
mutex_unlock(&job_mutex);
- thread_pool__handle_job(curjob);
+ if (running)
+ thread_pool__handle_job(curjob);
}
pthread_cleanup_pop(0);
return res;
}
-int thread_pool__init(unsigned long thread_count)
+int thread_pool__init(struct kvm *kvm)
{
unsigned long i;
+ unsigned int thread_count = sysconf(_SC_NPROCESSORS_ONLN);
+
+ running = true;
for (i = 0; i < thread_count; i++)
if (thread_pool__addthread() < 0)
return i;
}
+int thread_pool__exit(struct kvm *kvm)
+{
+ int i;
+ void *NUL = NULL;
+
+ running = false;
+
+ for (i = 0; i < threadcount; i++) {
+ mutex_lock(&job_mutex);
+ pthread_cond_signal(&job_cond);
+ mutex_unlock(&job_mutex);
+ }
+
+ for (i = 0; i < threadcount; i++) {
+ pthread_join(threads[i], NUL);
+ }
+
+ return 0;
+}
+
void thread_pool__do_job(struct thread_pool__job *job)
{
struct thread_pool__job *jobinfo = job;