git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
kvm tools: use mutex abstraction instead of pthread mutex
author: Sasha Levin <sasha.levin@oracle.com>
Wed, 24 Oct 2012 17:00:08 +0000 (13:00 -0400)
committer: Pekka Enberg <penberg@kernel.org>
Thu, 25 Oct 2012 06:49:39 +0000 (09:49 +0300)
We already have something to wrap pthread with mutex_[init,lock,unlock]
calls. This patch creates a new struct mutex abstraction and moves
everything to work with it.

Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
13 files changed:
tools/kvm/hw/serial.c
tools/kvm/include/kvm/mutex.h
tools/kvm/include/kvm/qcow.h
tools/kvm/include/kvm/threadpool.h
tools/kvm/include/kvm/uip.h
tools/kvm/net/uip/buf.c
tools/kvm/net/uip/core.c
tools/kvm/net/uip/tcp.c
tools/kvm/net/uip/udp.c
tools/kvm/util/threadpool.c
tools/kvm/virtio/blk.c
tools/kvm/virtio/console.c
tools/kvm/virtio/net.c

index a177a7f2906592d7b4d5d6820e583bb49d97e07f..53b684ab9dc3f053b927b223a9420f0dbb81f64c 100644 (file)
@@ -22,7 +22,7 @@
 #define UART_IIR_TYPE_BITS     0xc0
 
 struct serial8250_device {
-       pthread_mutex_t         mutex;
+       struct mutex            mutex;
        u8                      id;
 
        u16                     iobase;
@@ -55,7 +55,7 @@ struct serial8250_device {
 static struct serial8250_device devices[] = {
        /* ttyS0 */
        [0]     = {
-               .mutex                  = PTHREAD_MUTEX_INITIALIZER,
+               .mutex                  = MUTEX_INITIALIZER,
 
                .id                     = 0,
                .iobase                 = 0x3f8,
@@ -65,7 +65,7 @@ static struct serial8250_device devices[] = {
        },
        /* ttyS1 */
        [1]     = {
-               .mutex                  = PTHREAD_MUTEX_INITIALIZER,
+               .mutex                  = MUTEX_INITIALIZER,
 
                .id                     = 1,
                .iobase                 = 0x2f8,
@@ -75,7 +75,7 @@ static struct serial8250_device devices[] = {
        },
        /* ttyS2 */
        [2]     = {
-               .mutex                  = PTHREAD_MUTEX_INITIALIZER,
+               .mutex                  = MUTEX_INITIALIZER,
 
                .id                     = 2,
                .iobase                 = 0x3e8,
@@ -85,7 +85,7 @@ static struct serial8250_device devices[] = {
        },
        /* ttyS3 */
        [3]     = {
-               .mutex                  = PTHREAD_MUTEX_INITIALIZER,
+               .mutex                  = MUTEX_INITIALIZER,
 
                .id                     = 3,
                .iobase                 = 0x2e8,
index 3286cead3d6d2e82f7c5eba4286fa398f6d70d24..a90584b9db87b6a91260068b75fb96e83d8d133f 100644 (file)
  * to write user-space code! :-)
  */
 
-#define DEFINE_MUTEX(mutex) pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER
+struct mutex {
+       pthread_mutex_t mutex;
+};
+#define MUTEX_INITIALIZER (struct mutex) { .mutex = PTHREAD_MUTEX_INITIALIZER }
 
-static inline void mutex_init(pthread_mutex_t *mutex)
+#define DEFINE_MUTEX(mtx) struct mutex mtx = MUTEX_INITIALIZER
+
+static inline void mutex_init(struct mutex *lock)
 {
-       if (pthread_mutex_init(mutex, NULL) != 0)
+       if (pthread_mutex_init(&lock->mutex, NULL) != 0)
                die("unexpected pthread_mutex_init() failure!");
 }
 
-static inline void mutex_lock(pthread_mutex_t *mutex)
+static inline void mutex_lock(struct mutex *lock)
 {
-       if (pthread_mutex_lock(mutex) != 0)
+       if (pthread_mutex_lock(&lock->mutex) != 0)
                die("unexpected pthread_mutex_lock() failure!");
+
 }
 
-static inline void mutex_unlock(pthread_mutex_t *mutex)
+static inline void mutex_unlock(struct mutex *lock)
 {
-       if (pthread_mutex_unlock(mutex) != 0)
+       if (pthread_mutex_unlock(&lock->mutex) != 0)
                die("unexpected pthread_mutex_unlock() failure!");
 }
 
index e032a1e1e3c3faf2e534bad279a746225fc99685..f8492462ddaa6ed8f95a85cd289d85029577f9a5 100644 (file)
@@ -74,7 +74,7 @@ struct qcow_header {
 };
 
 struct qcow {
-       pthread_mutex_t                 mutex;
+       struct mutex                    mutex;
        struct qcow_header              *header;
        struct qcow_l1_table            table;
        struct qcow_refcount_table      refcount_table;
index abe46eaaa65fb7416b40aa9ba5c3d31f1d098ef1..bacb2434e6f1e7cf11d7478c8b0167f09681d6ba 100644 (file)
@@ -15,7 +15,7 @@ struct thread_pool__job {
        void                            *data;
 
        int                             signalcount;
-       pthread_mutex_t                 mutex;
+       struct mutex                    mutex;
 
        struct list_head                queue;
 };
@@ -26,7 +26,7 @@ static inline void thread_pool__init_job(struct thread_pool__job *job, struct kv
                .kvm            = kvm,
                .callback       = callback,
                .data           = data,
-               .mutex          = PTHREAD_MUTEX_INITIALIZER,
+               .mutex          = MUTEX_INITIALIZER,
        };
 }
 
index 9af011021f3495bedde0fb7033a43f688028f195..ac248d2b775718965fd5bc977aad55b774effba5 100644 (file)
@@ -187,14 +187,14 @@ struct uip_dhcp {
 struct uip_info {
        struct list_head udp_socket_head;
        struct list_head tcp_socket_head;
-       pthread_mutex_t udp_socket_lock;
-       pthread_mutex_t tcp_socket_lock;
+       struct mutex udp_socket_lock;
+       struct mutex tcp_socket_lock;
        struct uip_eth_addr guest_mac;
        struct uip_eth_addr host_mac;
        pthread_cond_t buf_free_cond;
        pthread_cond_t buf_used_cond;
        struct list_head buf_head;
-       pthread_mutex_t buf_lock;
+       struct mutex buf_lock;
        pthread_t udp_thread;
        int udp_epollfd;
        int buf_free_nr;
@@ -221,7 +221,7 @@ struct uip_buf {
 struct uip_udp_socket {
        struct sockaddr_in addr;
        struct list_head list;
-       pthread_mutex_t *lock;
+       struct mutex *lock;
        u32 dport, sport;
        u32 dip, sip;
        int fd;
@@ -232,7 +232,7 @@ struct uip_tcp_socket {
        struct list_head list;
        struct uip_info *info;
        pthread_cond_t  cond;
-       pthread_mutex_t *lock;
+       struct mutex *lock;
        pthread_t thread;
        u32 dport, sport;
        u32 guest_acked;
index 5e564a99eb6a80053b8b99bae7fbb222ada5e37e..f29ad41cb8fc2fb260611de3124c00bb478e93d3 100644 (file)
@@ -11,7 +11,7 @@ struct uip_buf *uip_buf_get_used(struct uip_info *info)
        mutex_lock(&info->buf_lock);
 
        while (!(info->buf_used_nr > 0))
-               pthread_cond_wait(&info->buf_used_cond, &info->buf_lock);
+               pthread_cond_wait(&info->buf_used_cond, &info->buf_lock.mutex);
 
        list_for_each_entry(buf, &info->buf_head, list) {
                if (buf->status == UIP_BUF_STATUS_USED) {
@@ -39,7 +39,7 @@ struct uip_buf *uip_buf_get_free(struct uip_info *info)
        mutex_lock(&info->buf_lock);
 
        while (!(info->buf_free_nr > 0))
-               pthread_cond_wait(&info->buf_free_cond, &info->buf_lock);
+               pthread_cond_wait(&info->buf_free_cond, &info->buf_lock.mutex);
 
        list_for_each_entry(buf, &info->buf_head, list) {
                if (buf->status == UIP_BUF_STATUS_FREE) {
index 2e7603c3f9d4ec3312337a64bb273e93c4a9051c..4e5bb82e48b13c0ad125f9268c6694ec3179aff8 100644 (file)
@@ -153,9 +153,9 @@ int uip_init(struct uip_info *info)
        INIT_LIST_HEAD(tcp_socket_head);
        INIT_LIST_HEAD(buf_head);
 
-       pthread_mutex_init(&info->udp_socket_lock, NULL);
-       pthread_mutex_init(&info->tcp_socket_lock, NULL);
-       pthread_mutex_init(&info->buf_lock, NULL);
+       mutex_init(&info->udp_socket_lock);
+       mutex_init(&info->tcp_socket_lock);
+       mutex_init(&info->buf_lock);
 
        pthread_cond_init(&info->buf_used_cond, NULL);
        pthread_cond_init(&info->buf_free_cond, NULL);
index 830aa3fd91a042e9a6b6747bd339304b06237cef..9044f40ba2d08a4f11091ed5a1344658898d61af 100644 (file)
@@ -27,7 +27,7 @@ static int uip_tcp_socket_close(struct uip_tcp_socket *sk, int how)
 static struct uip_tcp_socket *uip_tcp_socket_find(struct uip_tx_arg *arg, u32 sip, u32 dip, u16 sport, u16 dport)
 {
        struct list_head *sk_head;
-       pthread_mutex_t *sk_lock;
+       struct mutex *sk_lock;
        struct uip_tcp_socket *sk;
 
        sk_head = &arg->info->tcp_socket_head;
@@ -49,7 +49,7 @@ static struct uip_tcp_socket *uip_tcp_socket_alloc(struct uip_tx_arg *arg, u32 s
 {
        struct list_head *sk_head;
        struct uip_tcp_socket *sk;
-       pthread_mutex_t *sk_lock;
+       struct mutex *sk_lock;
        struct uip_tcp *tcp;
        struct uip_ip *ip;
        int ret;
@@ -198,7 +198,7 @@ static void *uip_tcp_socket_thread(void *p)
                while (left > 0) {
                        mutex_lock(sk->lock);
                        while ((len = sk->guest_acked + sk->window_size - sk->seq_server) <= 0)
-                               pthread_cond_wait(&sk->cond, sk->lock);
+                               pthread_cond_wait(&sk->cond, &sk->lock->mutex);
                        mutex_unlock(sk->lock);
 
                        sk->payload = pos;
index 5b6ec1c67c5c158bfe3e66d2e543d80cf58f4ed8..31c417cd5ca972524a9ba6bb62485dc8378d4f65 100644 (file)
@@ -14,7 +14,7 @@ static struct uip_udp_socket *uip_udp_socket_find(struct uip_tx_arg *arg, u32 si
 {
        struct list_head *sk_head;
        struct uip_udp_socket *sk;
-       pthread_mutex_t *sk_lock;
+       struct mutex *sk_lock;
        struct epoll_event ev;
        int flags;
        int ret;
index a3638311e2e9dc3d9098fc5ab9f04a5569762818..e64aa26dada4b0432e08341eb3226ce2622258d6 100644 (file)
@@ -7,9 +7,9 @@
 #include <pthread.h>
 #include <stdbool.h>
 
-static pthread_mutex_t job_mutex       = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t thread_mutex    = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t  job_cond        = PTHREAD_COND_INITIALIZER;
+static DEFINE_MUTEX(job_mutex);
+static DEFINE_MUTEX(thread_mutex);
+static pthread_cond_t job_cond = PTHREAD_COND_INITIALIZER;
 
 static LIST_HEAD(head);
 
@@ -85,7 +85,7 @@ static void *thread_pool__threadfunc(void *param)
 
                mutex_lock(&job_mutex);
                while (running && (curjob = thread_pool__job_pop_locked()) == NULL)
-                       pthread_cond_wait(&job_cond, &job_mutex);
+                       pthread_cond_wait(&job_cond, &job_mutex.mutex);
                mutex_unlock(&job_mutex);
 
                if (running)
index f76342c9ab9100ae050e24c093f2cf68308ab0e0..356a2406ba6d770d9b7d78d4dbc9a679a1c715c4 100644 (file)
@@ -37,7 +37,7 @@ struct blk_dev_req {
 };
 
 struct blk_dev {
-       pthread_mutex_t                 mutex;
+       struct mutex                    mutex;
 
        struct list_head                list;
 
@@ -248,7 +248,7 @@ static int virtio_blk__init_one(struct kvm *kvm, struct disk_image *disk)
                return -ENOMEM;
 
        *bdev = (struct blk_dev) {
-               .mutex                  = PTHREAD_MUTEX_INITIALIZER,
+               .mutex                  = MUTEX_INITIALIZER,
                .disk                   = disk,
                .blk_config             = (struct virtio_blk_config) {
                        .capacity       = disk->size / SECTOR_SIZE,
index 88b110614de2b1bceffb6dd8668e33b0b2132c0f..1df6cb008fd74dca3c97d72dde639ff8eb11369a 100644 (file)
@@ -29,7 +29,7 @@
 #define VIRTIO_CONSOLE_TX_QUEUE                1
 
 struct con_dev {
-       pthread_mutex_t                 mutex;
+       struct mutex                    mutex;
 
        struct virtio_device            vdev;
        struct virt_queue               vqs[VIRTIO_CONSOLE_NUM_QUEUES];
@@ -40,7 +40,7 @@ struct con_dev {
 };
 
 static struct con_dev cdev = {
-       .mutex                          = PTHREAD_MUTEX_INITIALIZER,
+       .mutex                          = MUTEX_INITIALIZER,
 
        .config = {
                .cols                   = 80,
index ac429cc03a5224f5f6430998231d25088b0b62db..db77ab832784683c35860ef12ab7b59287da79f9 100644 (file)
@@ -39,7 +39,7 @@ struct net_dev_operations {
 };
 
 struct net_dev {
-       pthread_mutex_t                 mutex;
+       struct mutex                    mutex;
        struct virtio_device            vdev;
        struct list_head                list;
 
@@ -48,11 +48,11 @@ struct net_dev {
        u32                             features;
 
        pthread_t                       io_rx_thread;
-       pthread_mutex_t                 io_rx_lock;
+       struct mutex                    io_rx_lock;
        pthread_cond_t                  io_rx_cond;
 
        pthread_t                       io_tx_thread;
-       pthread_mutex_t                 io_tx_lock;
+       struct mutex                    io_tx_lock;
        pthread_cond_t                  io_tx_cond;
 
        int                             vhost_fd;
@@ -87,7 +87,7 @@ static void *virtio_net_rx_thread(void *p)
        while (1) {
                mutex_lock(&ndev->io_rx_lock);
                if (!virt_queue__available(vq))
-                       pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock);
+                       pthread_cond_wait(&ndev->io_rx_cond, &ndev->io_rx_lock.mutex);
                mutex_unlock(&ndev->io_rx_lock);
 
                while (virt_queue__available(vq)) {
@@ -125,7 +125,7 @@ static void *virtio_net_tx_thread(void *p)
        while (1) {
                mutex_lock(&ndev->io_tx_lock);
                if (!virt_queue__available(vq))
-                       pthread_cond_wait(&ndev->io_tx_cond, &ndev->io_tx_lock);
+                       pthread_cond_wait(&ndev->io_tx_cond, &ndev->io_tx_lock.mutex);
                mutex_unlock(&ndev->io_tx_lock);
 
                while (virt_queue__available(vq)) {
@@ -252,8 +252,8 @@ fail:
 
 static void virtio_net__io_thread_init(struct kvm *kvm, struct net_dev *ndev)
 {
-       pthread_mutex_init(&ndev->io_tx_lock, NULL);
-       pthread_mutex_init(&ndev->io_rx_lock, NULL);
+       mutex_init(&ndev->io_tx_lock);
+       mutex_init(&ndev->io_rx_lock);
 
        pthread_cond_init(&ndev->io_tx_cond, NULL);
        pthread_cond_init(&ndev->io_rx_cond, NULL);