Reputation: 3438
Below is the code for the thread-safe queue from Anthony Williams' book C++ Concurrency in Action, which I have gathered into a .h file.
#ifndef THREADSAFE_QUEUE_H
#define THREADSAFE_QUEUE_H

// Anthony Williams' fine-grained lock-based thread-safe queue.

#include <mutex>              // for std::mutex
#include <condition_variable> // for std::condition_variable
#include <memory>             // for std::shared_ptr and std::unique_ptr
#include <utility>            // for std::move
template <typename T>
class threadsafe_queue
{
private:
    struct node
    {
        std::shared_ptr<T> data;
        std::unique_ptr<node> next;
    };

    std::mutex head_mutex;
    std::unique_ptr<node> head;
    std::mutex tail_mutex;
    node* tail;
    std::condition_variable data_cond;

public:
    threadsafe_queue():
        head(new node), tail(head.get())
    {}
    threadsafe_queue(const threadsafe_queue& other)=delete;
    threadsafe_queue& operator=(const threadsafe_queue& other)=delete;

    std::shared_ptr<T> try_pop();
    bool try_pop(T& value);
    std::shared_ptr<T> wait_and_pop();
    void wait_and_pop(T& value);
    void push(T new_value);
    bool empty();

private:
    node* get_tail()
    {
        std::lock_guard<std::mutex> tail_lock(tail_mutex);
        return tail;
    }

    std::unique_ptr<node> pop_head()
    {
        std::unique_ptr<node> old_head = std::move(head);
        head = std::move(old_head->next);
        return old_head;
    }

    std::unique_lock<std::mutex> wait_for_data()
    {
        std::unique_lock<std::mutex> head_lock(head_mutex);
        data_cond.wait(head_lock, [&]{return head.get()!=get_tail();});
        return std::move(head_lock);
    }

    std::unique_ptr<node> wait_pop_head()
    {
        std::unique_lock<std::mutex> head_lock(wait_for_data());
        return pop_head();
    }

    std::unique_ptr<node> wait_pop_head(T& value)
    {
        std::unique_lock<std::mutex> head_lock(wait_for_data());
        value=std::move(*head->data);
        return pop_head();
    }

    std::unique_ptr<node> try_pop_head()
    {
        std::unique_lock<std::mutex> head_lock(head_mutex);
        if(head.get()==get_tail())
        {
            return std::unique_ptr<node>();
        }
        return pop_head();
    }

    std::unique_ptr<node> try_pop_head(T& value)
    {
        std::unique_lock<std::mutex> head_lock(head_mutex);
        if(head.get()==get_tail())
        {
            return std::unique_ptr<node>();
        }
        value=std::move(*head->data);
        return pop_head();
    }
};
/*
 * PUBLIC INTERFACE
 */

// try pop.
template <typename T>
std::shared_ptr<T> threadsafe_queue<T>::try_pop()
{
    std::unique_ptr<node> const old_head=try_pop_head();
    return old_head?old_head->data:std::shared_ptr<T>();
}

template <typename T>
bool threadsafe_queue<T>::try_pop(T& value)
{
    std::unique_ptr<node> const old_head=try_pop_head(value);
    return old_head != nullptr;
}

// wait and pop.
template <typename T>
std::shared_ptr<T> threadsafe_queue<T>::wait_and_pop()
{
    std::unique_ptr<node> const old_head=wait_pop_head();
    return old_head->data;
}

template <typename T>
void threadsafe_queue<T>::wait_and_pop(T& value)
{
    std::unique_ptr<node> const old_head=wait_pop_head(value);
}

// push.
template <typename T>
void threadsafe_queue<T>::push(T new_value)
{
    std::shared_ptr<T> new_data(
        std::make_shared<T>(std::move(new_value)));
    std::unique_ptr<node> p(new node);
    {
        std::lock_guard<std::mutex> tail_lock(tail_mutex);
        tail->data=new_data;
        node* const new_tail=p.get();
        tail->next=std::move(p);
        tail=new_tail;
    }
    data_cond.notify_one();
}

// empty.
template <typename T>
bool threadsafe_queue<T>::empty()
{
    std::lock_guard<std::mutex> head_lock(head_mutex);
    return (head.get()==get_tail());
}

#endif
There is one thing in the code I cannot reason about, and it appears in two places. In wait_pop_head(T& value) and try_pop_head(T& value), there is value=std::move(*head->data);. Basically, to assign the dereferenced result of a shared_ptr to a reference, it is passed through std::move. I would appreciate it if you could let me know why it should be done this way. Why shouldn't value=*head->data; be used instead?
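To make the question concrete, here is a minimal sketch of the two alternatives, assuming for illustration that T is std::string (not a type from the book):

#include <memory>
#include <string>
#include <utility>

int main()
{
    std::shared_ptr<std::string> data =
        std::make_shared<std::string>("some payload"); // plays the role of head->data

    std::string value;
    value = *data;            // copy assignment: *data keeps its contents
    value = std::move(*data); // move assignment: *data is left valid but unspecified
}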
Another question that came up in the comments is: why should std::shared_ptr be used here instead of std::unique_ptr?
Upvotes: 0
Views: 2372
Reputation: 609
For the first question: if T owns dynamically allocated memory, then the move can save a lot of work. Also, the functions are named *_pop_head, which implies that the data of the head node should be treated as useless after the call. So using std::move to transfer ownership of T's dynamically allocated memory to the target T object is reasonable.
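As a rough sketch of that saving, assuming T owns heap memory (std::vector<int> here is just an illustration, not part of the queue code):

#include <utility>
#include <vector>

int main()
{
    std::vector<int> src(1000000, 42); // owns a large heap buffer
    std::vector<int> dst;

    dst = src;            // copy assignment: allocates and copies a million ints
    dst = std::move(src); // move assignment: steals src's buffer pointer instead,
                          // leaving src empty (valid but unspecified)
}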
For the second question, the only reason I can think of is that if we wanted to have a top member function, we could then hold the data of the head node in more than one place.
Upvotes: 0
Reputation: 15259
If type T defines a move assignment operator, then supplying an rvalue reference obtained via the call to std::move() allows that more efficient operator to be used, as opposed to forcing use of the copy assignment operator.

If type T doesn't define a move assignment operator, it's still fine to supply a T&& to its assignment operator, which will likely take a parameter of type const T& or maybe just T. In either case, an argument of type T&& can be converted to the parameter type.
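A small sketch of that fallback behaviour (the two types are made up purely for illustration):

#include <utility>

struct Movable
{
    Movable& operator=(const Movable&) { return *this; } // copy assignment
    Movable& operator=(Movable&&) { return *this; }      // move assignment, preferred for rvalues
};

struct CopyOnly
{
    CopyOnly& operator=(const CopyOnly&) { return *this; } // copy assignment only
};

int main()
{
    Movable m_src, m_dst;
    m_dst = std::move(m_src);  // the rvalue binds to Movable&&, so the move assignment runs

    CopyOnly c_src, c_dst;
    c_dst = std::move(c_src);  // no move assignment exists; the rvalue binds to
                               // const CopyOnly& and the copy assignment runs
}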
As far as the use of std::shared_ptr rather than std::unique_ptr goes, I don't see why it's necessary either. It looks like it would be possible to return a std::unique_ptr from try_pop() and wait_and_pop(), moving the pointer out of the threadsafe_queue::node instance that's about to be destroyed. The only argument I can come up with is that it allows callers of either of the "pop" functions to wind up with a sharable reference, which might be considered the more flexible option. My cursory reading of the implementation doesn't show any case where two threadsafe_queue::node instances would ever point to the same "data" value, so I can't find any internal reason for that design choice.
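For instance, a node holding std::unique_ptr<T> could hand its payload straight out when it is popped. This is only a sketch of the idea, not the book's code, and the helper name extract_data is made up:

#include <memory>
#include <utility>

template <typename T>
struct node
{
    std::unique_ptr<T> data;
    std::unique_ptr<node> next;
};

// hypothetical helper: pull the payload out of a node that has just been
// unlinked by pop_head() and is about to be destroyed
template <typename T>
std::unique_ptr<T> extract_data(std::unique_ptr<node<T>> old_head)
{
    if (!old_head)
        return std::unique_ptr<T>();
    return std::move(old_head->data); // move the payload out of the dying node
}

try_pop() and wait_and_pop() could then return that std::unique_ptr<T> directly, at the cost of callers no longer getting a shareable handle.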
Upvotes: 1