Mirror of https://github.com/isocpp/CppCoreGuidelines.git, commit 3b93c16fbd (synced 2024-03-22 13:30:58 +08:00)

@@ -6868,6 +6868,7 @@ Union rule summary:

##### Example

    ???

##### Enforcement

@@ -6878,6 +6879,7 @@ Union rule summary:

##### Reason

Naked unions are a source of type errors.

**Alternative**: Wrap them in a class together with a type field.

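The example for this rule is still a placeholder (`???`), so here is a minimal sketch of the alternative just mentioned: a union wrapped in a class together with a type field. The `Value` name, the two alternatives, and `Bad_entry` are illustrative assumptions, not part of the guideline.

    // Sketch only: a naked union kept private, guarded by a tag, so every
    // access is checked against the alternative currently held.
    struct Bad_entry {};                      // thrown on mismatched access

    class Value {                             // hypothetical name
    public:
        void set(int i) { tag = Tag::number; n = i; }
        void set(const char* p) { tag = Tag::text; s = p; }
        int get_int() const { if (tag != Tag::number) throw Bad_entry{}; return n; }
        const char* get_text() const { if (tag != Tag::text) throw Bad_entry{}; return s; }
    private:
        enum class Tag { number, text };
        Tag tag = Tag::number;
        union {                               // the naked union, never exposed directly
            int n = 0;
            const char* s;
        };
    };

C++17's `std::variant` provides this kind of discriminated union directly.
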
@@ -6888,6 +6890,8 @@ Naked unions are a source of type errors.

    ???

##### Enforcement

???

@@ -10309,7 +10313,7 @@ However, thanks to the magic of cut-and-paste, code fragments can turn up in une

        static double cached_x = 0.0;
        static double cached_result = COMPUTATION_OF_ZERO;
        double result;

        if (cached_x == x)
            return cached_result;
        result = computation(x);

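The fragment above caches the previous argument and result in function-local `static`s; pasted into code that can run on several threads, those statics become a data race. A sketch of one way to keep the cache and make it safe, assuming an enclosing `cached_computation(double)` function and adding a guard mutex (both assumptions for illustration):

    double cached_computation(double x)
    {
        static std::mutex cache_mutex;      // guards the two statics below
        static double cached_x = 0.0;
        static double cached_result = COMPUTATION_OF_ZERO;

        std::lock_guard<std::mutex> lock(cache_mutex);
        if (cached_x == x)
            return cached_result;
        double result = computation(x);
        cached_x = x;
        cached_result = result;
        return result;
    }
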
@@ -10365,9 +10369,9 @@ including:

  `id` plus one.
* Thread A and B load `id` and increment it simultaneously. They both get the
  same ID.

Local static variables are a common source of data races.

##### Example, bad

    void f(fstream& fs, regex pat)

@@ -10381,7 +10385,7 @@ Local static variables are a common source of data races.

        auto h2 = async([&]{ return find_all(buf, sz, pat); });   // spawn a task to find matches
        // ...
    }

Here, we have a (nasty) data race on the elements of `buf` (`sort` will both read and write).
All data races are nasty.
Here, we managed to get a data race on data on the stack.

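One way to remove that race, sketched under the assumption that the elided part of the example fills `buf` (via a `read_vec` helper) and spawns a task that sorts it: let the sorting task finish before any reader starts, or give the reader its own copy.

    void f(fstream& fs, regex pat)
    {
        array<double, max> buf;                    // assumed from the elided part of the example
        int sz = read_vec(fs, buf, max);           // assumed helper that fills buf

        auto h1 = async([&]{ sort(begin(buf), begin(buf) + sz); });
        h1.get();                                  // sorting is done; buf is no longer written

        auto h2 = async([&]{ return find_all(buf, sz, pat); });   // readers only: no race
        // ...
    }
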
@@ -10390,7 +10394,7 @@ Not all data races are as easy to spot as this one.

##### Example, bad

    // code not controlled by a lock

    unsigned val;

    if (val < 5) {

@@ -10405,7 +10409,7 @@ Not all data races are as easy to spot as this one.

    }

Now, a compiler that does not know that `val` can change will most likely implement that `switch` using a jump table with five entries.
Then, a `val` outside the `[0..4]` range will cause a jump to an address that could be anywhere in the program, and execution would proceed there.
Really, "all bets are off" if you get a data race.
Actually, it can be worse still: by looking at the generated code you may be able to determine where the stray jump will go for a given value;
this can be a security risk.

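A sketch of the obvious repair, not part of the guideline text: make the shared value atomic and read it exactly once, so the range check and the `switch` are guaranteed to see the same value.

    // Sketch: the shared value is atomic and read once into a local.
    std::atomic<unsigned> val{0};

    unsigned v = val.load();      // later changes by other threads cannot affect this use
    if (v < 5) {
        switch (v) {              // jump table indexed by a value known to be in [0..4]
            // ... cases 0..4 ...
        }
    }
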
@@ -10437,13 +10441,13 @@ The less sharing you do, the less chance you have to wait on a lock (so performa

    Graph<Temp_node> temperature_gradients(const vector<Reading>&);
    Image altitude_map(const vector<Reading>&);
    // ...

    void process_readings(istream& socket1)
    {
        vector<Reading> surface_readings;
        socket1 >> surface_readings;
        if (!socket1) throw Bad_input{};

        auto h1 = async([&] { if (!validate(surface_readings)) throw Invalid_data{}; });
        auto h2 = async([&] { return temperature_gradients(surface_readings); });
        auto h3 = async([&] { return altitude_map(surface_readings); });

@@ -10453,7 +10457,7 @@ The less sharing you do, the less chance you have to wait on a lock (so performa

        auto v3 = h3.get();
        // ...
    }

Without those `const`s, we would have to review every asynchronously invoked function for potential data races on `surface_readings`.

##### Note

@@ -10477,7 +10481,7 @@ Application concepts are easier to reason about.

##### Example

    ???

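The example here is still a placeholder (`???`); the following sketch illustrates the contrast the rule is drawing, with `v` assumed to be a `std::vector<double>` and the `accumulate` workload chosen purely for illustration: a task is something you get a value out of, a thread is something you have to manage.

    // Thread-level thinking: shared result, explicit thread management.
    double sum = 0;                         // shared; needs synchronization if more writers appear
    std::thread t([&]{ sum = std::accumulate(v.begin(), v.end(), 0.0); });
    // ... t must be joined, and sum must not be read before the join ...
    t.join();

    // Task-level thinking: the "result" is simply the value of the task.
    auto fut = std::async([&]{ return std::accumulate(v.begin(), v.end(), 0.0); });
    double sum2 = fut.get();                // no shared variable, no explicit join
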
##### Note

With the exception of `async()`, the standard-library facilities are low-level, machine-oriented, threads-and-lock level.

@@ -10486,7 +10490,7 @@ This is a potent argument for using higher level, more applications-oriented lib

##### Enforcement

???

### <a name="Rconc-volatile"></a>CP.8: Don't try to use `volatile` for synchronization

@@ -10504,7 +10508,7 @@ It simply has nothing to do with concurrency.

    {
        if (int n = free_slots--) return &pool[n];
    }

Here we have a problem:
This is perfectly good code in a single-threaded program, but have two threads execute it and
there is a race condition on `free_slots`, so two threads might get the same value of `free_slots` (and therefore the same slot).

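A sketch of the direction this rule points in, not taken from the guideline text: replace the `volatile` counter with `std::atomic`, so the decrement is one indivisible read-modify-write and no two threads can observe the same value. The `Pool`, `pool`, and `max_slots` names and the enclosing signature are assumptions about the elided part of the example.

    std::atomic<int> free_slots{max_slots};

    Pool* use()
    {
        if (int n = free_slots--) return &pool[n];  // atomic decrement: each caller gets a distinct n
        return nullptr;                             // no free slot
    }
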
@@ -10584,18 +10588,18 @@ Avoids nasty errors from unreleased locks.

##### Example, bad

    mutex mtx;

    void do_stuff()
    {
        mtx.lock();
        // ... do stuff ...
        mtx.unlock();
    }

Sooner or later, someone will forget the `mtx.unlock()`, place a `return` in the `... do stuff ...`, throw an exception, or something.

    mutex mtx;

    void do_stuff()
    {
        unique_lock<mutex> lck {mtx};

@@ -10620,23 +10624,23 @@ This is asking for deadlock:

    // thread 1
    lock_guard<mutex> lck1(m1);
    lock_guard<mutex> lck2(m2);

    // thread 2
    lock_guard<mutex> lck2(m2);
    lock_guard<mutex> lck1(m1);

Instead, use `lock()`:

    // thread 1
    unique_lock<mutex> lck1(m1, defer_lock);
    unique_lock<mutex> lck2(m2, defer_lock);
    lock(lck1, lck2);

    // thread 2
    unique_lock<mutex> lck2(m2, defer_lock);
    unique_lock<mutex> lck1(m1, defer_lock);
    lock(lck2, lck1);

Here, the writers of `thread1` and `thread2` are still not agreeing on the order of the `mutex`es, but order no longer matters.

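Not part of the original example, but worth noting: since C++17 the same deadlock-free acquisition can be written with `std::scoped_lock`, which locks all of its mutexes as if by `std::lock`:

    scoped_lock lck(m1, m2);    // C++17: both mutexes acquired deadlock-free, released at end of scope

With that, the two threads can name the mutexes in either order and still cannot deadlock against each other.
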
##### Note

@@ -10647,7 +10651,7 @@ In real code, `mutex`es are not always conveniently aquired on consequtive lines

I'm really looking forward to being able to write plain

    unique_lock lck1(m1, defer_lock);

and have the `mutex` type deduced.

##### Enforcement

@@ -10682,7 +10686,7 @@ A common example of the "calling unknown code" problem is a call to a function t

Such a problem can often be solved by using a `recursive_mutex`. For example:

    recursive_mutex my_mutex;

    template<typename Action>
    void do_something(Action f)
    {

@@ -10691,9 +10695,9 @@ Such problem cal often be solved by using a `recursive_mutex`. For example:

        f(this); // f will do something to *this
        // ...
    }

If, as is likely, `f()` invokes operations on `*this`, we must make sure that the object's invariant holds before the call.

##### Enforcement

* Flag calling a virtual function with a non-recursive `mutex` held

@@ -10716,7 +10720,7 @@ If a `thread` joins, we can safely pass pointers to objects in the scope of the

        // ...
    }

    int glob = 33;

    void some_fct(int* p)
    {
        int x = 77;

@@ -10754,16 +10758,16 @@ If a `thread` is detached, we can safely pass pointers to static and free store

        *p = 99;
        // ...
    }

    int glob = 33;

    void some_fct(int* p)
    {
        int x = 77;
        std::thread t0(f, &x);         // bad
        std::thread t1(f, p);          // bad
        std::thread t2(f, &glob);      // OK
        auto q = make_unique<int>(99);
        std::thread t3(f, q.get());    // bad
        // ...
        t0.detach();

@@ -10789,7 +10793,7 @@ After that, the usual lifetime and ownership (for global objects) enforcement ap

##### Reason

A `raii_thread` is a thread that joins at the end of its scope.

Detached threads are hard to monitor.

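`raii_thread` is a guideline-support-library notion rather than a standard type; a minimal sketch of the idea (an assumption, not the actual `gsl` implementation) is a thin wrapper that joins in its destructor. C++20 later standardized the same behavior as `std::jthread`.

    // Sketch only: join-on-destruction thread wrapper.
    class raii_thread {
    public:
        template<typename F, typename... Args>
        explicit raii_thread(F&& f, Args&&... args)
            : t(std::forward<F>(f), std::forward<Args>(args)...) {}
        ~raii_thread() { if (t.joinable()) t.join(); }   // joins at end of scope
        raii_thread(const raii_thread&) = delete;
        raii_thread& operator=(const raii_thread&) = delete;
    private:
        std::thread t;
    };
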
@@ -10813,7 +10817,7 @@ Documenting that aids comprehension and helps static analysis.

##### Example

    void heartbeat();

    void use()
    {
        gsl::detached_thread t1(heartbeat);   // obviously need not be joined

@@ -10829,7 +10833,7 @@ Flag unconditional `detach` on a plain `thread`

##### Reason

`thread`s that are supposed to unconditionally `join` or unconditionally `detach` can be clearly identified as such.
The plain `thread`s should be assumed to use the full generality of `std::thread`.

##### Example

@@ -10840,7 +10844,7 @@ The plain `thread`s should be assumed to use the full generality of `std::thread

        t->detach();
        // ...
    }

    void use(int n)
    {
        thread t { tricky, &t, n };

@@ -10857,7 +10861,7 @@ The plain `thread`s should be assumed to use the full generality of `std::thread

### <a name="Rconc-join"></a>CP.28: Remember to join scoped `thread`s that are not `detach()`ed

##### Reason

A `thread` that has been neither `join()`ed nor `detach()`ed when it is destroyed terminates the program.

##### Example, bad

@@ -10865,13 +10869,13 @@ A `thread` that has not been `detach()`ed when it is destroyed terminates the pr

    void f() { std::cout << "Hello "; }

    struct F {
        void operator()() { std::cout << "parallel world "; }
    };

    int main()
    {
        std::thread t1{f};      // f() executes in separate thread
        std::thread t2{F()};    // F()() executes in separate thread
    }                           // spot the bugs

##### Example

@@ -10879,18 +10883,18 @@ A `thread` that has not been `detach()`ed when it is destroyed terminates the pr

    void f() { std::cout << "Hello "; }

    struct F {
        void operator()() { std::cout << "parallel world "; }
    };

    int main()
    {
        std::thread t1{f};      // f() executes in separate thread
        std::thread t2{F()};    // F()() executes in separate thread

        t1.join();
        t2.join();
    }                           // one bad bug left

??? Is `cout` synchronized?

##### Enforcement

@@ -10914,7 +10918,7 @@ In general, you cannot know whether a non-`raii_thread` will outlife your thread

        // ...
        t0.detach();
    }

The `detach()` may not be so easy to spot.
Use a `raii_thread` or don't pass the pointer.

@@ -10942,13 +10946,13 @@ Defining "small amount" precisely and is impossible.

    string modify1(string);
    void modify2(string*);

    void fct(string& s)
    {
        auto res = async(modify1, s);
        async(modify2, &s);
    }

The call of `modify1` involves copying two `string` values; the call of `modify2` does not.
On the other hand, the implementation of `modify1` is exactly as we would have written it for single-threaded code,
whereas the implementation of `modify2` will need some form of locking to avoid data races.

@@ -10974,7 +10978,7 @@ safe way to ensure proper deletion.

##### Example

    ???

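The example is still a placeholder (`???`); the following sketch shows the kind of code this rule (sharing ownership so deletion happens safely) has in mind, with the worker functions and the `Image` payload assumed purely for illustration: each thread holds its own `shared_ptr` copy, and the object is deleted exactly once, when the last copy goes away.

    void consume(shared_ptr<const Image> im);   // hypothetical workers
    void analyze(shared_ptr<const Image> im);

    void use(shared_ptr<const Image> im)
    {
        std::thread{consume, im}.detach();      // each thread gets its own shared_ptr copy
        std::thread{analyze, im}.detach();
    }   // the Image is deleted when the last shared_ptr goes away, whichever thread that is in
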
##### Note

@@ -11026,20 +11030,20 @@ This spawns a `thread` per message, and the `run_list` is presumably managed to

Instead, we could have a set of pre-created worker threads processing the messages:

    Sync_queue<Message> work;

    void master(istream& is)
    {
        for (Message m; is >> m; )
            work.put(m);
    }

    void worker()
    {
        for (Message m; m = work.get(); ) {
            // process
        }
    }

    void workers()  // set up worker threads (specifically 4 worker threads)
    {
        raii_thread w1 {worker};

@@ -11047,7 +11051,7 @@ Instead, we could have a set of pre-created worker threads processing the messag

        raii_thread w3 {worker};
        raii_thread w4 {worker};
    }

##### Note

If your system has a good thread pool, use it.

@@ -11069,23 +11073,23 @@ A `wait` without a condition can miss a wakeup or wake up simply to find that th

    std::condition_variable cv;
    std::mutex mx;

    void thread1()
    {
        while (true) {
            // do some work ...
            std::unique_lock<std::mutex> lock(mx);
            cv.notify_one();    // wake other thread
        }
    }

    void thread2()
    {
        while (true) {
            std::unique_lock<std::mutex> lock(mx);
            cv.wait(lock);      // might block forever
            // do work ...
        }
    }

Here, if some other `thread` consumes `thread1`'s notification, `thread2` can wait forever.

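A minimal repair of the bad example above, sketched as an illustration (the `ready` flag is an assumption): record the state change under the mutex and wait on a predicate. The `Sync_queue` below does the same thing in a more useful form.

    std::condition_variable cv;
    std::mutex mx;
    bool ready = false;                         // the condition the waiter actually cares about

    void thread1()
    {
        while (true) {
            // do some work ...
            {
                std::lock_guard<std::mutex> lock(mx);
                ready = true;                   // record that there is something to do
            }
            cv.notify_one();
        }
    }

    void thread2()
    {
        while (true) {
            std::unique_lock<std::mutex> lock(mx);
            cv.wait(lock, []{ return ready; }); // wakes only when ready is true; lost or spurious wakeups are harmless
            ready = false;
            // do work ...
        }
    }
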
@@ -11094,30 +11098,30 @@ Here, if some other `thread` consumes `thread1`'s notification, `thread2` can wa

    template<typename T>
    class Sync_queue {
    public:
        void put(const T& val);
        void put(T&& val);
        void get(T& val);
    private:
        mutex mtx;
        condition_variable cond;    // this controls access
        list<T> q;
    };

    template<typename T>
    void Sync_queue<T>::put(const T& val)
    {
        lock_guard<mutex> lck(mtx);
        q.push_back(val);
        cond.notify_one();
    }

    template<typename T>
    void Sync_queue<T>::get(T& val)
    {
        unique_lock<mutex> lck(mtx);
        cond.wait(lck, [this]{ return !q.empty(); });    // prevent spurious wakeup
        val = q.front();
        q.pop_front();
    }

Now if the queue is empty when a thread executing `get()` wakes up (e.g., because another thread has gotten to `get()` before it),

@@ -11144,7 +11148,7 @@ and `thread` suspection and resumption are expensive.

        do1();  // transaction: needs locking
        do2();  // cleanup: does not need locking
    }

Here, we are holding the lock for longer than necessary:
We should not have taken the lock before we needed it and should have released it again before starting the cleanup.
We could rewrite this to

@@ -11157,7 +11161,7 @@ We could rewrite this to

        my_lock.unlock();
        do2();  // cleanup: does not need locking
    }

But that compromises safety and violates the [use RAII](#Rconc-raii) rule.
Instead, add a block for the critical section:

@@ -11170,7 +11174,7 @@ Instead, add a block for the critical section:

        }
        do2();  // cleanup: does not need locking
    }

##### Enforcement

Impossible in general.

@@ -11188,7 +11192,7 @@ An unnamed local objects is a temporary that immediately goes out of scope.

    unique_lock<mutex>(m1);
    lock_guard<mutex> {m2};
    lock(m1, m2);

This looks innocent enough, but it isn't.

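A brief unpacking of why, added here as an aside rather than guideline text:

    unique_lock<mutex>(m1);     // declares a new, default-constructed unique_lock named m1; nothing is locked
    lock_guard<mutex> {m2};     // a temporary: m2 is locked and immediately unlocked again
    lock(m1, m2);               // now refers to the wrong m1, so this does not do what the names suggest
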
##### Enforcement

@@ -11209,7 +11213,7 @@ It should be obvious to a reader that the data is to be guarded and how.

        std::mutex m;   // take this mutex before accessing other members
        // ...
    };

##### Enforcement

??? Possible?

@@ -11330,16 +11334,16 @@ It's error-prone and requires expert level knowledge of language features, machi

##### Example, bad

    extern atomic<Link*> head;           // the shared head of a linked list

    Link* nh = new Link(data, nullptr);  // make a link ready for insertion
    Link* h = head.load();               // read the shared head of the list

    do {
        if (h->data <= data) break;      // if so, insert elsewhere
        nh->next = h;                    // next element is the previous head
    } while (!head.compare_exchange_weak(h, nh));  // write nh to head or to h

Spot the bug.
It would be really hard to find through testing.
Read up on the ABA problem.

@@ -11390,7 +11394,7 @@ Become an expert before shipping lock-free code for others to use.

* Mark Batty, Scott Owens, Susmit Sarkar, Peter Sewell, and Tjark Weber: “Mathematizing C++ Concurrency”. POPL 2011.
* Damian Dechev, Peter Pirkelbauer, and Bjarne Stroustrup: Understanding and Effectively Preventing the ABA Problem in Descriptor-based Lock-free Designs. 13th IEEE Computer Society ISORC 2010 Symposium. May 2010.
* Damian Dechev and Bjarne Stroustrup: Scalable Non-blocking Concurrent Objects for Mission Critical Code. ACM OOPSLA'09. October 2009.
* Damian Dechev, Peter Pirkelbauer, Nicolas Rouquette, and Bjarne Stroustrup: Semantically Enhanced Containers for Concurrent Real-Time Systems. Proc. 16th Annual IEEE International Conference and Workshop on the Engineering of Computer Based Systems (IEEE ECBS). April 2009.

### <a name="Rconc-double"></a>CP.110: Use a conventional pattern for double-checked locking

@@ -11410,7 +11414,7 @@ Double-checked locking is easy to mess up.

                x_init.store(true, memory_order_release);
            }
        }

        // ... use x ...

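Only the tail of the example is visible in this fragment; the conventional pattern it belongs to looks roughly like the following sketch, reconstructed here under assumptions (the `x_mutex` name, the enclosing function, and `X` as a stand-in for the object being initialized).

    atomic<bool> x_init{false};
    mutex x_mutex;
    X x;                                               // to be initialized exactly once

    void use()
    {
        if (!x_init.load(memory_order_acquire)) {      // cheap check, no lock taken
            lock_guard<mutex> lck(x_mutex);
            if (!x_init.load(memory_order_relaxed)) {  // re-check under the lock
                // ... initialize x ...
                x_init.store(true, memory_order_release);
            }
        }
        // ... use x ...
    }
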
@@ -11435,7 +11439,7 @@ These rules defy simple catagorization:

##### Example

    const volatile long clock;

This describes a register constantly updated by a clock circuit.
`clock` is `volatile` because its value will change without any action from the C++ program that uses it.
For example, reading `clock` twice will often yield two different values, so the optimizer had better not optimize away the second read in this code:

@@ -11443,7 +11447,7 @@ For example, reading `clock` twice will often yield two different values, so the

    long t1 = clock;
    // ... no use of clock here ...
    long t2 = clock;

`clock` is `const` because the program should not try to write to `clock`.

##### Note