Using Golang to implement a barrier object with pthread_barrier_t-like semantics

Looking at the sync package in the Golang standard library, the WaitGroup type is often described as the Golang version of a barrier object. The package documentation gives the following usage example:

var wg sync.WaitGroup
var urls = []string{
    "http://www.golang.org/",
    "http://www.google.com/",
    "http://www.somestupidname.com/",
}
for _, url := range urls {
    // Increment the WaitGroup counter.
    wg.Add(1)
    // Launch a goroutine to fetch the URL.
    go func(url string) {
        // Decrement the counter when the goroutine completes.
        defer wg.Done()
        // Fetch the URL.
        http.Get(url)
    }(url)
}
// Wait for all HTTP fetches to complete.
wg.Wait()

As you can see, WaitGroup is used by one goroutine (the one that calls the Wait() method) to wait for a number of other goroutines (each of which calls the Done() method when it finishes).

The Add method increases or decreases the internal counter, and Done is simply Add(-1).
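The counter behaviour can be seen in a minimal sketch like the following (this is not from the sync documentation; the worker loop and messages are made up for illustration):

package main

import (
    "fmt"
    "sync"
)

func main() {
    var wg sync.WaitGroup
    wg.Add(3) // counter = 3: the main goroutine will wait for three workers
    for i := 0; i < 3; i++ {
        go func(id int) {
            defer wg.Done() // same as wg.Add(-1)
            fmt.Println("worker", id, "done")
        }(i)
    }
    wg.Wait() // blocks until the counter reaches zero
    fmt.Println("all workers finished")
}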

This differs from the semantics of pthread_barrier_t, where the callers of pthread_barrier_wait() wait for each other. Think of 5 runners (threads) in a hurdle race: pthread_barrier_init is called with 5 as its last parameter, and the five runners, being good friends, agree on a rule: whoever reaches the hurdle first waits for the teammates, and only after the last one crosses the bar do they all start again from the same line. Below is a simple example of pthread_barrier_t with 5 threads, each with a private array and its own increment value:

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define NTHR     5
#define NARR     6
#define INLOOPS  1000
#define OUTLOOPS 10

#define err_abort(code, text) do {                                        \
    char errbuf[128] = {0};                                               \
    fprintf(stderr, "%s at \"%s\":%d: %s\n",                              \
            (text), __FILE__, __LINE__, strerror_r((code), errbuf, 128)); \
    abort();                                                              \
} while (0)

typedef struct thrArg {
    pthread_t tid;
    int       incr;
    int       arr[NARR];
} thrArg;

pthread_barrier_t barrier;
thrArg            thrs[NTHR];

void *thrFunc(void *arg)
{
    thrArg *self = (thrArg *)arg;
    int j, i, k, status;

    for (i = 0; i < OUTLOOPS; i++) {
        status = pthread_barrier_wait(&barrier);
        if (status > 0)
            err_abort(status, "Wait on barrier");

        /* Each thread adds its own increment to the members of its
         * private array INLOOPS times. */
        for (j = 0; j < INLOOPS; j++)
            for (k = 0; k < NARR; k++)
                self->arr[k] += self->incr;

        /* Threads that finish the iteration first wait here until the
         * last one arrives. */
        status = pthread_barrier_wait(&barrier);
        if (status > 0)
            err_abort(status, "Wait on barrier");

        /* The last thread to arrive adds 1 to every thread's increment.
         * The other threads are by now blocked at the first wait of the
         * next iteration, so the last thread has exclusive access to all
         * threads' state. When this block finishes it reaches that first
         * wait as well, the blocked threads are released, and everyone
         * computes with the new increment. */
        if (status == PTHREAD_BARRIER_SERIAL_THREAD) {
            int i;
            for (i = 0; i < NTHR; i++)
                thrs[i].incr += 1;
        }
    }
    return NULL;
}

int main(int argc, char *argv[])
{
    int i, j;
    int status;

    pthread_barrier_init(&barrier, NULL, NTHR);

    for (i = 0; i < NTHR; i++) {
        thrs[i].incr = i;
        for (j = 0; j < NARR; j++)
            thrs[i].arr[j] = j + 1;

        status = pthread_create(&thrs[i].tid, NULL, thrFunc, (void *)&thrs[i]);
        if (status != 0)
            err_abort(status, "Create thread");
    }

    for (i = 0; i < NTHR; i++) {
        status = pthread_join(thrs[i].tid, NULL);
        if (status != 0)
            err_abort(status, "Join thread");

        printf("%02d: (%d) ", i, thrs[i].incr);
        for (j = 0; j < NARR; j++)
            printf("%010u ", thrs[i].arr[j]);
        printf("\n");
    }

    pthread_barrier_destroy(&barrier);
    return 0;
}

To express the above C code in Golang, we need a Barrier object with semantics equivalent to pthread_barrier_t. It can be built from Golang's existing Mutex and Cond objects:

package main

import (
    "fmt"
    "sync"
)

const (
    outloops = 10
    inloops  = 1000
    nthr     = 5
    narr     = 6
)

// Barrier provides semantics similar to pthread_barrier_t.
type Barrier struct {
    lock      sync.Mutex
    cond      sync.Cond
    threshold int  // total number of waiters
    count     int  // waiters that have not yet reached the barrier in this cycle
    cycle     bool // flipped by the last arrival, re-arms the barrier for the next cycle
}

func NewBarrier(n int) *Barrier {
    b := &Barrier{threshold: n, count: n}
    b.cond.L = &b.lock
    return b
}

// Wait blocks until all participants have called Wait.
// last == true means the caller was the last one to arrive.
func (b *Barrier) Wait() (last bool) {
    b.lock.Lock()
    defer b.lock.Unlock()

    cycle := b.cycle
    b.count--
    if b.count == 0 {
        // The last arrival re-initializes count, flips the cycle flag,
        // and wakes everyone up.
        b.cycle = !b.cycle
        b.count = b.threshold
        b.cond.Broadcast()
        last = true
    } else {
        for cycle == b.cycle {
            b.cond.Wait()
        }
    }
    return
}

type thrArg struct {
    incr int
    arr  [narr]int
}

var (
    thrs    [nthr]thrArg
    wg      sync.WaitGroup
    barrier = NewBarrier(nthr)
)

func thrFunc(arg *thrArg) {
    defer wg.Done()
    for i := 0; i < outloops; i++ {
        barrier.Wait()
        // Each goroutine adds its own increment to its private array inloops times.
        for j := 0; j < inloops; j++ {
            for k := 0; k < narr; k++ {
                arg.arr[k] += arg.incr
            }
        }
        // The last goroutine to arrive adds 1 to every goroutine's increment;
        // the others are blocked at the first Wait of the next iteration, so
        // it has exclusive access to the shared state.
        if barrier.Wait() {
            for i := 0; i < nthr; i++ {
                thrs[i].incr += 1
            }
        }
    }
}

func main() {
    for i := 0; i < nthr; i++ {
        thrs[i].incr = i
        for j := 0; j < narr; j++ {
            thrs[i].arr[j] = j + 1
        }
        wg.Add(1)
        go thrFunc(&thrs[i])
    }
    wg.Wait()
    // All goroutines are done; the main goroutine checks the final results.
    for i := 0; i < nthr; i++ {
        fmt.Printf("%02d: (%d) ", i, thrs[i].incr)
        for j := 0; j < narr; j++ {
            fmt.Printf("%010d ", thrs[i].arr[j])
        }
        fmt.Println()
    }
}
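The cycle field is what makes this Barrier reusable across iterations: each waiter remembers the cycle value it saw on entry and only returns once the last arrival has flipped it, so the loop around cond.Wait() also tolerates stray wakeups. As a quick illustration of the type on its own, separate from the array program above, here is a minimal sketch (the demoPhases function and its messages are made up, and it assumes the Barrier and NewBarrier definitions above are in the same package):

// Three goroutines run two phases in lock step.
func demoPhases() {
    const n = 3
    b := NewBarrier(n)
    var wg sync.WaitGroup
    for id := 0; id < n; id++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            fmt.Printf("goroutine %d finished phase 1\n", id)
            b.Wait() // no goroutine starts phase 2 until all have finished phase 1
            fmt.Printf("goroutine %d finished phase 2\n", id)
            b.Wait() // the same Barrier is reused for the second phase
        }(id)
    }
    wg.Wait()
}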
