This article describes an original implementation; the library may have evolved or changed since it was written.
1. The first step is to modify the Anaconda library to obtain search_meta_data;
2. Call Anaconda to crawl the search data.
Package Anacondaimport ("Net/url") type search_meta_data struct {completed_in float32 ' JSON: "completed_in" ' max_id Int64 ' JSON: ' max_id ' max_id_str string ' json: ' Max_id_str ' ' next_results string ' json: ' Next_results ' ' Query string ' JSON: ' Query ' ' Refresh_url string ' json: ' Refresh_url ' ' Count int ' json: ' count ' ' since_id int ' json: ' since_id ' ' Since_id_str string ' json: ' since_id_str ' '}type searchresponse struct {statuses []tweetsearch_metadata Search_meta_ Data}func (a Twitterapi) Getsearch (queryString string, v URL. Values) (Data search_meta_data, Timeline []tweet, err Error) {var sr Searchresponsev = cleanvalues (v) v.set ("Q", Querystrin g) Response_ch: = Make (chan response) a.queryqueue <-Query{baseurl + "/search/tweets.json", V, &SR, _get, Response_ ch}//We have to read from the response channel before assigning to timeline//Otherwise This would happen before the RESPO Nses has been writtenresp: = <-response_cherr = Resp.errtimeline = Sr. Statusesdata= Sr. SeArch_metadatareturn data, Timeline, err}