Get award emojis for issue/merge request
def __get_award_emoji(self, item_type, item_id): """Get award emojis for issue/merge request""" emojis = [] group_emojis = self.client.emojis(item_type, item_id) for raw_emojis in group_emojis: for emoji in json.loads(raw_emojis): emojis.append(emoji) return emojis
Fetch emojis for a note of an issue/merge request
def __get_note_award_emoji(self, item_type, item_id, note_id): """Fetch emojis for a note of an issue/merge request""" emojis = [] group_emojis = self.client.note_emojis(item_type, item_id, note_id) try: for raw_emojis in group_emojis: for emoji in json.loads(raw_emojis): emojis.append(emoji) except requests.exceptions.HTTPError as error: if error.response.status_code == 404: logger.warning("Emojis not available for %s ", urijoin(item_type, str(item_id), GitLabClient.NOTES, str(note_id), GitLabClient.EMOJI)) return emojis return emojis
Get the issues from pagination
def issues(self, from_date=None): """Get the issues from pagination""" payload = { 'state': 'all', 'order_by': 'updated_at', 'sort': 'asc', 'per_page': PER_PAGE } if from_date: payload['updated_after'] = from_date.isoformat() return self.fetch_items(GitLabClient.ISSUES, payload)
Get the merge requests from pagination
def merges(self, from_date=None): """Get the merge requests from pagination""" payload = { 'state': 'all', 'order_by': 'updated_at', 'sort': 'asc', 'view': 'simple', 'per_page': PER_PAGE } if from_date: payload['updated_after'] = from_date.isoformat() return self.fetch_items(GitLabClient.MERGES, payload)
Get the merge full data
def merge(self, merge_id): """Get the merge full data""" path = urijoin(self.base_url, GitLabClient.PROJECTS, self.owner + '%2F' + self.repository, GitLabClient.MERGES, merge_id) response = self.fetch(path) return response.text
Get the merge versions from pagination
def merge_versions(self, merge_id): """Get the merge versions from pagination""" payload = { 'order_by': 'updated_at', 'sort': 'asc', 'per_page': PER_PAGE } path = urijoin(GitLabClient.MERGES, str(merge_id), GitLabClient.VERSIONS) return self.fetch_items(path, payload)
Get merge version detail
def merge_version(self, merge_id, version_id): """Get merge version detail""" path = urijoin(self.base_url, GitLabClient.PROJECTS, self.owner + '%2F' + self.repository, GitLabClient.MERGES, merge_id, GitLabClient.VERSIONS, version_id) response = self.fetch(path) return response.text
Get the notes from pagination
def notes(self, item_type, item_id): """Get the notes from pagination""" payload = { 'order_by': 'updated_at', 'sort': 'asc', 'per_page': PER_PAGE } path = urijoin(item_type, str(item_id), GitLabClient.NOTES) return self.fetch_items(path, payload)
Get emojis from pagination
def emojis(self, item_type, item_id): """Get emojis from pagination""" payload = { 'order_by': 'updated_at', 'sort': 'asc', 'per_page': PER_PAGE } path = urijoin(item_type, str(item_id), GitLabClient.EMOJI) return self.fetch_items(path, payload)
Get emojis of a note
def note_emojis(self, item_type, item_id, note_id): """Get emojis of a note""" payload = { 'order_by': 'updated_at', 'sort': 'asc', 'per_page': PER_PAGE } path = urijoin(item_type, str(item_id), GitLabClient.NOTES, str(note_id), GitLabClient.EMOJI) return self.fetch_items(path, payload)
Calculate the seconds to reset the token requests by obtaining the difference between the current date and the next date when the token is fully regenerated.
def calculate_time_to_reset(self):
    """Calculate the seconds to reset the token requests, by obtaining the
    difference between the current date and the next date when the token is
    fully regenerated.
    """
    time_to_reset = self.rate_limit_reset_ts - (datetime_utcnow().replace(microsecond=0).timestamp() + 1)

    if time_to_reset < 0:
        time_to_reset = 0

    return time_to_reset
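The computation can be reproduced outside the client with the standard library alone; the rate_limit_reset_ts value below is a made-up stand-in for the reset timestamp reported by the API.

from datetime import datetime, timezone

# Hypothetical reset timestamp, 30 seconds from now
rate_limit_reset_ts = datetime.now(timezone.utc).timestamp() + 30

time_to_reset = rate_limit_reset_ts - (datetime.now(timezone.utc).replace(microsecond=0).timestamp() + 1)
time_to_reset = max(time_to_reset, 0)  # never return a negative sleep time

print(round(time_to_reset))  # roughly 29-30 seconds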
Fetch the data from a given URL.
def fetch(self, url, payload=None, headers=None, method=HttpClient.GET, stream=False): """Fetch the data from a given URL. :param url: link to the resource :param payload: payload of the request :param headers: headers of the request :param method: type of request call (GET or POST) :param stream: defer downloading the response body until the response content is available :returns a response object """ if not self.from_archive: self.sleep_for_rate_limit() response = super().fetch(url, payload, headers, method, stream) if not self.from_archive: self.update_rate_limit(response) return response
Return the items from GitLab API using links pagination
def fetch_items(self, path, payload): """Return the items from GitLab API using links pagination""" page = 0 # current page last_page = None # last page url_next = urijoin(self.base_url, GitLabClient.PROJECTS, self.owner + '%2F' + self.repository, path) logger.debug("Get GitLab paginated items from " + url_next) response = self.fetch(url_next, payload=payload) items = response.text page += 1 if 'last' in response.links: last_url = response.links['last']['url'] last_page = last_url.split('&page=')[1].split('&')[0] last_page = int(last_page) logger.debug("Page: %i/%i" % (page, last_page)) while items: yield items items = None if 'next' in response.links: url_next = response.links['next']['url'] # Loving requests :) response = self.fetch(url_next, payload=payload) page += 1 items = response.text logger.debug("Page: %i/%i" % (page, last_page))
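This pagination relies on requests exposing the RFC 5988 'Link' header as response.links. Below is a minimal sketch of the same pattern with plain requests; the URL, token and helper name are illustrative placeholders, not values used by the backend.

import requests

def paginate(url, token, params=None):
    """Yield the raw body of each page, following the 'next' relation."""
    headers = {'PRIVATE-TOKEN': token}
    while url:
        response = requests.get(url, params=params, headers=headers)
        response.raise_for_status()
        yield response.text
        # requests parses the Link header into response.links
        url = response.links.get('next', {}).get('url')  # None when there is no next page
        params = None  # the 'next' URL already embeds the query string

# Example (requires a real token):
# for page in paginate('https://gitlab.com/api/v4/projects/owner%2Frepo/issues',
#                      '<token>', params={'per_page': 100}):
#     ...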
Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if headers and 'PRIVATE-TOKEN' in headers: headers.pop('PRIVATE-TOKEN', None) return url, headers, payload
Initialize rate limit information
def _init_rate_limit(self): """Initialize rate limit information""" url = urijoin(self.base_url, 'projects', self.owner + '%2F' + self.repository) try: response = super().fetch(url) self.update_rate_limit(response) except requests.exceptions.HTTPError as error: if error.response.status_code == 401: raise error else: logger.warning("Rate limit not initialized: %s", error)
Returns the GitLab argument parser.
def setup_cmd_parser(cls): """Returns the GitLab argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True, archive=True) # GitLab options group = parser.parser.add_argument_group('GitLab arguments') group.add_argument('--enterprise-url', dest='base_url', help="Base URL for GitLab Enterprise instance") group.add_argument('--sleep-for-rate', dest='sleep_for_rate', action='store_true', help="sleep for getting more rate") group.add_argument('--min-rate-to-sleep', dest='min_rate_to_sleep', default=MIN_RATE_LIMIT, type=int, help="sleep until reset when the rate limit \ reaches this value") group.add_argument('--blacklist-ids', dest='blacklist_ids', nargs='*', type=int, help="Ids of items that must not be retrieved.") # Generic client options group.add_argument('--max-retries', dest='max_retries', default=MAX_RETRIES, type=int, help="number of API call retries") group.add_argument('--sleep-time', dest='sleep_time', default=DEFAULT_SLEEP_TIME, type=int, help="sleeping time between API call retries") # Positional arguments parser.parser.add_argument('owner', help="GitLab owner") parser.parser.add_argument('repository', help="GitLab repository") return parser
Fetch the messages from the channel.
def fetch(self, category=CATEGORY_MESSAGE, from_date=DEFAULT_DATETIME): """Fetch the messages from the channel. This method fetches the messages stored on the channel that were sent since the given date. :param category: the category of items to fetch :param from_date: obtain messages sent since this date :returns: a generator of messages """ if not from_date: from_date = DEFAULT_DATETIME from_date = datetime_to_utc(from_date) latest = datetime_utcnow().timestamp() kwargs = {'from_date': from_date, 'latest': latest} items = super().fetch(category, **kwargs) return items
Fetch the messages
def fetch_items(self, category, **kwargs):
    """Fetch the messages

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']
    latest = kwargs['latest']

    logger.info("Fetching messages of '%s' channel from %s",
                self.channel, str(from_date))

    raw_info = self.client.channel_info(self.channel)
    channel_info = self.parse_channel_info(raw_info)
    channel_info['num_members'] = self.client.conversation_members(self.channel)

    oldest = datetime_to_utc(from_date).timestamp()

    # Minimum value supported by Slack is 0, not 0.0
    if oldest == 0.0:
        oldest = 0

    # Slack does not include in its result the lower limit of the search
    # when it has the same date as 'oldest'. To fetch these messages too,
    # we subtract a small value so the dates are never equal. To avoid
    # precision problems, five decimal places are subtracted instead of six.
    if oldest > 0.0:
        oldest -= .00001

    fetching = True
    nmsgs = 0

    while fetching:
        raw_history = self.client.history(self.channel, oldest=oldest, latest=latest)
        messages, fetching = self.parse_history(raw_history)

        for message in messages:
            # Fetch user data
            user_id = None
            if 'user' in message:
                user_id = message['user']
            elif 'comment' in message:
                user_id = message['comment']['user']

            if user_id:
                message['user_data'] = self.__get_or_fetch_user(user_id)

            message['channel_info'] = channel_info
            yield message

            nmsgs += 1

            if fetching:
                latest = float(message['ts'])

    logger.info("Fetch process completed: %s messages fetched", nmsgs)
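The boundary tweak on 'oldest' can be checked in isolation; the date below is arbitrary.

from datetime import datetime, timezone

from_date = datetime(2020, 1, 1, tzinfo=timezone.utc)
oldest = from_date.timestamp()

if oldest == 0.0:
    oldest = 0          # Slack expects the integer 0, not 0.0
elif oldest > 0.0:
    oldest -= .00001    # include messages sent exactly at from_date

print(oldest)  # 1577836799.99999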
Extracts the identifier from a Slack item.
def metadata_id(item):
    """Extracts the identifier from a Slack item.

    This identifier will be the mix of two fields because Slack
    messages do not have any unique identifier. In this case,
    'ts' and 'user' values (or 'bot_id' when the message is sent
    by a bot) are combined because there have been cases where
    two messages were sent by different users at the same time.
    """
    if 'user' in item:
        nick = item['user']
    elif 'comment' in item:
        nick = item['comment']['user']
    else:
        nick = item['bot_id']

    return item['ts'] + nick
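A worked example with a fabricated message shows the composed identifier:

message = {'ts': '1427135733.000068', 'user': 'U0001'}

if 'user' in message:
    nick = message['user']
elif 'comment' in message:
    nick = message['comment']['user']
else:
    nick = message['bot_id']

print(message['ts'] + nick)  # '1427135733.000068U0001'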
Init client
def _init_client(self, from_archive=False): """Init client""" return SlackClient(self.api_token, self.max_items, self.archive, from_archive)
Fetch the number of members in a conversation, which is a supertype for public and private channels, DMs and group DMs.
def conversation_members(self, conversation): """Fetch the number of members in a conversation, which is a supertype for public and private ones, DM and group DM. :param conversation: the ID of the conversation """ members = 0 resource = self.RCONVERSATION_INFO params = { self.PCHANNEL: conversation, } raw_response = self._fetch(resource, params) response = json.loads(raw_response) members += len(response["members"]) while 'next_cursor' in response['response_metadata'] and response['response_metadata']['next_cursor']: params['cursor'] = response['response_metadata']['next_cursor'] raw_response = self._fetch(resource, params) response = json.loads(raw_response) members += len(response["members"]) return members
Fetch information about a channel.
def channel_info(self, channel): """Fetch information about a channel.""" resource = self.RCHANNEL_INFO params = { self.PCHANNEL: channel, } response = self._fetch(resource, params) return response
Fetch the history of a channel.
def history(self, channel, oldest=None, latest=None): """Fetch the history of a channel.""" resource = self.RCHANNEL_HISTORY params = { self.PCHANNEL: channel, self.PCOUNT: self.max_items } if oldest is not None: params[self.POLDEST] = oldest if latest is not None: params[self.PLATEST] = latest response = self._fetch(resource, params) return response
Fetch user info.
def user(self, user_id): """Fetch user info.""" resource = self.RUSER_INFO params = { self.PUSER: user_id } response = self._fetch(resource, params) return response
Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if SlackClient.PTOKEN in payload: payload.pop(SlackClient.PTOKEN) return url, headers, payload
Fetch a resource.
def _fetch(self, resource, params): """Fetch a resource. :param resource: resource to get :param params: dict with the HTTP parameters needed to get the given resource """ url = self.URL % {'resource': resource} params[self.PTOKEN] = self.api_token logger.debug("Slack client requests: %s params: %s", resource, str(params)) r = self.fetch(url, payload=params) # Check for possible API errors result = r.json() if not result['ok']: raise SlackClientError(error=result['error']) return r.text
Returns the Slack argument parser.
def setup_cmd_parser(cls): """Returns the Slack argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True, archive=True) # Backend token is required action = parser.parser._option_string_actions['--api-token'] action.required = True # Slack options group = parser.parser.add_argument_group('Slack arguments') group.add_argument('--max-items', dest='max_items', type=int, default=MAX_ITEMS, help="Maximum number of items requested on the same query") # Required arguments parser.parser.add_argument('channel', help="Slack channel identifier") return parser
Fetch the bugs
def fetch_items(self, category, **kwargs): """Fetch the bugs :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Looking for bugs: '%s' updated from '%s'", self.url, str(from_date)) buglist = [bug for bug in self.__fetch_buglist(from_date)] nbugs = 0 tbugs = len(buglist) for i in range(0, tbugs, self.max_bugs): chunk = buglist[i:i + self.max_bugs] bugs_ids = [b['bug_id'] for b in chunk] logger.info("Fetching bugs: %s/%s", i, tbugs) bugs = self.__fetch_and_parse_bugs_details(bugs_ids) for bug in bugs: bug_id = bug['bug_id'][0]['__text__'] bug['activity'] = self.__fetch_and_parse_bug_activity(bug_id) nbugs += 1 yield bug logger.info("Fetch process completed: %s/%s bugs fetched", nbugs, tbugs)
Extracts and converts the update time from a Bugzilla item.
def metadata_updated_on(item):
    """Extracts and converts the update time from a Bugzilla item.

    The timestamp is extracted from the 'delta_ts' field. This date is
    converted to UNIX timestamp format. Because Bugzilla servers ignore
    the timezone on HTTP requests, it is ignored during the conversion, too.

    :param item: item generated by the backend

    :returns: a UNIX timestamp
    """
    ts = item['delta_ts'][0]['__text__']
    ts = str_to_datetime(ts)
    ts = ts.replace(tzinfo=dateutil.tz.tzutc())

    return ts.timestamp()
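The same conversion can be reproduced with dateutil alone; the 'delta_ts' value below is fabricated.

import dateutil.parser
import dateutil.tz

ts = dateutil.parser.parse('2015-08-12 18:32:11 +0200')
ts = ts.replace(tzinfo=dateutil.tz.tzutc())  # original offset discarded, as in the backend
print(ts.timestamp())                        # 1439404331.0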
Parse a Bugzilla CSV bug list.
def parse_buglist(raw_csv):
    """Parse a Bugzilla CSV bug list.

    The method parses the CSV file and returns an iterator of
    dictionaries. Each one of these contains the summary of a bug.

    :param raw_csv: CSV string to parse

    :returns: a generator of parsed bugs
    """
    reader = csv.DictReader(raw_csv.split('\n'),
                            delimiter=',', quotechar='"')
    for row in reader:
        yield row
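A quick usage sketch with a fabricated two-row buglist CSV:

import csv

raw_csv = 'bug_id,"changeddate"\n15,"2009-07-30 11:35:33"\n18,"2009-08-02 09:12:06"'

for row in csv.DictReader(raw_csv.split('\n'), delimiter=',', quotechar='"'):
    print(row['bug_id'], row['changeddate'])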
Parse a Bugzilla bugs details XML stream.
def parse_bugs_details(raw_xml):
    """Parse a Bugzilla bugs details XML stream.

    This method returns a generator which parses the given XML,
    producing an iterator of dictionaries. Each dictionary stores
    the information related to a parsed bug.

    If the given XML is invalid or does not contain any bug, the
    method will raise a ParseError exception.

    :param raw_xml: XML string to parse

    :returns: a generator of parsed bugs

    :raises ParseError: raised when an error occurs parsing
        the given XML stream
    """
    bugs = xml_to_dict(raw_xml)

    if 'bug' not in bugs:
        cause = "No bugs found. XML stream seems to be invalid."
        raise ParseError(cause=cause)

    for bug in bugs['bug']:
        yield bug
Parse a Bugzilla bug activity HTML stream.
def parse_bug_activity(raw_html): """Parse a Bugzilla bug activity HTML stream. This method extracts the information about activity from the given HTML stream. The bug activity is stored into a HTML table. Each parsed activity event is returned into a dictionary. If the given HTML is invalid, the method will raise a ParseError exception. :param raw_html: HTML string to parse :returns: a generator of parsed activity events :raises ParseError: raised when an error occurs parsing the given HTML stream """ def is_activity_empty(bs): EMPTY_ACTIVITY = "No changes have been made to this (?:bug|issue) yet." tag = bs.find(text=re.compile(EMPTY_ACTIVITY)) return tag is not None def find_activity_table(bs): # The first table with 5 columns is the table of activity tables = bs.find_all('table') for tb in tables: nheaders = len(tb.tr.find_all('th', recursive=False)) if nheaders == 5: return tb raise ParseError(cause="Table of bug activity not found.") def remove_tags(bs): HTML_TAGS_TO_REMOVE = ['a', 'i', 'span'] for tag in bs.find_all(HTML_TAGS_TO_REMOVE): tag.replaceWith(tag.text) def format_text(bs): strings = [s.strip(' \n\t') for s in bs.stripped_strings] s = ' '.join(strings) return s # Parsing starts here bs = bs4.BeautifulSoup(raw_html, 'html.parser') if is_activity_empty(bs): fields = [] else: activity_tb = find_activity_table(bs) remove_tags(activity_tb) fields = activity_tb.find_all('td') while fields: # First two fields: 'Who' and 'When'. who = fields.pop(0) when = fields.pop(0) # The attribute 'rowspan' of 'who' field tells how many # changes were made on the same date. n = int(who.get('rowspan')) # Next fields are split into chunks of three elements: # 'What', 'Removed' and 'Added'. These chunks share # 'Who' and 'When' values. for _ in range(n): what = fields.pop(0) removed = fields.pop(0) added = fields.pop(0) event = {'Who': format_text(who), 'When': format_text(when), 'What': format_text(what), 'Removed': format_text(removed), 'Added': format_text(added)} yield event
Init client
def _init_client(self, from_archive=False): """Init client""" return BugzillaClient(self.url, user=self.user, password=self.password, max_bugs_csv=self.max_bugs_csv, archive=self.archive, from_archive=from_archive)
Authenticate a user in the server.
def login(self, user, password): """Authenticate a user in the server. :param user: Bugzilla user :param password: user password """ url = self.URL % {'base': self.base_url, 'cgi': self.CGI_LOGIN} payload = { self.PBUGZILLA_LOGIN: user, self.PBUGZILLA_PASSWORD: password, self.PLOGIN: 'Log in' } headers = {'Referer': self.base_url} req = self.fetch(url, payload=payload, headers=headers, method=HttpClient.POST) # Check if the authentication went OK. When this string # is found means that the authentication was successful if req.text.find("index.cgi?logout=1") < 0: cause = ("Bugzilla client could not authenticate user %s. " "Please check user and password parameters. " "URLs may also need a trailing '/'.") % user raise BackendError(cause=cause) logger.debug("Bugzilla user %s authenticated in %s", user, self.base_url)
Logout from the server.
def logout(self): """Logout from the server.""" params = { self.PLOGOUT: '1' } self.call(self.CGI_LOGIN, params) self._close_http_session() logger.debug("Bugzilla user logged out from %s", self.base_url)
Get metadata information in XML format.
def metadata(self): """Get metadata information in XML format.""" params = { self.PCTYPE: self.CTYPE_XML } response = self.call(self.CGI_BUG, params) return response
Get a summary of bugs in CSV format.
def buglist(self, from_date=DEFAULT_DATETIME):
    """Get a summary of bugs in CSV format.

    :param from_date: retrieve bugs that were updated since that date
    """
    if not self.version:
        self.version = self.__fetch_version()

    if self.version in self.OLD_STYLE_VERSIONS:
        order = 'Last+Changed'
    else:
        order = 'changeddate'

    date = from_date.strftime("%Y-%m-%d %H:%M:%S")

    params = {
        self.PCHFIELD_FROM: date,
        self.PCTYPE: self.CTYPE_CSV,
        self.PLIMIT: self.max_bugs_csv,
        self.PORDER: order
    }

    response = self.call(self.CGI_BUGLIST, params)

    return response
Get the information of a list of bugs in XML format.
def bugs(self, *bug_ids): """Get the information of a list of bugs in XML format. :param bug_ids: list of bug identifiers """ params = { self.PBUG_ID: bug_ids, self.PCTYPE: self.CTYPE_XML, self.PEXCLUDE_FIELD: 'attachmentdata' } response = self.call(self.CGI_BUG, params) return response
Get the activity of a bug in HTML format.
def bug_activity(self, bug_id): """Get the activity of a bug in HTML format. :param bug_id: bug identifier """ params = { self.PBUG_ID: bug_id } response = self.call(self.CGI_BUG_ACTIVITY, params) return response
Run an API command.
def call(self, cgi, params): """Run an API command. :param cgi: cgi command to run on the server :param params: dict with the HTTP parameters needed to run the given command """ url = self.URL % {'base': self.base_url, 'cgi': cgi} logger.debug("Bugzilla client calls command: %s params: %s", cgi, str(params)) req = self.fetch(url, payload=params) return req.text
Sanitize payload of a HTTP request by removing the login and password information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the login and password information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if BugzillaClient.PBUGZILLA_LOGIN in payload: payload.pop(BugzillaClient.PBUGZILLA_LOGIN) if BugzillaClient.PBUGZILLA_PASSWORD in payload: payload.pop(BugzillaClient.PBUGZILLA_PASSWORD) if BugzillaClient.PLOGIN in payload: payload.pop(BugzillaClient.PLOGIN) return url, headers, payload
Fetch the events from the server.
def fetch(self, category=CATEGORY_EVENT, from_date=DEFAULT_DATETIME, to_date=None, filter_classified=False): """Fetch the events from the server. This method fetches those events of a group stored on the server that were updated since the given date. Data comments and rsvps are included within each event. :param category: the category of items to fetch :param from_date: obtain events updated since this date :param to_date: obtain events updated before this date :param filter_classified: remove classified fields from the resulting items :returns: a generator of events """ if not from_date: from_date = DEFAULT_DATETIME from_date = datetime_to_utc(from_date) kwargs = {"from_date": from_date, "to_date": to_date} items = super().fetch(category, filter_classified=filter_classified, **kwargs) return items
Fetch the events
def fetch_items(self, category, **kwargs): """Fetch the events :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] to_date = kwargs['to_date'] logger.info("Fetching events of '%s' group from %s to %s", self.group, str(from_date), str(to_date) if to_date else '--') to_date_ts = datetime_to_utc(to_date).timestamp() if to_date else None nevents = 0 stop_fetching = False ev_pages = self.client.events(self.group, from_date=from_date) for evp in ev_pages: events = [event for event in self.parse_json(evp)] for event in events: event_id = event['id'] event['comments'] = self.__fetch_and_parse_comments(event_id) event['rsvps'] = self.__fetch_and_parse_rsvps(event_id) # Check events updated before 'to_date' event_ts = self.metadata_updated_on(event) if to_date_ts and event_ts >= to_date_ts: stop_fetching = True continue yield event nevents += 1 if stop_fetching: break logger.info("Fetch process completed: %s events fetched", nevents)
Fetch the events pages of a given group.
def events(self, group, from_date=DEFAULT_DATETIME):
    """Fetch the events pages of a given group."""
    date = datetime_to_utc(from_date)
    date = date.strftime("since:%Y-%m-%dT%H:%M:%S.000Z")

    resource = urijoin(group, self.REVENTS)

    # Hack required because the Meetup API does not support list
    # values with the format `?param=value1&param=value2`.
    # It only works with `?param=value1,value2`.
    # Moreover, urllib3 encodes comma characters when values
    # are given using the params dict, which does not work
    # with Meetup, either.
    fixed_params = '?' + self.PFIELDS + '=' + ','.join(self.VEVENT_FIELDS)
    fixed_params += '&' + self.PSTATUS + '=' + ','.join(self.VSTATUS)
    resource += fixed_params

    params = {
        self.PORDER: self.VUPDATED,
        self.PSCROLL: date,
        self.PPAGE: self.max_items
    }

    try:
        for page in self._fetch(resource, params):
            yield page
    except requests.exceptions.HTTPError as error:
        if error.response.status_code == 410:
            msg = "Group is no longer accessible: {}".format(error)
            raise RepositoryError(cause=msg)
        else:
            raise error
Fetch the comments of a given event.
def comments(self, group, event_id): """Fetch the comments of a given event.""" resource = urijoin(group, self.REVENTS, event_id, self.RCOMMENTS) params = { self.PPAGE: self.max_items } for page in self._fetch(resource, params): yield page
Fetch the rsvps of a given event.
def rsvps(self, group, event_id): """Fetch the rsvps of a given event.""" resource = urijoin(group, self.REVENTS, event_id, self.RRSVPS) # Same hack that in 'events' method fixed_params = '?' + self.PFIELDS + '=' + ','.join(self.VRSVP_FIELDS) fixed_params += '&' + self.PRESPONSE + '=' + ','.join(self.VRESPONSE) resource += fixed_params params = { self.PPAGE: self.max_items } for page in self._fetch(resource, params): yield page
Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if MeetupClient.PKEY in payload: payload.pop(MeetupClient.PKEY) if MeetupClient.PSIGN in payload: payload.pop(MeetupClient.PSIGN) return url, headers, payload
Fetch a resource.
def _fetch(self, resource, params):
    """Fetch a resource.

    Method to fetch and to iterate over the contents of a
    type of resource. The method returns a generator of
    pages for that resource and parameters.

    :param resource: type of the resource
    :param params: parameters to filter

    :returns: a generator of pages for the requested resource
    """
    url = urijoin(self.base_url, resource)

    params[self.PKEY] = self.api_key
    params[self.PSIGN] = 'true'

    do_fetch = True

    while do_fetch:
        logger.debug("Meetup client calls resource: %s params: %s",
                     resource, str(params))

        if not self.from_archive:
            self.sleep_for_rate_limit()

        r = self.fetch(url, payload=params)

        if not self.from_archive:
            self.update_rate_limit(r)

        yield r.text

        if r.links and 'next' in r.links:
            url = r.links['next']['url']
            params = {
                self.PKEY: self.api_key,
                self.PSIGN: 'true'
            }
        else:
            do_fetch = False
Fetch the questions
def fetch_items(self, category, **kwargs): """Fetch the questions :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = datetime_to_utc(kwargs['from_date']).timestamp() questions_groups = self.client.get_api_questions(AskbotClient.API_QUESTIONS) for questions in questions_groups: for question in questions['questions']: updated_at = int(question['last_activity_at']) if updated_at > from_date: html_question = self.__fetch_question(question) if not html_question: continue logger.debug("Fetching HTML question %s", question['id']) comments = self.__fetch_comments(question) question_obj = self.__build_question(html_question, question, comments) question.update(question_obj) yield question
Init client
def _init_client(self, from_archive=False): """Init client""" return AskbotClient(self.url, self.archive, from_archive)
Fetch an Askbot HTML question body.
def __fetch_question(self, question):
    """Fetch an Askbot HTML question body.

    The method fetches the HTML pages that contain the body
    of the question item received.

    :param question: item with the question itself

    :returns: a list of HTML page/s for the question
    """
    html_question_items = []

    npages = 1
    next_request = True

    while next_request:
        try:
            html_question = self.client.get_html_question(question['id'], npages)
            html_question_items.append(html_question)
            tpages = self.ab_parser.parse_number_of_html_pages(html_question)

            if npages == tpages:
                next_request = False

            npages = npages + 1
        except requests.exceptions.TooManyRedirects as e:
            logger.warning("%s, data not retrieved for question %s",
                           e, question['id'])
            next_request = False

    return html_question_items
Fetch all the comments of an Askbot question and answers.
def __fetch_comments(self, question):
    """Fetch all the comments of an Askbot question and answers.

    The method fetches the list of every comment existing in a
    question and its answers.

    :param question: item with the question itself

    :returns: a dict of comments indexed by question/answer id
    """
    comments = {}

    comments[question['id']] = json.loads(self.client.get_comments(question['id']))

    for object_id in question['answer_ids']:
        comments[object_id] = json.loads(self.client.get_comments(object_id))

    return comments
Build an Askbot HTML response.
def __build_question(html_question, question, comments): """Build an Askbot HTML response. The method puts together all the information regarding a question :param html_question: array of HTML raw pages :param question: question object from the API :param comments: list of comments to add :returns: a dict item with the parsed question information """ question_object = {} # Parse the user info from the soup container question_container = AskbotParser.parse_question_container(html_question[0]) # Add the info to the question object question_object.update(question_container) # Add the comments of the question (if any) if comments[int(question['id'])]: question_object['comments'] = comments[int(question['id'])] answers = [] for page in html_question: answers.extend(AskbotParser.parse_answers(page)) if len(answers) != 0: question_object['answers'] = answers for answer in question_object['answers']: if comments[int(answer['id'])]: answer['comments'] = comments[int(answer['id'])] return question_object
Retrieve a question page using the API.
def get_api_questions(self, path): """Retrieve a question page using the API. :param page: page to retrieve """ npages = 1 next_request = True path = urijoin(self.base_url, path) while next_request: try: params = { 'page': npages, 'sort': self.ORDER_API } response = self.fetch(path, payload=params) whole_page = response.text raw_questions = json.loads(whole_page) tpages = raw_questions['pages'] logger.debug("Fetching questions from '%s': page %s/%s", self.base_url, npages, tpages) if npages == tpages: next_request = False npages = npages + 1 yield raw_questions except requests.exceptions.TooManyRedirects as e: logger.warning("%s, data not retrieved for resource %s", e, path) next_request = False
Retrieve a raw HTML question and all its information.
def get_html_question(self, question_id, page=1): """Retrieve a raw HTML question and all it's information. :param question_id: question identifier :param page: page to retrieve """ path = urijoin(self.base_url, self.HTML_QUESTION, question_id) params = { 'page': page, 'sort': self.ORDER_HTML } response = self.fetch(path, payload=params) return response.text
Retrieve a list of comments by a given id.
def get_comments(self, post_id):
    """Retrieve a list of comments by a given id.

    :param post_id: post identifier
    """
    path = urijoin(self.base_url, self.COMMENTS if self._use_new_urls else self.COMMENTS_OLD)

    params = {
        'post_id': post_id,
        'post_type': 'answer',
        'avatar_size': 0
    }
    headers = {'X-Requested-With': 'XMLHttpRequest'}

    try:
        response = self.fetch(path, payload=params, headers=headers)
        raw = response.text
    except requests.exceptions.HTTPError as ex:
        if ex.response.status_code == 404:
            logger.debug("Comments URL did not work. Using old URL schema.")
            self._use_new_urls = False
            path = urijoin(self.base_url, self.COMMENTS_OLD)
            response = self.fetch(path, payload=params, headers=headers)
            raw = response.text
        elif ex.response.status_code == 500:
            logger.warning("Comments not retrieved due to %s", ex)
            raw = '[]'
        else:
            raise ex

    return raw
Parse the question info container of a given HTML question.
def parse_question_container(html_question):
    """Parse the question info container of a given HTML question.

    The method parses the information available in the question
    information container. The container can have up to 2 elements:
    the first one contains the information related to the user who
    generated the question and the date (if any). The second one
    contains the date of the update, and the user who updated it
    (if not the same who generated the question).

    :param html_question: raw HTML question element

    :returns: an object with the parsed information
    """
    container_info = {}
    bs_question = bs4.BeautifulSoup(html_question, "html.parser")
    question = AskbotParser._find_question_container(bs_question)
    container = question.select("div.post-update-info")
    created = container[0]
    container_info['author'] = AskbotParser.parse_user_info(created)
    try:
        container[1]
    except IndexError:
        pass
    else:
        updated = container[1]
        if AskbotParser.parse_user_info(updated):
            container_info['updated_by'] = AskbotParser.parse_user_info(updated)

    return container_info
Parse the answers of a given HTML question.
def parse_answers(html_question): """Parse the answers of a given HTML question. The method parses the answers related with a given HTML question, as well as all the comments related to the answer. :param html_question: raw HTML question element :returns: a list with the answers """ def parse_answer_container(update_info): """Parse the answer info container of a given HTML question. The method parses the information available in the answer information container. The container can have up to 2 elements: the first one contains the information related with the user who generated the question and the date (if any). The second one contains the date of the updated, and the user who updated it (if not the same who generated the question). :param update_info: beautiful soup update_info container element :returns: an object with the parsed information """ container_info = {} created = update_info[0] answered_at = created.abbr.attrs["title"] # Convert date to UNIX timestamp container_info['added_at'] = str(str_to_datetime(answered_at).timestamp()) container_info['answered_by'] = AskbotParser.parse_user_info(created) try: update_info[1] except IndexError: pass else: updated = update_info[1] updated_at = updated.abbr.attrs["title"] # Convert date to UNIX timestamp container_info['updated_at'] = str(str_to_datetime(updated_at).timestamp()) if AskbotParser.parse_user_info(updated): container_info['updated_by'] = AskbotParser.parse_user_info(updated) return container_info answer_list = [] # Select all the answers bs_question = bs4.BeautifulSoup(html_question, "html.parser") bs_answers = bs_question.select("div.answer") for bs_answer in bs_answers: answer_id = bs_answer.attrs["data-post-id"] votes_element = bs_answer.select("div.vote-number")[0].text accepted_answer = bs_answer.select("div.answer-img-accept")[0].get('title').endswith("correct") # Select the body of the answer body = bs_answer.select("div.post-body") # Get the user information container and parse it update_info = body[0].select("div.post-update-info") answer_container = parse_answer_container(update_info) # Remove the update-info-container div to be able to get the body body[0].div.extract().select("div.post-update-info-container") # Override the body with a clean one body = body[0].get_text(strip=True) # Generate the answer object answer = {'id': answer_id, 'score': votes_element, 'summary': body, 'accepted': accepted_answer } # Update the object with the information in the answer container answer.update(answer_container) answer_list.append(answer) return answer_list
Parse number of answer pages to paginate over them.
def parse_number_of_html_pages(html_question): """Parse number of answer pages to paginate over them. :param html_question: raw HTML question element :returns: an integer with the number of pages """ bs_question = bs4.BeautifulSoup(html_question, "html.parser") try: bs_question.select('div.paginator')[0] except IndexError: return 1 else: return int(bs_question.select('div.paginator')[0].attrs['data-num-pages'])
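A standalone check of the paginator lookup with a fabricated HTML snippet (requires beautifulsoup4):

import bs4

html = '<div class="paginator" data-num-pages="3"></div>'
bs = bs4.BeautifulSoup(html, 'html.parser')

paginator = bs.select('div.paginator')
print(int(paginator[0].attrs['data-num-pages']) if paginator else 1)  # 3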
Parse the user information of a given HTML container.
def parse_user_info(update_info): """Parse the user information of a given HTML container. The method parses all the available user information in the container. If the class "user-info" exists, the method will get all the available information in the container. If not, if a class "tip" exists, it will be a wiki post with no user associated. Else, it can be an empty container. :param update_info: beautiful soup answer container element :returns: an object with the parsed information """ user_info = {} if update_info.select("div.user-info"): # Get all the <a> elements in the container. First <a> contains the user # information, second one (if exists), the website of the user. elements = update_info.select("div.user-info")[0].find_all("a") href = elements[0].attrs["href"] user_info['id'] = re.search(r'\d+', href).group(0) user_info['username'] = elements[0].text user_info['reputation'] = update_info.select('span.reputation-score')[0].text user_info['badges'] = update_info.select("span.badges")[0].attrs["title"] try: elements[1] except IndexError: pass else: user_info['website'] = elements[1].attrs["href"] if update_info.select("img.flag"): flag = update_info.select("img.flag")[0].attrs["alt"] user_info['country'] = re.sub("flag of ", "", flag) return user_info
Fetch the reviews
def fetch_items(self, category, **kwargs): """Fetch the reviews :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] if self.client.version[0] == 2 and self.client.version[1] == 8: fetcher = self._fetch_gerrit28(from_date) else: fetcher = self._fetch_gerrit(from_date) for review in fetcher: yield review
Parse a Gerrit reviews list.
def parse_reviews(raw_data): """Parse a Gerrit reviews list.""" # Join isolated reviews in JSON in array for parsing items_raw = "[" + raw_data.replace("\n", ",") + "]" items_raw = items_raw.replace(",]", "]") items = json.loads(items_raw) reviews = [] for item in items: if 'project' in item.keys(): reviews.append(item) return reviews
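Gerrit's ssh queries return newline-delimited JSON, one review per line plus a trailing stats record; the joining trick can be exercised with fabricated data:

import json

raw_data = '{"project": "demo", "number": "1"}\n{"type": "stats", "rowCount": 1}\n'

items_raw = "[" + raw_data.replace("\n", ",") + "]"
items_raw = items_raw.replace(",]", "]")

reviews = [item for item in json.loads(items_raw) if 'project' in item]
print(len(reviews))  # 1 -- the trailing stats record is filtered out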
Specific fetch for the Gerrit 2.8 version.
def _fetch_gerrit28(self, from_date=DEFAULT_DATETIME): """ Specific fetch for gerrit 2.8 version. Get open and closed reviews in different queries. Take the newer review from both lists and iterate. """ # Convert date to Unix time from_ut = datetime_to_utc(from_date) from_ut = from_ut.timestamp() filter_open = "status:open" filter_closed = "status:closed" last_item_open = self.client.next_retrieve_group_item() last_item_closed = self.client.next_retrieve_group_item() reviews_open = self._get_reviews(last_item_open, filter_open) reviews_closed = self._get_reviews(last_item_closed, filter_closed) last_nreviews_open = len(reviews_open) last_nreviews_closed = len(reviews_closed) while reviews_open or reviews_closed: if reviews_open and reviews_closed: if reviews_open[0]['lastUpdated'] >= reviews_closed[0]['lastUpdated']: review_open = reviews_open.pop(0) review = review_open else: review_closed = reviews_closed.pop(0) review = review_closed elif reviews_closed: review_closed = reviews_closed.pop(0) review = review_closed else: review_open = reviews_open.pop(0) review = review_open updated = review['lastUpdated'] if updated <= from_ut: logger.debug("No more updates for %s" % (self.hostname)) break else: yield review if not reviews_open and last_nreviews_open >= self.max_reviews: last_item_open = self.client.next_retrieve_group_item(last_item_open, review_open) reviews_open = self._get_reviews(last_item_open, filter_open) last_nreviews_open = len(reviews_open) if not reviews_closed and last_nreviews_closed >= self.max_reviews: last_item_closed = self.client.next_retrieve_group_item(last_item_closed, review_closed) reviews_closed = self._get_reviews(last_item_closed, filter_closed) last_nreviews_closed = len(reviews_closed)
Return the Gerrit server version.
def version(self): """Return the Gerrit server version.""" if self._version: return self._version cmd = self.gerrit_cmd + " %s " % (GerritClient.CMD_VERSION) logger.debug("Getting version: %s" % (cmd)) raw_data = self.__execute(cmd) raw_data = str(raw_data, "UTF-8") logger.debug("Gerrit version: %s" % (raw_data)) # output: gerrit version 2.10-rc1-988-g333a9dd m = re.match(GerritClient.VERSION_REGEX, raw_data) if not m: cause = "Invalid gerrit version %s" % raw_data raise BackendError(cause=cause) try: mayor = int(m.group(1)) minor = int(m.group(2)) except Exception: cause = "Gerrit client could not determine the server version." raise BackendError(cause=cause) self._version = [mayor, minor] return self._version
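The version parsing can be sketched on its own; the regular expression below is an assumption standing in for GerritClient.VERSION_REGEX, not the client's actual constant:

import re

# Hypothetical stand-in for GerritClient.VERSION_REGEX
VERSION_REGEX = re.compile(r'gerrit version (\d+)\.(\d+).*')

raw = "gerrit version 2.10-rc1-988-g333a9dd"
m = VERSION_REGEX.match(raw)
print([int(m.group(1)), int(m.group(2))])  # [2, 10]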
Get the reviews starting from last_item.
def reviews(self, last_item, filter_=None): """Get the reviews starting from last_item.""" cmd = self._get_gerrit_cmd(last_item, filter_) logger.debug("Getting reviews with command: %s", cmd) raw_data = self.__execute(cmd) raw_data = str(raw_data, "UTF-8") return raw_data
Return the item to start from in next reviews group.
def next_retrieve_group_item(self, last_item=None, entry=None): """Return the item to start from in next reviews group.""" next_item = None gerrit_version = self.version if gerrit_version[0] == 2 and gerrit_version[1] > 9: if last_item is None: next_item = 0 else: next_item = last_item elif gerrit_version[0] == 2 and gerrit_version[1] == 9: # https://groups.google.com/forum/#!topic/repo-discuss/yQgRR5hlS3E cause = "Gerrit 2.9.0 does not support pagination" raise BackendError(cause=cause) else: if entry is not None: next_item = entry['sortKey'] return next_item
Execute gerrit command
def __execute(self, cmd): """Execute gerrit command""" if self.from_archive: response = self.__execute_from_archive(cmd) else: response = self.__execute_from_remote(cmd) return response
Execute gerrit command against the archive
def __execute_from_archive(self, cmd): """Execute gerrit command against the archive""" cmd = self.sanitize_for_archive(cmd) response = self.archive.retrieve(cmd, None, None) if isinstance(response, RuntimeError): raise response return response
Execute gerrit command with retry if it fails
def __execute_from_remote(self, cmd): """Execute gerrit command with retry if it fails""" result = None # data result from the cmd execution retries = 0 while retries < self.MAX_RETRIES: try: result = subprocess.check_output(cmd, shell=True) break except subprocess.CalledProcessError as ex: logger.error("gerrit cmd %s failed: %s", cmd, ex) time.sleep(self.RETRY_WAIT * retries) retries += 1 if result is None: result = RuntimeError(cmd + " failed " + str(self.MAX_RETRIES) + " times. Giving up!") if self.archive: cmd = self.sanitize_for_archive(cmd) self.archive.store(cmd, None, None, result) if isinstance(result, RuntimeError): raise result return result
Returns the Gerrit argument parser.
def setup_cmd_parser(cls): """Returns the Gerrit argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, archive=True) # Gerrit options group = parser.parser.add_argument_group('Gerrit arguments') group.add_argument('--user', dest='user', help="Gerrit ssh user") group.add_argument('--max-reviews', dest='max_reviews', type=int, default=MAX_REVIEWS, help="Max number of reviews per ssh query.") group.add_argument('--blacklist-reviews', dest='blacklist_reviews', nargs='*', help="Wrong reviews that must not be retrieved.") group.add_argument('--disable-host-key-check', dest='disable_host_key_check', action='store_true', help="Don't check remote host identity") group.add_argument('--ssh-port', dest='port', default=PORT, type=int, help="Set SSH port of the Gerrit server") # Required arguments parser.parser.add_argument('hostname', help="Hostname of the Gerrit server") return parser
Fetch the issues
def fetch_items(self, category, **kwargs): """Fetch the issues :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Fetching issues of '%s' distribution from %s", self.distribution, str(from_date)) nissues = 0 for issue in self._fetch_issues(from_date): yield issue nissues += 1 logger.info("Fetch process completed: %s issues fetched", nissues)
Init client
def _init_client(self, from_archive=False): """Init client""" return LaunchpadClient(self.distribution, self.package, self.items_per_page, self.sleep_time, self.archive, from_archive)
Fetch the issues from a project (distribution/package)
def _fetch_issues(self, from_date): """Fetch the issues from a project (distribution/package)""" issues_groups = self.client.issues(start=from_date) for raw_issues in issues_groups: issues = json.loads(raw_issues)['entries'] for issue in issues: issue = self.__init_extra_issue_fields(issue) issue_id = self.__extract_issue_id(issue['bug_link']) for field in TARGET_ISSUE_FIELDS: if not issue[field]: continue if field == 'bug_link': issue['bug_data'] = self.__fetch_issue_data(issue_id) issue['activity_data'] = [activity for activity in self.__fetch_issue_activities(issue_id)] issue['messages_data'] = [message for message in self.__fetch_issue_messages(issue_id)] issue['attachments_data'] = [attachment for attachment in self.__fetch_issue_attachments(issue_id)] elif field == 'assignee_link': issue['assignee_data'] = self.__fetch_user_data('{ASSIGNEE}', issue[field]) elif field == 'owner_link': issue['owner_data'] = self.__fetch_user_data('{OWNER}', issue[field]) yield issue
Get data associated to an issue
def __fetch_issue_data(self, issue_id): """Get data associated to an issue""" raw_issue = self.client.issue(issue_id) issue = json.loads(raw_issue) return issue
Get attachments of an issue
def __fetch_issue_attachments(self, issue_id): """Get attachments of an issue""" for attachments_raw in self.client.issue_collection(issue_id, "attachments"): attachments = json.loads(attachments_raw) for attachment in attachments['entries']: yield attachment
Get messages of an issue
def __fetch_issue_messages(self, issue_id): """Get messages of an issue""" for messages_raw in self.client.issue_collection(issue_id, "messages"): messages = json.loads(messages_raw) for msg in messages['entries']: msg['owner_data'] = self.__fetch_user_data('{OWNER}', msg['owner_link']) yield msg
Get activities on an issue
def __fetch_issue_activities(self, issue_id): """Get activities on an issue""" for activities_raw in self.client.issue_collection(issue_id, "activity"): activities = json.loads(activities_raw) for act in activities['entries']: act['person_data'] = self.__fetch_user_data('{PERSON}', act['person_link']) yield act
Get data associated to a user
def __fetch_user_data(self, tag_type, user_link):
    """Get data associated to a user"""
    user_name = self.client.user_name(user_link)

    user = {}
    if not user_name:
        return user

    user_raw = self.client.user(user_name)
    user = json.loads(user_raw)

    return user
Get the issues from pagination
def issues(self, start=None): """Get the issues from pagination""" payload = self.__build_payload(size=self.items_per_page, operation=True, startdate=start) path = self.__get_url_project() return self.__fetch_items(path=path, payload=payload)
Get the user data by URL
def user(self, user_name): """Get the user data by URL""" user = None if user_name in self._users: return self._users[user_name] url_user = self.__get_url("~" + user_name) logger.info("Getting info for %s" % (url_user)) try: raw_user = self.__send_request(url_user) user = raw_user except requests.exceptions.HTTPError as e: if e.response.status_code in [404, 410]: logger.warning("Data is not available - %s", url_user) user = '{}' else: raise e self._users[user_name] = user return user
Get the issue data by its ID
def issue(self, issue_id): """Get the issue data by its ID""" path = urijoin("bugs", str(issue_id)) url_issue = self.__get_url(path) raw_text = self.__send_request(url_issue) return raw_text
Get a collection list of a given issue
def issue_collection(self, issue_id, collection_name): """Get a collection list of a given issue""" path = urijoin("bugs", str(issue_id), collection_name) url_collection = self.__get_url(path) payload = {'ws.size': self.items_per_page, 'ws.start': 0, 'order_by': 'date_last_updated'} raw_items = self.__fetch_items(path=url_collection, payload=payload) return raw_items
Build URL project
def __get_url_project(self): """Build URL project""" if self.package: url = self.__get_url_distribution_package() else: url = self.__get_url_distribution() return url
Send request
def __send_request(self, url, params=None): """Send request""" r = self.fetch(url, payload=params) return r.text
Build payload
def __build_payload(self, size, operation=False, startdate=None): """Build payload""" payload = { 'ws.size': size, 'order_by': 'date_last_updated', 'omit_duplicates': 'false', 'status': ["New", "Incomplete", "Opinion", "Invalid", "Won't Fix", "Expired", "Confirmed", "Triaged", "In Progress", "Fix Committed", "Fix Released", "Incomplete (with response)", "Incomplete (without response)"] } if operation: payload['ws.op'] = 'searchTasks' if startdate: startdate = startdate.isoformat() payload['modified_since'] = startdate return payload
Return the items from Launchpad API using pagination
def __fetch_items(self, path, payload): """Return the items from Launchpad API using pagination""" page = 0 # current page url_next = path fetch_data = True while fetch_data: logger.debug("Fetching page: %i", page) try: raw_content = self.__send_request(url_next, payload) content = json.loads(raw_content) except requests.exceptions.HTTPError as e: if e.response.status_code in [410]: logger.warning("Data is not available - %s", url_next) raw_content = '{"total_size": 0, "start": 0, "entries": []}' content = json.loads(raw_content) else: raise e if 'next_collection_link' in content: url_next = content['next_collection_link'] payload = None else: fetch_data = False yield raw_content page += 1
Fetch the messages
def fetch_items(self, category, **kwargs): """Fetch the messages :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Looking for messages from '%s' since %s", self.uri, str(from_date)) mailing_list = GroupsioClient(self.group_name, self.dirpath, self.api_token, self.verify) mailing_list.fetch() messages = self._fetch_and_parse_messages(mailing_list, from_date) for message in messages: yield message logger.info("Fetch process completed")
Fetch the mbox files from the remote archiver.
def fetch(self):
    """Fetch the mbox files from the remote archiver.

    Stores the archives in the path given during the initialization
    of this object. Those archives whose extension is not valid
    will be ignored.

    Groups.io archives are returned as a .zip file, which contains
    one file in mbox format.

    :returns: a list of tuples, storing the links and paths of the
        fetched archives
    """
    logger.info("Downloading mboxes from '%s'", self.uri)
    logger.debug("Storing mboxes in '%s'", self.dirpath)

    if not os.path.exists(self.dirpath):
        os.makedirs(self.dirpath)

    group_id = self.__find_group_id()

    url = urijoin(GROUPSIO_API_URL, self.DOWNLOAD_ARCHIVES)
    payload = {'group_id': group_id}
    filepath = os.path.join(self.dirpath, MBOX_FILE)

    success = self._download_archive(url, payload, filepath)

    return success
Fetch the paginated Groups.io subscriptions for a given token
def subscriptions(self, per_page=PER_PAGE): """Fetch the groupsio paginated subscriptions for a given token :param per_page: number of subscriptions per page :returns: an iterator of subscriptions """ url = urijoin(GROUPSIO_API_URL, self.GET_SUBSCRIPTIONS) logger.debug("Get groupsio paginated subscriptions from " + url) keep_fetching = True payload = { "limit": per_page } while keep_fetching: r = self.__fetch(url, payload) response_raw = r.json() subscriptions = response_raw['data'] yield subscriptions total_subscriptions = response_raw['total_count'] logger.debug("Subscriptions: %i/%i" % (response_raw['end_item'], total_subscriptions)) payload['page_token'] = response_raw['next_page_token'] keep_fetching = response_raw['has_more']
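A minimal sketch of the same token-based pagination, decoupled from the client; fetch_page is a hypothetical callable standing in for the authenticated request, and the fake pages below only exercise the loop:

def iter_pages(fetch_page, limit=100):
    """Yield the 'data' list of every page, following 'next_page_token'."""
    payload = {'limit': limit}
    while True:
        response = fetch_page(payload)       # returns the decoded JSON page
        yield response['data']
        if not response.get('has_more'):
            break
        payload['page_token'] = response['next_page_token']

# Fake two-page response sequence, to run the generator without the API
_pages = [
    {'data': ['sub-1', 'sub-2'], 'has_more': True, 'next_page_token': 7},
    {'data': ['sub-3'], 'has_more': False},
]

def fake_fetch(payload):
    return _pages.pop(0)

print([sub for page in iter_pages(fake_fetch) for sub in page])  # ['sub-1', 'sub-2', 'sub-3']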
Find the id of a group given its name by iterating over the list of subscriptions
def __find_group_id(self): """Find the id of a group given its name by iterating on the list of subscriptions""" group_subscriptions = self.subscriptions(self.auth) for subscriptions in group_subscriptions: for sub in subscriptions: if sub['group_name'] == self.group_name: return sub['group_id'] msg = "Group id not found for group name %s" % self.group_name raise BackendError(cause=msg)
Fetch requests from groupsio API
def __fetch(self, url, payload): """Fetch requests from groupsio API""" r = requests.get(url, params=payload, auth=self.auth, verify=self.verify) try: r.raise_for_status() except requests.exceptions.HTTPError as e: raise e return r
Initialize mailing lists directory path
def _pre_init(self): """Initialize mailing lists directory path""" if not self.parsed_args.mboxes_path: base_path = os.path.expanduser('~/.perceval/mailinglists/') dirpath = os.path.join(base_path, GROUPSIO_URL, 'g', self.parsed_args.group_name) else: dirpath = self.parsed_args.mboxes_path setattr(self.parsed_args, 'dirpath', dirpath)
Returns the Groupsio argument parser.
def setup_cmd_parser(cls): """Returns the Groupsio argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True) # Backend token is required action = parser.parser._option_string_actions['--api-token'] action.required = True # Optional arguments group = parser.parser.add_argument_group('Groupsio arguments') group.add_argument('--mboxes-path', dest='mboxes_path', help="Path where mbox files will be stored") group.add_argument('--no-verify', dest='verify', action='store_false', help="Value 'True' enable SSL verification") # Required arguments parser.parser.add_argument('group_name', help="Name of the group on Groups.io") return parser
Generate a UUID based on the given parameters.
def uuid(*args):
    """Generate a UUID based on the given parameters.

    The UUID will be the SHA1 of the concatenation of the values
    from the list. The separator between these values is ':'.
    Each value must be a non-empty string, otherwise, the function
    will raise an exception.

    :param *args: list of arguments used to generate the UUID

    :returns: a universal unique identifier

    :raises ValueError: when any of the values is not a string,
        is empty or `None`.
    """
    def check_value(v):
        if not isinstance(v, str):
            raise ValueError("%s value is not a string instance" % str(v))
        elif not v:
            raise ValueError("value cannot be None or empty")
        else:
            return v

    s = ':'.join(map(check_value, args))

    sha1 = hashlib.sha1(s.encode('utf-8', errors='surrogateescape'))
    uuid_sha1 = sha1.hexdigest()

    return uuid_sha1
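A standalone check of what the helper produces for two sample values:

import hashlib

s = ':'.join(['https://gitlab.com/example/repo', '42'])
print(hashlib.sha1(s.encode('utf-8', errors='surrogateescape')).hexdigest())
# prints the 40-character SHA1 hex digest of 'https://gitlab.com/example/repo:42'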
Fetch items using the given backend.
def fetch(backend_class, backend_args, category, filter_classified=False, manager=None): """Fetch items using the given backend. Generator to get items using the given backend class. When an archive manager is given, this function will store the fetched items in an `Archive`. If an exception is raised, this archive will be removed to avoid corrupted archives. The parameters needed to initialize the `backend` class and get the items are given using `backend_args` dict parameter. :param backend_class: backend class to fetch items :param backend_args: dict of arguments needed to fetch the items :param category: category of the items to retrieve. If None, it will use the default backend category :param filter_classified: remove classified fields from the resulting items :param manager: archive manager needed to store the items :returns: a generator of items """ init_args = find_signature_parameters(backend_class.__init__, backend_args) archive = manager.create_archive() if manager else None init_args['archive'] = archive backend = backend_class(**init_args) if category: backend_args['category'] = category if filter_classified: backend_args['filter_classified'] = filter_classified fetch_args = find_signature_parameters(backend.fetch, backend_args) items = backend.fetch(**fetch_args) try: for item in items: yield item except Exception as e: if manager: archive_path = archive.archive_path manager.remove_archive(archive_path) raise e
Fetch items from an archive manager.
def fetch_from_archive(backend_class, backend_args, manager, category, archived_after):
    """Fetch items from an archive manager.

    Generator to get the items of a category (previously fetched
    by the given backend class) from an archive manager. Only those
    items archived after the given date will be returned.

    The parameters needed to initialize `backend` and get the
    items are given using `backend_args` dict parameter.

    :param backend_class: backend class to retrieve items
    :param backend_args: dict of arguments needed to retrieve the items
    :param manager: archive manager where the items will be retrieved
    :param category: category of the items to retrieve
    :param archived_after: return items archived after this date

    :returns: a generator of archived items
    """
    init_args = find_signature_parameters(backend_class.__init__,
                                          backend_args)
    backend = backend_class(**init_args)

    filepaths = manager.search(backend.origin,
                               backend.__class__.__name__,
                               category,
                               archived_after)

    for filepath in filepaths:
        backend.archive = Archive(filepath)
        items = backend.fetch_from_archive()

        try:
            for item in items:
                yield item
        except ArchiveError as e:
            logger.warning("Ignoring %s archive due to: %s", filepath, str(e))
Find available backends.
def find_backends(top_package): """Find available backends. Look for the Perceval backends and commands under `top_package` and its sub-packages. When `top_package` defines a namespace, backends under that same namespace will be found too. :param top_package: package storing backends :returns: a tuple with two dicts: one with `Backend` classes and one with `BackendCommand` classes """ candidates = pkgutil.walk_packages(top_package.__path__, prefix=top_package.__name__ + '.') modules = [name for _, name, is_pkg in candidates if not is_pkg] return _import_backends(modules)
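The module discovery step can be tried in isolation with any package; the stdlib encodings package is used here purely as a stand-in for a backends package:

import pkgutil
import encodings  # any package with sub-modules works as an illustration

candidates = pkgutil.walk_packages(encodings.__path__,
                                   prefix=encodings.__name__ + '.')
modules = [name for _, name, is_pkg in candidates if not is_pkg]
print(modules[:3])  # e.g. ['encodings.aliases', 'encodings.ascii', ...]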
Fetch items from the repository.
def fetch(self, category, filter_classified=False, **kwargs):
    """Fetch items from the repository.

    The method retrieves items from a repository. To remove
    classified fields from the resulting items, set the parameter
    `filter_classified`.

    Take into account that this parameter is incompatible with
    archiving items. Raw client data are archived before any other
    process. Therefore, classified data are stored within the
    archive. To prevent possible data leaks or security issues
    when users do not need these fields, archiving and filtering
    are not compatible.

    :param category: the category of the items fetched
    :param filter_classified: remove classified fields from the
        resulting items
    :param kwargs: a list of other parameters (e.g., from_date,
        offset, etc. specific for each backend)

    :returns: a generator of items

    :raises BackendError: either when the category is not valid or
        'filter_classified' and 'archive' are active at the same time.
    """
    if category not in self.categories:
        cause = "%s category not valid for %s" % (category, self.__class__.__name__)
        raise BackendError(cause=cause)

    if filter_classified and self.archive:
        cause = "classified fields filtering is not compatible with archiving items"
        raise BackendError(cause=cause)

    if self.archive:
        self.archive.init_metadata(self.origin, self.__class__.__name__,
                                   self.version, category, kwargs)

    self.client = self._init_client()

    for item in self.fetch_items(category, **kwargs):
        if filter_classified:
            item = self.filter_classified_data(item)
        yield self.metadata(item, filter_classified=filter_classified)
Fetch the questions from an archive.
def fetch_from_archive(self): """Fetch the questions from an archive. It returns the items stored within an archive. If this method is called but no archive was provided, the method will raise a `ArchiveError` exception. :returns: a generator of items :raises ArchiveError: raised when an error occurs accessing an archive """ if not self.archive: raise ArchiveError(cause="archive instance was not provided") self.client = self._init_client(from_archive=True) for item in self.fetch_items(self.archive.category, **self.archive.backend_params): yield self.metadata(item)
Remove classified or confidential data from an item.
def filter_classified_data(self, item): """Remove classified or confidential data from an item. It removes those fields that contain data considered as classified. Classified fields are defined in `CLASSIFIED_FIELDS` class attribute. :param item: fields will be removed from this item :returns: the same item but with confidential data filtered """ item_uuid = uuid(self.origin, self.metadata_id(item)) logger.debug("Filtering classified data for item %s", item_uuid) for cf in self.CLASSIFIED_FIELDS: try: _remove_key_from_nested_dict(item, cf) except KeyError: logger.debug("Classified field '%s' not found for item %s; field ignored", '.'.join(cf), item_uuid) logger.debug("Classified data filtered for item %s", item_uuid) return item
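A minimal sketch of what removing one classified field involves; remove_nested_key is a hypothetical helper that mimics what _remove_key_from_nested_dict has to do, not the library function itself:

def remove_nested_key(nested_dict, key_path):
    """Delete the key addressed by the tuple `key_path`, e.g. ('owner', 'email')."""
    target = nested_dict
    for key in key_path[:-1]:
        target = target[key]
    del target[key_path[-1]]

item = {'id': 7, 'owner': {'name': 'jsmith', 'email': 'jsmith@example.com'}}
remove_nested_key(item, ('owner', 'email'))
print(item)  # {'id': 7, 'owner': {'name': 'jsmith'}}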