Fetch the issues
def fetch_items(self, category, **kwargs): """Fetch the issues :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Fetching issues of '%s' from %s", self.url, str(from_date)) nissues = 0 for issue_id in self.__fetch_issues_ids(from_date): issue = self.__fetch_and_parse_issue(issue_id) for key in USER_FIELDS: if key not in issue: continue user = self.__get_or_fetch_user(issue[key]['id']) issue[key + '_data'] = user for journal in issue['journals']: if 'user' not in journal: continue user = self.__get_or_fetch_user(journal['user']['id']) journal['user_data'] = user yield issue nissues += 1 logger.info("Fetch process completed: %s issues fetched", nissues)
Parse a Redmine issues JSON stream.
def parse_issues(raw_json): """Parse a Redmine issues JSON stream. The method parses a JSON stream and returns a list iterator. Each item is a dictionary that contains the issue parsed data. :param raw_json: JSON string to parse :returns: a generator of parsed issues """ results = json.loads(raw_json) issues = results['issues'] for issue in issues: yield issue
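A minimal sketch of the JSON shape this parser expects, using a made-up Redmine payload (the issue ids and subjects are illustrative, not from a real server):

```python
import json

# Illustrative Redmine 'issues' payload; only the key the parser reads matters
raw_json = json.dumps({
    "issues": [
        {"id": 1, "subject": "Crash on startup"},
        {"id": 2, "subject": "Typo in docs"}
    ],
    "total_count": 2, "offset": 0, "limit": 25
})

# Same extraction parse_issues performs, consumed the way its generator would be
for issue in json.loads(raw_json)['issues']:
    print(issue['id'], issue['subject'])
```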
Init client
def _init_client(self, from_archive=False): """Init client""" return RedmineClient(self.url, self.api_token, self.archive, from_archive)
Get the information of a list of issues.
def issues(self, from_date=DEFAULT_DATETIME, offset=None, max_issues=MAX_ISSUES): """Get the information of a list of issues. :param from_date: retrieve issues that were updated from that date; dates are converted to UTC :param offset: starting position for the search :param max_issues: maximum number of issues to return per query """ resource = self.RISSUES + self.CJSON ts = datetime_to_utc(from_date) ts = ts.strftime("%Y-%m-%dT%H:%M:%SZ") # By default, Redmine returns open issues only. # Parameter 'status_id' is set to get all the statuses. params = { self.PSTATUS_ID: '*', self.PSORT: self.PUPDATED_ON, self.PUPDATED_ON: '>=' + ts, self.PLIMIT: max_issues } if offset is not None: params[self.POFFSET] = offset response = self._call(resource, params) return response
Get the information of the given issue.
def issue(self, issue_id): """Get the information of the given issue. :param issue_id: issue identifier """ resource = urijoin(self.RISSUES, str(issue_id) + self.CJSON) params = { self.PINCLUDE: ','.join([self.CATTACHMENTS, self.CCHANGESETS, self.CCHILDREN, self.CJOURNALS, self.CRELATIONS, self.CWATCHERS]) } response = self._call(resource, params) return response
Get the information of the given user.
def user(self, user_id): """Get the information of the given user. :param user_id: user identifier """ resource = urijoin(self.RUSERS, str(user_id) + self.CJSON) params = {} response = self._call(resource, params) return response
Sanitize the payload of an HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if RedmineClient.PKEY in payload: payload.pop(RedmineClient.PKEY) return url, headers, payload
Call to get a resource.
def _call(self, resource, params): """Call to get a resource. :param method: resource to get :param params: dict with the HTTP parameters needed to get the given resource """ url = self.URL % {'base': self.base_url, 'resource': resource} if self.api_token: params[self.PKEY] = self.api_token logger.debug("Redmine client requests: %s params: %s", resource, str(params)) r = self.fetch(url, payload=params, verify=False) return r.text
Fetch the messages
def fetch_items(self, category, **kwargs): """Fetch the messages :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Looking for messages from '%s' since %s", self.url, str(from_date)) mailing_list = HyperKittyList(self.url, self.dirpath) mailing_list.fetch(from_date=from_date) messages = self._fetch_and_parse_messages(mailing_list, from_date) for message in messages: yield message logger.info("Fetch process completed")
Fetch the mbox files from the remote archiver.
def fetch(self, from_date=DEFAULT_DATETIME): """Fetch the mbox files from the remote archiver. This method stores the archives in the path given during the initialization of this object. HyperKitty archives are accessed month by month and stored following the schema year-month. Archives are fetched from the given month till the current month. :param from_date: fetch archives that store messages equal or after the given date; only year and month values are compared :returns: a list of tuples, storing the links and paths of the fetched archives """ logger.info("Downloading mboxes from '%s' to since %s", self.client.base_url, str(from_date)) logger.debug("Storing mboxes in '%s'", self.dirpath) self.client.fetch(self.client.base_url) from_date = datetime_to_utc(from_date) to_end = datetime_utcnow() to_end += dateutil.relativedelta.relativedelta(months=1) months = months_range(from_date, to_end) fetched = [] if not os.path.exists(self.dirpath): os.makedirs(self.dirpath) tmbox = 0 for dts in months: tmbox += 1 start, end = dts[0], dts[1] filename = start.strftime("%Y-%m.mbox.gz") filepath = os.path.join(self.dirpath, filename) url = urijoin(self.client.base_url, 'export', filename) params = { 'start': start.strftime("%Y-%m-%d"), 'end': end.strftime("%Y-%m-%d") } success = self._download_archive(url, params, filepath) if success: fetched.append((url, filepath)) logger.info("%s/%s MBoxes downloaded", len(fetched), tmbox) return fetched
Fetch data from a Docker Hub repository.
def fetch(self, category=CATEGORY_DOCKERHUB_DATA): """Fetch data from a Docker Hub repository. The method retrieves, from a repository stored in Docker Hub, its data which includes number of pulls, stars, description, among other data. :param category: the category of items to fetch :returns: a generator of data """ kwargs = {} items = super().fetch(category, **kwargs) return items
Fetch the Docker Hub items
def fetch_items(self, category, **kwargs): """Fetch the Dockher Hub items :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ logger.info("Fetching data from '%s' repository of '%s' owner", self.repository, self.owner) raw_data = self.client.repository(self.owner, self.repository) fetched_on = datetime_utcnow().timestamp() data = self.parse_json(raw_data) data['fetched_on'] = fetched_on yield data logger.info("Fetch process completed")
Init client
def _init_client(self, from_archive=False): """Init client""" return DockerHubClient(archive=self.archive, from_archive=from_archive)
Fetch information about a repository.
def repository(self, owner, repository): """Fetch information about a repository.""" url = urijoin(self.base_url, self.RREPOSITORY, owner, repository) logger.debug("DockerHub client requests: %s", url) response = self.fetch(url) return response.text
Add extra information for custom fields.
def map_custom_field(custom_fields, fields): """Add extra information for custom fields. :param custom_fields: set of custom fields with the extra information :param fields: fields of the issue where to add the extra information :returns: an set of items with the extra information mapped """ def build_cf(cf, v): return {'id': cf['id'], 'name': cf['name'], 'value': v} return { k: build_cf(custom_fields[k], v) for k, v in fields.items() if k in custom_fields }
Filter custom fields from a given set of fields.
def filter_custom_fields(fields): """Filter custom fields from a given set of fields. :param fields: set of fields :returns: an object with the filtered custom fields """ custom_fields = {} sorted_fields = [field for field in fields if field['custom'] is True] for custom_field in sorted_fields: custom_fields[custom_field['id']] = custom_field return custom_fields
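A small worked example of how filter_custom_fields and map_custom_field fit together, written as equivalent dict comprehensions so it runs standalone; the field ids and values are hypothetical:

```python
# Hypothetical subset of JIRA's /field response and of an issue's fields
fields = [
    {'id': 'customfield_10100', 'name': 'Story Points', 'custom': True},
    {'id': 'summary', 'name': 'Summary', 'custom': False}
]
issue_fields = {'customfield_10100': 5, 'summary': 'Fix login'}

# filter_custom_fields: keep only custom field definitions, keyed by id
custom_fields = {f['id']: f for f in fields if f['custom'] is True}

# map_custom_field: attach id/name metadata to the raw field values
mapped = {
    k: {'id': custom_fields[k]['id'],
        'name': custom_fields[k]['name'],
        'value': v}
    for k, v in issue_fields.items() if k in custom_fields
}

print(mapped['customfield_10100'])
# {'id': 'customfield_10100', 'name': 'Story Points', 'value': 5}
```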
Fetch the issues
def fetch_items(self, category, **kwargs): """Fetch the issues :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Looking for issues at site '%s', in project '%s' and updated from '%s'", self.url, self.project, str(from_date)) whole_pages = self.client.get_issues(from_date) fields = json.loads(self.client.get_fields()) custom_fields = filter_custom_fields(fields) for whole_page in whole_pages: issues = self.parse_issues(whole_page) for issue in issues: mapping = map_custom_field(custom_fields, issue['fields']) for k, v in mapping.items(): issue['fields'][k] = v comments_data = self.__get_issue_comments(issue['id']) issue['comments_data'] = comments_data yield issue
Parse a JIRA API raw response.
def parse_issues(raw_page): """Parse a JIRA API raw response. The method parses the API response, retrieving the issues from the received page :param raw_page: raw JSON page from where to parse the issues :returns: a generator of issues """ raw_issues = json.loads(raw_page) issues = raw_issues['issues'] for issue in issues: yield issue
Init client
def _init_client(self, from_archive=False): """Init client""" return JiraClient(self.url, self.project, self.user, self.password, self.verify, self.cert, self.max_results, self.archive, from_archive)
Get issue comments
def __get_issue_comments(self, issue_id): """Get issue comments""" comments = [] page_comments = self.client.get_comments(issue_id) for page_comment in page_comments: raw_comments = json.loads(page_comment) comments.extend(raw_comments['comments']) return comments
Retrieve all the items from a given date.
def get_items(self, from_date, url, expand_fields=True): """Retrieve all the items from a given date. :param url: endpoint API url :param from_date: obtain items updated since this date :param expand_fields: if True, it includes the expand fields in the payload """ start_at = 0 req = self.fetch(url, payload=self.__build_payload(start_at, from_date, expand_fields)) issues = req.text data = req.json() titems = data['total'] nitems = data['maxResults'] start_at += min(nitems, titems) self.__log_status(start_at, titems, url) while issues: yield issues issues = None if data['startAt'] + nitems < titems: req = self.fetch(url, payload=self.__build_payload(start_at, from_date, expand_fields)) data = req.json() start_at += nitems issues = req.text self.__log_status(start_at, titems, url)
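The offset arithmetic in get_items can be easier to follow with concrete numbers; this toy loop only reproduces the startAt/maxResults/total bookkeeping, with made-up totals and no HTTP calls:

```python
total = 120        # data['total']
max_results = 50   # data['maxResults']

start_at = 0
offsets = [start_at]
while start_at + max_results < total:   # same check get_items applies
    start_at += max_results
    offsets.append(start_at)

print(offsets)  # [0, 50, 100] -> three requests cover the 120 issues
```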
Retrieve all the issues from a given date.
def get_issues(self, from_date): """Retrieve all the issues from a given date. :param from_date: obtain issues updated since this date """ url = urijoin(self.base_url, self.RESOURCE, self.VERSION_API, 'search') issues = self.get_items(from_date, url) return issues
Retrieve all the comments of a given issue.
def get_comments(self, issue_id): """Retrieve all the comments of a given issue. :param issue_id: ID of the issue """ url = urijoin(self.base_url, self.RESOURCE, self.VERSION_API, self.ISSUE, issue_id, self.COMMENT) comments = self.get_items(DEFAULT_DATETIME, url, expand_fields=False) return comments
Retrieve all the fields available.
def get_fields(self): """Retrieve all the fields available.""" url = urijoin(self.base_url, self.RESOURCE, self.VERSION_API, 'field') req = self.fetch(url) return req.text
Fetch the builds from the url.
def fetch(self, category=CATEGORY_BUILD): """Fetch the builds from the url. The method retrieves, from a Jenkins url, the builds updated since the given date. :param category: the category of items to fetch :returns: a generator of builds """ kwargs = {} items = super().fetch(category, **kwargs) return items
Fetch the contents
def fetch_items(self, category, **kwargs): """Fetch the contents :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ logger.info("Looking for projects at url '%s'", self.url) nbuilds = 0 # number of builds processed njobs = 0 # number of jobs processed projects = json.loads(self.client.get_jobs()) jobs = projects['jobs'] for job in jobs: logger.debug("Adding builds from %s (%i/%i)", job['url'], njobs, len(jobs)) try: raw_builds = self.client.get_builds(job['name']) except requests.exceptions.HTTPError as e: if e.response.status_code == 500: logger.warning(e) logger.warning("Unable to fetch builds from job %s; skipping", job['url']) continue else: raise e if not raw_builds: continue try: builds = json.loads(raw_builds) except ValueError: logger.warning("Unable to parse builds from job %s; skipping", job['url']) continue builds = builds['builds'] for build in builds: yield build nbuilds += 1 njobs += 1 logger.info("Total number of jobs: %i/%i", njobs, len(jobs)) logger.info("Total number of builds: %i", nbuilds)
Init client
def _init_client(self, from_archive=False): """Init client""" return JenkinsClient(self.url, self.blacklist_jobs, self.detail_depth, self.sleep_time, archive=self.archive, from_archive=from_archive)
Retrieve all jobs
def get_jobs(self): """ Retrieve all jobs""" url_jenkins = urijoin(self.base_url, "api", "json") response = self.fetch(url_jenkins) return response.text
Retrieve all builds from a job
def get_builds(self, job_name): """ Retrieve all builds from a job""" if self.blacklist_jobs and job_name in self.blacklist_jobs: logger.warning("Not getting blacklisted job: %s", job_name) return payload = {'depth': self.detail_depth} url_build = urijoin(self.base_url, "job", job_name, "api", "json") response = self.fetch(url_build, payload=payload) return response.text
Fetch the questions
def fetch_items(self, category, **kwargs): """Fetch the questions :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Looking for questions at site '%s', with tag '%s' and updated from '%s'", self.site, self.tagged, str(from_date)) whole_pages = self.client.get_questions(from_date) for whole_page in whole_pages: questions = self.parse_questions(whole_page) for question in questions: yield question
Parse a StackExchange API raw response.
def parse_questions(raw_page): """Parse a StackExchange API raw response. The method parses the API response, retrieving the questions from the received page :param raw_page: raw JSON page from where to parse the questions :returns: a generator of questions """ raw_questions = json.loads(raw_page) questions = raw_questions['items'] for question in questions: yield question
Init client
def _init_client(self, from_archive=False): """Init client""" return StackExchangeClient(self.site, self.tagged, self.api_token, self.max_questions, self.archive, from_archive)
Retrieve all the questions from a given date.
def get_questions(self, from_date): """Retrieve all the questions from a given date. :param from_date: obtain questions updated since this date """ page = 1 url = urijoin(self.base_url, self.VERSION_API, "questions") req = self.fetch(url, payload=self.__build_payload(page, from_date)) questions = req.text data = req.json() tquestions = data['total'] nquestions = data['page_size'] self.__log_status(data['quota_remaining'], data['quota_max'], nquestions, tquestions) while questions: yield questions questions = None if data['has_more']: page += 1 backoff = data.get('backoff', None) if backoff: logger.debug("Expensive query. Wait %s secs to send a new request", backoff) time.sleep(float(backoff)) req = self.fetch(url, payload=self.__build_payload(page, from_date)) data = req.json() questions = req.text nquestions += data['page_size'] self.__log_status(data['quota_remaining'], data['quota_max'], nquestions, tquestions)
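A sketch of the paging loop's backoff handling with canned pages instead of real API responses; the field names follow the StackExchange payload used above, the values are invented:

```python
import time

pages = [
    {'items': [1, 2], 'has_more': True, 'backoff': 1},
    {'items': [3], 'has_more': False},
]

for data in pages:
    for question in data['items']:
        pass  # process the question here
    # Honour the optional 'backoff' field (seconds) before the next request,
    # mirroring what get_questions does between pages
    if data['has_more'] and data.get('backoff'):
        time.sleep(float(data['backoff']))
```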
Sanitize the payload of an HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if 'key' in payload: payload.pop('key') return url, headers, payload
Returns the StackExchange argument parser.
def setup_cmd_parser(cls): """Returns the StackExchange argument parser.""" parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES, from_date=True, token_auth=True, archive=True) # StackExchange options group = parser.parser.add_argument_group('StackExchange arguments') group.add_argument('--site', dest='site', required=True, help="StackExchange site") group.add_argument('--tagged', dest='tagged', help="filter items by question Tag") group.add_argument('--max-questions', dest='max_questions', type=int, default=MAX_QUESTIONS, help="Maximum number of questions requested in the same query") return parser
Fetch the pages from the backend url.
def fetch(self, category=CATEGORY_PAGE, from_date=DEFAULT_DATETIME, reviews_api=False): """Fetch the pages from the backend url. The method retrieves, from a MediaWiki url, the wiki pages. :param category: the category of items to fetch :param from_date: obtain pages updated since this date :param reviews_api: use the reviews API available in MediaWiki >= 1.27 :returns: a generator of pages """ if from_date == DEFAULT_DATETIME: from_date = None else: from_date = datetime_to_utc(from_date) kwargs = {"from_date": from_date, "reviews_api": reviews_api} items = super().fetch(category, **kwargs) return items
Fetch the pages
def fetch_items(self, category, **kwargs): """Fetch the pages :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] reviews_api = kwargs['reviews_api'] mediawiki_version = self.client.get_version() logger.info("MediaWiki version: %s", mediawiki_version) if reviews_api: if ((mediawiki_version[0] == 1 and mediawiki_version[1] >= 27) or mediawiki_version[0] > 1): fetcher = self.__fetch_1_27(from_date) else: logger.warning("Reviews API only available in MediaWiki >= 1.27") logger.warning("Using the Pages API instead") fetcher = self.__fetch_pre1_27(from_date) else: fetcher = self.__fetch_pre1_27(from_date) for page_reviews in fetcher: yield page_reviews
Init client
def _init_client(self, from_archive=False): """Init client""" return MediaWikiClient(self.url, self.archive, from_archive)
Get the max date in unixtime format from reviews.
def __get_max_date(self, reviews): """Get the max date in unixtime format from reviews.""" max_ts = 0 for review in reviews: ts = str_to_datetime(review['timestamp']) ts = datetime_to_utc(ts) if ts.timestamp() > max_ts: max_ts = ts.timestamp() return max_ts
Fetch the pages from the backend url for MediaWiki >= 1.27
def __fetch_1_27(self, from_date=None): """Fetch the pages from the backend url for MediaWiki >=1.27 The method retrieves, from a MediaWiki url, the wiki pages. :returns: a generator of pages """ logger.info("Looking for pages at url '%s'", self.url) npages = 0 # number of pages processed tpages = 0 # number of total pages pages_done = [] # pages already retrieved in reviews API namespaces_contents = self.__get_namespaces_contents() arvcontinue = '' # pagination for getting revisions and their pages while arvcontinue is not None: raw_pages = self.client.get_pages_from_allrevisions(namespaces_contents, from_date, arvcontinue) data_json = json.loads(raw_pages) arvcontinue = data_json['continue']['arvcontinue'] if 'continue' in data_json else None pages_json = data_json['query']['allrevisions'] for page in pages_json: if page['pageid'] in pages_done: logger.debug("Page %s already processed; skipped", page['pageid']) continue tpages += 1 pages_done.append(page['pageid']) page_reviews = self.__get_page_reviews(page) if not page_reviews: logger.warning("Revisions not found in %s [page id: %s], page skipped", page['title'], page['pageid']) continue yield page_reviews npages += 1 logger.info("Total number of pages: %i, skipped %i", tpages, tpages - npages)
Fetch the pages from the backend url.
def __fetch_pre1_27(self, from_date=None): """Fetch the pages from the backend url. The method retrieves, from a MediaWiki url, the wiki pages. :returns: a generator of pages """ def fetch_incremental_changes(namespaces_contents): # Use recent changes API to get the pages from date npages = 0 # number of pages processed tpages = 0 # number of total pages pages_done = [] # pages already retrieved in reviews API rccontinue = '' hole_created = True # To detect that incremental is not complete while rccontinue is not None: raw_pages = self.client.get_recent_pages(namespaces_contents, rccontinue) data_json = json.loads(raw_pages) if 'query-continue' in data_json: # < 1.27 rccontinue = data_json['query-continue']['recentchanges']['rccontinue'] elif 'continue' in data_json: # >= 1.27 rccontinue = data_json['continue']['rccontinue'] else: rccontinue = None pages_json = data_json['query']['recentchanges'] for page in pages_json: page_ts = dateutil.parser.parse(page['timestamp']) if from_date >= page_ts: # The rest of recent changes are older than from_date logger.debug("All recent changes newer than %s processed.", from_date) rccontinue = None hole_created = False break if page['pageid'] in pages_done: logger.debug("Page %s already processed; skipped", page['pageid']) continue tpages += 1 pages_done.append(page['pageid']) page_reviews = self.__get_page_reviews(page) if not page_reviews: logger.warning("Revisions not found in %s [page id: %s], page skipped", page['title'], page['pageid']) continue yield page_reviews npages += 1 if hole_created: logger.error("Incremental update NOT completed. Hole in history created.") logger.info("Total number of pages: %i, skipped %i", tpages, tpages - npages) def fetch_all_pages(namespaces_contents): # Use get all pages API to get pages npages = 0 # number of pages processed tpages = 0 # number of total pages pages_done = [] # pages already retrieved in reviews API for ns in namespaces_contents: apcontinue = '' # pagination for getting pages logger.debug("Getting pages for namespace: %s", ns) while apcontinue is not None: raw_pages = self.client.get_pages(ns, apcontinue) data_json = json.loads(raw_pages) if 'query-continue' in data_json: # < 1.27 apcontinue = data_json['query-continue']['allpages']['apcontinue'] elif 'continue' in data_json: # >= 1.27 apcontinue = data_json['continue']['apcontinue'] else: apcontinue = None pages_json = data_json['query']['allpages'] for page in pages_json: if page['pageid'] in pages_done: logger.debug("Page %s already processed; skipped", page['pageid']) continue tpages += 1 pages_done.append(page['pageid']) page_reviews = self.__get_page_reviews(page) if not page_reviews: logger.warning("Revisions not found in %s [page id: %s], page skipped", page['title'], page['pageid']) continue yield page_reviews npages += 1 logger.info("Total number of pages: %i, skipped %i", tpages, tpages - npages) logger.info("Looking for pages at url '%s'", self.url) # from_date can not be older than MAX_RECENT_DAYS days ago if from_date: if (datetime_utcnow() - from_date).days >= MAX_RECENT_DAYS: cause = "Can't get incremental pages older than %i days." % MAX_RECENT_DAYS cause += " Do a complete analysis without from_date for older changes." raise BackendError(cause=cause) namespaces_contents = self.__get_namespaces_contents() if not from_date: return fetch_all_pages(namespaces_contents) else: return fetch_incremental_changes(namespaces_contents)
Run an API command.
def call(self, params): """Run an API command. :param params: dict with the HTTP parameters needed to run the given command """ logger.debug("MediaWiki client calls API: %s params: %s", self.base_url, str(params)) req = self.fetch(self.base_url, payload=params) return req.text
Retrieve all pages from a namespace starting from apcontinue.
def get_pages(self, namespace, apcontinue=''): """Retrieve all pages from a namespace starting from apcontinue.""" params = { "action": "query", "list": "allpages", "aplimit": self.limit, "apnamespace": namespace, "format": "json" } if apcontinue: params['apcontinue'] = apcontinue return self.call(params)
Retrieve recent pages from all namespaces starting from rccontinue.
def get_recent_pages(self, namespaces, rccontinue=''): """Retrieve recent pages from all namespaces starting from rccontinue.""" namespaces.sort() params = { "action": "query", "list": "recentchanges", "rclimit": self.limit, "rcnamespace": "|".join(namespaces), "rcprop": "title|timestamp|ids", "format": "json" } if rccontinue: params['rccontinue'] = rccontinue return self.call(params)
Fetch the messages the bot can read from the server.
def fetch(self, category=CATEGORY_MESSAGE, offset=DEFAULT_OFFSET, chats=None): """Fetch the messages the bot can read from the server. The method retrieves, from the Telegram server, the messages sent with an offset equal or greater than the given. A list of chats, groups and channels identifiers can be set using the parameter `chats`. When it is set, only those messages sent to any of these will be returned. An empty list will return no messages. :param category: the category of items to fetch :param offset: obtain messages from this offset :param chats: list of chat names used to filter messages :returns: a generator of messages :raises ValueError: when `chats` is an empty list """ if not offset: offset = DEFAULT_OFFSET kwargs = {"offset": offset, "chats": chats} items = super().fetch(category, **kwargs) return items
Fetch the messages
def fetch_items(self, category, **kwargs): """Fetch the messages :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ offset = kwargs['offset'] chats = kwargs['chats'] logger.info("Looking for messages of '%s' bot from offset '%s'", self.bot, offset) if chats is not None: if len(chats) == 0: logger.warning("Chat list filter is empty. No messages will be returned") else: logger.info("Messages which belong to chats %s will be fetched", '[' + ','.join(str(ch_id) for ch_id in chats) + ']') nmsgs = 0 while True: raw_json = self.client.updates(offset=offset) messages = [msg for msg in self.parse_messages(raw_json)] if len(messages) == 0: break for msg in messages: offset = max(msg['update_id'], offset) if not self._filter_message_by_chats(msg, chats): logger.debug("Message %s does not belong to any chat; filtered", msg['message']['message_id']) continue yield msg nmsgs += 1 offset += 1 logger.info("Fetch process completed: %s messages fetched", nmsgs)
Parse a Telegram JSON messages list.
def parse_messages(raw_json): """Parse a Telegram JSON messages list. The method parses the JSON stream and returns an iterator of dictionaries. Each one of this, contains a Telegram message. :param raw_json: JSON string to parse :returns: a generator of parsed messages """ result = json.loads(raw_json) messages = result['result'] for msg in messages: yield msg
Init client
def _init_client(self, from_archive=False): """Init client""" return TelegramBotClient(self.bot_token, self.archive, from_archive)
Check if a message can be filtered based in a list of chats.
def _filter_message_by_chats(self, message, chats): """Check if a message can be filtered based in a list of chats. This method returns `True` when the message was sent to a chat of the given list. It also returns `True` when chats is `None`. :param message: Telegram message :param chats: list of chat, groups and channels identifiers :returns: `True` when the message can be filtered; otherwise, it returns `False` """ if chats is None: return True chat_id = message['message']['chat']['id'] return chat_id in chats
Fetch the messages that a bot can read.
def updates(self, offset=None): """Fetch the messages that a bot can read. When the `offset` is given it will retrieve all the messages that are greater or equal to that offset. Take into account that, due to how the API works, all previous messages will be removed from the server. :param offset: fetch the messages starting on this offset """ params = {} if offset: params[self.OFFSET] = offset response = self._call(self.UPDATES_METHOD, params) return response
Sanitize the URL of an HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize URL of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns the sanitized url, plus the headers and payload """ url = re.sub('bot.*/', 'botXXXXX/', url) return url, headers, payload
Retrieve the given resource.
def _call(self, method, params): """Retrieve the given resource. :param method: method to call :param params: dict with the HTTP parameters needed to call the given method """ url = self.base_url % {'token': self.bot_token, 'method': method} logger.debug("Telegram bot calls method: %s params: %s", method, str(params)) r = self.fetch(url, payload=params) return r.text
Fetch the articles
def fetch_items(self, category, **kwargs): """Fetch the articles :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ offset = kwargs['offset'] logger.info("Fetching articles of '%s' group on '%s' offset %s", self.group, self.host, str(offset)) narts, iarts, tarts = (0, 0, 0) _, _, first, last, _ = self.client.group(self.group) if offset <= last: first = max(first, offset) _, overview = self.client.over((first, last)) else: overview = [] tarts = len(overview) logger.debug("Total number of articles to fetch: %s", tarts) for article_id, _ in overview: try: article_raw = self.client.article(article_id) article = self.__parse_article(article_raw) except ParseError: logger.warning("Error parsing %s article; skipping", article_id) iarts += 1 continue except nntplib.NNTPTemporaryError as e: logger.warning("Error '%s' fetching article %s; skipping", e.response, article_id) iarts += 1 continue yield article narts += 1
NNTP metadata.
def metadata(self, item, filter_classified=False): """NNTP metadata. This method takes items, overriding `metadata` decorator, to add extra information related to NNTP. :param item: an item fetched by a backend :param filter_classified: sets if classified fields were filtered """ item = super().metadata(item, filter_classified=filter_classified) item['offset'] = item['data']['offset'] return item
Parse an NNTP article.
def parse_article(raw_article): """Parse an NNTP article. This method parses an NNTP article stored in a string object and returns a dictionary. :param raw_article: NNTP article string :returns: a dictionary of type `requests.structures.CaseInsensitiveDict` :raises ParseError: when an error is found parsing the article """ try: message = email.message_from_string(raw_article) article = message_to_dict(message) except UnicodeEncodeError as e: raise ParseError(cause=str(e)) return article
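A self-contained sketch of parsing a raw article with the standard email module; it builds a plain dict instead of assuming the backend's message_to_dict helper, and the article text is invented:

```python
import email

raw_article = (
    "From: alice@example.com\n"
    "Subject: Test post\n"
    "Message-ID: <1@example.com>\n"
    "\n"
    "Body of the article.\n"
)

message = email.message_from_string(raw_article)

# Flatten the headers into a plain dict and keep the payload as the body
article = dict(message.items())
article['body'] = message.get_payload()

print(article['Subject'])   # Test post
print(article['body'])      # Body of the article.
```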
Init client
def _init_client(self, from_archive=False): """Init client""" return NNTTPClient(self.host, self.archive, from_archive)
Fetch NNTP data from the server or from the archive
def _fetch(self, method, args): """Fetch NNTP data from the server or from the archive :param method: the name of the command to execute :param args: the arguments required by the command """ if self.from_archive: data = self._fetch_from_archive(method, args) else: data = self._fetch_from_remote(method, args) return data
Fetch article data
def _fetch_article(self, article_id): """Fetch article data :param article_id: id of the article to fetch """ fetched_data = self.handler.article(article_id) data = { 'number': fetched_data[1].number, 'message_id': fetched_data[1].message_id, 'lines': fetched_data[1].lines } return data
Fetch data from NNTP
def _fetch_from_remote(self, method, args): """Fetch data from NNTP :param method: the name of the command to execute :param args: the arguments required by the command """ try: if method == NNTTPClient.GROUP: data = self.handler.group(args) elif method == NNTTPClient.OVER: data = self.handler.over(args) elif method == NNTTPClient.ARTICLE: data = self._fetch_article(args) except nntplib.NNTPTemporaryError as e: data = e raise e finally: if self.archive: self.archive.store(method, args, None, data) return data
Fetch data from the archive
def _fetch_from_archive(self, method, args): """Fetch data from the archive :param method: the name of the command to execute :param args: the arguments required by the command """ if not self.archive: raise ArchiveError(cause="Archive not provided") data = self.archive.retrieve(method, args, None) if isinstance(data, nntplib.NNTPTemporaryError): raise data return data
Fetch the data from a given URL.
def fetch(self, url, payload=None, headers=None, method=GET, stream=False, verify=True): """Fetch the data from a given URL. :param url: link to the resource :param payload: payload of the request :param headers: headers of the request :param method: type of request call (GET or POST) :param stream: defer downloading the response body until the response content is available :param verify: verifying the SSL certificate :returns a response object """ if self.from_archive: response = self._fetch_from_archive(url, payload, headers) else: response = self._fetch_from_remote(url, payload, headers, method, stream, verify) return response
Create a http session and initialize the retry object.
def _create_http_session(self): """Create a http session and initialize the retry object.""" self.session = requests.Session() if self.headers: self.session.headers.update(self.headers) retries = urllib3.util.Retry(total=self.max_retries, connect=self.max_retries_on_connect, read=self.max_retries_on_read, redirect=self.max_retries_on_redirect, status=self.max_retries_on_status, method_whitelist=self.method_whitelist, status_forcelist=self.status_forcelist, backoff_factor=self.sleep_time, raise_on_redirect=self.raise_on_redirect, raise_on_status=self.raise_on_status, respect_retry_after_header=self.respect_retry_after_header) self.session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries)) self.session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))
Setup the rate limit handler.
def setup_rate_limit_handler(self, sleep_for_rate=False, min_rate_to_sleep=MIN_RATE_LIMIT, rate_limit_header=RATE_LIMIT_HEADER, rate_limit_reset_header=RATE_LIMIT_RESET_HEADER): """Setup the rate limit handler. :param sleep_for_rate: sleep until rate limit is reset :param min_rate_to_sleep: minimum rate needed to make the fetching process sleep :param rate_limit_header: header from where to extract the rate limit data :param rate_limit_reset_header: header from where to extract the rate limit reset data """ self.rate_limit = None self.rate_limit_reset_ts = None self.sleep_for_rate = sleep_for_rate self.rate_limit_header = rate_limit_header self.rate_limit_reset_header = rate_limit_reset_header if min_rate_to_sleep > self.MAX_RATE_LIMIT: msg = "Minimum rate to sleep value exceeded (%d)." msg += " High values might cause the client to sleep forever." msg += " Reset to %d." self.min_rate_to_sleep = self.MAX_RATE_LIMIT logger.warning(msg, min_rate_to_sleep, self.MAX_RATE_LIMIT) else: self.min_rate_to_sleep = min_rate_to_sleep
Sleep until the rate limit is restored, or raise a RateLimitError exception if the sleep_for_rate flag is disabled.
def sleep_for_rate_limit(self): """The fetching process sleeps until the rate limit is restored or raises a RateLimitError exception if sleep_for_rate flag is disabled. """ if self.rate_limit is not None and self.rate_limit <= self.min_rate_to_sleep: seconds_to_reset = self.calculate_time_to_reset() if seconds_to_reset < 0: logger.warning("Value of sleep for rate limit is negative, reset it to 0") seconds_to_reset = 0 cause = "Rate limit exhausted." if self.sleep_for_rate: logger.info("%s Waiting %i secs for rate limit reset.", cause, seconds_to_reset) time.sleep(seconds_to_reset) else: raise RateLimitError(cause=cause, seconds_to_reset=seconds_to_reset)
Update the rate limit and the time to reset from the response headers.
def update_rate_limit(self, response): """Update the rate limit and the time to reset from the response headers. :param: response: the response object """ if self.rate_limit_header in response.headers: self.rate_limit = int(response.headers[self.rate_limit_header]) logger.debug("Rate limit: %s", self.rate_limit) else: self.rate_limit = None if self.rate_limit_reset_header in response.headers: self.rate_limit_reset_ts = int(response.headers[self.rate_limit_reset_header]) logger.debug("Rate limit reset: %s", self.calculate_time_to_reset()) else: self.rate_limit_reset_ts = None
Fetch the messages
def fetch_items(self, category, **kwargs): """Fetch the messages :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Fetching messages of '%s' from %s", self.uri, str(from_date)) nmessages = 0 archives = self.__retrieve_archives(from_date) for archive in archives: logger.debug("Parsing supybot archive %s", archive) for message in self.parse_supybot_log(archive): dt = str_to_datetime(message['timestamp']) if dt < from_date: logger.debug("Message %s sent before %s; skipped", str(dt), str(from_date)) continue yield message nmessages += 1 logger.info("Fetch process completed: %s messages fetched", nmessages)
Parse a Supybot IRC log file.
def parse_supybot_log(filepath): """Parse a Supybot IRC log file. The method parses the Supybot IRC log file and returns an iterator of dictionaries. Each one of this, contains a message from the file. :param filepath: path to the IRC log file :returns: a generator of parsed messages :raises ParseError: raised when the format of the Supybot log file is invalid :raises OSError: raised when an error occurs reading the given file """ with open(filepath, 'r', errors='surrogateescape', newline=os.linesep) as f: parser = SupybotParser(f) try: for message in parser.parse(): yield message except ParseError as e: cause = "file: %s; reason: %s" % (filepath, str(e)) raise ParseError(cause=cause)
Retrieve the Supybot archives after the given date
def __retrieve_archives(self, from_date): """Retrieve the Supybot archives after the given date""" archives = [] candidates = self.__list_supybot_archives() for candidate in candidates: dt = self.__parse_date_from_filepath(candidate) if dt.date() >= from_date.date(): archives.append((dt, candidate)) else: logger.debug("Archive %s stored before %s; skipped", candidate, str(from_date)) archives.sort(key=lambda x: x[0]) return [archive[1] for archive in archives]
List the filepath of the archives stored in dirpath
def __list_supybot_archives(self): """List the filepath of the archives stored in dirpath""" archives = [] for root, _, files in os.walk(self.dirpath): for filename in files: location = os.path.join(root, filename) archives.append(location) return archives
Parse a Supybot IRC stream.
def parse(self): """Parse a Supybot IRC stream. Returns an iterator of dicts. Each dicts contains information about the date, type, nick and body of a single log entry. :returns: iterator of parsed lines :raises ParseError: when an invalid line is found parsing the given stream """ for line in self.stream: line = line.rstrip('\n') self.nline += 1 if self.SUPYBOT_EMPTY_REGEX.match(line): continue ts, msg = self._parse_supybot_timestamp(line) if self.SUPYBOT_EMPTY_COMMENT_REGEX.match(msg): continue elif self.SUPYBOT_EMPTY_COMMENT_ACTION_REGEX.match(msg): continue elif self.SUPYBOT_EMPTY_BOT_REGEX.match(msg): continue itype, nick, body = self._parse_supybot_msg(msg) item = self._build_item(ts, itype, nick, body) yield item
Parse timestamp section
def _parse_supybot_timestamp(self, line): """Parse timestamp section""" m = self.SUPYBOT_TIMESTAMP_REGEX.match(line) if not m: msg = "date expected on line %s" % (str(self.nline)) raise ParseError(cause=msg) ts = m.group('ts') msg = m.group('msg') return ts, msg
Parse message section
def _parse_supybot_msg(self, line): """Parse message section""" patterns = [(self.SUPYBOT_COMMENT_REGEX, self.TCOMMENT), (self.SUPYBOT_COMMENT_ACTION_REGEX, self.TCOMMENT), (self.SUPYBOT_SERVER_REGEX, self.TSERVER), (self.SUPYBOT_BOT_REGEX, self.TCOMMENT)] for p in patterns: m = p[0].match(line) if not m: continue return p[1], m.group('nick'), m.group('body').strip() msg = "invalid message on line %s" % (str(self.nline)) raise ParseError(cause=msg)
Fetch the topics
def fetch_items(self, category, **kwargs): """Fetch the topics :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Looking for topics at '%s', updated from '%s'", self.url, str(from_date)) ntopics = 0 topics_ids = self.__fetch_and_parse_topics_ids(from_date) for topic_id in topics_ids: topic = self.__fetch_and_parse_topic(topic_id) ntopics += 1 yield topic logger.info("Fetch process completed: %s topics fetched", ntopics)
Parse a topics page stream.
def __parse_topics_page(self, raw_json): """Parse a topics page stream. The result of the parsing process is a list of tuples. Each tuple contains the identifier of the topic, the last date when it was updated and whether it is pinned or not. :param raw_json: JSON stream to parse :returns: a list of parsed topics """ topics_page = json.loads(raw_json) topics_ids = [] for topic in topics_page['topic_list']['topics']: topic_id = topic['id'] if topic['last_posted_at'] is None: logger.warning("Topic %s with last_posted_at null. Ignoring it.", topic['title']) continue updated_at = str_to_datetime(topic['last_posted_at']) pinned = topic['pinned'] topics_ids.append((topic_id, updated_at, pinned)) return topics_ids
Retrieve the #page summaries of the latest topics.
def topics_page(self, page=None): """Retrieve the #page summaries of the latest topics. :param page: number of page to retrieve """ params = { self.PKEY: self.api_key, self.PPAGE: page } # http://example.com/latest.json response = self._call(self.ALL_TOPICS, self.TOPICS_SUMMARY, params=params) return response
Retrieve the topic with topic_id identifier.
def topic(self, topic_id): """Retrieve the topic with `topic_id` identifier. :param topic_id: identifier of the topic to retrieve """ params = { self.PKEY: self.api_key } # http://example.com/t/8.json response = self._call(self.TOPIC, topic_id, params=params) return response
Retrieve the post with post_id identifier.
def post(self, post_id): """Retrieve the post with `post_id` identifier. :param post_id: identifier of the post to retrieve """ params = { self.PKEY: self.api_key } # http://example.com/posts/10.json response = self._call(self.POSTS, post_id, params=params) return response
Sanitize the payload of an HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if DiscourseClient.PKEY in payload: payload.pop(DiscourseClient.PKEY) return url, headers, payload
Run an API command.
def _call(self, res, res_id, params): """Run an API command. :param res: type of resource to fetch :param res_id: identifier of the resource :param params: dict with the HTTP parameters needed to run the given command """ if res: url = urijoin(self.base_url, res, res_id) else: url = urijoin(self.base_url, res_id) url += self.TJSON logger.debug("Discourse client calls resource: %s %s params: %s", res, res_id, str(params)) r = self.fetch(url, payload=params) return r.text
Fetch the tasks
def fetch_items(self, category, **kwargs): """Fetch the tasks :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Fetching tasks of '%s' from %s", self.url, str(from_date)) ntasks = 0 for task in self.__fetch_tasks(from_date): yield task ntasks += 1 logger.info("Fetch process completed: %s tasks fetched", ntasks)
Parse a Phabricator tasks JSON stream.
def parse_tasks(raw_json): """Parse a Phabricator tasks JSON stream. The method parses a JSON stream and returns a list iterator. Each item is a dictionary that contains the task parsed data. :param raw_json: JSON string to parse :returns: a generator of parsed tasks """ results = json.loads(raw_json) tasks = results['result']['data'] for t in tasks: yield t
Parse a Phabricator users JSON stream.
def parse_users(raw_json): """Parse a Phabricator users JSON stream. The method parses a JSON stream and returns a list iterator. Each item is a dictionary that contains the user parsed data. :param raw_json: JSON string to parse :returns: a generator of parsed users """ results = json.loads(raw_json) users = results['result'] for u in users: yield u
Init client
def _init_client(self, from_archive=False): """Init client""" return ConduitClient(self.url, self.api_token, self.max_retries, self.sleep_time, self.archive, from_archive)
Retrieve tasks.
def tasks(self, from_date=DEFAULT_DATETIME): """Retrieve tasks. :param from_date: retrieve tasks that were updated from that date; dates are converted to epoch time. """ # Convert 'from_date' to epoch timestamp. # Zero value (1970-01-01 00:00:00) is not allowed for # 'modifiedStart' so it will be set to 1, by default. ts = int(datetime_to_utc(from_date).timestamp()) or 1 consts = { self.PMODIFIED_START: ts } attachments = { self.PPROJECTS: True } params = { self.PCONSTRAINTS: consts, self.PATTACHMENTS: attachments, self.PORDER: self.VOUTDATED, } while True: r = self._call(self.MANIPHEST_TASKS, params) yield r j = json.loads(r) after = j['result']['cursor']['after'] if not after: break params[self.PAFTER] = after
Retrieve tasks transactions.
def transactions(self, *phids): """Retrieve tasks transactions. :param phids: list of tasks identifiers """ params = { self.PIDS: phids } response = self._call(self.MANIPHEST_TRANSACTIONS, params) return response
Retrieve users.
def users(self, *phids): """Retrieve users. :params phids: list of users identifiers """ params = { self.PHIDS: phids } response = self._call(self.PHAB_USERS, params) return response
Retrieve data about PHIDs.
def phids(self, *phids): """Retrieve data about PHIDs. :params phids: list of PHIDs """ params = { self.PHIDS: phids } response = self._call(self.PHAB_PHIDS, params) return response
Sanitize the payload of an HTTP request by removing the token information before storing/retrieving archived items
def sanitize_for_archive(url, headers, payload): """Sanitize payload of a HTTP request by removing the token information before storing/retrieving archived items :param: url: HTTP url request :param: headers: HTTP headers request :param: payload: HTTP payload request :returns url, headers and the sanitized payload """ if '__conduit__' in payload['params']: params = json.loads(payload['params']) params.pop('__conduit__') payload['params'] = json.dumps(params, sort_keys=True) return url, headers, payload
Call a method.
def _call(self, method, params): """Call a method. :param method: method to call :param params: dict with the HTTP parameters needed to call the given method :raises ConduitError: when an error is returned by the server """ url = self.URL % {'base': self.base_url, 'method': method} # Conduit and POST parameters params['__conduit__'] = {'token': self.api_token} data = { 'params': json.dumps(params, sort_keys=True), 'output': 'json', '__conduit__': True } logger.debug("Phabricator Conduit client requests: %s params: %s", method, str(data)) r = self.fetch(url, payload=data, method=HttpClient.POST, verify=False) # Check for possible Conduit API errors result = r.json() if result['error_code']: raise ConduitError(error=result['error_info'], code=result['error_code']) return r.text
Fetch the contents
def fetch_items(self, category, **kwargs): """Fetch the contents :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items """ from_date = kwargs['from_date'] logger.info("Fetching historical contents of '%s' from %s", self.url, str(from_date)) nhcs = 0 contents = self.__fetch_contents_summary(from_date) contents = [content for content in contents] for content in contents: cid = content['id'] content_url = urijoin(self.origin, content['_links']['webui']) hcs = self.__fetch_historical_contents(cid, from_date) for hc in hcs: hc['content_url'] = content_url hc['ancestors'] = content.get('ancestors', []) yield hc nhcs += 1 logger.info("Fetch process completed: %s historical contents fetched", nhcs)
Extracts the identifier from a Confluence item.
def metadata_id(item): """Extracts the identifier from a Confluence item. This identifier will be the mix of two fields because a historical content does not have any unique identifier. In this case, 'id' and 'version' values are combined because it should not be possible to have two equal version numbers for the same content. The value to return will follow the pattern: <content>#v<version> (i.e 28979#v10). """ cid = item['id'] cversion = item['version']['number'] return str(cid) + '#v' + str(cversion)
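A standalone copy of the id-building logic, shown against a trimmed, hypothetical historical content so the <content>#v<version> pattern is visible:

```python
def metadata_id(item):
    """Combine the content id and version number into '<content>#v<version>'."""
    return "{}#v{}".format(item['id'], item['version']['number'])

hc = {'id': 28979, 'version': {'number': 10}}  # hypothetical item, trimmed
print(metadata_id(hc))  # 28979#v10
```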
Parse a Confluence summary JSON list.
def parse_contents_summary(raw_json): """Parse a Confluence summary JSON list. The method parses a JSON stream and returns an iterator of dictionaries. Each dictionary is a content summary. :param raw_json: JSON string to parse :returns: a generator of parsed content summaries. """ summary = json.loads(raw_json) contents = summary['results'] for c in contents: yield c
Init client
def _init_client(self, from_archive=False): """Init client""" return ConfluenceClient(self.url, archive=self.archive, from_archive=from_archive)
Get the contents of a repository.
def contents(self, from_date=DEFAULT_DATETIME, offset=None, max_contents=MAX_CONTENTS): """Get the contents of a repository. This method returns an iterator that manages the pagination over contents. Take into account that the seconds of `from_date` parameter will be ignored because the API only works with hours and minutes. :param from_date: fetch the contents updated since this date :param offset: fetch the contents starting from this offset :param max_contents: maximum number of contents to fetch per request """ resource = self.RCONTENTS + '/' + self.MSEARCH # Set confluence query parameter (cql) date = from_date.strftime("%Y-%m-%d %H:%M") cql = self.VCQL % {'date': date} # Set parameters params = { self.PCQL: cql, self.PLIMIT: max_contents, self.PEXPAND: self.PANCESTORS } if offset: params[self.PSTART] = offset for response in self._call(resource, params): yield response
Get the snapshot of a content for the given version.
def historical_content(self, content_id, version): """Get the snapshot of a content for the given version. :param content_id: fetch the snapshot of this content :param version: snapshot version of the content """ resource = self.RCONTENTS + '/' + str(content_id) params = { self.PVERSION: version, self.PSTATUS: self.VHISTORICAL, self.PEXPAND: ','.join(self.VEXPAND) } # Only one item is returned response = [response for response in self._call(resource, params)] return response[0]
Retrieve the given resource.
def _call(self, resource, params): """Retrieve the given resource. :param resource: resource to retrieve :param params: dict with the HTTP parameters needed to retrieve the given resource """ url = self.URL % {'base': self.base_url, 'resource': resource} logger.debug("Confluence client requests: %s params: %s", resource, str(params)) while True: r = self.fetch(url, payload=params) yield r.text # Pagination is available when 'next' link exists j = r.json() if '_links' not in j: break if 'next' not in j['_links']: break url = urijoin(self.base_url, j['_links']['next']) params = {}
Parse the result property, extracting the value and unit of measure
def _parse_result(self): ''' Parse the result property, extracting the value and unit of measure ''' if self.result is not None: uom = testXMLAttribute(self.result, "uom") value_str = testXMLValue(self.result) try: value = float(value_str) except (TypeError, ValueError): raise ValueError("Error parsing measurement value") self.result = Measurement(value, uom)
Return a capabilities url
def capabilities_url(self, service_url): """Return a capabilities url """ qs = [] if service_url.find('?') != -1: qs = cgi.parse_qsl(service_url.split('?')[1]) params = [x[0] for x in qs] if 'service' not in params: qs.append(('service', 'WFS')) if 'request' not in params: qs.append(('request', 'GetCapabilities')) if 'version' not in params: qs.append(('version', self.version)) urlqs = urlencode(tuple(qs)) return service_url.split('?')[0] + '?' + urlqs
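A runnable sketch of the same URL-completion logic using urllib.parse (rather than the cgi module the excerpt relies on); the service URL and default version are placeholders:

```python
from urllib.parse import parse_qsl, urlencode

def build_capabilities_url(service_url, version='1.1.0'):
    # Append service/request/version only when the URL does not already set
    # them, mirroring capabilities_url above; 'version' stands in for self.version
    qs = parse_qsl(service_url.split('?')[1]) if '?' in service_url else []
    params = [name for name, _ in qs]
    if 'service' not in params:
        qs.append(('service', 'WFS'))
    if 'request' not in params:
        qs.append(('request', 'GetCapabilities'))
    if 'version' not in params:
        qs.append(('version', version))
    return service_url.split('?')[0] + '?' + urlencode(qs)

print(build_capabilities_url('http://example.com/wfs?service=WFS'))
# http://example.com/wfs?service=WFS&request=GetCapabilities&version=1.1.0
```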
Get and parse a WFS capabilities document returning an instance of WFSCapabilitiesInfoset
def read(self, url, timeout=30): """Get and parse a WFS capabilities document, returning an instance of WFSCapabilitiesInfoset Parameters ---------- url : string The URL to the WFS capabilities document. timeout : number A timeout value (in seconds) for the request. """ request = self.capabilities_url(url) u = openURL(request, timeout=timeout, username=self.username, password=self.password) return etree.fromstring(u.read())
Parse a WFS capabilities document returning an instance of WFSCapabilitiesInfoset
def readString(self, st): """Parse a WFS capabilities document, returning an instance of WFSCapabilitiesInfoset string should be an XML capabilities document """ if not isinstance(st, str) and not isinstance(st, bytes): raise ValueError("String must be of type string or bytes, not %s" % type(st)) return etree.fromstring(st)