| INSTRUCTION | RESPONSE |
|---|---|
Add metadata to an item.
|
def metadata(self, item, filter_classified=False):
"""Add metadata to an item.
It adds metadata to a given item such as how and
when it was fetched. The contents from the original item will
be stored under the 'data' keyword.
:param item: an item fetched by a backend
:param filter_classified: sets if classified fields were filtered
"""
item = {
'backend_name': self.__class__.__name__,
'backend_version': self.version,
'perceval_version': __version__,
'timestamp': datetime_utcnow().timestamp(),
'origin': self.origin,
'uuid': uuid(self.origin, self.metadata_id(item)),
'updated_on': self.metadata_updated_on(item),
'classified_fields_filtered': self.classified_fields if filter_classified else None,
'category': self.metadata_category(item),
'tag': self.tag,
'data': item,
}
return item
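A minimal, self-contained sketch of the item envelope this method builds; `wrap_item` and its arguments are hypothetical names, and the uuid/version handling is stubbed out for illustration:
# Hypothetical illustration of the envelope produced above; uuid generation
# and version fields are replaced with plain hashlib/constants.
import hashlib
from datetime import datetime, timezone

def wrap_item(raw_item, origin, backend_name, backend_version, category, tag):
    """Wrap a raw item with fetch metadata, keeping the payload under 'data'."""
    item_id = hashlib.sha1('{}:{}'.format(origin, raw_item['id']).encode()).hexdigest()
    return {
        'backend_name': backend_name,
        'backend_version': backend_version,
        'timestamp': datetime.now(timezone.utc).timestamp(),
        'origin': origin,
        'uuid': item_id,
        'updated_on': raw_item['updated_on'],
        'classified_fields_filtered': None,
        'category': category,
        'tag': tag,
        'data': raw_item,
    }

print(wrap_item({'id': 1, 'updated_on': 1500000000.0},
                'https://example.com/repo', 'ExampleBackend', '0.1.0',
                'commit', 'example'))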
|
Parse a list of arguments.
|
def parse(self, *args):
"""Parse a list of arguments.
Parse argument strings needed to run a backend command. The result
will be an `argparse.Namespace` object populated with the values
obtained after the validation of the parameters.
:param args: argument strings
:result: an object with the parsed values
"""
parsed_args = self.parser.parse_args(args)
# Category was not set, remove it
if parsed_args.category is None:
delattr(parsed_args, 'category')
if self._from_date:
parsed_args.from_date = str_to_datetime(parsed_args.from_date)
if self._to_date and parsed_args.to_date:
parsed_args.to_date = str_to_datetime(parsed_args.to_date)
if self._archive and parsed_args.archived_since:
parsed_args.archived_since = str_to_datetime(parsed_args.archived_since)
if self._archive and parsed_args.fetch_archive and parsed_args.no_archive:
raise AttributeError("fetch-archive and no-archive arguments are not compatible")
if self._archive and parsed_args.fetch_archive and not parsed_args.category:
raise AttributeError("fetch-archive needs a category to work with")
# Set aliases
for alias, arg in self.aliases.items():
if (alias not in parsed_args) and (arg in parsed_args):
value = getattr(parsed_args, arg, None)
setattr(parsed_args, alias, value)
return parsed_args
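A self-contained sketch of the alias-copying step that closes parse(), using plain argparse; the `api_token`/`token` mapping is a made-up example:
# Copy a parsed argument to an alias attribute when the alias is missing,
# mirroring the loop at the end of parse() above.
import argparse

aliases = {'api_token': 'token'}   # hypothetical alias -> argument mapping

parser = argparse.ArgumentParser()
parser.add_argument('-t', '--token', dest='token')
parsed = parser.parse_args(['-t', 'abc123'])

for alias, arg in aliases.items():
    if (alias not in parsed) and (arg in parsed):
        setattr(parsed, alias, getattr(parsed, arg, None))

print(parsed.token, parsed.api_token)   # both print 'abc123'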
|
Activate authentication arguments parsing
|
def _set_auth_arguments(self, basic_auth=True, token_auth=False):
"""Activate authentication arguments parsing"""
group = self.parser.add_argument_group('authentication arguments')
if basic_auth:
group.add_argument('-u', '--backend-user', dest='user',
help="backend user")
group.add_argument('-p', '--backend-password', dest='password',
help="backend password")
if token_auth:
group.add_argument('-t', '--api-token', dest='api_token',
help="backend authentication token / API key")
|
Activate archive arguments parsing
|
def _set_archive_arguments(self):
"""Activate archive arguments parsing"""
group = self.parser.add_argument_group('archive arguments')
group.add_argument('--archive-path', dest='archive_path', default=None,
help="directory path to the archives")
group.add_argument('--no-archive', dest='no_archive', action='store_true',
help="do not archive data")
group.add_argument('--fetch-archive', dest='fetch_archive', action='store_true',
help="fetch data from the archives")
group.add_argument('--archived-since', dest='archived_since', default='1970-01-01',
help="retrieve items archived since the given date")
|
Activate output arguments parsing
|
def _set_output_arguments(self):
"""Activate output arguments parsing"""
group = self.parser.add_argument_group('output arguments')
group.add_argument('-o', '--output', type=argparse.FileType('w'),
dest='outfile', default=sys.stdout,
help="output file")
group.add_argument('--json-line', dest='json_line', action='store_true',
help="produce a JSON line for each output item")
|
Fetch and write items.
|
def run(self):
"""Fetch and write items.
This method runs the backend to fetch the items from the given
origin. Items are converted to JSON objects and written to the
defined output.
If the `fetch-archive` parameter was given as an argument during
the initialization of the instance, the items will be retrieved
using the archive manager.
"""
backend_args = vars(self.parsed_args)
category = backend_args.pop('category', None)
filter_classified = backend_args.pop('filter_classified', False)
archived_since = backend_args.pop('archived_since', None)
if self.archive_manager and self.parsed_args.fetch_archive:
items = fetch_from_archive(self.BACKEND, backend_args,
self.archive_manager,
category,
archived_since)
else:
items = fetch(self.BACKEND, backend_args, category,
filter_classified=filter_classified,
manager=self.archive_manager)
try:
for item in items:
if self.json_line:
obj = json.dumps(item, separators=(',', ':'), sort_keys=True)
else:
obj = json.dumps(item, indent=4, sort_keys=True)
self.outfile.write(obj)
self.outfile.write('\n')
except IOError as e:
raise RuntimeError(str(e))
except Exception as e:
raise RuntimeError(str(e))
|
Initialize archive based on the parsed parameters
|
def _initialize_archive(self):
"""Initialize archive based on the parsed parameters"""
if 'archive_path' not in self.parsed_args:
manager = None
elif self.parsed_args.no_archive:
manager = None
else:
if not self.parsed_args.archive_path:
archive_path = os.path.expanduser(ARCHIVES_DEFAULT_PATH)
else:
archive_path = self.parsed_args.archive_path
manager = ArchiveManager(archive_path)
self.archive_manager = manager
|
Fetch the messages
|
def fetch_items(self, category, **kwargs):
"""Fetch the messages
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
logger.info("Looking for messages from '%s' on '%s' since %s",
self.uri, self.dirpath, str(from_date))
mailing_list = MailingList(self.uri, self.dirpath)
messages = self._fetch_and_parse_messages(mailing_list, from_date)
for message in messages:
yield message
logger.info("Fetch process completed")
|
Extracts the update time from a MBox item.
|
def metadata_updated_on(item):
"""Extracts the update time from a MBox item.
The timestamp used is extracted from 'Date' field in its
several forms. This date is converted to UNIX timestamp
format.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
ts = item[MBox.DATE_FIELD]
ts = str_to_datetime(ts)
return ts.timestamp()
|
Parse a mbox file.
|
def parse_mbox(filepath):
"""Parse a mbox file.
This method parses a mbox file and returns an iterator of dictionaries.
Each one of these contains an email message.
:param filepath: path of the mbox to parse
:returns: generator of messages; each message is stored in a
dictionary of type `requests.structures.CaseInsensitiveDict`
"""
mbox = _MBox(filepath, create=False)
for msg in mbox:
message = message_to_dict(msg)
yield message
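A self-contained sketch that builds a one-message mbox with the standard library and iterates it much as parse_mbox() does; `dict(message.items())` is only a rough stand-in for Perceval's message_to_dict() helper:
import mailbox
import os
import tempfile
from email.message import EmailMessage

# Build a throwaway mbox with a single message.
path = os.path.join(tempfile.mkdtemp(), 'sample.mbox')
msg = EmailMessage()
msg['From'] = 'alice@example.com'
msg['Message-ID'] = '<1@example.com>'
msg['Date'] = 'Mon, 01 Jan 2018 00:00:00 +0000'
msg.set_content('hello')

box = mailbox.mbox(path)
box.add(msg)
box.flush()

# Iterate the messages and expose the headers as a plain dictionary.
for message in mailbox.mbox(path):
    headers = dict(message.items())
    print(headers['Message-ID'], headers['Date'])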
|
Fetch and parse the messages from a mailing list
|
def _fetch_and_parse_messages(self, mailing_list, from_date):
"""Fetch and parse the messages from a mailing list"""
from_date = datetime_to_utc(from_date)
nmsgs, imsgs, tmsgs = (0, 0, 0)
for mbox in mailing_list.mboxes:
tmp_path = None
try:
tmp_path = self._copy_mbox(mbox)
for message in self.parse_mbox(tmp_path):
tmsgs += 1
if not self._validate_message(message):
imsgs += 1
continue
# Ignore those messages sent before the given date
dt = str_to_datetime(message[MBox.DATE_FIELD])
if dt < from_date:
logger.debug("Message %s sent before %s; skipped",
message['unixfrom'], str(from_date))
tmsgs -= 1
continue
# Convert 'CaseInsensitiveDict' to dict
message = self._casedict_to_dict(message)
nmsgs += 1
logger.debug("Message %s parsed", message['unixfrom'])
yield message
except (OSError, EOFError) as e:
logger.warning("Ignoring %s mbox due to: %s", mbox.filepath, str(e))
except Exception as e:
if tmp_path and os.path.exists(tmp_path):
os.remove(tmp_path)
raise e
finally:
if tmp_path and os.path.exists(tmp_path):
os.remove(tmp_path)
logger.info("Done. %s/%s messages fetched; %s ignored",
nmsgs, tmsgs, imsgs)
|
Copy the contents of a mbox to a temporary file
|
def _copy_mbox(self, mbox):
"""Copy the contents of a mbox to a temporary file"""
tmp_path = tempfile.mktemp(prefix='perceval_')
with mbox.container as f_in:
with open(tmp_path, mode='wb') as f_out:
for l in f_in:
f_out.write(l)
return tmp_path
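`tempfile.mktemp` is deprecated because the path it returns can be raced before the file is created; a short alternative sketch using NamedTemporaryFile(delete=False), assuming the mbox container is any binary file-like object:
import shutil
import tempfile

def copy_to_tempfile(f_in):
    """Copy a binary file-like object to a temporary file and return its path."""
    with tempfile.NamedTemporaryFile(prefix='perceval_', delete=False) as f_out:
        shutil.copyfileobj(f_in, f_out)
        return f_out.name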
|
Check if the given message has the mandatory fields
|
def _validate_message(self, message):
"""Check if the given message has the mandatory fields"""
# This check is "case insensitive" because we're
# using 'CaseInsensitiveDict' from requests.structures
# module to store the contents of a message.
if self.MESSAGE_ID_FIELD not in message:
logger.warning("Field 'Message-ID' not found in message %s; ignoring",
message['unixfrom'])
return False
if not message[self.MESSAGE_ID_FIELD]:
logger.warning("Field 'Message-ID' is empty in message %s; ignoring",
message['unixfrom'])
return False
if self.DATE_FIELD not in message:
logger.warning("Field 'Date' not found in message %s; ignoring",
message['unixfrom'])
return False
if not message[self.DATE_FIELD]:
logger.warning("Field 'Date' is empty in message %s; ignoring",
message['unixfrom'])
return False
try:
str_to_datetime(message[self.DATE_FIELD])
except InvalidDateError:
logger.warning("Invalid date %s in message %s; ignoring",
message[self.DATE_FIELD], message['unixfrom'])
return False
return True
|
Convert a message in CaseInsensitiveDict to dict.
|
def _casedict_to_dict(self, message):
"""Convert a message in CaseInsensitiveDict to dict.
This method also converts well-known problematic headers,
such as Message-ID and Date, to a common name.
"""
message_id = message.pop(self.MESSAGE_ID_FIELD)
date = message.pop(self.DATE_FIELD)
msg = {k: v for k, v in message.items()}
msg[self.MESSAGE_ID_FIELD] = message_id
msg[self.DATE_FIELD] = date
return msg
|
Return a Message representation or raise a KeyError.
|
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(mailbox.linesep, b'')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(mailbox.linesep, b'\n'))
try:
msg.set_from(from_line[5:].decode('ascii'))
return msg
except UnicodeDecodeError:
pass
try:
msg.set_from(from_line[5:].decode('utf-8'))
except UnicodeDecodeError:
msg.set_from(from_line[5:].decode('iso-8859-1'))
return msg
|
Get the mboxes managed by this mailing list.
|
def mboxes(self):
"""Get the mboxes managed by this mailing list.
Returns the archives sorted by name.
:returns: a list of `.MBoxArchive` objects
"""
archives = []
if os.path.isfile(self.dirpath):
try:
archives.append(MBoxArchive(self.dirpath))
except OSError as e:
logger.warning("Ignoring %s mbox due to: %s", self.dirpath, str(e))
else:
for root, _, files in os.walk(self.dirpath):
for filename in sorted(files):
try:
location = os.path.join(root, filename)
archives.append(MBoxArchive(location))
except OSError as e:
logger.warning("Ignoring %s mbox due to: %s", filename, str(e))
return archives
|
Fetch commits.
|
def fetch(self, category=CATEGORY_COMMIT, from_date=DEFAULT_DATETIME, to_date=DEFAULT_LAST_DATETIME,
branches=None, latest_items=False, no_update=False):
"""Fetch commits.
The method retrieves from a Git repository or a log file
a list of commits. Commits are returned in the same order
they were obtained.
When the `from_date` parameter is given, it returns items committed
since the given date.
The list of `branches` is a list of strings, with the names of
the branches to fetch. If the list of branches is empty, no
commit is fetched. If the list of branches is None, all commits
for all branches will be fetched.
The parameter `latest_items` returns only those commits which
are new since the last time this method was called.
The parameter `no_update` returns all commits without performing
an update of the repository before.
Take into account that `from_date` and `branches` are ignored
when the commits are fetched from a Git log file or when
`latest_items` flag is set.
The class raises a `RepositoryError` exception when an error
occurs accessing the repository.
:param category: the category of items to fetch
:param from_date: obtain commits newer than a specific date
(inclusive)
:param to_date: obtain commits older than a specific date
:param branches: names of branches to fetch from (default: None)
:param latest_items: sync with the repository to fetch only the
newest commits
:param no_update: if enabled, don't update the repo with the latest changes
:returns: a generator of commits
"""
if not from_date:
from_date = DEFAULT_DATETIME
if not to_date:
to_date = DEFAULT_LAST_DATETIME
kwargs = {
'from_date': from_date,
'to_date': to_date,
'branches': branches,
'latest_items': latest_items,
'no_update': no_update
}
items = super().fetch(category, **kwargs)
return items
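A hedged usage sketch, assuming this fetch() belongs to Perceval's Git backend (perceval.backends.core.git.Git) built as Git(uri, gitpath); the repository URL and local path are placeholders:
from datetime import datetime

from perceval.backends.core.git import Git   # assumes Perceval is installed

repo = Git('https://example.com/project.git', '/tmp/project-git')
for item in repo.fetch(from_date=datetime(2020, 1, 1), branches=['master']):
    # Each item is the metadata envelope shown earlier; the commit lives under 'data'.
    print(item['data']['commit'], item['data']['Author'])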
|
Fetch the commits
|
def fetch_items(self, category, **kwargs):
"""Fetch the commits
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
to_date = kwargs['to_date']
branches = kwargs['branches']
latest_items = kwargs['latest_items']
no_update = kwargs['no_update']
ncommits = 0
try:
if os.path.isfile(self.gitpath):
commits = self.__fetch_from_log()
else:
commits = self.__fetch_from_repo(from_date, to_date, branches,
latest_items, no_update)
for commit in commits:
yield commit
ncommits += 1
except EmptyRepositoryError:
pass
logger.info("Fetch process completed: %s commits fetched",
ncommits)
|
Parse a Git log file.
|
def parse_git_log_from_file(filepath):
"""Parse a Git log file.
The method parses the Git log file and returns an iterator of
dictionaries. Each one of these contains a commit.
:param filepath: path to the log file
:returns: a generator of parsed commits
:raises ParseError: raised when the format of the Git log file
is invalid
:raises OSError: raised when an error occurs reading the
given file
"""
with open(filepath, 'r', errors='surrogateescape',
newline=os.linesep) as f:
parser = GitParser(f)
for commit in parser.parse():
yield commit
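A hedged sketch of producing a compatible log file and feeding it to this parser; the pretty-output options mirror those listed in the log() method further below, and the repository path is a placeholder:
import subprocess

# Generate a log in the format the parser expects (see log() below).
cmd = ['git', 'log', '--raw', '--numstat', '--pretty=fuller', '--decorate=full',
       '--all', '--reverse', '--topo-order', '--parents', '-M', '-C', '-c']
with open('/tmp/project.log', 'w') as f:
    subprocess.run(cmd, cwd='/tmp/project-git', stdout=f, check=True)

for commit in parse_git_log_from_file('/tmp/project.log'):
    print(commit['commit'])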
|
Initialize repositories directory path
|
def _pre_init(self):
"""Initialize repositories directory path"""
if self.parsed_args.git_log:
git_path = self.parsed_args.git_log
elif not self.parsed_args.git_path:
base_path = os.path.expanduser('~/.perceval/repositories/')
processed_uri = self.parsed_args.uri.lstrip('/')
git_path = os.path.join(base_path, processed_uri) + '-git'
else:
git_path = self.parsed_args.git_path
setattr(self.parsed_args, 'gitpath', git_path)
|
Returns the Git argument parser.
|
def setup_cmd_parser(cls):
"""Returns the Git argument parser."""
parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
from_date=True,
to_date=True)
# Optional arguments
group = parser.parser.add_argument_group('Git arguments')
group.add_argument('--branches', dest='branches',
nargs='+', type=str, default=None,
help="Fetch commits only from these branches")
# Mutual exclusive parameters
exgroup = group.add_mutually_exclusive_group()
exgroup.add_argument('--git-path', dest='git_path',
help="Path where the Git repository will be cloned")
exgroup.add_argument('--git-log', dest='git_log',
help="Path to the Git log file")
exgroup_fetch = group.add_mutually_exclusive_group()
exgroup_fetch.add_argument('--latest-items', dest='latest_items',
action='store_true',
help="Fetch latest commits added to the repository")
exgroup_fetch.add_argument('--no-update', dest='no_update',
action='store_true',
help="Fetch all commits without updating the repository")
# Required arguments
parser.parser.add_argument('uri',
help="URI of the Git log repository")
return parser
|
Parse the Git log stream.
|
def parse(self):
"""Parse the Git log stream."""
for line in self.stream:
line = line.rstrip('\n')
parsed = False
self.nline += 1
while not parsed:
parsed = self.handlers[self.state](line)
if self.state == self.COMMIT and self.commit:
commit = self._build_commit()
logger.debug("Commit %s parsed", commit['commit'])
yield commit
# Return the last commit, if any
if self.commit:
commit = self._build_commit()
logger.debug("Commit %s parsed", commit['commit'])
yield commit
|
Get the old filepath of a moved/renamed file.
|
def __get_old_filepath(self, f):
"""Get the old filepath of a moved/renamed file.
Moved or renamed files can be found in the log with any of the
next patterns:
'old_name => new_name'
'{old_prefix => new_prefix}/name'
'name/{old_suffix => new_suffix}'
This method returns the filepath before the file was moved or
renamed.
"""
i = f.find('{')
j = f.find('}')
if i > -1 and j > -1:
prefix = f[0:i]
inner = f[i + 1:f.find(' => ', i)]
suffix = f[j + 1:]
return prefix + inner + suffix
elif ' => ' in f:
return f.split(' => ')[0]
else:
return f
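A self-contained check of the three rename patterns the docstring lists, using a standalone copy of the same logic:
def old_filepath(f):
    """Standalone copy of the logic above, for illustration only."""
    i, j = f.find('{'), f.find('}')
    if i > -1 and j > -1:
        return f[0:i] + f[i + 1:f.find(' => ', i)] + f[j + 1:]
    elif ' => ' in f:
        return f.split(' => ')[0]
    return f

assert old_filepath('old_name => new_name') == 'old_name'
assert old_filepath('{old_prefix => new_prefix}/name') == 'old_prefix/name'
assert old_filepath('name/{old_suffix => new_suffix}') == 'name/old_suffix'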
|
Clone a Git repository.
|
def clone(cls, uri, dirpath):
"""Clone a Git repository.
Make a bare copy of the repository stored in `uri` into `dirpath`.
The repository can be either local or remote.
:param uri: URI of the repository
:param dirpath: directory where the repository will be cloned
:returns: a `GitRepository` class having cloned the repository
:raises RepositoryError: when an error occurs cloning the given
repository
"""
cmd = ['git', 'clone', '--bare', uri, dirpath]
env = {
'LANG': 'C',
'HOME': os.getenv('HOME', '')
}
cls._exec(cmd, env=env)
logger.debug("Git %s repository cloned into %s",
uri, dirpath)
return cls(uri, dirpath)
|
Count the objects of a repository.
|
def count_objects(self):
"""Count the objects of a repository.
The method returns the total number of objects (packed and unpacked)
available on the repository.
:raises RepositoryError: when an error occurs counting the objects
of a repository
"""
cmd_count = ['git', 'count-objects', '-v']
outs = self._exec(cmd_count, cwd=self.dirpath, env=self.gitenv)
outs = outs.decode('utf-8', errors='surrogateescape').rstrip()
try:
cobjs = {k: v for k, v in (x.split(': ') for x in outs.split('\n'))}
nobjs = int(cobjs['count']) + int(cobjs['in-pack'])
except KeyError as e:
error = "unable to parse 'count-objects' output; reason: '%s' entry not found" \
% e.args[0]
raise RepositoryError(cause=error)
except ValueError as e:
error = "unable to parse 'count-objects' output; reason: %s" % str(e)
raise RepositoryError(cause=error)
logger.debug("Git %s repository has %s objects",
self.uri, str(nobjs))
return nobjs
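A self-contained sketch of just the parsing step, fed with sample 'git count-objects -v' output instead of running the command:
# Sample output of 'git count-objects -v'; field values are made up.
sample = "count: 12\nsize: 48\nin-pack: 3487\npacks: 1\nsize-pack: 2048"

cobjs = {k: v for k, v in (line.split(': ') for line in sample.split('\n'))}
nobjs = int(cobjs['count']) + int(cobjs['in-pack'])
print(nobjs)   # 3499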
|
Check if the repo is in a detached state.
|
def is_detached(self):
"""Check if the repo is in a detached state.
The repository is in a detached state when HEAD is not a symbolic
reference.
:returns: whether the repository is detached or not
:raises RepositoryError: when an error occurs checking the state
of the repository
"""
cmd_sym = ['git', 'symbolic-ref', 'HEAD']
try:
self._exec(cmd_sym, cwd=self.dirpath, env=self.gitenv)
except RepositoryError as e:
if e.msg.find("ref HEAD is not a symbolic ref") == -1:
raise e
return True
else:
return False
|
Update repository from its remote.
|
def update(self):
"""Update repository from its remote.
Calling this method, the repository will be synchronized with
the remote repository using 'fetch' command for 'heads' refs.
Any commit stored in the local copy will be removed; refs
will be overwritten.
:raises RepositoryError: when an error occurs updating the
repository
"""
cmd_update = ['git', 'fetch', 'origin', '+refs/heads/*:refs/heads/*', '--prune']
self._exec(cmd_update, cwd=self.dirpath, env=self.gitenv)
logger.debug("Git %s repository updated into %s",
self.uri, self.dirpath)
|
Keep the repository in sync.
|
def sync(self):
"""Keep the repository in sync.
This method will synchronize the repository with its 'origin',
fetching the newest objects and updating references. It uses
low-level commands which make it possible to keep track of what
has changed in the repository.
The method also returns a list of hashes related to the new
commits fetched during the process.
:returns: list of new commits
:raises RepositoryError: when an error occurs synchronizing
the repository
"""
pack_name, refs = self._fetch_pack()
if pack_name:
commits = self._read_commits_from_pack(pack_name)
else:
commits = []
logger.debug("Git repository %s (%s) does not have any new object",
self.uri, self.dirpath)
self._update_references(refs)
logger.debug("Git repository %s (%s) is synced",
self.uri, self.dirpath)
return commits
|
Read the list of commits from the repository
|
def rev_list(self, branches=None):
"""Read the list commits from the repository
The list of branches is a list of strings, with the names of the
branches to fetch. If the list of branches is empty, no commit
is fetched. If the list of branches is None, all commits
for all branches will be fetched.
The method returns the Git rev-list of the repository using the
following options:
git rev-list --topo-order
:param branches: names of branches to fetch from (default: None)
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs executing the command
"""
if self.is_empty():
logger.warning("Git %s repository is empty; unable to get the rev-list",
self.uri)
raise EmptyRepositoryError(repository=self.uri)
cmd_rev_list = ['git', 'rev-list', '--topo-order']
if branches is None:
cmd_rev_list.extend(['--branches', '--tags', '--remotes=origin'])
elif len(branches) == 0:
cmd_rev_list.extend(['--branches', '--tags', '--max-count=0'])
else:
branches = ['refs/heads/' + branch for branch in branches]
cmd_rev_list.extend(branches)
for line in self._exec_nb(cmd_rev_list, cwd=self.dirpath, env=self.gitenv):
yield line.rstrip('\n')
logger.debug("Git rev-list fetched from %s repository (%s)",
self.uri, self.dirpath)
|
Read the commit log from the repository.
|
def log(self, from_date=None, to_date=None, branches=None, encoding='utf-8'):
"""Read the commit log from the repository.
The method returns the Git log of the repository using the
following options:
git log --raw --numstat --pretty=fuller --decorate=full
--all --reverse --topo-order --parents -M -C -c
--remotes=origin
When `from_date` is given, it gets the commits equal to or newer
than that date. This date must be given as a datetime object.
The list of branches is a list of strings, with the names of the
branches to fetch. If the list of branches is empty, no commit
is fetched. If the list of branches is None, all commits
for all branches will be fetched.
:param from_date: fetch commits newer than a specific
date (inclusive)
:param branches: names of branches to fetch from (default: None)
:param encoding: encode the log using this format
:returns: a generator where each item is a line from the log
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the log
"""
if self.is_empty():
logger.warning("Git %s repository is empty; unable to get the log",
self.uri)
raise EmptyRepositoryError(repository=self.uri)
cmd_log = ['git', 'log', '--reverse', '--topo-order']
cmd_log.extend(self.GIT_PRETTY_OUTPUT_OPTS)
if from_date:
dt = from_date.strftime("%Y-%m-%d %H:%M:%S %z")
cmd_log.append('--since=' + dt)
if to_date:
dt = to_date.strftime("%Y-%m-%d %H:%M:%S %z")
cmd_log.append('--until=' + dt)
if branches is None:
cmd_log.extend(['--branches', '--tags', '--remotes=origin'])
elif len(branches) == 0:
cmd_log.append('--max-count=0')
else:
branches = ['refs/heads/' + branch for branch in branches]
cmd_log.extend(branches)
for line in self._exec_nb(cmd_log, cwd=self.dirpath, env=self.gitenv):
yield line
logger.debug("Git log fetched from %s repository (%s)",
self.uri, self.dirpath)
|
Show the data of a set of commits.
|
def show(self, commits=None, encoding='utf-8'):
"""Show the data of a set of commits.
The method returns the output of Git show command for a
set of commits using the following options:
git show --raw --numstat --pretty=fuller --decorate=full
--parents -M -C -c [<commit>...<commit>]
When the list of commits is empty, the command will return
data about the last commit, like the default behaviour of
`git show`.
:param commits: list of commits to show data
:param encoding: encode the output using this format
:returns: a generator where each item is a line from the show output
:raises EmptyRepositoryError: when the repository is empty and
the action cannot be performed
:raises RepositoryError: when an error occurs fetching the show output
"""
if self.is_empty():
logger.warning("Git %s repository is empty; unable to run show",
self.uri)
raise EmptyRepositoryError(repository=self.uri)
if commits is None:
commits = []
cmd_show = ['git', 'show']
cmd_show.extend(self.GIT_PRETTY_OUTPUT_OPTS)
cmd_show.extend(commits)
for line in self._exec_nb(cmd_show, cwd=self.dirpath, env=self.gitenv):
yield line
logger.debug("Git show fetched from %s repository (%s)",
self.uri, self.dirpath)
|
Fetch changes and store them in a pack.
|
def _fetch_pack(self):
"""Fetch changes and store them in a pack."""
def prepare_refs(refs):
return [ref.hash.encode('utf-8') for ref in refs
if not ref.refname.endswith('^{}')]
def determine_wants(refs):
remote_refs = prepare_refs(self._discover_refs(remote=True))
local_refs = prepare_refs(self._discover_refs())
wants = [ref for ref in remote_refs if ref not in local_refs]
return wants
client, repo_path = dulwich.client.get_transport_and_path(self.uri)
repo = dulwich.repo.Repo(self.dirpath)
fd = io.BytesIO()
local_refs = self._discover_refs()
graph_walker = _GraphWalker(local_refs)
result = client.fetch_pack(repo_path,
determine_wants,
graph_walker,
fd.write)
refs = [GitRef(ref_hash.decode('utf-8'), ref_name.decode('utf-8'))
for ref_name, ref_hash in result.refs.items()]
if len(fd.getvalue()) > 0:
fd.seek(0)
pack = repo.object_store.add_thin_pack(fd.read, None)
pack_name = pack.name().decode('utf-8')
else:
pack_name = None
return (pack_name, refs)
|
Read the commits of a pack.
|
def _read_commits_from_pack(self, packet_name):
"""Read the commits of a pack."""
filepath = 'objects/pack/pack-' + packet_name
cmd_verify_pack = ['git', 'verify-pack', '-v', filepath]
outs = self._exec(cmd_verify_pack, cwd=self.dirpath, env=self.gitenv)
outs = outs.decode('utf-8', errors='surrogateescape').rstrip()
lines = [line.split(' ') for line in outs.split('\n')]
# Commits usually come in the pack ordered from newest to oldest
commits = [parts[0] for parts in lines if parts[1] == 'commit']
commits.reverse()
return commits
|
Update references removing old ones.
|
def _update_references(self, refs):
"""Update references removing old ones."""
new_refs = [ref.refname for ref in refs]
# Delete old references
for old_ref in self._discover_refs():
if not old_ref.refname.startswith('refs/heads/'):
continue
if old_ref.refname in new_refs:
continue
self._update_ref(old_ref, delete=True)
# Update new references
for new_ref in refs:
refname = new_ref.refname
if refname.endswith('^{}'):
logger.debug("Annotated tag %s ignored for updating in sync process",
refname)
continue
elif not refname.startswith('refs/heads/') and not refname.startswith('refs/tags/'):
logger.debug("Reference %s not needed; ignored for updating in sync process",
refname)
continue
else:
self._update_ref(new_ref)
# Prune repository to remove old branches
cmd = ['git', 'remote', 'prune', 'origin']
self._exec(cmd, cwd=self.dirpath, env=self.gitenv)
|
Get the current list of local or remote refs.
|
def _discover_refs(self, remote=False):
"""Get the current list of local or remote refs."""
if remote:
cmd_refs = ['git', 'ls-remote', '-h', '-t', '--exit-code', 'origin']
sep = '\t'
ignored_error_codes = [2]
else:
# Check first whether the local repo is empty;
# Running 'show-ref' in empty repos gives an error
if self.is_empty():
raise EmptyRepositoryError(repository=self.uri)
cmd_refs = ['git', 'show-ref', '--heads', '--tags']
sep = ' '
ignored_error_codes = [1]
# Error codes returned when no matching refs (i.e., no heads
# or tags) are found in a repository will be ignored. Otherwise,
# the full process would fail for those situations.
outs = self._exec(cmd_refs, cwd=self.dirpath,
env=self.gitenv,
ignored_error_codes=ignored_error_codes)
outs = outs.decode('utf-8', errors='surrogateescape').rstrip()
outs = outs.split('\n') if outs else []
refs = []
for line in outs:
data = line.split(sep)
ref = GitRef(data[0], data[1])
refs.append(ref)
return refs
|
Update a reference.
|
def _update_ref(self, ref, delete=False):
"""Update a reference."""
cmd = ['git', 'update-ref']
if delete:
cmd.extend(['-d', ref.refname])
action = 'deleted'
else:
cmd.extend([ref.refname, ref.hash])
action = 'updated to %s' % ref.hash
try:
self._exec(cmd, cwd=self.dirpath, env=self.gitenv)
except RepositoryError as e:
logger.warning("Git %s ref could not be %s during sync process in %s (%s); skipped",
ref.refname, action, self.uri, self.dirpath)
else:
logger.debug("Git %s ref %s in %s (%s)",
ref.refname, action, self.uri, self.dirpath)
|
Run a command with a non blocking call.
|
def _exec_nb(self, cmd, cwd=None, env=None, encoding='utf-8'):
"""Run a command with a non blocking call.
Execute `cmd` command with a non blocking call. The command will
be run in the directory set by `cwd`. Environment variables can be
set using the `env` dictionary. The output data is returned
as encoded bytes in an iterator. Each item will be a line of the
output.
:returns: an iterator with the output of the command as encoded bytes
:raises RepositoryError: when an error occurs running the command
"""
self.failed_message = None
logger.debug("Running command %s (cwd: %s, env: %s)",
' '.join(cmd), cwd, str(env))
try:
self.proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd,
env=env)
err_thread = threading.Thread(target=self._read_stderr,
kwargs={'encoding': encoding},
daemon=True)
err_thread.start()
for line in self.proc.stdout:
yield line.decode(encoding, errors='surrogateescape')
err_thread.join()
self.proc.communicate()
self.proc.stdout.close()
self.proc.stderr.close()
except OSError as e:
err_thread.join()
raise RepositoryError(cause=str(e))
if self.proc.returncode != 0:
cause = "git command - %s (return code: %d)" % \
(self.failed_message, self.proc.returncode)
raise RepositoryError(cause=cause)
|
Reads self.proc.stderr.
|
def _read_stderr(self, encoding='utf-8'):
"""Reads self.proc.stderr.
Usually, this should be read in a thread, to prevent the read
from stdout from blocking when the stderr buffer fills up while
this function is not being called because the program is busy in
the stdout reading loop.
Reads self.proc.stderr (self.proc is the subprocess running
the git command), and reads / writes self.failed_message
(the message sent to stderr when git fails, usually one line).
"""
for line in self.proc.stderr:
err_line = line.decode(encoding, errors='surrogateescape')
if self.proc.returncode != 0:
# If the subprocess didn't finish successfully, we expect
# the last line in stderr to provide the cause
if self.failed_message is not None:
# We had a message, there is a newer line, print it
logger.debug("Git log stderr: " + self.failed_message)
self.failed_message = err_line
else:
# The subprocess is successfully up to now, print the line
logger.debug("Git log stderr: " + err_line)
|
Run a command.
|
def _exec(cmd, cwd=None, env=None, ignored_error_codes=None,
encoding='utf-8'):
"""Run a command.
Execute `cmd` command in the directory set by `cwd`. Environment
variables can be set using the `env` dictionary. The output
data is returned as encoded bytes.
Commands whose return status codes are non-zero will be
treated as failed. Error codes considered valid can be
ignored by passing them in the `ignored_error_codes` list.
:returns: the output of the command as encoded bytes
:raises RepositoryError: when an error occurs running the command
"""
if ignored_error_codes is None:
ignored_error_codes = []
logger.debug("Running command %s (cwd: %s, env: %s)",
' '.join(cmd), cwd, str(env))
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=cwd, env=env)
(outs, errs) = proc.communicate()
except OSError as e:
raise RepositoryError(cause=str(e))
if proc.returncode != 0 and proc.returncode not in ignored_error_codes:
err = errs.decode(encoding, errors='surrogateescape')
cause = "git command - %s" % err
raise RepositoryError(cause=cause)
else:
logger.debug(errs.decode(encoding, errors='surrogateescape'))
return outs
|
Fetch the tweets from the server.
|
def fetch(self, category=CATEGORY_TWEET, since_id=None, max_id=None,
geocode=None, lang=None,
include_entities=True, tweets_type=TWEET_TYPE_MIXED):
"""Fetch the tweets from the server.
This method fetches tweets from the TwitterSearch API published in the last seven days.
:param category: the category of items to fetch
:param since_id: if not null, it returns results with an ID greater than the specified ID
:param max_id: if not null, it returns results with an ID less than the specified ID
:param geocode: if enabled, returns tweets by users located at latitude,longitude,"mi"|"km"
:param lang: if enabled, restricts tweets to the given language, given by an ISO 639-1 code
:param include_entities: if disabled, it excludes the entities node
:param tweets_type: type of tweets returned. Default is "mixed", others are "recent" and "popular"
:returns: a generator of tweets
"""
kwargs = {"since_id": since_id,
"max_id": max_id,
"geocode": geocode,
"lang": lang,
"include_entities": include_entities,
"result_type": tweets_type}
items = super().fetch(category, **kwargs)
return items
|
Fetch the tweets
|
def fetch_items(self, category, **kwargs):
"""Fetch the tweets
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
since_id = kwargs['since_id']
max_id = kwargs['max_id']
geocode = kwargs['geocode']
lang = kwargs['lang']
entities = kwargs['include_entities']
tweets_type = kwargs['result_type']
logger.info("Fetching tweets %s from %s to %s",
self.query, str(since_id),
str(max_id) if max_id else '--')
tweets_ids = []
min_date = None
max_date = None
group_tweets = self.client.tweets(self.query, since_id=since_id, max_id=max_id, geocode=geocode,
lang=lang, include_entities=entities, result_type=tweets_type)
for tweets in group_tweets:
for i in range(len(tweets)):
tweet = tweets[i]
tweets_ids.append(tweet['id'])
if tweets[-1] == tweet:
min_date = str_to_datetime(tweets[-1]['created_at'])
if tweets[0] == tweet and not max_date:
max_date = str_to_datetime(tweets[0]['created_at'])
yield tweet
logger.info("Fetch process completed: %s (unique %s) tweets fetched, from %s to %s",
len(tweets_ids), len(list(set(tweets_ids))), min_date, max_date)
|
Init client
|
def _init_client(self, from_archive=False):
"""Init client"""
return TwitterClient(self.api_token, self.max_items,
self.sleep_for_rate, self.min_rate_to_sleep, self.sleep_time,
self.archive, from_archive)
|
Fetch tweets for a given query between since_id and max_id.
|
def tweets(self, query, since_id=None, max_id=None, geocode=None, lang=None,
include_entities=True, result_type=TWEET_TYPE_MIXED):
"""Fetch tweets for a given query between since_id and max_id.
:param query: query to fetch tweets
:param since_id: if not null, it returns results with an ID greater than the specified ID
:param max_id: if not null, it returns results with an ID less than the specified ID
:param geocode: if enabled, returns tweets by users located at latitude,longitude,"mi"|"km"
:param lang: if enabled, restricts tweets to the given language, given by an ISO 639-1 code
:param include_entities: if disabled, it excludes entities node
:param result_type: type of tweets returned. Default is "mixed", others are "recent" and "popular"
:returns: a generator of tweets
"""
resource = self.base_url
params = {'q': query,
'count': self.max_items}
if since_id:
params['since_id'] = since_id
if max_id:
params['max_id'] = max_id
if geocode:
params['geocode'] = geocode
if lang:
params['lang'] = lang
params['include_entities'] = include_entities
params['result_type'] = result_type
while True:
raw_tweets = self._fetch(resource, params=params)
tweets = json.loads(raw_tweets)
if not tweets['statuses']:
break
params['max_id'] = tweets['statuses'][-1]['id'] - 1
yield tweets['statuses']
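A self-contained sketch of the max_id pagination used above, with a fake fetch_page() standing in for the Twitter search call; the IDs and page size are made up:
ALL_IDS = list(range(100, 0, -1))   # pretend tweet IDs, newest first

def fetch_page(max_id=None, count=30):
    """Fake search call: return up to `count` tweets with id <= max_id."""
    ids = [i for i in ALL_IDS if max_id is None or i <= max_id]
    return [{'id': i} for i in ids[:count]]

max_id = None
while True:
    statuses = fetch_page(max_id=max_id)
    if not statuses:
        break
    page_ids = [t['id'] for t in statuses]
    print(page_ids[0], '..', page_ids[-1])
    max_id = statuses[-1]['id'] - 1   # next page starts just below the oldest seen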
|
Fetch a resource.
|
def _fetch(self, url, params):
"""Fetch a resource.
Method to fetch the contents of a resource. It returns
the text of the response obtained for the given resource
and parameters.
:param url: the endpoint of the API
:param params: parameters to filter
:returns: the text of the response
"""
if not self.from_archive:
self.sleep_for_rate_limit()
headers = {'Authorization': 'Bearer ' + self.api_key}
r = self.fetch(url, payload=params, headers=headers)
if not self.from_archive:
self.update_rate_limit(r)
return r.text
|
Returns the Twitter argument parser.
|
def setup_cmd_parser(cls):
"""Returns the Twitter argument parser."""
parser = BackendCommandArgumentParser(cls.BACKEND.CATEGORIES,
token_auth=True,
archive=True)
# Backend token is required
action = parser.parser._option_string_actions['--api-token']
action.required = True
# Twitter options
group = parser.parser.add_argument_group('Twitter arguments')
group.add_argument('--max-items', dest='max_items',
type=int, default=MAX_ITEMS,
help="Maximum number of items requested on the same query")
group.add_argument('--no-entities', dest='include_entities',
action='store_false',
help=" Exclude entities node")
group.add_argument('--geo-code', dest='geocode',
help="Select tweets by users located at latitude,longitude,radius")
group.add_argument('--lang', dest='lang',
help="Select tweets to the given language in ISO 639-1 code")
group.add_argument('--tweets-type', dest='tweets_type', default=TWEET_TYPE_MIXED,
help="Type of tweets returned. Default is 'mixed', others are 'recent' and 'popular'")
group.add_argument('--sleep-for-rate', dest='sleep_for_rate',
action='store_true',
help="sleep for getting more rate")
group.add_argument('--min-rate-to-sleep', dest='min_rate_to_sleep',
default=MIN_RATE_LIMIT, type=int,
help="sleep until reset when the rate limit reaches this value")
group.add_argument('--sleep-time', dest='sleep_time',
default=SLEEP_TIME, type=int,
help="minimun sleeping time to avoid too many request exception")
# Required arguments
parser.parser.add_argument('query',
help="Search query including operators, max 500 chars")
return parser
|
Fetch data from Google API.
|
def fetch(self, category=CATEGORY_HITS):
"""Fetch data from Google API.
The method retrieves a list of hits for some
given keywords using the Google API.
:param category: the category of items to fetch
:returns: a generator of data
"""
kwargs = {}
items = super().fetch(category, **kwargs)
return items
|
Fetch Google hit items
|
def fetch_items(self, category, **kwargs):
"""Fetch Google hit items
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
logger.info("Fetching data for '%s'", self.keywords)
hits_raw = self.client.hits(self.keywords)
hits = self.__parse_hits(hits_raw)
yield hits
logger.info("Fetch process completed")
|
Init client
|
def _init_client(self, from_archive=False):
"""Init client"""
return GoogleHitsClient(self.sleep_time, self.max_retries,
archive=self.archive, from_archive=from_archive)
|
Parse the hits returned by the Google Search API
|
def __parse_hits(self, hit_raw):
"""Parse the hits returned by the Google Search API"""
# Create the soup and get the desired div
bs_result = bs4.BeautifulSoup(hit_raw, 'html.parser')
hit_string = bs_result.find("div", id="resultStats").text
# Remove commas or dots
hit_string = hit_string.replace(',', u'')
hit_string = hit_string.replace('.', u'')
fetched_on = datetime_utcnow().timestamp()
id_args = self.keywords[:]
id_args.append(str(fetched_on))
hits_json = {
'fetched_on': fetched_on,
'id': uuid(*id_args),
'keywords': self.keywords,
'type': 'googleSearchHits'
}
if not hit_string:
logger.warning("No hits for %s", self.keywords)
hits_json['hits'] = 0
return hits_json
str_hits = re.search(r'\d+', hit_string).group(0)
hits = int(str_hits)
hits_json['hits'] = hits
return hits_json
|
Fetch information about a list of keywords.
|
def hits(self, keywords):
"""Fetch information about a list of keywords."""
if len(keywords) == 1:
query_str = keywords[0]
else:
query_str = ' '.join([k for k in keywords])
logger.info("Fetching hits for '%s'", query_str)
params = {'q': query_str}
# Make the request
req = self.fetch(GOOGLE_SEARCH_URL, payload=params)
return req.text
|
Fetch the issues/pull requests from the repository.
|
def fetch(self, category=CATEGORY_ISSUE, from_date=DEFAULT_DATETIME, to_date=DEFAULT_LAST_DATETIME):
"""Fetch the issues/pull requests from the repository.
The method retrieves, from a GitHub repository, the issues/pull requests
updated since the given date.
:param category: the category of items to fetch
:param from_date: obtain issues/pull requests updated since this date
:param to_date: obtain issues/pull requests until a specific date (included)
:returns: a generator of issues
"""
if not from_date:
from_date = DEFAULT_DATETIME
if not to_date:
to_date = DEFAULT_LAST_DATETIME
from_date = datetime_to_utc(from_date)
to_date = datetime_to_utc(to_date)
kwargs = {
'from_date': from_date,
'to_date': to_date
}
items = super().fetch(category, **kwargs)
return items
|
Fetch the items (issues or pull_requests)
|
def fetch_items(self, category, **kwargs):
"""Fetch the items (issues or pull_requests)
:param category: the category of items to fetch
:param kwargs: backend arguments
:returns: a generator of items
"""
from_date = kwargs['from_date']
to_date = kwargs['to_date']
if category == CATEGORY_ISSUE:
items = self.__fetch_issues(from_date, to_date)
elif category == CATEGORY_PULL_REQUEST:
items = self.__fetch_pull_requests(from_date, to_date)
else:
items = self.__fetch_repo_info()
return items
|
Extracts the update time from a GitHub item.
|
def metadata_updated_on(item):
"""Extracts the update time from a GitHub item.
The timestamp used is extracted from 'updated_at' field.
This date is converted to UNIX timestamp format. As GitHub
dates are in UTC the conversion is straightforward.
:param item: item generated by the backend
:returns: a UNIX timestamp
"""
if "forks_count" in item:
return item['fetched_on']
else:
ts = item['updated_at']
ts = str_to_datetime(ts)
return ts.timestamp()
|
Extracts the category from a GitHub item.
|
def metadata_category(item):
"""Extracts the category from a GitHub item.
This backend generates two types of item which are
'issue' and 'pull_request'.
"""
if "base" in item:
category = CATEGORY_PULL_REQUEST
elif "forks_count" in item:
category = CATEGORY_REPO
else:
category = CATEGORY_ISSUE
return category
|
Init client
|
def _init_client(self, from_archive=False):
"""Init client"""
return GitHubClient(self.owner, self.repository, self.api_token, self.base_url,
self.sleep_for_rate, self.min_rate_to_sleep,
self.sleep_time, self.max_retries,
self.archive, from_archive)
|
Fetch the issues
|
def __fetch_issues(self, from_date, to_date):
"""Fetch the issues"""
issues_groups = self.client.issues(from_date=from_date)
for raw_issues in issues_groups:
issues = json.loads(raw_issues)
for issue in issues:
if str_to_datetime(issue['updated_at']) > to_date:
return
self.__init_extra_issue_fields(issue)
for field in TARGET_ISSUE_FIELDS:
if not issue[field]:
continue
if field == 'user':
issue[field + '_data'] = self.__get_user(issue[field]['login'])
elif field == 'assignee':
issue[field + '_data'] = self.__get_issue_assignee(issue[field])
elif field == 'assignees':
issue[field + '_data'] = self.__get_issue_assignees(issue[field])
elif field == 'comments':
issue[field + '_data'] = self.__get_issue_comments(issue['number'])
elif field == 'reactions':
issue[field + '_data'] = \
self.__get_issue_reactions(issue['number'], issue['reactions']['total_count'])
yield issue
|
Fetch the pull requests
|
def __fetch_pull_requests(self, from_date, to_date):
"""Fetch the pull requests"""
raw_pulls = self.client.pulls(from_date=from_date)
for raw_pull in raw_pulls:
pull = json.loads(raw_pull)
if str_to_datetime(pull['updated_at']) > to_date:
return
self.__init_extra_pull_fields(pull)
for field in TARGET_PULL_FIELDS:
if not pull[field]:
continue
if field == 'user':
pull[field + '_data'] = self.__get_user(pull[field]['login'])
elif field == 'merged_by':
pull[field + '_data'] = self.__get_user(pull[field]['login'])
elif field == 'review_comments':
pull[field + '_data'] = self.__get_pull_review_comments(pull['number'])
elif field == 'requested_reviewers':
pull[field + '_data'] = self.__get_pull_requested_reviewers(pull['number'])
elif field == 'commits':
pull[field + '_data'] = self.__get_pull_commits(pull['number'])
yield pull
|
Get repo info about stars, watchers and forks
|
def __fetch_repo_info(self):
"""Get repo info about stars, watchers and forks"""
raw_repo = self.client.repo()
repo = json.loads(raw_repo)
fetched_on = datetime_utcnow()
repo['fetched_on'] = fetched_on.timestamp()
yield repo
|
Get issue reactions
|
def __get_issue_reactions(self, issue_number, total_count):
"""Get issue reactions"""
reactions = []
if total_count == 0:
return reactions
group_reactions = self.client.issue_reactions(issue_number)
for raw_reactions in group_reactions:
for reaction in json.loads(raw_reactions):
reaction['user_data'] = self.__get_user(reaction['user']['login'])
reactions.append(reaction)
return reactions
|
Get issue comments
|
def __get_issue_comments(self, issue_number):
"""Get issue comments"""
comments = []
group_comments = self.client.issue_comments(issue_number)
for raw_comments in group_comments:
for comment in json.loads(raw_comments):
comment_id = comment.get('id')
comment['user_data'] = self.__get_user(comment['user']['login'])
comment['reactions_data'] = \
self.__get_issue_comment_reactions(comment_id, comment['reactions']['total_count'])
comments.append(comment)
return comments
|
Get reactions on issue comments
|
def __get_issue_comment_reactions(self, comment_id, total_count):
"""Get reactions on issue comments"""
reactions = []
if total_count == 0:
return reactions
group_reactions = self.client.issue_comment_reactions(comment_id)
for raw_reactions in group_reactions:
for reaction in json.loads(raw_reactions):
reaction['user_data'] = self.__get_user(reaction['user']['login'])
reactions.append(reaction)
return reactions
|
Get issue assignees
|
def __get_issue_assignees(self, raw_assignees):
"""Get issue assignees"""
assignees = []
for ra in raw_assignees:
assignees.append(self.__get_user(ra['login']))
return assignees
|
Get pull request requested reviewers
|
def __get_pull_requested_reviewers(self, pr_number):
"""Get pull request requested reviewers"""
requested_reviewers = []
group_requested_reviewers = self.client.pull_requested_reviewers(pr_number)
for raw_requested_reviewers in group_requested_reviewers:
group_requested_reviewers = json.loads(raw_requested_reviewers)
for requested_reviewer in group_requested_reviewers['users']:
user_data = self.__get_user(requested_reviewer['login'])
requested_reviewers.append(user_data)
return requested_reviewers
|
Get pull request commit hashes
|
def __get_pull_commits(self, pr_number):
"""Get pull request commit hashes"""
hashes = []
group_pull_commits = self.client.pull_commits(pr_number)
for raw_pull_commits in group_pull_commits:
for commit in json.loads(raw_pull_commits):
commit_hash = commit['sha']
hashes.append(commit_hash)
return hashes
|
Get pull request review comments
|
def __get_pull_review_comments(self, pr_number):
"""Get pull request review comments"""
comments = []
group_comments = self.client.pull_review_comments(pr_number)
for raw_comments in group_comments:
for comment in json.loads(raw_comments):
comment_id = comment.get('id')
user = comment.get('user', None)
if not user:
logger.warning("Missing user info for %s", comment['url'])
comment['user_data'] = None
else:
comment['user_data'] = self.__get_user(user['login'])
comment['reactions_data'] = \
self.__get_pull_review_comment_reactions(comment_id, comment['reactions']['total_count'])
comments.append(comment)
return comments
|
Get pull review comment reactions
|
def __get_pull_review_comment_reactions(self, comment_id, total_count):
"""Get pull review comment reactions"""
reactions = []
if total_count == 0:
return reactions
group_reactions = self.client.pull_review_comment_reactions(comment_id)
for raw_reactions in group_reactions:
for reaction in json.loads(raw_reactions):
reaction['user_data'] = self.__get_user(reaction['user']['login'])
reactions.append(reaction)
return reactions
|
Get user and org data for the login
|
def __get_user(self, login):
"""Get user and org data for the login"""
user = {}
if not login:
return user
user_raw = self.client.user(login)
user = json.loads(user_raw)
user_orgs_raw = \
self.client.user_orgs(login)
user['organizations'] = json.loads(user_orgs_raw)
return user
|
Get reactions of an issue
|
def issue_reactions(self, issue_number):
"""Get reactions of an issue"""
payload = {
'per_page': PER_PAGE,
'direction': 'asc',
'sort': 'updated'
}
path = urijoin("issues", str(issue_number), "reactions")
return self.fetch_items(path, payload)
|
Fetch the issues from the repository.
|
def issues(self, from_date=None):
"""Fetch the issues from the repository.
The method retrieves, from a GitHub repository, the issues
updated since the given date.
:param from_date: obtain issues updated since this date
:returns: a generator of issues
"""
payload = {
'state': 'all',
'per_page': PER_PAGE,
'direction': 'asc',
'sort': 'updated'}
if from_date:
payload['since'] = from_date.isoformat()
path = urijoin("issues")
return self.fetch_items(path, payload)
|
Fetch the pull requests from the repository.
|
def pulls(self, from_date=None):
"""Fetch the pull requests from the repository.
The method retrieves, from a GitHub repository, the pull requests
updated since the given date.
:param from_date: obtain pull requests updated since this date
:returns: a generator of pull requests
"""
issues_groups = self.issues(from_date=from_date)
for raw_issues in issues_groups:
issues = json.loads(raw_issues)
for issue in issues:
if "pull_request" not in issue:
continue
pull_number = issue["number"]
path = urijoin(self.base_url, 'repos', self.owner, self.repository, "pulls", pull_number)
r = self.fetch(path)
pull = r.text
yield pull
|
Get repository data
|
def repo(self):
"""Get repository data"""
path = urijoin(self.base_url, 'repos', self.owner, self.repository)
r = self.fetch(path)
repo = r.text
return repo
|
Get pull requested reviewers
|
def pull_requested_reviewers(self, pr_number):
"""Get pull requested reviewers"""
requested_reviewers_url = urijoin("pulls", str(pr_number), "requested_reviewers")
return self.fetch_items(requested_reviewers_url, {})
|
Get pull request commits
|
def pull_commits(self, pr_number):
"""Get pull request commits"""
payload = {
'per_page': PER_PAGE,
}
commit_url = urijoin("pulls", str(pr_number), "commits")
return self.fetch_items(commit_url, payload)
|
Get pull request review comments
|
def pull_review_comments(self, pr_number):
"""Get pull request review comments"""
payload = {
'per_page': PER_PAGE,
'direction': 'asc',
'sort': 'updated'
}
comments_url = urijoin("pulls", str(pr_number), "comments")
return self.fetch_items(comments_url, payload)
|
Get reactions of a review comment
|
def pull_review_comment_reactions(self, comment_id):
"""Get reactions of a review comment"""
payload = {
'per_page': PER_PAGE,
'direction': 'asc',
'sort': 'updated'
}
path = urijoin("pulls", "comments", str(comment_id), "reactions")
return self.fetch_items(path, payload)
|
Get the user information and update the user cache
|
def user(self, login):
"""Get the user information and update the user cache"""
user = None
if login in self._users:
return self._users[login]
url_user = urijoin(self.base_url, 'users', login)
logger.info("Getting info for %s", url_user)
r = self.fetch(url_user)
user = r.text
self._users[login] = user
return user
|
Get the user public organizations
|
def user_orgs(self, login):
"""Get the user public organizations"""
if login in self._users_orgs:
return self._users_orgs[login]
url = urijoin(self.base_url, 'users', login, 'orgs')
try:
r = self.fetch(url)
orgs = r.text
except requests.exceptions.HTTPError as error:
# 404 not found is wrongly received sometimes
if error.response.status_code == 404:
logger.error("Can't get github login orgs: %s", error)
orgs = '[]'
else:
raise error
self._users_orgs[login] = orgs
return orgs
|
Return token's remaining API points
|
def _get_token_rate_limit(self, token):
"""Return token's remaining API points"""
rate_url = urijoin(self.base_url, "rate_limit")
self.session.headers.update({'Authorization': 'token ' + token})
remaining = 0
try:
headers = super().fetch(rate_url).headers
if self.rate_limit_header in headers:
remaining = int(headers[self.rate_limit_header])
except requests.exceptions.HTTPError as error:
logger.warning("Rate limit not initialized: %s", error)
return remaining
|
Return array of all tokens remaining API points
|
def _get_tokens_rate_limits(self):
"""Return array of all tokens remaining API points"""
remainings = [0] * self.n_tokens
# Turn off archiving when checking rates, because that would cause
# archive key conflict (the same URLs giving different responses)
arch = self.archive
self.archive = None
for idx, token in enumerate(self.tokens):
# Pass flag to skip disabling archiving because this function does it
remainings[idx] = self._get_token_rate_limit(token)
# Restore archiving to whatever state it was in
self.archive = arch
logger.debug("Remaining API points: {}".format(remainings))
return remainings
|
Check all API tokens defined and choose one with most remaining API points
|
def _choose_best_api_token(self):
"""Check all API tokens defined and choose one with most remaining API points"""
# Return if no tokens given
if self.n_tokens == 0:
return
# If multiple tokens given, choose best
token_idx = 0
if self.n_tokens > 1:
remainings = self._get_tokens_rate_limits()
token_idx = remainings.index(max(remainings))
logger.debug("Remaining API points: {}, choosen index: {}".format(remainings, token_idx))
# If we have any tokens - use best of them
self.current_token = self.tokens[token_idx]
self.session.headers.update({'Authorization': 'token ' + self.current_token})
# Update rate limit data for the current token
self._update_current_rate_limit()
|
Check if we need to switch GitHub API tokens
|
def _need_check_tokens(self):
"""Check if we need to switch GitHub API tokens"""
if self.n_tokens <= 1 or self.rate_limit is None:
return False
elif self.last_rate_limit_checked is None:
self.last_rate_limit_checked = self.rate_limit
return True
# If approaching minimum rate limit for sleep
approaching_limit = float(self.min_rate_to_sleep) * (1.0 + TOKEN_USAGE_BEFORE_SWITCH) + 1
if self.rate_limit <= approaching_limit:
self.last_rate_limit_checked = self.rate_limit
return True
# Only switch token when used predefined factor of the current token's remaining API points
ratio = float(self.rate_limit) / float(self.last_rate_limit_checked)
if ratio < 1.0 - TOKEN_USAGE_BEFORE_SWITCH:
self.last_rate_limit_checked = self.rate_limit
return True
elif ratio > 1.0:
self.last_rate_limit_checked = self.rate_limit
return False
else:
return False
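A worked sketch of the switch condition, assuming TOKEN_USAGE_BEFORE_SWITCH is 0.15; the real constant is defined elsewhere in the backend, so that value is an assumption:
TOKEN_USAGE_BEFORE_SWITCH = 0.15   # assumed value, for illustration only

def should_recheck(rate_limit, last_checked):
    """Re-evaluate tokens once enough of the current token's points are used."""
    ratio = float(rate_limit) / float(last_checked)
    return ratio < 1.0 - TOKEN_USAGE_BEFORE_SWITCH

print(should_recheck(4600, 5000))   # False: only 8% of the points used since last check
print(should_recheck(4200, 5000))   # True: 16% used, time to compare tokens again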
|
Update rate limits data for the current token
|
def _update_current_rate_limit(self):
"""Update rate limits data for the current token"""
url = urijoin(self.base_url, "rate_limit")
try:
# Turn off archiving when checking rates, because that would cause
# archive key conflict (the same URLs giving different responses)
arch = self.archive
self.archive = None
response = super().fetch(url)
self.archive = arch
self.update_rate_limit(response)
self.last_rate_limit_checked = self.rate_limit
except requests.exceptions.HTTPError as error:
if error.response.status_code == 404:
logger.warning("Rate limit not initialized: %s", error)
else:
raise error
|
Init metadata information.
|
def init_metadata(self, origin, backend_name, backend_version,
category, backend_params):
"""Init metadata information.
Metadata is composed of basic information needed to identify
where archived data came from and how it can be retrieved
and built into Perceval items.
:param origin: identifier of the repository
:param backend_name: name of the backend
:param backend_version: version of the backend
:param category: category of the items fetched
:param backend_params: dict representation of the fetch parameters
:raises ArchiveError: when an error occurs initializing the metadata
"""
created_on = datetime_to_utc(datetime_utcnow())
created_on_dumped = created_on.isoformat()
backend_params_dumped = pickle.dumps(backend_params, 0)
metadata = (origin, backend_name, backend_version, category,
backend_params_dumped, created_on_dumped,)
try:
cursor = self._db.cursor()
insert_stmt = "INSERT INTO " + self.METADATA_TABLE + " "\
"(origin, backend_name, backend_version, " \
"category, backend_params, created_on) " \
"VALUES (?, ?, ?, ?, ?, ?)"
cursor.execute(insert_stmt, metadata)
self._db.commit()
cursor.close()
except sqlite3.DatabaseError as e:
msg = "metadata initialization error; cause: %s" % str(e)
raise ArchiveError(cause=msg)
self.origin = origin
self.backend_name = backend_name
self.backend_version = backend_version
self.category = category
self.backend_params = backend_params
self.created_on = created_on
logger.debug("Metadata of archive %s initialized to %s",
self.archive_path, metadata)
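The round trip behind this method can be sketched with `sqlite3` and `pickle` alone; the schema and the values below are illustrative, not the module's actual table definition.
import pickle
import sqlite3

db = sqlite3.connect(':memory:')
db.execute("CREATE TABLE metadata (origin TEXT, backend_name TEXT, "
           "backend_version TEXT, category TEXT, backend_params BLOB, created_on TEXT)")
params = pickle.dumps({'from_date': '2020-01-01'}, 0)
db.execute("INSERT INTO metadata VALUES (?, ?, ?, ?, ?, ?)",
           ('https://example.com/repo', 'git', '1.0.0', 'commit',
            params, '2020-06-01T00:00:00+00:00'))
db.commit()

row = db.execute("SELECT backend_name, backend_params FROM metadata").fetchone()
print(row[0], pickle.loads(row[1]))  # -> git {'from_date': '2020-01-01'}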
|
Store a raw item in this archive.
|
def store(self, uri, payload, headers, data):
"""Store a raw item in this archive.
The method will store `data` content in this archive. The unique
identifier for that item will be generated using the rest of the
parameters.
:param uri: request URI
:param payload: request payload
:param headers: request headers
:param data: data to store in this archive
:raises ArchiveError: when an error occurs storing the given data
"""
hashcode = self.make_hashcode(uri, payload, headers)
payload_dump = pickle.dumps(payload, 0)
headers_dump = pickle.dumps(headers, 0)
data_dump = pickle.dumps(data, 0)
logger.debug("Archiving %s with %s %s %s in %s",
hashcode, uri, payload, headers, self.archive_path)
try:
cursor = self._db.cursor()
insert_stmt = "INSERT INTO " + self.ARCHIVE_TABLE + " (" \
"id, hashcode, uri, payload, headers, data) " \
"VALUES(?,?,?,?,?,?)"
cursor.execute(insert_stmt, (None, hashcode, uri,
payload_dump, headers_dump, data_dump))
self._db.commit()
cursor.close()
except sqlite3.IntegrityError as e:
msg = "data storage error; cause: duplicated entry %s" % hashcode
raise ArchiveError(cause=msg)
except sqlite3.DatabaseError as e:
msg = "data storage error; cause: %s" % str(e)
raise ArchiveError(cause=msg)
logger.debug("%s data archived in %s", hashcode, self.archive_path)
|
Retrieve a raw item from the archive.
|
def retrieve(self, uri, payload, headers):
"""Retrieve a raw item from the archive.
The method will return the `data` content corresponding to the
hashcode derived from the given parameters.
:param uri: request URI
:param payload: request payload
:param headers: request headers
:returns: the archived data
:raises ArchiveError: when an error occurs retrieving data
"""
hashcode = self.make_hashcode(uri, payload, headers)
logger.debug("Retrieving entry %s with %s %s %s in %s",
hashcode, uri, payload, headers, self.archive_path)
self._db.row_factory = sqlite3.Row
try:
cursor = self._db.cursor()
select_stmt = "SELECT data " \
"FROM " + self.ARCHIVE_TABLE + " " \
"WHERE hashcode = ?"
cursor.execute(select_stmt, (hashcode,))
row = cursor.fetchone()
cursor.close()
except sqlite3.DatabaseError as e:
msg = "data retrieval error; cause: %s" % str(e)
raise ArchiveError(cause=msg)
if row:
found = pickle.loads(row['data'])
else:
msg = "entry %s not found in archive %s" % (hashcode, self.archive_path)
raise ArchiveError(cause=msg)
return found
|
Create a brand new archive.
|
def create(cls, archive_path):
"""Create a brand new archive.
Call this method to create a new and empty archive. It will initialize
the storage file in the path defined by `archive_path`.
:param archive_path: absolute path where the archive file will be created
:raises ArchiveError: when the archive file already exists
"""
if os.path.exists(archive_path):
msg = "archive %s already exists; remove it before creating a new one"
raise ArchiveError(cause=msg % (archive_path))
conn = sqlite3.connect(archive_path)
cursor = conn.cursor()
cursor.execute(cls.METADATA_CREATE_STMT)
cursor.execute(cls.ARCHIVE_CREATE_STMT)
conn.commit()
cursor.close()
conn.close()
logger.debug("Creating archive %s", archive_path)
archive = cls(archive_path)
logger.debug("Achive %s was created", archive_path)
return archive
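A usage sketch of the create/store/retrieve cycle, assuming the `Archive` class shown here is importable as `perceval.archive.Archive`; the path and request values are illustrative.
import os
import tempfile

from perceval.archive import Archive  # assumed import path

archive_path = os.path.join(tempfile.mkdtemp(), 'example.sqlite3')
archive = Archive.create(archive_path)

uri = 'https://example.com/api/items'
payload = {'page': 1}
headers = {'Accept': 'application/json'}

archive.store(uri, payload, headers, data={'items': [1, 2, 3]})
cached = archive.retrieve(uri, payload, headers)
print(cached)  # -> {'items': [1, 2, 3]}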
|
Generate a SHA1 based on the given arguments.
|
def make_hashcode(uri, payload, headers):
"""Generate a SHA1 based on the given arguments.
Hashcodes created by this method will be used as unique identifiers
for the raw items or resources stored by this archive.
:param uri: URI to the resource
:param payload: payload of the request needed to fetch the resource
:param headers: headers of the request needed to fetch the resource
:returns: a SHA1 hash code
"""
def dict_to_json_str(data):
return json.dumps(data, sort_keys=True)
content = ':'.join([uri, dict_to_json_str(payload), dict_to_json_str(headers)])
hashcode = hashlib.sha1(content.encode('utf-8'))
return hashcode.hexdigest()
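Restated as standalone code, the hashcode is deterministic: the same URI, payload and headers always produce the same SHA1, regardless of dictionary key order, because `json.dumps` is called with `sort_keys=True`.
import hashlib
import json

def make_hashcode(uri, payload, headers):
    def dumps(data):
        return json.dumps(data, sort_keys=True)
    content = ':'.join([uri, dumps(payload), dumps(headers)])
    return hashlib.sha1(content.encode('utf-8')).hexdigest()

h1 = make_hashcode('https://example.com', {'a': 1, 'b': 2}, {'Accept': 'json'})
h2 = make_hashcode('https://example.com', {'b': 2, 'a': 1}, {'Accept': 'json'})
print(h1 == h2)  # -> True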
|
Check whether the archive is valid or not.
|
def _verify_archive(self):
"""Check whether the archive is valid or not.
This method will check if tables were created and if they
contain valid data.
"""
nentries = self._count_table_rows(self.ARCHIVE_TABLE)
nmetadata = self._count_table_rows(self.METADATA_TABLE)
if nmetadata > 1:
msg = "archive %s metadata corrupted; multiple metadata entries" % (self.archive_path)
raise ArchiveError(cause=msg)
if nmetadata == 0 and nentries > 0:
msg = "archive %s metadata is empty but %s entries were achived" % (self.archive_path)
raise ArchiveError(cause=msg)
logger.debug("Integrity of archive %s OK; entries: %s rows, metadata: %s rows",
self.archive_path, nentries, nmetadata)
|
Load metadata from the archive file
|
def _load_metadata(self):
"""Load metadata from the archive file"""
logger.debug("Loading metadata infomation of archive %s", self.archive_path)
cursor = self._db.cursor()
select_stmt = "SELECT origin, backend_name, backend_version, " \
"category, backend_params, created_on " \
"FROM " + self.METADATA_TABLE + " " \
"LIMIT 1"
cursor.execute(select_stmt)
row = cursor.fetchone()
cursor.close()
if row:
self.origin = row[0]
self.backend_name = row[1]
self.backend_version = row[2]
self.category = row[3]
self.backend_params = pickle.loads(row[4])
self.created_on = str_to_datetime(row[5])
else:
logger.debug("Metadata of archive %s was empty", self.archive_path)
logger.debug("Metadata of archive %s loaded", self.archive_path)
|
Fetch the number of rows in a table
|
def _count_table_rows(self, table_name):
"""Fetch the number of rows in a table"""
cursor = self._db.cursor()
select_stmt = "SELECT COUNT(*) FROM " + table_name
try:
cursor.execute(select_stmt)
row = cursor.fetchone()
except sqlite3.DatabaseError as e:
msg = "invalid archive file; cause: %s" % str(e)
raise ArchiveError(cause=msg)
finally:
cursor.close()
return row[0]
|
Create a new archive.
|
def create_archive(self):
"""Create a new archive.
The method creates in the filesystem a brand new archive with
a random SHA1 as its name. The first byte of the hashcode will
be the name of the subdirectory; the remaining bytes, the
archive name.
:returns: a new `Archive` object
:raises ArchiveManagerError: when an error occurs creating the
new archive
"""
hashcode = uuid.uuid4().hex
archive_dir = os.path.join(self.dirpath, hashcode[0:2])
archive_name = hashcode[2:] + self.STORAGE_EXT
archive_path = os.path.join(archive_dir, archive_name)
if not os.path.exists(archive_dir):
os.makedirs(archive_dir)
try:
archive = Archive.create(archive_path)
except ArchiveError as e:
raise ArchiveManagerError(cause=str(e))
return archive
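The resulting on-disk layout can be illustrated with the standard library alone; the base directory and extension below are assumptions for the example.
import os
import uuid

STORAGE_EXT = '.sqlite3'   # illustrative extension
dirpath = '/tmp/archives'  # illustrative base path

hashcode = uuid.uuid4().hex               # e.g. '9f86d081884c7d65...'
archive_dir = os.path.join(dirpath, hashcode[0:2])
archive_name = hashcode[2:] + STORAGE_EXT
print(os.path.join(archive_dir, archive_name))
# -> /tmp/archives/9f/86d081884c7d65....sqlite3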
|
Remove an archive.
|
def remove_archive(self, archive_path):
"""Remove an archive.
This method deletes from the filesystem the archive stored
in `archive_path`.
:param archive_path: path to the archive
:raises ArchiveManagerError: when an error occurs removing the
archive
"""
try:
Archive(archive_path)
except ArchiveError as e:
raise ArchiveManagerError(cause=str(e))
os.remove(archive_path)
|
Search archives.
|
def search(self, origin, backend_name, category, archived_after):
"""Search archives.
Get the archives which store data based on the given parameters.
These parameters define what the origin was (`origin`), how the data
was fetched (`backend_name`) and the type of the items (`category`).
Only those archives created on or after `archived_after` will be
returned.
The method returns a list with the file paths to those archives.
The list is sorted by the date of creation of each archive.
:param origin: data origin
:param backend_name: backend used to fetch data
:param category: type of the items fetched by the backend
:param archived_after: get archives created on or after this date
:returns: a list with archive names which match the search criteria
"""
archives = self._search_archives(origin, backend_name,
category, archived_after)
archives = sorted(archives, key=lambda x: x[1])
archives = [fp for fp, _ in archives]
return archives
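A hedged usage sketch, assuming the manager takes the base directory in its constructor (as `self.dirpath` above suggests) and that the class is importable as `perceval.archive.ArchiveManager`; the filters are illustrative. Note that `archived_after` should be timezone-aware, since archive creation dates are.
import datetime

from perceval.archive import ArchiveManager  # assumed import path

manager = ArchiveManager('/tmp/archives')
paths = manager.search(origin='https://example.com/repo',
                       backend_name='git',
                       category='commit',
                       archived_after=datetime.datetime(2020, 1, 1,
                                                        tzinfo=datetime.timezone.utc))
for path in paths:
    print(path)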
|
Search archives using filters.
|
def _search_archives(self, origin, backend_name, category, archived_after):
"""Search archives using filters."""
for archive_path in self._search_files():
try:
archive = Archive(archive_path)
except ArchiveError:
continue
match = archive.origin == origin and \
archive.backend_name == backend_name and \
archive.category == category and \
archive.created_on >= archived_after
if not match:
continue
yield archive_path, archive.created_on
|
Retrieve the file paths stored under the base path.
|
def _search_files(self):
"""Retrieve the file paths stored under the base path."""
for root, _, files in os.walk(self.dirpath):
for filename in files:
location = os.path.join(root, filename)
yield location
|
Check if filename is a compressed file supported by the tool.
|
def check_compressed_file_type(filepath):
"""Check if filename is a compressed file supported by the tool.
This function uses magic numbers (first four bytes) to determine
the type of the file. Detected types are 'gz', 'bz2' and 'zip'. When
the file type is not recognized, the function returns `None`.
:param filepath: path to the file
:returns: 'gz', 'bz2' or 'zip'; `None` if the type is not recognized
"""
def compressed_file_type(content):
magic_dict = {
b'\x1f\x8b\x08': 'gz',
b'\x42\x5a\x68': 'bz2',
b'PK\x03\x04': 'zip'
}
for magic, filetype in magic_dict.items():
if content.startswith(magic):
return filetype
return None
with open(filepath, mode='rb') as f:
magic_number = f.read(4)
return compressed_file_type(magic_number)
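A quick check of the detector: write a small gzip file and confirm its magic number is recognized (the path is temporary).
import gzip
import os
import tempfile

path = os.path.join(tempfile.mkdtemp(), 'sample.gz')
with gzip.open(path, 'wb') as fd:
    fd.write(b'hello world')

print(check_compressed_file_type(path))  # -> 'gz'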
|
Generate a months range.
|
def months_range(from_date, to_date):
"""Generate a months range.
Generator of months starting on `from_date` until `to_date`. Each
returned item is a tuple of two datetime objects like in (month, month+1).
Thus, the result will follow the sequence:
((fd, fd+1), (fd+1, fd+2), ..., (td-2, td-1), (td-1, td))
:param from_date: generate dates starting on this month
:param to_date: generate dates until this month
:result: a generator of months range
"""
start = datetime.datetime(from_date.year, from_date.month, 1)
end = datetime.datetime(to_date.year, to_date.month, 1)
month_gen = dateutil.rrule.rrule(freq=dateutil.rrule.MONTHLY,
dtstart=start, until=end)
months = [d for d in month_gen]
pos = 0
for x in range(1, len(months)):
yield months[pos], months[x]
pos = x
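A worked example: any two dates between January and April 2020 produce three consecutive month pairs.
import datetime

for start, end in months_range(datetime.datetime(2020, 1, 15),
                               datetime.datetime(2020, 4, 2)):
    print(start.date(), '->', end.date())
# 2020-01-01 -> 2020-02-01
# 2020-02-01 -> 2020-03-01
# 2020-03-01 -> 2020-04-01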
|
Convert an email message into a dictionary.
|
def message_to_dict(msg):
"""Convert an email message into a dictionary.
This function transforms an `email.message.Message` object
into a dictionary. Headers are stored as key:value pairs
while the body of the message is stored inside the `body` key.
The body may contain two other keys: 'plain', for plain text
messages, and 'html', for HTML encoded messages.
The returned dictionary has the type `requests.structures.CaseInsensitiveDict`
because the same header can appear in the same message with
different case formats.
:param msg: email message of type `email.message.Message`
:returns: dictionary of type `requests.structures.CaseInsensitiveDict`
:raises ParseError: when an error occurs transforming the message
to a dictionary
"""
def parse_headers(msg):
headers = {}
for header, value in msg.items():
hv = []
for text, charset in email.header.decode_header(value):
if isinstance(text, bytes):
charset = charset if charset else 'utf-8'
try:
text = text.decode(charset, errors='surrogateescape')
except (UnicodeError, LookupError):
# Try again with a 7bit encoding
text = text.decode('ascii', errors='surrogateescape')
hv.append(text)
v = ' '.join(hv)
headers[header] = v if v else None
return headers
def parse_payload(msg):
body = {}
if not msg.is_multipart():
payload = decode_payload(msg)
subtype = msg.get_content_subtype()
body[subtype] = [payload]
else:
# Include all the attached texts if it is multipart
# Ignores binary parts by default
for part in email.iterators.typed_subpart_iterator(msg):
payload = decode_payload(part)
subtype = part.get_content_subtype()
body.setdefault(subtype, []).append(payload)
return {k: '\n'.join(v) for k, v in body.items()}
def decode_payload(msg_or_part):
charset = msg_or_part.get_content_charset('utf-8')
payload = msg_or_part.get_payload(decode=True)
try:
payload = payload.decode(charset, errors='surrogateescape')
except (UnicodeError, LookupError):
# Try again with a 7bit encoding
payload = payload.decode('ascii', errors='surrogateescape')
return payload
# The function starts here
message = requests.structures.CaseInsensitiveDict()
if isinstance(msg, mailbox.mboxMessage):
message['unixfrom'] = msg.get_from()
else:
message['unixfrom'] = None
try:
for k, v in parse_headers(msg).items():
message[k] = v
message['body'] = parse_payload(msg)
except UnicodeError as e:
raise ParseError(cause=str(e))
return message
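A minimal round trip with a plain-text message built from the standard library; note that header lookups on the result are case-insensitive.
import email

raw = ("From: alice@example.com\n"
       "Subject: Hello\n"
       "Content-Type: text/plain; charset=utf-8\n"
       "\n"
       "Hi Bob!\n")
msg = email.message_from_string(raw)
parsed = message_to_dict(msg)
print(parsed['subject'])        # -> 'Hello' ('Subject' works too)
print(parsed['body']['plain'])  # -> 'Hi Bob!'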
|
Remove control and invalid characters from an xml stream.
|
def remove_invalid_xml_chars(raw_xml):
"""Remove control and invalid characters from an xml stream.
Looks for invalid characters and substitutes them with whitespaces.
This solution is based on these two posts: Olemis Lang's response
on StackOverflow (http://stackoverflow.com/questions/1707890) and
lawlesst's on GitHub Gist (https://gist.github.com/lawlesst/4110923),
that is based on the previous answer.
:param raw_xml: XML stream
:returns: a purged XML stream
"""
illegal_unichrs = [(0x00, 0x08), (0x0B, 0x1F),
(0x7F, 0x84), (0x86, 0x9F)]
illegal_ranges = ['%s-%s' % (chr(low), chr(high))
for (low, high) in illegal_unichrs
if low < sys.maxunicode]
illegal_xml_re = re.compile('[%s]' % ''.join(illegal_ranges))
purged_xml = ''
for c in raw_xml:
if illegal_xml_re.search(c) is not None:
c = ' '
purged_xml += c
return purged_xml
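For instance, a control character such as 0x07 (the terminal bell) is replaced by a space:
dirty = 'Hello\x07World'
print(remove_invalid_xml_chars(dirty))  # -> 'Hello World'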
|
Convert a XML stream into a dictionary.
|
def xml_to_dict(raw_xml):
"""Convert a XML stream into a dictionary.
This function transforms an XML stream into a dictionary. The
attributes are stored as single elements while child nodes are
stored in lists. The text node is stored using the special
key '__text__'.
This code is based on Winston Ewert's solution to this problem.
See http://codereview.stackexchange.com/questions/10400/convert-elementtree-to-dict
for more info. The code was licensed as cc by-sa 3.0.
:param raw_xml: XML stream
:returns: a dict with the XML data
:raises ParseError: raised when an error occurs parsing the given
XML stream
"""
def node_to_dict(node):
d = {}
d.update(node.items())
text = getattr(node, 'text', None)
if text is not None:
d['__text__'] = text
childs = {}
for child in node:
childs.setdefault(child.tag, []).append(node_to_dict(child))
d.update(childs.items())
return d
purged_xml = remove_invalid_xml_chars(raw_xml)
try:
tree = xml.etree.ElementTree.fromstring(purged_xml)
except xml.etree.ElementTree.ParseError as e:
cause = "XML stream %s" % (str(e))
raise ParseError(cause=cause)
d = node_to_dict(tree)
return d
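A short example of the resulting shape: attributes become plain keys, child nodes become lists, and text nodes are stored under '__text__'.
raw = '<bug id="1"><comment author="alice">First!</comment></bug>'
d = xml_to_dict(raw)
print(d['id'])                      # -> '1'
print(d['comment'][0]['author'])    # -> 'alice'
print(d['comment'][0]['__text__'])  # -> 'First!'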
|