Puts a value into a queue but aborts if this thread is closed.
|
def queue(self, queue_, value):
    """Puts a value into a queue but aborts if this thread is closed."""
    while not self.closed:
        try:
            queue_.put(value, block=True, timeout=1)
            return
        except queue.Full:
            continue
|
Parses an HDS manifest and returns its substreams.
|
def parse_manifest(cls, session, url, timeout=60, pvswf=None, is_akamai=False,
                   **request_params):
    """Parses an HDS manifest and returns its substreams.

    :param url: The URL to the manifest.
    :param timeout: How long to wait for data to be returned from
                    the stream before raising an error.
    :param is_akamai: force adding of the akamai parameters
    :param pvswf: URL of player SWF for Akamai HD player verification.
    """
    # private argument, should only be used in recursive calls
    raise_for_drm = request_params.pop("raise_for_drm", False)

    if not request_params:
        request_params = {}

    request_params["headers"] = request_params.get("headers", {})
    request_params["params"] = request_params.get("params", {})

    # These params are reserved for internal use
    request_params.pop("exception", None)
    request_params.pop("stream", None)
    request_params.pop("timeout", None)
    request_params.pop("url", None)

    if "akamaihd" in url or is_akamai:
        request_params["params"]["hdcore"] = HDCORE_VERSION
        request_params["params"]["g"] = cls.cache_buster_string(12)

    res = session.http.get(url, exception=IOError, **request_params)
    manifest = session.http.xml(res, "manifest XML", ignore_ns=True,
                                exception=IOError)

    if manifest.findtext("drmAdditionalHeader"):
        log.debug("Omitting HDS stream protected by DRM: {}", url)
        if raise_for_drm:
            raise PluginError("{} is protected by DRM".format(url))
        log.warning("Some or all streams are unavailable as they are protected by DRM")
        return {}

    parsed = urlparse(url)
    baseurl = manifest.findtext("baseURL")
    baseheight = manifest.findtext("height")
    bootstraps = {}
    streams = {}

    if not baseurl:
        baseurl = urljoin(url, os.path.dirname(parsed.path))

    if not baseurl.endswith("/"):
        baseurl += "/"

    for bootstrap in manifest.findall("bootstrapInfo"):
        name = bootstrap.attrib.get("id") or "_global"
        url = bootstrap.attrib.get("url")

        if url:
            box = absolute_url(baseurl, url)
        else:
            data = base64.b64decode(bytes(bootstrap.text, "utf8"))
            box = Box.deserialize(BytesIO(data))

        bootstraps[name] = box

    pvtoken = manifest.findtext("pv-2.0")
    if pvtoken:
        if not pvswf:
            raise IOError("This manifest requires the 'pvswf' parameter "
                          "to verify the SWF")

        params = cls._pv_params(session, pvswf, pvtoken, **request_params)
        request_params["params"].update(params)

    child_drm = False

    for media in manifest.findall("media"):
        url = media.attrib.get("url")
        bootstrapid = media.attrib.get("bootstrapInfoId", "_global")
        href = media.attrib.get("href")

        if url and bootstrapid:
            bootstrap = bootstraps.get(bootstrapid)

            if not bootstrap:
                continue

            bitrate = media.attrib.get("bitrate")
            streamid = media.attrib.get("streamId")
            height = media.attrib.get("height")

            if height:
                quality = height + "p"
            elif bitrate:
                quality = bitrate + "k"
            elif streamid:
                quality = streamid
            elif baseheight:
                quality = baseheight + "p"
            else:
                quality = "live"

            metadata = media.findtext("metadata")

            if metadata:
                metadata = base64.b64decode(bytes(metadata, "utf8"))
                metadata = ScriptData.deserialize(BytesIO(metadata))
            else:
                metadata = None

            stream = HDSStream(session, baseurl, url, bootstrap,
                               metadata=metadata, timeout=timeout,
                               **request_params)
            streams[quality] = stream

        elif href:
            url = absolute_url(baseurl, href)
            try:
                child_streams = cls.parse_manifest(session, url,
                                                   timeout=timeout,
                                                   is_akamai=is_akamai,
                                                   raise_for_drm=True,
                                                   **request_params)
            except PluginError:
                child_drm = True
                child_streams = {}

            for name, stream in child_streams.items():
                # Override stream name if bitrate is available in parent
                # manifest but not the child one.
                bitrate = media.attrib.get("bitrate")

                if bitrate and not re.match(r"^(\d+)k$", name):
                    name = bitrate + "k"

                streams[name] = stream

    if child_drm:
        log.warning("Some or all streams are unavailable as they are protected by DRM")

    return streams
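
A minimal usage sketch (placeholder manifest URL; session is assumed to be a configured Streamlink session):

# e.g. {"720p": <HDSStream>, "1500k": <HDSStream>, ...}
streams = HDSStream.parse_manifest(session, "http://example.com/manifest.f4m")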
|
Returns any parameters needed for Akamai HD player verification.
|
def _pv_params(cls, session, pvswf, pv, **request_params):
    """Returns any parameters needed for Akamai HD player verification.

    Algorithm originally documented by KSV, source:
    http://stream-recorder.com/forum/showpost.php?p=43761&postcount=13
    """
    try:
        data, hdntl = pv.split(";")
    except ValueError:
        data = pv
        hdntl = ""

    cache = Cache(filename="stream.json")
    key = "akamaihd-player:" + pvswf
    cached = cache.get(key)

    request_params = deepcopy(request_params)
    headers = request_params.pop("headers", {})
    if cached:
        headers["If-Modified-Since"] = cached["modified"]
    swf = session.http.get(pvswf, headers=headers, **request_params)

    if cached and swf.status_code == 304:  # Server says not modified
        hash = cached["hash"]
    else:
        # Calculate SHA-256 hash of the uncompressed SWF file, base-64
        # encoded
        hash = sha256()
        hash.update(swfdecompress(swf.content))
        hash = base64.b64encode(hash.digest()).decode("ascii")

        modified = swf.headers.get("Last-Modified", "")

        # Only save in cache if a valid date is given
        if len(modified) < 40:
            cache.set(key, dict(hash=hash, modified=modified))

    msg = "st=0~exp=9999999999~acl=*~data={0}!{1}".format(data, hash)
    auth = hmac.new(AKAMAIHD_PV_KEY, msg.encode("ascii"), sha256)
    pvtoken = "{0}~hmac={1}".format(msg, auth.hexdigest())

    # The "hdntl" parameter can be accepted as a cookie or passed in the
    # query string, but the "pvtoken" parameter can only be in the query
    # string
    params = [("pvtoken", pvtoken)]
    params.extend(parse_qsl(hdntl, keep_blank_values=True))

    return params
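
The token construction above reduces to an HMAC-SHA256 over the message string; a standalone sketch with placeholder values (the real AKAMAIHD_PV_KEY, data and hash are not shown here):

import hmac
from hashlib import sha256

key = b"placeholder-key"  # stand-in for AKAMAIHD_PV_KEY
msg = "st=0~exp=9999999999~acl=*~data=DATA!HASH"  # DATA/HASH are placeholders
pvtoken = "{0}~hmac={1}".format(msg, hmac.new(key, msg.encode("ascii"), sha256).hexdigest())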
|
Given an HTTP response from the session endpoint, extract the nonce so we can "sign" requests with it. We don't really sign the requests in the traditional sense of a nonce, we just include them in the auth requests.
|
def _extract_nonce(cls, http_result):
    """
    Given an HTTP response from the session endpoint, extract the nonce, so we can "sign" requests with it.
    We don't really sign the requests in the traditional sense of a nonce, we just include them in the auth requests.

    :param http_result: HTTP response from the bbc session endpoint.
    :type http_result: requests.Response
    :return: nonce to "sign" url requests with
    :rtype: string
    """
    # Extract the redirect URL from the last call
    last_redirect_url = urlparse(http_result.history[-1].request.url)
    last_redirect_query = dict(parse_qsl(last_redirect_url.query))
    # Extract the nonce from the query string in the redirect URL
    final_url = urlparse(last_redirect_query['goto'])
    goto_url = dict(parse_qsl(final_url.query))
    goto_url_query = parse_json(goto_url['state'])

    # Return the nonce we can use for future queries
    return goto_url_query['nonce']
|
Find the Video Packet ID in the HTML for the provided URL
|
def find_vpid(self, url, res=None):
    """
    Find the Video Packet ID in the HTML for the provided URL

    :param url: URL to download, if res is not provided.
    :param res: Provide a cached version of the HTTP response to search
    :type url: string
    :type res: requests.Response
    :return: Video Packet ID for a Programme in iPlayer
    :rtype: string
    """
    log.debug("Looking for vpid on {0}", url)
    # Use pre-fetched page if available
    res = res or self.session.http.get(url)
    m = self.mediator_re.search(res.text)
    vpid = m and parse_json(m.group(1), schema=self.mediator_schema)
    return vpid
|
Create session using BBC ID. See https://www.bbc.co.uk/usingthebbc/account/
|
def login(self, ptrt_url):
    """
    Create session using BBC ID. See https://www.bbc.co.uk/usingthebbc/account/

    :param ptrt_url: The snapback URL to redirect to after successful authentication
    :type ptrt_url: string
    :return: Whether authentication was successful
    :rtype: bool
    """
    def auth_check(res):
        return ptrt_url in ([h.url for h in res.history] + [res.url])

    # make the session request to get the correct cookies
    session_res = self.session.http.get(
        self.session_url,
        params=dict(ptrt=ptrt_url)
    )

    if auth_check(session_res):
        log.debug("Already authenticated, skipping authentication")
        return True

    http_nonce = self._extract_nonce(session_res)
    res = self.session.http.post(
        self.auth_url,
        params=dict(
            ptrt=ptrt_url,
            nonce=http_nonce
        ),
        data=dict(
            jsEnabled=True,
            username=self.get_option("username"),
            password=self.get_option('password'),
            attempts=0
        ),
        headers={"Referer": self.url})

    return auth_check(res)
|
Remove the PKCS#7 padding
|
def pkcs7_decode(paddedData, keySize=16):
    '''
    Remove the PKCS#7 padding
    '''
    # Use ord + [-1:] to support both python 2 and 3
    val = ord(paddedData[-1:])
    if val > keySize:
        raise StreamError("Input is not padded or padding is corrupt, got padding size of {0}".format(val))
    return paddedData[:-val]
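
A quick sanity check with the default 16-byte block size:

padded = b"HELLO WORLD" + b"\x05" * 5  # 11 bytes of data padded to 16
assert pkcs7_decode(padded) == b"HELLO WORLD"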
|
Attempts to parse a variant playlist and return its streams.
|
def parse_variant_playlist(cls, session_, url, name_key="name",
                           name_prefix="", check_streams=False,
                           force_restart=False, name_fmt=None,
                           start_offset=0, duration=None,
                           **request_params):
    """Attempts to parse a variant playlist and return its streams.

    :param url: The URL of the variant playlist.
    :param name_key: Prefer to use this key as stream name, valid keys are:
                     name, pixels, bitrate.
    :param name_prefix: Add this prefix to the stream names.
    :param check_streams: Only allow streams that are accessible.
    :param force_restart: Start at the first segment even for a live stream
    :param name_fmt: A format string for the name, allowed format keys are
                     name, pixels, bitrate.
    """
    locale = session_.localization
    # Backwards compatibility with "namekey" and "nameprefix" params.
    name_key = request_params.pop("namekey", name_key)
    name_prefix = request_params.pop("nameprefix", name_prefix)
    audio_select = session_.options.get("hls-audio-select") or []

    res = session_.http.get(url, exception=IOError, **request_params)

    try:
        parser = hls_playlist.load(res.text, base_uri=res.url)
    except ValueError as err:
        raise IOError("Failed to parse playlist: {0}".format(err))

    streams = {}
    for playlist in filter(lambda p: not p.is_iframe, parser.playlists):
        names = dict(name=None, pixels=None, bitrate=None)
        audio_streams = []
        fallback_audio = []
        default_audio = []
        preferred_audio = []

        for media in playlist.media:
            if media.type == "VIDEO" and media.name:
                names["name"] = media.name
            elif media.type == "AUDIO":
                audio_streams.append(media)

        for media in audio_streams:
            # Media without a uri is not relevant as external audio
            if not media.uri:
                continue

            if not fallback_audio and media.default:
                fallback_audio = [media]

            # if the media is "autoselect" and it better matches the user's preferences, use that
            # instead of default
            if not default_audio and (media.autoselect and locale.equivalent(language=media.language)):
                default_audio = [media]

            # select the first audio stream that matches the user's explicit language selection
            if (('*' in audio_select or media.language in audio_select or media.name in audio_select) or
                    ((not preferred_audio or media.default) and locale.explicit and locale.equivalent(
                        language=media.language))):
                preferred_audio.append(media)

        # final fallback on the first audio stream listed
        fallback_audio = fallback_audio or (len(audio_streams) and
                                            audio_streams[0].uri and [audio_streams[0]])

        if playlist.stream_info.resolution:
            width, height = playlist.stream_info.resolution
            names["pixels"] = "{0}p".format(height)

        if playlist.stream_info.bandwidth:
            bw = playlist.stream_info.bandwidth

            if bw >= 1000:
                names["bitrate"] = "{0}k".format(int(bw / 1000.0))
            else:
                names["bitrate"] = "{0}k".format(bw / 1000.0)

        if name_fmt:
            stream_name = name_fmt.format(**names)
        else:
            stream_name = (names.get(name_key) or names.get("name") or
                           names.get("pixels") or names.get("bitrate"))

        if not stream_name:
            continue

        if stream_name in streams:  # rename duplicate streams
            stream_name = "{0}_alt".format(stream_name)
            num_alts = len(list(filter(lambda n: n.startswith(stream_name), streams.keys())))

            # We shouldn't need more than 2 alt streams
            if num_alts >= 2:
                continue
            elif num_alts > 0:
                stream_name = "{0}{1}".format(stream_name, num_alts + 1)

        if check_streams:
            try:
                session_.http.get(playlist.uri, **request_params)
            except KeyboardInterrupt:
                raise
            except Exception:
                continue

        external_audio = preferred_audio or default_audio or fallback_audio

        if external_audio and FFMPEGMuxer.is_usable(session_):
            external_audio_msg = ", ".join([
                "(language={0}, name={1})".format(x.language, (x.name or "N/A"))
                for x in external_audio
            ])
            log.debug("Using external audio tracks for stream {0} {1}", name_prefix + stream_name,
                      external_audio_msg)

            stream = MuxedHLSStream(session_,
                                    video=playlist.uri,
                                    audio=[x.uri for x in external_audio if x.uri],
                                    force_restart=force_restart,
                                    start_offset=start_offset,
                                    duration=duration,
                                    **request_params)
        else:
            stream = cls(session_,
                         playlist.uri,
                         force_restart=force_restart,
                         start_offset=start_offset,
                         duration=duration,
                         **request_params)
        streams[name_prefix + stream_name] = stream

    return streams
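
A minimal usage sketch (placeholder playlist URL; session is assumed to be a configured Streamlink session):

# e.g. {"720p": <HLSStream>, "1080p": <HLSStream>, ...}
streams = HLSStream.parse_variant_playlist(session, "http://example.com/master.m3u8")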
|
Changes google.com to www.google.com
|
def prepend_www(url):
    """Changes google.com to www.google.com"""
    parsed = urlparse(url)
    if parsed.netloc.split(".")[0] != "www":
        return parsed.scheme + "://www." + parsed.netloc + parsed.path
    else:
        return url
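
For example:

prepend_www("http://google.com/")      # "http://www.google.com/"
prepend_www("http://www.google.com/")  # unchanged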
|
Wrapper around json.loads.
|
def parse_json(data, name="JSON", exception=PluginError, schema=None):
    """Wrapper around json.loads.

    Wraps errors in custom exception with a snippet of the data in the message.
    """
    try:
        json_data = json.loads(data)
    except ValueError as err:
        snippet = repr(data)
        if len(snippet) > 35:
            snippet = snippet[:35] + " ..."
        else:
            snippet = data

        raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet))

    if schema:
        json_data = schema.validate(json_data, name=name, exception=exception)

    return json_data
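
For example:

parse_json('{"a": 1}')  # {"a": 1}
parse_json("not json")  # raises PluginError with a snippet of the data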
|
Wrapper around ElementTree.fromstring with some extras.
|
def parse_xml(data, name="XML", ignore_ns=False, exception=PluginError, schema=None, invalid_char_entities=False):
    """Wrapper around ElementTree.fromstring with some extras.

    Provides these extra features:
     - Handles incorrectly encoded XML
     - Allows stripping namespace information
     - Wraps errors in custom exception with a snippet of the data in the message
    """
    if is_py2 and isinstance(data, unicode):
        data = data.encode("utf8")
    elif is_py3 and isinstance(data, str):
        data = bytearray(data, "utf8")

    if ignore_ns:
        data = re.sub(br"[\t ]xmlns=\"(.+?)\"", b"", data)

    if invalid_char_entities:
        data = re.sub(br'&(?!(?:#(?:[0-9]+|[Xx][0-9A-Fa-f]+)|[A-Za-z0-9]+);)', b'&amp;', data)

    try:
        tree = ET.fromstring(data)
    except Exception as err:
        snippet = repr(data)
        if len(snippet) > 35:
            snippet = snippet[:35] + " ..."

        raise exception("Unable to parse {0}: {1} ({2})".format(name, err, snippet))

    if schema:
        tree = schema.validate(tree, name=name, exception=exception)

    return tree
|
Parses a query string into a dict.
|
def parse_qsd(data, name="query string", exception=PluginError, schema=None, **params):
    """Parses a query string into a dict.

    Unlike parse_qs and parse_qsl, duplicate keys are not preserved in
    favor of a simpler return value.
    """
    value = dict(parse_qsl(data, **params))
    if schema:
        value = schema.validate(value, name=name, exception=exception)

    return value
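
For example, the last value wins for duplicate keys:

parse_qsd("foo=bar&foo=baz&q=1")  # {"foo": "baz", "q": "1"}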
|
Search for a key in a nested dict or list of nested dicts and return the values.
|
def search_dict(data, key):
    """
    Search for a key in a nested dict, or list of nested dicts, and return the values.

    :param data: dict/list to search
    :param key: key to find
    :return: matches for key
    """
    if isinstance(data, dict):
        for dkey, value in data.items():
            if dkey == key:
                yield value
            for result in search_dict(value, key):
                yield result
    elif isinstance(data, list):
        for value in data:
            for result in search_dict(value, key):
                yield result
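
For example:

data = {"a": {"key": 1}, "b": [{"key": 2}]}
list(search_dict(data, "key"))  # [1, 2]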
|
Get the live stream in a particular language.
|
def _get_live_streams(self, lang, path):
    """
    Get the live stream in a particular language

    :param lang:
    :param path:
    :return:
    """
    res = self.session.http.get(self._live_api_url.format(lang, path))
    live_res = self.session.http.json(res)['default']['uid']
    post_data = '{"channel_url":"/api/channels/%s/"}' % live_res
    try:
        stream_data = self.session.http.json(self.session.http.post(self._stream_get_url, data=post_data))['stream_url']
    except BaseException:
        stream_data = self.session.http.json(self.session.http.post(self._stream_get_url, data=post_data))['channel_url']
    return HLSStream.parse_variant_playlist(self.session, stream_data)
|
Find the streams for OlympicChannel.
|
def _get_streams(self):
    """
    Find the streams for OlympicChannel

    :return:
    """
    match = self._url_re.match(self.url)
    type_of_stream = match.group('type')
    lang = re.search(r"/../", self.url).group(0)

    if type_of_stream == 'tv':
        path = re.search(r"tv/.*-\d/$", self.url).group(0)
        return self._get_live_streams(lang, path)
    elif type_of_stream == 'playback':
        path = re.search(r"/playback/.*/$", self.url).group(0)
        return self._get_vod_streams()
|
Returns LOW priority if the URL is not prefixed with hls:// but ends with .m3u8, and NORMAL priority if the URL is prefixed.
|
def priority(cls, url):
    """
    Returns LOW priority if the URL is not prefixed with hls:// but ends with
    .m3u8, and returns NORMAL priority if the URL is prefixed.

    :param url: the URL to find the plugin priority for
    :return: plugin priority for the given URL
    """
    m = cls._url_re.match(url)
    if m:
        prefix, url = cls._url_re.match(url).groups()
        url_path = urlparse(url).path
        if prefix is None and url_path.endswith(".m3u8"):
            return LOW_PRIORITY
        elif prefix is not None:
            return NORMAL_PRIORITY
    return NO_PRIORITY
|
Spawn the process defined in cmd
|
def spawn(self, parameters=None, arguments=None, stderr=None, timeout=None, short_option_prefix="-", long_option_prefix="--"):
    """
    Spawn the process defined in `cmd`

    parameters are converted to options using the short and long option
    prefixes; if a list is given as the value, the parameter is repeated
    with each value.

    If timeout is set the spawn will block until the process returns or
    the timeout expires.

    :param parameters: optional parameters
    :param arguments: positional arguments
    :param stderr: where to redirect stderr to
    :param timeout: timeout for short lived process
    :param short_option_prefix: short option prefix, default "-"
    :param long_option_prefix: long option prefix, default "--"
    :return: spawned process
    """
    stderr = stderr or self.stderr
    cmd = self.bake(self._check_cmd(), parameters, arguments, short_option_prefix, long_option_prefix)
    log.debug("Spawning command: {0}", subprocess.list2cmdline(cmd))

    try:
        process = subprocess.Popen(cmd, stderr=stderr, stdout=subprocess.PIPE)
    except (OSError, IOError) as err:
        raise StreamError("Failed to start process: {0} ({1})".format(self._check_cmd(), str(err)))

    if timeout:
        elapsed = 0
        while elapsed < timeout and not process.poll():
            time.sleep(0.25)
            elapsed += 0.25

        # kill after the timeout has expired and the process still hasn't ended
        if not process.poll():
            try:
                log.debug("Process timeout expired ({0}s), killing process".format(timeout))
                process.kill()
            except Exception:
                pass

        process.wait()

    return process
|
Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when standards compliance is not required. Will find tags that are commented out, or inside script tags, etc.
|
def itertags(html, tag):
    """
    Brute force regex based HTML tag parser. This is a rough-and-ready searcher to find HTML tags when
    standards compliance is not required. Will find tags that are commented out, or inside script tags, etc.

    :param html: HTML page
    :param tag: tag name to find
    :return: generator with Tags
    """
    for match in tag_re.finditer(html):
        if match.group("tag") == tag:
            attrs = dict((a.group("key").lower(), a.group("value")) for a in attr_re.finditer(match.group("attr")))
            yield Tag(match.group("tag"), attrs, match.group("inner"))
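
A usage sketch (html is assumed to hold the page source):

for tag in itertags(html, "iframe"):
    print(tag.attributes.get("src"))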
|
Attempt to parse a DASH manifest file and return its streams
|
def parse_manifest(cls, session, url_or_manifest, **args):
    """
    Attempt to parse a DASH manifest file and return its streams

    :param session: Streamlink session instance
    :param url_or_manifest: URL of the manifest file or an XML manifest string
    :return: a dict of name -> DASHStream instances
    """
    ret = {}

    if url_or_manifest.startswith('<?xml'):
        mpd = MPD(parse_xml(url_or_manifest, ignore_ns=True))
    else:
        res = session.http.get(url_or_manifest, **args)
        url = res.url

        urlp = list(urlparse(url))
        urlp[2], _ = urlp[2].rsplit("/", 1)

        mpd = MPD(session.http.xml(res, ignore_ns=True), base_url=urlunparse(urlp), url=url)

    video, audio = [], []

    # Search for suitable video and audio representations
    for aset in mpd.periods[0].adaptationSets:
        if aset.contentProtection:
            raise PluginError("{} is protected by DRM".format(url))
        for rep in aset.representations:
            if rep.mimeType.startswith("video"):
                video.append(rep)
            elif rep.mimeType.startswith("audio"):
                audio.append(rep)

    if not video:
        video = [None]

    if not audio:
        audio = [None]

    locale = session.localization
    locale_lang = locale.language
    lang = None
    available_languages = set()

    # if the locale is explicitly set, prefer that language over others
    for aud in audio:
        if aud and aud.lang:
            available_languages.add(aud.lang)
            try:
                if locale.explicit and aud.lang and Language.get(aud.lang) == locale_lang:
                    lang = aud.lang
            except LookupError:
                continue

    if not lang:
        # filter by the first language that appears
        lang = audio[0] and audio[0].lang

    log.debug("Available languages for DASH audio streams: {0} (using: {1})".format(", ".join(available_languages) or "NONE", lang or "n/a"))

    # if the language is given by the stream, filter out other languages that do not match
    if len(available_languages) > 1:
        audio = list(filter(lambda a: a.lang is None or a.lang == lang, audio))

    for vid, aud in itertools.product(video, audio):
        stream = DASHStream(session, mpd, vid, aud, **args)
        stream_name = []

        if vid:
            stream_name.append("{:0.0f}{}".format(vid.height or vid.bandwidth_rounded, "p" if vid.height else "k"))
        if audio and len(audio) > 1:
            stream_name.append("a{:0.0f}k".format(aud.bandwidth))
        ret['+'.join(stream_name)] = stream

    return ret
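
A minimal usage sketch (placeholder MPD URL; session is assumed to be a configured Streamlink session):

# e.g. {"1080p": <DASHStream>, "720p+a128k": <DASHStream>, ...}
streams = DASHStream.parse_manifest(session, "http://example.com/manifest.mpd")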
|
Determine which Unicode encoding the JSON text sample is encoded with
|
def determine_json_encoding(cls, sample):
    """
    Determine which Unicode encoding the JSON text sample is encoded with

    RFC4627 (http://www.ietf.org/rfc/rfc4627.txt) suggests that the encoding of JSON text can be determined
    by checking the pattern of NULL bytes in first 4 octets of the text.

    :param sample: a sample of at least 4 bytes of the JSON text
    :return: the most likely encoding of the JSON text
    """
    nulls_at = [i for i, j in enumerate(bytearray(sample[:4])) if j == 0]
    if nulls_at == [0, 1, 2]:
        return "UTF-32BE"
    elif nulls_at == [0, 2]:
        return "UTF-16BE"
    elif nulls_at == [1, 2, 3]:
        return "UTF-32LE"
    elif nulls_at == [1, 3]:
        return "UTF-16LE"
    else:
        return "UTF-8"
|
Parses JSON from a response.
|
def json(cls, res, *args, **kwargs):
    """Parses JSON from a response."""
    # if an encoding is already set then use the provided encoding
    if res.encoding is None:
        res.encoding = cls.determine_json_encoding(res.content[:4])
    return parse_json(res.text, *args, **kwargs)
|
Parses XML from a response.
|
def xml(cls, res, *args, **kwargs):
    """Parses XML from a response."""
    return parse_xml(res.text, *args, **kwargs)
|
Parses a semi-colon delimited list of cookies.
|
def parse_cookies(self, cookies, **kwargs):
    """Parses a semi-colon delimited list of cookies.

    Example: foo=bar;baz=qux
    """
    for name, value in _parse_keyvalue_list(cookies):
        self.cookies.set(name, value, **kwargs)
|
Parses a semi-colon delimited list of headers.
|
def parse_headers(self, headers):
    """Parses a semi-colon delimited list of headers.

    Example: foo=bar;baz=qux
    """
    for name, value in _parse_keyvalue_list(headers):
        self.headers[name] = value
|
Parses a semi-colon delimited list of query parameters.
|
def parse_query_params(self, cookies, **kwargs):
    """Parses a semi-colon delimited list of query parameters.

    Example: foo=bar;baz=qux
    """
    for name, value in _parse_keyvalue_list(cookies):
        self.params[name] = value
|
Finds the streams from ustvnow.com.
|
def _get_streams(self):
    """
    Finds the streams from ustvnow.com.
    """
    token = self.login(self.get_option("username"), self.get_option("password"))
    m = self._url_re.match(self.url)
    scode = m and m.group("scode") or self.get_option("station_code")

    res = self.session.http.get(self._guide_url, params=dict(token=token))

    channels = OrderedDict()
    for t in itertags(res.text, "a"):
        if t.attributes.get('cs'):
            channels[t.attributes.get('cs').lower()] = t.attributes.get('title').replace("Watch ", "").strip()

    if not scode:
        log.error("Station code not provided, use --ustvnow-station-code.")
        log.info("Available stations are: \n{0} ".format('\n'.join(' {0} ({1})'.format(c, n) for c, n in channels.items())))
        return

    if scode in channels:
        log.debug("Finding streams for: {0}", channels.get(scode))

        r = self.session.http.get(self._stream_url, params={"scode": scode,
                                                            "token": token,
                                                            "br_n": "Firefox",
                                                            "br_v": "52",
                                                            "br_d": "desktop"},
                                  headers={"User-Agent": useragents.FIREFOX})
        data = self.session.http.json(r)

        return HLSStream.parse_variant_playlist(self.session, data["stream"])
    else:
        log.error("Invalid station-code: {0}", scode)
|
Randomly generated deviceId.
|
def device_id(self):
    """
    Randomly generated deviceId.

    :return:
    """
    if self._device_id is None:
        self._device_id = "".join(
            random.choice("0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") for _ in range(50))
    return self._device_id
|
Return the message for this LogRecord.
|
def getMessage(self):
    """
    Return the message for this LogRecord.

    Return the message for this LogRecord after merging any user-supplied
    arguments with the message.
    """
    msg = self.msg
    if self.args:
        msg = msg.format(*self.args)
    return maybe_encode(msg)
|
A factory method which can be overridden in subclasses to create specialized LogRecords.
|
def makeRecord(self, name, level, fn, lno, msg, args, exc_info,
               func=None, extra=None, sinfo=None):
    """
    A factory method which can be overridden in subclasses to create
    specialized LogRecords.
    """
    if name.startswith("streamlink"):
        rv = _LogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)
    else:
        rv = _CompatLogRecord(name, level, fn, lno, msg, args, exc_info, func, sinfo)
    if extra is not None:
        for key in extra:
            if (key in ["message", "asctime"]) or (key in rv.__dict__):
                raise KeyError("Attempt to overwrite %r in LogRecord" % key)
            rv.__dict__[key] = extra[key]
    return rv
|
Wraps a file described in request in a Response object.
|
def send(self, request, **kwargs):
    """Wraps a file, described in request, in a Response object.

    :param request: The PreparedRequest being "sent".
    :returns: a Response object containing the file
    """
    # Check that the method makes sense. Only support GET
    if request.method not in ("GET", "HEAD"):
        raise ValueError("Invalid request method %s" % request.method)

    # Parse the URL
    url_parts = urlparse(request.url)

    # Make the Windows URLs slightly nicer
    if is_win32 and url_parts.netloc.endswith(":"):
        url_parts = url_parts._replace(path="/" + url_parts.netloc + url_parts.path, netloc='')

    # Reject URLs with a hostname component
    if url_parts.netloc and url_parts.netloc not in ("localhost", ".", "..", "-"):
        raise ValueError("file: URLs with hostname components are not permitted")

    # If the path is relative update it to be absolute
    if url_parts.netloc in (".", ".."):
        pwd = os.path.abspath(url_parts.netloc).replace(os.sep, "/") + "/"
        if is_win32:
            # prefix the path with a / in Windows
            pwd = "/" + pwd
        url_parts = url_parts._replace(path=urljoin(pwd, url_parts.path.lstrip("/")))

    resp = Response()
    resp.url = request.url

    # Open the file, translate certain errors into HTTP responses
    # Use urllib's unquote to translate percent escapes into whatever
    # they actually need to be
    try:
        # If the netloc is - then read from stdin
        if url_parts.netloc == "-":
            if is_py3:
                resp.raw = sys.stdin.buffer
            else:
                resp.raw = sys.stdin
            # make a fake response URL, the current directory
            resp.url = "file://" + os.path.abspath(".").replace(os.sep, "/") + "/"
        else:
            # Split the path on / (the URL directory separator) and decode any
            # % escapes in the parts
            path_parts = [unquote(p) for p in url_parts.path.split('/')]

            # Strip out the leading empty parts created from the leading /'s
            while path_parts and not path_parts[0]:
                path_parts.pop(0)

            # If os.sep is in any of the parts, someone fed us some shenanigans.
            # Treat it like a missing file.
            if any(os.sep in p for p in path_parts):
                raise IOError(errno.ENOENT, os.strerror(errno.ENOENT))

            # Look for a drive component. If one is present, store it separately
            # so that a directory separator can correctly be added to the real
            # path, and remove any empty path parts between the drive and the path.
            # Assume that a part ending with : or | (legacy) is a drive.
            if path_parts and (path_parts[0].endswith('|') or
                               path_parts[0].endswith(':')):
                path_drive = path_parts.pop(0)
                if path_drive.endswith('|'):
                    path_drive = path_drive[:-1] + ':'

                while path_parts and not path_parts[0]:
                    path_parts.pop(0)
            else:
                path_drive = ''

            # Try to put the path back together
            # Join the drive back in, and stick os.sep in front of the path to
            # make it absolute.
            path = path_drive + os.sep + os.path.join(*path_parts)

            # Check if the drive assumptions above were correct. If path_drive
            # is set, and os.path.splitdrive does not return a drive, it wasn't
            # really a drive. Put the path together again treating path_drive
            # as a normal path component.
            if path_drive and not os.path.splitdrive(path)[0]:
                path = os.sep + os.path.join(path_drive, *path_parts)

            # Use io.open since we need to add a release_conn method, and
            # methods can't be added to file objects in python 2.
            resp.raw = io.open(path, "rb")
            resp.raw.release_conn = resp.raw.close
    except IOError as e:
        if e.errno == errno.EACCES:
            resp.status_code = codes.forbidden
        elif e.errno == errno.ENOENT:
            resp.status_code = codes.not_found
        else:
            resp.status_code = codes.bad_request

        # Wrap the error message in a file-like object
        # The error message will be localized, try to convert the string
        # representation of the exception into a byte stream
        resp_str = str(e).encode(locale.getpreferredencoding(False))
        resp.raw = BytesIO(resp_str)
        resp.headers['Content-Length'] = len(resp_str)

        # Add release_conn to the BytesIO object
        resp.raw.release_conn = resp.raw.close
    else:
        resp.status_code = codes.ok

        # If it's a regular file, set the Content-Length
        resp_stat = os.fstat(resp.raw.fileno())
        if stat.S_ISREG(resp_stat.st_mode):
            resp.headers['Content-Length'] = resp_stat.st_size

    return resp
|
Get the info about the content, based on the ID.
|
def _get_media_info(self, content_id):
    """
    Get the info about the content, based on the ID

    :param content_id:
    :return:
    """
    params = {"identityPointId": self._session_attributes.get("ipid"),
              "fingerprint": self._session_attributes.get("fprt"),
              "contentId": content_id,
              "playbackScenario": self.playback_scenario,
              "platform": "WEB_MEDIAPLAYER_5",
              "subject": "LIVE_EVENT_COVERAGE",
              "frameworkURL": "https://ws.media.net.wwe.com",
              "_": int(time.time())}
    if self.session_key:
        params["sessionKey"] = self.session_key
    url = self.api_url.format(id=content_id)
    res = self.session.http.get(url, params=params)
    return self.session.http.xml(res, ignore_ns=True, schema=self._info_schema)
|
Find the streams for vk.com.
|
def _get_streams(self):
    """
    Find the streams for vk.com

    :return:
    """
    self.session.http.headers.update({'User-Agent': useragents.IPHONE_6})

    # If this is a 'videos' catalog URL
    # with a video ID in the GET request, get that instead
    url = self.follow_vk_redirect(self.url)

    m = self._url_re.match(url)
    if not m:
        log.error('URL is not compatible: {0}'.format(url))
        return

    video_id = m.group('video_id')
    log.debug('video ID: {0}'.format(video_id))

    params = {
        'act': 'show_inline',
        'al': '1',
        'video': video_id,
    }
    res = self.session.http.post(self.API_URL, params=params)

    for _i in itertags(res.text, 'iframe'):
        if _i.attributes.get('src'):
            iframe_url = update_scheme(self.url, _i.attributes['src'])
            log.debug('Found iframe: {0}'.format(iframe_url))
            for s in self.session.streams(iframe_url).items():
                yield s

    for _i in itertags(res.text, 'source'):
        if _i.attributes.get('type') == 'application/vnd.apple.mpegurl':
            video_url = _i.attributes['src']

            # Remove invalid URL
            if video_url.startswith('https://vk.com/'):
                continue

            streams = HLSStream.parse_variant_playlist(self.session,
                                                       video_url)
            if not streams:
                yield 'live', HLSStream(self.session, video_url)
            else:
                for s in streams.items():
                    yield s
        elif _i.attributes.get('type') == 'video/mp4':
            q = 'vod'
            video_url = _i.attributes['src']
            m = self._vod_quality_re.search(video_url)
            if m:
                q = '{0}p'.format(m.group(1))
            yield q, HTTPStream(self.session, video_url)
|
Find the streams for web.tv.
|
def _get_streams(self):
    """
    Find the streams for web.tv

    :return:
    """
    headers = {}
    res = self.session.http.get(self.url, headers=headers)
    headers["Referer"] = self.url

    sources = self._sources_re.findall(res.text)
    if len(sources):
        sdata = parse_json(sources[0], schema=self._sources_schema)
        for source in sdata:
            self.logger.debug("Found stream of type: {}", source[u'type'])
            if source[u'type'] == u"application/vnd.apple.mpegurl":
                url = update_scheme(self.url, source[u"src"])

                try:
                    # try to parse the stream as a variant playlist
                    variant = HLSStream.parse_variant_playlist(self.session, url, headers=headers)
                    if variant:
                        for q, s in variant.items():
                            yield q, s
                    else:
                        # and if that fails, try it as a plain HLS stream
                        yield 'live', HLSStream(self.session, url, headers=headers)
                except IOError:
                    self.logger.warning("Could not open the stream, perhaps the channel is offline")
|
Attempt a login to LiveEdu.tv
|
def login(self):
    """
    Attempt a login to LiveEdu.tv
    """
    email = self.get_option("email")
    password = self.get_option("password")

    if email and password:
        res = self.session.http.get(self.login_url)
        csrf_match = self.csrf_re.search(res.text)
        token = csrf_match and csrf_match.group(1)
        self.logger.debug("Attempting login as {0} (token={1})", email, token)
        res = self.session.http.post(self.login_url,
                                     data=dict(login=email, password=password, csrfmiddlewaretoken=token),
                                     allow_redirects=False,
                                     raise_for_status=False,
                                     headers={"Referer": self.login_url})

        if res.status_code != 302:
            self.logger.error("Failed to login to LiveEdu account: {0}", email)
|
Get the config object from the page source and call the API to get the list of streams.
|
def _get_streams(self):
    """
    Get the config object from the page source and call the
    API to get the list of streams

    :return:
    """
    # attempt a login
    self.login()

    res = self.session.http.get(self.url)
    # decode the config for the page
    matches = self.config_re.finditer(res.text)
    try:
        config = self.config_schema.validate(dict(
            [m.group("key", "value") for m in matches]
        ))
    except PluginError:
        return

    if config["selectedVideoHID"]:
        self.logger.debug("Found video hash ID: {0}", config["selectedVideoHID"])
        api_url = urljoin(self.url, urljoin(config["videosURL"], config["selectedVideoHID"]))
    elif config["livestreamURL"]:
        self.logger.debug("Found live stream URL: {0}", config["livestreamURL"])
        api_url = urljoin(self.url, config["livestreamURL"])
    else:
        return

    ares = self.session.http.get(api_url)
    data = self.session.http.json(ares, schema=self.api_schema)
    viewing_urls = data["viewing_urls"]

    if "error" in viewing_urls:
        self.logger.error("Failed to load streams: {0}", viewing_urls["error"])
    else:
        for url in viewing_urls["urls"]:
            try:
                label = "{0}p".format(url.get("res", url["label"]))
            except KeyError:
                label = "live"

            if url["type"] == "rtmp/mp4" and RTMPStream.is_usable(self.session):
                params = {
                    "rtmp": url["src"],
                    "pageUrl": self.url,
                    "live": True,
                }
                yield label, RTMPStream(self.session, params)
            elif url["type"] == "application/x-mpegURL":
                for s in HLSStream.parse_variant_playlist(self.session, url["src"]).items():
                    yield s
|
Loads a plugin from the same directory as the calling plugin.
|
def load_support_plugin(name):
    """Loads a plugin from the same directory as the calling plugin.

    The path used is extracted from the last call in module scope,
    therefore this must be called only from module level in the
    originating plugin or the correct plugin path will not be found.
    """
    # Get the path of the caller module
    stack = list(filter(lambda f: f[3] == "<module>", inspect.stack()))
    prev_frame = stack[0]
    path = os.path.dirname(prev_frame[1])

    # Major hack. If we are frozen by bbfreeze the stack trace will
    # contain relative paths. We therefore use the __file__ variable
    # in this module to correct it.
    if not os.path.isabs(path):
        prefix = os.path.normpath(__file__ + "../../../../../")
        path = os.path.join(prefix, path)

    return load_module(name, path)
|
Takes the scheme from the current URL and applies it to the target URL if the target URL starts with // or is missing a scheme.
|
def update_scheme(current, target):
    """
    Takes the scheme from the current URL and applies it to the
    target URL if the target URL starts with // or is missing a scheme

    :param current: current URL
    :param target: target URL
    :return: target URL with the current URL's scheme
    """
    target_p = urlparse(target)
    if not target_p.scheme and target_p.netloc:
        return "{0}:{1}".format(urlparse(current).scheme,
                                urlunparse(target_p))
    elif not target_p.scheme and not target_p.netloc:
        return "{0}://{1}".format(urlparse(current).scheme,
                                  urlunparse(target_p))
    else:
        return target
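
For example:

update_scheme("https://example.com/", "//cdn.example.com/playlist.m3u8")
# "https://cdn.example.com/playlist.m3u8"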
|
Compare two URLs and return True if they are equal; some parts of the URLs (scheme, netloc, path, params, query, fragment) can be ignored.
|
def url_equal(first, second, ignore_scheme=False, ignore_netloc=False, ignore_path=False, ignore_params=False,
              ignore_query=False, ignore_fragment=False):
    """
    Compare two URLs and return True if they are equal; some parts of the URLs can be ignored

    :param first: URL
    :param second: URL
    :param ignore_scheme: ignore the scheme
    :param ignore_netloc: ignore the netloc
    :param ignore_path: ignore the path
    :param ignore_params: ignore the params
    :param ignore_query: ignore the query string
    :param ignore_fragment: ignore the fragment
    :return: result of comparison
    """
    # <scheme>://<netloc>/<path>;<params>?<query>#<fragment>
    firstp = urlparse(first)
    secondp = urlparse(second)

    return ((firstp.scheme == secondp.scheme or ignore_scheme) and
            (firstp.netloc == secondp.netloc or ignore_netloc) and
            (firstp.path == secondp.path or ignore_path) and
            (firstp.params == secondp.params or ignore_params) and
            (firstp.query == secondp.query or ignore_query) and
            (firstp.fragment == secondp.fragment or ignore_fragment))
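
For example:

url_equal("http://example.com/foo", "https://example.com/foo")                      # False
url_equal("http://example.com/foo", "https://example.com/foo", ignore_scheme=True)  # True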
|
Join extra paths to a URL; does not join absolute paths.
|
def url_concat(base, *parts, **kwargs):
    """
    Join extra paths to a URL; does not join absolute paths

    :param base: the base URL
    :param parts: a list of the parts to join
    :param allow_fragments: include url fragments
    :return: the joined URL
    """
    allow_fragments = kwargs.get("allow_fragments", True)
    for part in parts:
        base = urljoin(base.rstrip("/") + "/", part.strip("/"), allow_fragments)
    return base
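
For example:

url_concat("http://example.com", "foo", "bar/", "/baz")  # "http://example.com/foo/bar/baz"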
|
Update or remove keys from a query string in a URL
|
def update_qsd(url, qsd=None, remove=None):
    """
    Update or remove keys from a query string in a URL

    :param url: URL to update
    :param qsd: dict of keys to update, a None value leaves it unchanged
    :param remove: list of keys to remove, or "*" to remove all
                   note: updated keys are never removed, even if unchanged
    :return: updated URL
    """
    qsd = qsd or {}
    remove = remove or []

    # parse current query string
    parsed = urlparse(url)
    current_qsd = OrderedDict(parse_qsl(parsed.query))

    # * removes all possible keys
    if remove == "*":
        remove = list(current_qsd.keys())

    # remove keys before updating, but leave updated keys untouched
    for key in remove:
        if key not in qsd:
            del current_qsd[key]

    # and update the query string
    for key, value in qsd.items():
        if value:
            current_qsd[key] = value

    return parsed._replace(query=urlencode(current_qsd)).geturl()
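
For example:

update_qsd("http://example.com/?a=1&b=2", {"a": "3"}, remove=["b"])
# "http://example.com/?a=3"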
|
Find all the streams for the ITV URL and return a mapping of quality to stream.
|
def _get_streams(self):
    """
    Find all the streams for the ITV url

    :return: Mapping of quality to stream
    """
    self.session.http.headers.update({"User-Agent": useragents.FIREFOX})
    video_info = self.video_info()
    video_info_url = video_info.get("data-html5-playlist") or video_info.get("data-video-id")

    res = self.session.http.post(video_info_url,
                                 data=json.dumps(self.device_info),
                                 headers={"hmac": video_info.get("data-video-hmac")})
    data = self.session.http.json(res, schema=self._video_info_schema)

    log.debug("Video ID info response: {0}".format(data))

    stype = data['Playlist']['VideoType']

    for media in data['Playlist']['Video']['MediaFiles']:
        url = urljoin(data['Playlist']['Video']['Base'], media['Href'])
        name_fmt = "{pixels}_{bitrate}" if stype == "CATCHUP" else None
        for s in HLSStream.parse_variant_playlist(self.session, url, name_fmt=name_fmt).items():
            yield s
|
Reads FLV tags from fd or buf and returns them with adjusted timestamps.
|
def iter_chunks(self, fd=None, buf=None, skip_header=None):
    """Reads FLV tags from fd or buf and returns them with adjusted
    timestamps."""
    timestamps = dict(self.timestamps_add)
    tag_iterator = self.iter_tags(fd=fd, buf=buf, skip_header=skip_header)

    if not self.flv_header_written:
        analyzed_tags = self.analyze_tags(tag_iterator)
    else:
        analyzed_tags = []

    for tag in chain(analyzed_tags, tag_iterator):
        if not self.flv_header_written:
            flv_header = Header(has_video=self.has_video,
                                has_audio=self.has_audio)
            yield flv_header.serialize()
            self.flv_header_written = True

        if self.verify_tag(tag):
            self.adjust_tag_gap(tag)
            self.adjust_tag_timestamp(tag)

            if self.duration:
                norm_timestamp = tag.timestamp / 1000
                if norm_timestamp > self.duration:
                    break

            yield tag.serialize()
            timestamps[tag.type] = tag.timestamp

    if not self.flatten_timestamps:
        self.timestamps_add = timestamps

    self.tags = []
|
Find all the arguments required by name
|
def requires(self, name):
    """
    Find all the arguments required by name

    :param name: name of the argument to find the dependencies of
    :return: list of dependent arguments
    """
    results = set([name])
    argument = self.get(name)
    for reqname in argument.requires:
        required = self.get(reqname)
        if not required:
            raise KeyError("{0} is not a valid argument for this plugin".format(reqname))

        if required.name in results:
            raise RuntimeError("cycle detected in plugin argument config")
        results.add(required.name)
        yield required

        for r in self.requires(required.name):
            if r.name in results:
                raise RuntimeError("cycle detected in plugin argument config")
            results.add(r.name)
            yield r
|
Checks if the file already exists and asks the user if it should be overwritten if it does.
|
def check_file_output(filename, force):
    """Checks if the file already exists and asks the user if it should
    be overwritten if it does."""
    log.debug("Checking file output")

    if os.path.isfile(filename) and not force:
        if sys.stdin.isatty():
            answer = console.ask("File {0} already exists! Overwrite it? [y/N] ",
                                 filename)
            if answer.lower() != "y":
                sys.exit()
        else:
            log.error("File {0} already exists, use --force to overwrite it.".format(filename))
            sys.exit()

    return FileOutput(filename)
|
Decides where to write the stream.
|
def create_output(plugin):
    """Decides where to write the stream.

    Depending on arguments it can be one of these:
     - The stdout pipe
     - A subprocess' stdin pipe
     - A named pipe that the subprocess reads from
     - A regular file
    """
    if (args.output or args.stdout) and (args.record or args.record_and_pipe):
        console.exit("Cannot use record options with other file output options.")

    if args.output:
        if args.output == "-":
            out = FileOutput(fd=stdout)
        else:
            out = check_file_output(args.output, args.force)
    elif args.stdout:
        out = FileOutput(fd=stdout)
    elif args.record_and_pipe:
        record = check_file_output(args.record_and_pipe, args.force)
        out = FileOutput(fd=stdout, record=record)
    else:
        http = namedpipe = record = None

        if not args.player:
            console.exit("The default player (VLC) does not seem to be "
                         "installed. You must specify the path to a player "
                         "executable with --player.")

        if args.player_fifo:
            pipename = "streamlinkpipe-{0}".format(os.getpid())
            log.info("Creating pipe {0}", pipename)

            try:
                namedpipe = NamedPipe(pipename)
            except IOError as err:
                console.exit("Failed to create pipe: {0}", err)
        elif args.player_http:
            http = create_http_server()

        title = create_title(plugin)

        if args.record:
            record = check_file_output(args.record, args.force)

        log.info("Starting player: {0}", args.player)

        out = PlayerOutput(args.player, args=args.player_args,
                           quiet=not args.verbose_player,
                           kill=not args.player_no_close,
                           namedpipe=namedpipe, http=http,
                           record=record, title=title)

    return out
|
Creates an HTTP server listening on a given host and port.
|
def create_http_server(host=None, port=0):
    """Creates an HTTP server listening on a given host and port.

    If host is empty, listen on all available interfaces, and if port is 0,
    listen on a random high port.
    """
    try:
        http = HTTPServer()
        http.bind(host=host, port=port)
    except OSError as err:
        console.exit("Failed to create HTTP server: {0}", err)

    return http
|
Repeatedly accept HTTP connections on a server.
|
def iter_http_requests(server, player):
    """Repeatedly accept HTTP connections on a server.

    Forever if serving externally, or while a player is running if it is not.
    """
    while not player or player.running:
        try:
            yield server.open(timeout=2.5)
        except OSError:
            continue
|
Continuously output the stream over HTTP.
|
def output_stream_http(plugin, initial_streams, external=False, port=0):
    """Continuously output the stream over HTTP."""
    global output

    if not external:
        if not args.player:
            console.exit("The default player (VLC) does not seem to be "
                         "installed. You must specify the path to a player "
                         "executable with --player.")

        title = create_title(plugin)
        server = create_http_server()
        player = output = PlayerOutput(args.player, args=args.player_args,
                                       filename=server.url,
                                       quiet=not args.verbose_player,
                                       title=title)

        try:
            log.info("Starting player: {0}", args.player)
            if player:
                player.open()
        except OSError as err:
            console.exit("Failed to start player: {0} ({1})",
                         args.player, err)
    else:
        server = create_http_server(host=None, port=port)
        player = None

        log.info("Starting server, access with one of:")
        for url in server.urls:
            log.info(" " + url)

    for req in iter_http_requests(server, player):
        user_agent = req.headers.get("User-Agent") or "unknown player"
        log.info("Got HTTP request from {0}".format(user_agent))

        stream_fd = prebuffer = None
        while not stream_fd and (not player or player.running):
            try:
                streams = initial_streams or fetch_streams(plugin)
                initial_streams = None

                for stream_name in (resolve_stream_name(streams, s) for s in args.stream):
                    if stream_name in streams:
                        stream = streams[stream_name]
                        break
                else:
                    log.info("Stream not available, will re-fetch "
                             "streams in 10 sec")
                    sleep(10)
                    continue
            except PluginError as err:
                log.error(u"Unable to fetch new streams: {0}", err)
                continue

            try:
                log.info("Opening stream: {0} ({1})", stream_name,
                         type(stream).shortname())
                stream_fd, prebuffer = open_stream(stream)
            except StreamError as err:
                log.error("{0}", err)

        if stream_fd and prebuffer:
            log.debug("Writing stream to player")
            read_stream(stream_fd, server, prebuffer)

        server.close(True)

    player.close()
    server.close()
|
Prepares a filename to be passed to the player.
|
def output_stream_passthrough(plugin, stream):
    """Prepares a filename to be passed to the player."""
    global output

    title = create_title(plugin)
    filename = '"{0}"'.format(stream_to_url(stream))
    output = PlayerOutput(args.player, args=args.player_args,
                          filename=filename, call=True,
                          quiet=not args.verbose_player,
                          title=title)

    try:
        log.info("Starting player: {0}", args.player)
        output.open()
    except OSError as err:
        console.exit("Failed to start player: {0} ({1})", args.player, err)
        return False

    return True
|
Opens a stream and reads 8192 bytes from it.
|
def open_stream(stream):
    """Opens a stream and reads 8192 bytes from it.

    This is useful to check if a stream actually has data
    before opening the output.
    """
    global stream_fd

    # Attempts to open the stream
    try:
        stream_fd = stream.open()
    except StreamError as err:
        raise StreamError("Could not open stream: {0}".format(err))

    # Read 8192 bytes before proceeding to check for errors.
    # This is to avoid opening the output unnecessarily.
    try:
        log.debug("Pre-buffering 8192 bytes")
        prebuffer = stream_fd.read(8192)
    except IOError as err:
        stream_fd.close()
        raise StreamError("Failed to read data from stream: {0}".format(err))

    if not prebuffer:
        stream_fd.close()
        raise StreamError("No data returned from stream")

    return stream_fd, prebuffer
|
Open stream, create output and finally write the stream to output.
|
def output_stream(plugin, stream):
    """Open stream, create output and finally write the stream to output."""
    global output

    success_open = False
    for i in range(args.retry_open):
        try:
            stream_fd, prebuffer = open_stream(stream)
            success_open = True
            break
        except StreamError as err:
            log.error("Try {0}/{1}: Could not open stream {2} ({3})", i + 1, args.retry_open, stream, err)

    if not success_open:
        console.exit("Could not open stream {0}, tried {1} times, exiting", stream, args.retry_open)

    output = create_output(plugin)

    try:
        output.open()
    except (IOError, OSError) as err:
        if isinstance(output, PlayerOutput):
            console.exit("Failed to start player: {0} ({1})",
                         args.player, err)
        else:
            console.exit("Failed to open output: {0} ({1})",
                         args.output, err)

    with closing(output):
        log.debug("Writing stream to output")
        read_stream(stream_fd, output, prebuffer)

    return True
|
Reads data from stream and then writes it to the output.
|
def read_stream(stream, output, prebuffer, chunk_size=8192):
    """Reads data from stream and then writes it to the output."""
    is_player = isinstance(output, PlayerOutput)
    is_http = isinstance(output, HTTPServer)
    is_fifo = is_player and output.namedpipe
    show_progress = isinstance(output, FileOutput) and output.fd is not stdout and sys.stdout.isatty()
    show_record_progress = hasattr(output, "record") and isinstance(output.record, FileOutput) and output.record.fd is not stdout and sys.stdout.isatty()

    stream_iterator = chain(
        [prebuffer],
        iter(partial(stream.read, chunk_size), b"")
    )
    if show_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.output))
    elif show_record_progress:
        stream_iterator = progress(stream_iterator,
                                   prefix=os.path.basename(args.record))

    try:
        for data in stream_iterator:
            # We need to check if the player process still exists when
            # using named pipes on Windows since the named pipe is not
            # automatically closed by the player.
            if is_win32 and is_fifo:
                output.player.poll()

                if output.player.returncode is not None:
                    log.info("Player closed")
                    break

            try:
                output.write(data)
            except IOError as err:
                if is_player and err.errno in ACCEPTABLE_ERRNO:
                    log.info("Player closed")
                elif is_http and err.errno in ACCEPTABLE_ERRNO:
                    log.info("HTTP connection closed")
                else:
                    console.exit("Error when writing to output: {0}, exiting", err)

                break
    except IOError as err:
        console.exit("Error when reading from stream: {0}, exiting", err)
    finally:
        stream.close()
        log.info("Stream ended")
|
Decides what to do with the selected stream.
|
def handle_stream(plugin, streams, stream_name):
    """Decides what to do with the selected stream.

    Depending on arguments it can be one of these:
     - Output internal command-line
     - Output JSON representation
     - Continuously output the stream over HTTP
     - Output stream data to selected output
    """
    stream_name = resolve_stream_name(streams, stream_name)
    stream = streams[stream_name]

    # Print internal command-line if this stream
    # uses a subprocess.
    if args.subprocess_cmdline:
        if isinstance(stream, StreamProcess):
            try:
                cmdline = stream.cmdline()
            except StreamError as err:
                console.exit("{0}", err)

            console.msg("{0}", cmdline)
        else:
            console.exit("The stream specified cannot be translated to a command")
    # Print JSON representation of the stream
    elif console.json:
        console.msg_json(stream)
    elif args.stream_url:
        try:
            console.msg("{0}", stream.to_url())
        except TypeError:
            console.exit("The stream specified cannot be translated to a URL")
    # Output the stream
    else:
        # Find any streams with a '_alt' suffix and attempt
        # to use these in case the main stream is not usable.
        alt_streams = list(filter(lambda k: stream_name + "_alt" in k,
                                  sorted(streams.keys())))
        file_output = args.output or args.stdout

        for stream_name in [stream_name] + alt_streams:
            stream = streams[stream_name]
            stream_type = type(stream).shortname()

            if stream_type in args.player_passthrough and not file_output:
                log.info("Opening stream: {0} ({1})", stream_name,
                         stream_type)
                success = output_stream_passthrough(plugin, stream)
            elif args.player_external_http:
                return output_stream_http(plugin, streams, external=True,
                                          port=args.player_external_http_port)
            elif args.player_continuous_http and not file_output:
                return output_stream_http(plugin, streams)
            else:
                log.info("Opening stream: {0} ({1})", stream_name,
                         stream_type)
                success = output_stream(plugin, stream)

            if success:
                break
|
Fetches streams using correct parameters.
|
def fetch_streams(plugin):
    """Fetches streams using correct parameters."""
    return plugin.streams(stream_types=args.stream_types,
                          sorting_excludes=args.stream_sorting_excludes)
|
Attempts to fetch streams repeatedly until some are returned or limit hit.
|
def fetch_streams_with_retry(plugin, interval, count):
    """Attempts to fetch streams repeatedly
    until some are returned or limit hit."""
    try:
        streams = fetch_streams(plugin)
    except PluginError as err:
        log.error(u"{0}", err)
        streams = None

    if not streams:
        log.info("Waiting for streams, retrying every {0} "
                 "second(s)", interval)
    attempts = 0

    while not streams:
        sleep(interval)

        try:
            streams = fetch_streams(plugin)
        except FatalPluginError as err:
            raise
        except PluginError as err:
            log.error(u"{0}", err)

        if count > 0:
            attempts += 1
            if attempts >= count:
                break

    return streams
|
Returns the real stream name of a synonym.
|
def resolve_stream_name(streams, stream_name):
    """Returns the real stream name of a synonym."""
    if stream_name in STREAM_SYNONYMS and stream_name in streams:
        for name, stream in streams.items():
            if stream is streams[stream_name] and name not in STREAM_SYNONYMS:
                return name

    return stream_name
|
Formats a dict of streams.
|
def format_valid_streams(plugin, streams):
    """Formats a dict of streams.

    Filters out synonyms and displays them next to
    the stream they point to.

    Streams are sorted according to their quality
    (based on plugin.stream_weight).
    """
    delimiter = ", "
    validstreams = []

    for name, stream in sorted(streams.items(),
                               key=lambda stream: plugin.stream_weight(stream[0])):
        if name in STREAM_SYNONYMS:
            continue

        def synonymfilter(n):
            return stream is streams[n] and n is not name

        synonyms = list(filter(synonymfilter, streams.keys()))

        if len(synonyms) > 0:
            joined = delimiter.join(synonyms)
            name = "{0} ({1})".format(name, joined)

        validstreams.append(name)

    return delimiter.join(validstreams)
|
The URL handler.
|
def handle_url():
    """The URL handler.

    Attempts to resolve the URL to a plugin and then attempts
    to fetch a list of available streams.

    Proceeds to handle stream if user specified a valid one,
    otherwise output list of valid streams.
    """
    try:
        plugin = streamlink.resolve_url(args.url)
        setup_plugin_options(streamlink, plugin)
        log.info("Found matching plugin {0} for URL {1}",
                 plugin.module, args.url)

        plugin_args = []
        for parg in plugin.arguments:
            value = plugin.get_option(parg.dest)
            if value:
                plugin_args.append((parg, value))

        if plugin_args:
            log.debug("Plugin specific arguments:")
            for parg, value in plugin_args:
                log.debug(" {0}={1} ({2})".format(parg.argument_name(plugin.module),
                                                  value if not parg.sensitive else ("*" * 8),
                                                  parg.dest))

        if args.retry_max or args.retry_streams:
            retry_streams = 1
            retry_max = 0
            if args.retry_streams:
                retry_streams = args.retry_streams
            if args.retry_max:
                retry_max = args.retry_max
            streams = fetch_streams_with_retry(plugin, retry_streams,
                                               retry_max)
        else:
            streams = fetch_streams(plugin)
    except NoPluginError:
        console.exit("No plugin can handle URL: {0}", args.url)
    except PluginError as err:
        console.exit(u"{0}", err)

    if not streams:
        console.exit("No playable streams found on this URL: {0}", args.url)

    if args.default_stream and not args.stream and not args.json:
        args.stream = args.default_stream

    if args.stream:
        validstreams = format_valid_streams(plugin, streams)
        for stream_name in args.stream:
            if stream_name in streams:
                log.info("Available streams: {0}", validstreams)
                handle_stream(plugin, streams, stream_name)
                return

        err = ("The specified stream(s) '{0}' could not be "
               "found".format(", ".join(args.stream)))

        if console.json:
            console.msg_json(dict(streams=streams, plugin=plugin.module,
                                  error=err))
        else:
            console.exit("{0}.\n Available streams: {1}",
                         err, validstreams)
    else:
        if console.json:
            console.msg_json(dict(streams=streams, plugin=plugin.module))
        else:
            validstreams = format_valid_streams(plugin, streams)
            console.msg("Available streams: {0}", validstreams)
|
Outputs a list of all plugins Streamlink has loaded.
|
def print_plugins():
"""Outputs a list of all plugins Streamlink has loaded."""
pluginlist = list(streamlink.get_plugins().keys())
pluginlist_formatted = ", ".join(sorted(pluginlist))
if console.json:
console.msg_json(pluginlist)
else:
console.msg("Loaded plugins: {0}", pluginlist_formatted)
|
Opens a web browser to allow the user to grant Streamlink access to their Twitch account.
|
def authenticate_twitch_oauth():
"""Opens a web browser to allow the user to grant Streamlink
access to their Twitch account."""
client_id = TWITCH_CLIENT_ID
redirect_uri = "https://streamlink.github.io/twitch_oauth.html"
url = ("https://api.twitch.tv/kraken/oauth2/authorize"
"?response_type=token"
"&client_id={0}"
"&redirect_uri={1}"
"&scope=user_read+user_subscriptions"
"&force_verify=true").format(client_id, redirect_uri)
console.msg("Attempting to open a browser to let you authenticate "
"Streamlink with Twitch")
try:
if not webbrowser.open_new_tab(url):
raise webbrowser.Error
except webbrowser.Error:
console.exit("Unable to open a web browser, try accessing this URL "
"manually instead:\n{0}".format(url))
|
Attempts to load plugins from a list of directories.
|
def load_plugins(dirs):
"""Attempts to load plugins from a list of directories."""
dirs = [os.path.expanduser(d) for d in dirs]
for directory in dirs:
if os.path.isdir(directory):
streamlink.load_plugins(directory)
else:
log.warning("Plugin path {0} does not exist or is not "
"a directory!", directory)
|
Parses arguments.
|
def setup_args(parser, config_files=[], ignore_unknown=False):
"""Parses arguments."""
global args
arglist = sys.argv[1:]
# Load arguments from config files
for config_file in filter(os.path.isfile, config_files):
arglist.insert(0, "@" + config_file)
args, unknown = parser.parse_known_args(arglist)
if unknown and not ignore_unknown:
msg = gettext('unrecognized arguments: %s')
parser.error(msg % ' '.join(unknown))
# Force lowercase to allow case-insensitive lookup
if args.stream:
args.stream = [stream.lower() for stream in args.stream]
if not args.url and args.url_param:
args.url = args.url_param
|
Console setup.
|
def setup_console(output):
"""Console setup."""
global console
# All console related operations are handled via the ConsoleOutput class
console = ConsoleOutput(output, streamlink)
console.json = args.json
# Handle SIGTERM just like SIGINT
signal.signal(signal.SIGTERM, signal.default_int_handler)
|
Sets the global HTTP settings such as proxy and headers.
|
def setup_http_session():
"""Sets the global HTTP settings, such as proxy and headers."""
if args.http_proxy:
streamlink.set_option("http-proxy", args.http_proxy)
if args.https_proxy:
streamlink.set_option("https-proxy", args.https_proxy)
if args.http_cookie:
streamlink.set_option("http-cookies", dict(args.http_cookie))
if args.http_header:
streamlink.set_option("http-headers", dict(args.http_header))
if args.http_query_param:
streamlink.set_option("http-query-params", dict(args.http_query_param))
if args.http_ignore_env:
streamlink.set_option("http-trust-env", False)
if args.http_no_ssl_verify:
streamlink.set_option("http-ssl-verify", False)
if args.http_disable_dh:
streamlink.set_option("http-disable-dh", True)
if args.http_ssl_cert:
streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
streamlink.set_option("http-timeout", args.http_timeout)
if args.http_cookies:
streamlink.set_option("http-cookies", args.http_cookies)
if args.http_headers:
streamlink.set_option("http-headers", args.http_headers)
if args.http_query_params:
streamlink.set_option("http-query-params", args.http_query_params)
|
Loads any additional plugins.
|
def setup_plugins(extra_plugin_dir=None):
"""Loads any additional plugins."""
if os.path.isdir(PLUGINS_DIR):
load_plugins([PLUGINS_DIR])
if extra_plugin_dir:
load_plugins(extra_plugin_dir)
|
Sets Streamlink options.
|
def setup_options():
"""Sets Streamlink options."""
if args.hls_live_edge:
streamlink.set_option("hls-live-edge", args.hls_live_edge)
if args.hls_segment_attempts:
streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
if args.hls_playlist_reload_attempts:
streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
if args.hls_segment_threads:
streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
if args.hls_segment_timeout:
streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
if args.hls_segment_ignore_names:
streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names)
if args.hls_segment_key_uri:
streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri)
if args.hls_timeout:
streamlink.set_option("hls-timeout", args.hls_timeout)
if args.hls_audio_select:
streamlink.set_option("hls-audio-select", args.hls_audio_select)
if args.hls_start_offset:
streamlink.set_option("hls-start-offset", args.hls_start_offset)
if args.hls_duration:
streamlink.set_option("hls-duration", args.hls_duration)
if args.hls_live_restart:
streamlink.set_option("hls-live-restart", args.hls_live_restart)
if args.hds_live_edge:
streamlink.set_option("hds-live-edge", args.hds_live_edge)
if args.hds_segment_attempts:
streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
if args.hds_segment_threads:
streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
if args.hds_segment_timeout:
streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
if args.hds_timeout:
streamlink.set_option("hds-timeout", args.hds_timeout)
if args.http_stream_timeout:
streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
if args.ringbuffer_size:
streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
if args.rtmp_proxy:
streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
if args.rtmp_rtmpdump:
streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
if args.rtmp_timeout:
streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
if args.stream_segment_attempts:
streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
if args.stream_segment_threads:
streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
if args.stream_segment_timeout:
streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
if args.stream_timeout:
streamlink.set_option("stream-timeout", args.stream_timeout)
if args.ffmpeg_ffmpeg:
streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
if args.ffmpeg_verbose:
streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
if args.ffmpeg_verbose_path:
streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
if args.ffmpeg_video_transcode:
streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
if args.ffmpeg_audio_transcode:
streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)
streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
streamlink.set_option("locale", args.locale)
|
Adds plugin-specific arguments to the argument parser and records their defaults.
|
def setup_plugin_args(session, parser):
"""Sets Streamlink plugin options."""
plugin_args = parser.add_argument_group("Plugin options")
for pname, plugin in session.plugins.items():
defaults = {}
for parg in plugin.arguments:
plugin_args.add_argument(parg.argument_name(pname), **parg.options)
defaults[parg.dest] = parg.default
plugin.options = PluginOptions(defaults)
|
Sets Streamlink plugin options.
|
def setup_plugin_options(session, plugin):
"""Sets Streamlink plugin options."""
pname = plugin.module
required = OrderedDict()
for parg in plugin.arguments:
if parg.options.get("help") != argparse.SUPPRESS:
if parg.required:
required[parg.name] = parg
value = getattr(args, parg.namespace_dest(pname))
session.set_plugin_option(pname, parg.dest, value)
# if the value is set, check to see if any of the required arguments are not set
if parg.required or value:
try:
for rparg in plugin.arguments.requires(parg.name):
required[rparg.name] = rparg
except RuntimeError:
console.logger.error("{0} plugin has a configuration error and the arguments "
"cannot be parsed".format(pname))
break
if required:
for req in required.values():
if not session.get_plugin_option(pname, req.dest):
prompt = req.prompt or "Enter {0} {1}".format(pname, req.name)
session.set_plugin_option(pname, req.dest,
console.askpass(prompt + ": ")
if req.sensitive else
console.ask(prompt + ": "))
|
Show currently installed versions.
|
def log_current_versions():
"""Show current installed versions"""
if logger.root.isEnabledFor(logging.DEBUG):
# MAC OS X
if sys.platform == "darwin":
os_version = "macOS {0}".format(platform.mac_ver()[0])
# Windows
elif sys.platform.startswith("win"):
os_version = "{0} {1}".format(platform.system(), platform.release())
# linux / other
else:
os_version = platform.platform()
log.debug("OS: {0}".format(os_version))
log.debug("Python: {0}".format(platform.python_version()))
log.debug("Streamlink: {0}".format(streamlink_version))
log.debug("Requests({0}), Socks({1}), Websocket({2})".format(
requests.__version__, socks_version, websocket_version))
|
Try to find a stream_id
|
def _get_stream_id(self, text):
"""Try to find a stream_id"""
m = self._image_re.search(text)
if m:
return m.group("stream_id")
|
Fallback if no stream_id was found before
|
def _get_iframe(self, text):
"""Fallback if no stream_id was found before"""
m = self._iframe_re.search(text)
if m:
return self.session.streams(m.group("url"))
|
Sets general options used by plugins and streams originating from this session object.
|
def set_option(self, key, value):
"""Sets general options used by plugins and streams originating
from this session object.
:param key: key of the option
:param value: value to set the option to
**Available options**:
======================== =========================================
hds-live-edge (float) Specify the time live HDS
streams will start from the edge of
the stream, default: ``10.0``
hds-segment-attempts (int) How many attempts should be done
to download each HDS segment, default: ``3``
hds-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hds-segment-timeout (float) HDS segment connect and read
timeout, default: ``10.0``
hds-timeout (float) Timeout for reading data from
HDS streams, default: ``60.0``
hls-live-edge (int) How many segments from the end
to start live streams on, default: ``3``
hls-segment-attempts (int) How many attempts should be done
to download each HLS segment, default: ``3``
hls-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``
hls-segment-timeout (float) HLS segment connect and read
timeout, default: ``10.0``
hls-timeout (float) Timeout for reading data from
HLS streams, default: ``60.0``
http-proxy (str) Specify an HTTP proxy to use for
all HTTP requests
https-proxy (str) Specify an HTTPS proxy to use for
all HTTPS requests
http-cookies (dict or str) A dict or a semi-colon (;)
delimited str of cookies to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-headers (dict or str) A dict or semi-colon (;)
delimited str of headers to add to each
HTTP request, e.g. ``foo=bar;baz=qux``
http-query-params (dict or str) A dict or an ampersand (&)
delimited string of query parameters to
add to each HTTP request,
e.g. ``foo=bar&baz=qux``
http-trust-env (bool) Trust HTTP settings set in the
environment, such as environment
variables (HTTP_PROXY, etc) and
~/.netrc authentication
http-ssl-verify (bool) Verify SSL certificates,
default: ``True``
http-ssl-cert (str or tuple) SSL certificate to use,
can be either a .pem file (str) or a
.crt/.key pair (tuple)
http-timeout (float) General timeout used by all HTTP
requests except the ones covered by
other options, default: ``20.0``
http-stream-timeout (float) Timeout for reading data from
HTTP streams, default: ``60.0``
subprocess-errorlog (bool) Log errors from subprocesses to
a file located in the temp directory
subprocess-errorlog-path (str) Log errors from subprocesses to
a specific file
ringbuffer-size (int) The size of the internal ring
buffer used by most stream types,
default: ``16777216`` (16MB)
rtmp-proxy (str) Specify a proxy (SOCKS) that RTMP
streams will use
rtmp-rtmpdump (str) Specify the location of the
rtmpdump executable used by RTMP streams,
e.g. ``/usr/local/bin/rtmpdump``
rtmp-timeout (float) Timeout for reading data from
RTMP streams, default: ``60.0``
ffmpeg-ffmpeg (str) Specify the location of the
ffmpeg executable used by muxing streams,
e.g. ``/usr/local/bin/ffmpeg``
ffmpeg-verbose (bool) Log stderr from ffmpeg to the
console
ffmpeg-verbose-path (str) Specify the location of the
ffmpeg stderr log file
ffmpeg-video-transcode (str) The codec to use if transcoding
video when muxing with ffmpeg
e.g. ``h264``
ffmpeg-audio-transcode (str) The codec to use if transcoding
audio when muxing with ffmpeg
e.g. ``aac``
stream-segment-attempts (int) How many attempts should be done
to download each segment, default: ``3``.
General option used by streams not
covered by other options.
stream-segment-threads (int) The size of the thread pool used
to download segments, default: ``1``.
General option used by streams not
covered by other options.
stream-segment-timeout (float) Segment connect and read
timeout, default: ``10.0``.
General option used by streams not
covered by other options.
stream-timeout (float) Timeout for reading data from
stream, default: ``60.0``.
General option used by streams not
covered by other options.
locale (str) Locale setting, in the RFC 1766 format
e.g. en_US or es_ES
default: ``system locale``.
user-input-requester (UserInputRequester) instance of UserInputRequester
to collect input from the user at runtime. Must be
set before the plugins are loaded.
default: ``UserInputRequester``.
======================== =========================================
"""
# Backwards compatibility
if key == "rtmpdump":
key = "rtmp-rtmpdump"
elif key == "rtmpdump-proxy":
key = "rtmp-proxy"
elif key == "errorlog":
key = "subprocess-errorlog"
elif key == "errorlog-path":
key = "subprocess-errorlog-path"
if key == "http-proxy":
self.http.proxies["http"] = update_scheme("http://", value)
elif key == "https-proxy":
self.http.proxies["https"] = update_scheme("https://", value)
elif key == "http-cookies":
if isinstance(value, dict):
self.http.cookies.update(value)
else:
self.http.parse_cookies(value)
elif key == "http-headers":
if isinstance(value, dict):
self.http.headers.update(value)
else:
self.http.parse_headers(value)
elif key == "http-query-params":
if isinstance(value, dict):
self.http.params.update(value)
else:
self.http.parse_query_params(value)
elif key == "http-trust-env":
self.http.trust_env = value
elif key == "http-ssl-verify":
self.http.verify = value
elif key == "http-disable-dh":
if value:
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS += ':!DH'
try:
requests.packages.urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST = \
requests.packages.urllib3.util.ssl_.DEFAULT_CIPHERS.encode("ascii")
except AttributeError:
# no ssl to disable the cipher on
pass
elif key == "http-ssl-cert":
self.http.cert = value
elif key == "http-timeout":
self.http.timeout = value
else:
self.options.set(key, value)
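A short usage sketch (assumes a constructed Streamlink session; keys as documented above):
from streamlink import Streamlink

session = Streamlink()
session.set_option("http-proxy", "http://127.0.0.1:8080")  # stored on the HTTP session
session.set_option("hls-live-edge", 2)                     # stored in session.options
session.set_option("rtmpdump", "/usr/bin/rtmpdump")        # legacy key, mapped to rtmp-rtmpdump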
|
Returns current value of specified option.
|
def get_option(self, key):
"""Returns current value of specified option.
:param key: key of the option
"""
# Backwards compatibility
if key == "rtmpdump":
key = "rtmp-rtmpdump"
elif key == "rtmpdump-proxy":
key = "rtmp-proxy"
elif key == "errorlog":
key = "subprocess-errorlog"
if key == "http-proxy":
return self.http.proxies.get("http")
elif key == "https-proxy":
return self.http.proxies.get("https")
elif key == "http-cookies":
return self.http.cookies
elif key == "http-headers":
return self.http.headers
elif key == "http-query-params":
return self.http.params
elif key == "http-trust-env":
return self.http.trust_env
elif key == "http-ssl-verify":
return self.http.verify
elif key == "http-ssl-cert":
return self.http.cert
elif key == "http-timeout":
return self.http.timeout
else:
return self.options.get(key)
|
Sets plugin specific options used by plugins originating from this session object.
|
def set_plugin_option(self, plugin, key, value):
"""Sets plugin specific options used by plugins originating
from this session object.
:param plugin: name of the plugin
:param key: key of the option
:param value: value to set the option to
"""
if plugin in self.plugins:
plugin = self.plugins[plugin]
plugin.set_option(key, value)
|
Returns current value of plugin specific option.
|
def get_plugin_option(self, plugin, key):
"""Returns current value of plugin specific option.
:param plugin: name of the plugin
:param key: key of the option
"""
if plugin in self.plugins:
plugin = self.plugins[plugin]
return plugin.get_option(key)
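A sketch for the plugin-option pair; the plugin name and option key here are hypothetical, real keys come from each plugin's declared arguments:
session.set_plugin_option("someplugin", "username", "alice")  # hypothetical plugin/key
session.get_plugin_option("someplugin", "username")           # -> "alice"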
|
Attempts to find a plugin that can use this URL.
|
def resolve_url(self, url, follow_redirect=True):
"""Attempts to find a plugin that can use this URL.
The default protocol (http) will be prefixed to the URL if
not specified.
Raises :exc:`NoPluginError` on failure.
:param url: a URL to match against loaded plugins
:param follow_redirect: follow redirects
"""
url = update_scheme("http://", url)
available_plugins = []
for name, plugin in self.plugins.items():
if plugin.can_handle_url(url):
available_plugins.append(plugin)
available_plugins.sort(key=lambda x: x.priority(url), reverse=True)
if available_plugins:
return available_plugins[0](url)
if follow_redirect:
# Attempt to handle a redirect URL
try:
res = self.http.head(url, allow_redirects=True, acceptable_status=[501])
# Fall back to GET request if server doesn't handle HEAD.
if res.status_code == 501:
res = self.http.get(url, stream=True)
if res.url != url:
return self.resolve_url(res.url, follow_redirect=follow_redirect)
except PluginError:
pass
raise NoPluginError
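A usage sketch (hypothetical URL; NoPluginError is re-exported at the package root):
from streamlink import Streamlink, NoPluginError

session = Streamlink()
try:
    plugin = session.resolve_url("https://example.com/live")  # hypothetical URL
    print(plugin.module)
except NoPluginError:
    print("no plugin matched the URL")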
|
Attempts to find a plugin and extract streams from the *url*.
|
def streams(self, url, **params):
"""Attempts to find a plugin and extract streams from the *url*.
*params* are passed to :func:`Plugin.streams`.
Raises :exc:`NoPluginError` if no plugin is found.
"""
plugin = self.resolve_url(url)
return plugin.streams(**params)
|
Attempt to load plugins from the path specified.
|
def load_plugins(self, path):
"""Attempt to load plugins from the path specified.
:param path: full path to a directory where to look for plugins
"""
for loader, name, ispkg in pkgutil.iter_modules([path]):
file, pathname, desc = imp.find_module(name, [path])
# set the full plugin module name
module_name = "streamlink.plugin.{0}".format(name)
try:
self.load_plugin(module_name, file, pathname, desc)
except Exception:
sys.stderr.write("Failed to load plugin {0}:\n".format(name))
print_small_exception("load_plugin")
continue
|
Get the VOD data path and the default VOD ID.
|
def vod_data(self, vid=None):
"""
Get the VOD data path and the default VOD ID
:return: VOD data as parsed JSON, or None if no VOD data URL is found
"""
page = self.session.http.get(self.url)
m = self._vod_re.search(page.text)
vod_data_url = m and urljoin(self.url, m.group(0))
if vod_data_url:
self.logger.debug("Found VOD data url: {0}", vod_data_url)
res = self.session.http.get(vod_data_url)
return self.session.http.json(res)
|
Converts a timestamp to seconds.
|
def hours_minutes_seconds(value):
"""converts a timestamp to seconds
- hours:minutes:seconds to seconds
- minutes:seconds to seconds
- 11h22m33s to seconds
- 11h to seconds
- 20h15m to seconds
- seconds to seconds
:param value: hh:mm:ss ; 00h00m00s ; seconds
:return: seconds
"""
try:
return int(value)
except ValueError:
pass
match = (_hours_minutes_seconds_re.match(value)
or _hours_minutes_seconds_2_re.match(value))
if not match:
raise ValueError
s = 0
s += int(match.group("hours") or "0") * 60 * 60
s += int(match.group("minutes") or "0") * 60
s += int(match.group("seconds") or "0")
return s
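Worked examples of the accepted formats (values follow from the arithmetic above):
hours_minutes_seconds("11h22m33s")  # 11*3600 + 22*60 + 33 = 40953
hours_minutes_seconds("1:02:03")    # 3723
hours_minutes_seconds("02:03")      # 123
hours_minutes_seconds("90")         # 90 (plain seconds pass straight through int())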
|
Checks value for minimum length using len().
|
def length(length):
"""Checks value for minimum length using len()."""
def min_len(value):
if not len(value) >= length:
raise ValueError(
"Minimum length is {0} but value is {1}".format(length, len(value))
)
return True
return min_len
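A usage sketch; the returned callable validates one value and raises ValueError below the minimum:
min_four = length(4)
min_four("abcd")     # True
min_four([1, 2, 3])  # raises ValueError: Minimum length is 4 but value is 3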
|
Checks if the string value starts with another string.
|
def startswith(string):
"""Checks if the string value starts with another string."""
def starts_with(value):
validate(text, value)
if not value.startswith(string):
raise ValueError("'{0}' does not start with '{1}'".format(value, string))
return True
return starts_with
|
Checks if the string value ends with another string.
|
def endswith(string):
"""Checks if the string value ends with another string."""
def ends_with(value):
validate(text, value)
if not value.endswith(string):
raise ValueError("'{0}' does not end with '{1}'".format(value, string))
return True
return ends_with
|
Checks if the string value contains another string.
|
def contains(string):
"""Checks if the string value contains another string."""
def contains_str(value):
validate(text, value)
if string not in value:
raise ValueError("'{0}' does not contain '{1}'".format(value, string))
return True
return contains_str
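The three string validators above share the same shape; a sketch, assuming module scope as in the definitions:
startswith("https://")("https://example.com")  # True
endswith(".m3u8")("playlist.m3u8")             # True
contains("token")("auth token here")           # True
contains("token")("nothing")                   # raises ValueError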
|
Get item from value (value[item]).
|
def get(item, default=None):
"""Get item from value (value[item]).
If the item is not found, return the default.
Handles XML elements, regex matches and anything that has __getitem__.
"""
def getter(value):
if ET.iselement(value):
value = value.attrib
try:
# Use .group() if this is a regex match object
if _is_re_match(value):
return value.group(item)
else:
return value[item]
except (KeyError, IndexError):
return default
except (TypeError, AttributeError) as err:
raise ValueError(err)
return transform(getter)
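A sketch applying get() through the module's validate() entry point, which executes transform schemas (hypothetical data):
from streamlink.plugin.api import validate

validate.validate(validate.get("id"), {"id": 42})          # -> 42
validate.validate(validate.get("missing", default=0), {})  # -> 0 (lookup failed, default returned)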
|
Get a named attribute from an object.
|
def getattr(attr, default=None):
"""Get a named attribute from an object.
When a default argument is given, it is returned when the attribute
doesn't exist.
"""
def getter(value):
return _getattr(value, attr, default)
return transform(getter)
|
Filters out unwanted items using the specified function.
|
def filter(func):
"""Filters out unwanted items using the specified function.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
def expand_kv(kv):
return func(*kv)
def filter_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_filter(expand_kv, value.items()))
else:
return cls(_filter(func, value))
return transform(filter_values)
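A sketch showing the dict expansion (key/value pairs are passed to the function as two arguments):
from streamlink.plugin.api import validate

validate.validate(validate.filter(lambda k, v: v > 1), {"a": 1, "b": 2})  # -> {"b": 2}
validate.validate(validate.filter(lambda x: x % 2 == 0), [1, 2, 3, 4])    # -> [2, 4]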
|
Apply function to each value inside the sequence or dict.
|
def map(func):
"""Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and text == func:
func = unicode
def expand_kv(kv):
return func(*kv)
def map_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_map(expand_kv, value.items()))
else:
return cls(_map(func, value))
return transform(map_values)
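A sketch mirroring filter() above; for dicts the function receives the pair and must return a pair:
from streamlink.plugin.api import validate

validate.validate(validate.map(lambda k, v: (k, v * 2)), {"a": 1})  # -> {"a": 2}
validate.validate(validate.map(lambda x: x + 1), [1, 2, 3])         # -> [2, 3, 4]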
|
Parses a URL and validates its attributes.
|
def url(**attributes):
"""Parses an URL and validates its attributes."""
def check_url(value):
validate(text, value)
parsed = urlparse(value)
if not parsed.netloc:
raise ValueError("'{0}' is not a valid URL".format(value))
for name, schema in attributes.items():
if not _hasattr(parsed, name):
raise ValueError("Invalid URL attribute '{0}'".format(name))
try:
validate(schema, _getattr(parsed, name))
except ValueError as err:
raise ValueError(
"Unable to validate URL attribute '{0}': {1}".format(
name, err
)
)
return True
# Convert "http" to be either any("http", "https") for convenience
if attributes.get("scheme") == "http":
attributes["scheme"] = any("http", "https")
return check_url
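A usage sketch; attribute names are those of urlparse results (scheme, netloc, path, ...), and "http" is widened to also accept https as noted above:
check = url(scheme="http", path=endswith(".m3u8"))  # assumes module scope
check("https://example.com/stream.m3u8")  # True
check("ftp://example.com/stream.m3u8")    # raises ValueError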
|
Find an XML element via XPath.
|
def xml_find(xpath):
"""Find a XML element via xpath."""
def xpath_find(value):
validate(ET.iselement, value)
value = value.find(xpath)
if value is None:
raise ValueError("XPath '{0}' did not return an element".format(xpath))
return validate(ET.iselement, value)
return transform(xpath_find)
|
Find a list of XML elements via XPath.
|
def xml_findall(xpath):
"""Find a list of XML elements via xpath."""
def xpath_findall(value):
validate(ET.iselement, value)
return value.findall(xpath)
return transform(xpath_findall)
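A sketch for the two XML helpers (hypothetical document; transform schemas are applied through validate()):
import xml.etree.ElementTree as ET
from streamlink.plugin.api import validate

root = ET.fromstring("<manifest><media url='a'/><media url='b'/></manifest>")
validate.validate(validate.xml_find("./media"), root)     # -> first <media> element
validate.validate(validate.xml_findall("./media"), root)  # -> list of both elements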
|
Finds playlist info (type, id) in HTTP response.
|
def _find_playlist_info(response):
"""
Finds playlist info (type, id) in HTTP response.
:param response: Response object.
:returns: Dictionary with type and id.
"""
values = {}
matches = _playlist_info_re.search(response.text)
if matches:
values['type'] = matches.group(1)
values['id'] = matches.group(2)
return values
|
Finds the embedded player URL in an HTTP response.
|
def _find_player_url(response):
"""
Finds the embedded player URL in an HTTP response.
:param response: Response object.
:returns: Player URL (str).
"""
url = ''
matches = _player_re.search(response.text)
if matches:
tmp_url = matches.group(0).replace('&', '&')
if 'hash' not in tmp_url:
# there's no hash in the URL, try to find it
matches = _hash_re.search(response.text)
if matches:
url = tmp_url + '&hash=' + matches.group(1)
else:
url = tmp_url
return 'http://ceskatelevize.cz/' + url
|
Find the VOD video URL.
|
def _get_vod_stream(self):
"""
Find the VOD video URL
:return: video URL
"""
res = self.session.http.get(self.url)
video_urls = self._re_vod.findall(res.text)
if video_urls:
return dict(vod=HTTPStream(self.session, video_urls[0]))
|
Get the live stream in a particular language.
|
def _get_live_streams(self, match):
"""
Get the live stream in a particular language
:param match: dict of named groups matched from the URL
:return: HLS streams
"""
live_url = self._live_api_url.format(match.get("subdomain"))
live_res = self.session.http.json(self.session.http.get(live_url), schema=self._live_schema)
api_url = update_scheme("{0}:///".format(match.get("scheme")), live_res["url"])
api_res = self.session.http.json(self.session.http.get(api_url), schema=self._stream_api_schema)
return HLSStream.parse_variant_playlist(self.session, api_res["primary"])
|
Find the streams for Euronews.
|
def _get_streams(self):
"""
Find the streams for Euronews
:return: live or VOD streams
"""
match = self._url_re.match(self.url).groupdict()
if match.get("path") == "live":
return self._get_live_streams(match)
else:
return self._get_vod_stream()
|
Attempts to parse a M3U8 playlist from a string of data.
|
def load(data, base_uri=None, parser=M3U8Parser, **kwargs):
"""Attempts to parse a M3U8 playlist from a string of data.
If specified, *base_uri* is the base URI that relative URIs will
be joined together with, otherwise relative URIs will be as is.
If specified, *parser* can be a M3U8Parser subclass to be used
to parse the data.
"""
return parser(base_uri, **kwargs).parse(data)
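A minimal sketch (hypothetical playlist text; the .segments attribute of the parsed object is an assumption about the default M3U8Parser result):
playlist_text = "#EXTM3U\n#EXTINF:6.0,\nsegment0.ts\n"
playlist = load(playlist_text, base_uri="https://example.com/hls/")
for segment in playlist.segments:  # assumed attribute on the parsed object
    print(segment.uri)             # relative URIs are joined with base_uri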
|
Check if the current player supports adding a title
|
def supported_player(cls, cmd):
"""
Check if the current player supports adding a title
:param cmd: command to test
:return: name of the player|None
"""
if not is_win32:
# under a POSIX system use shlex to find the actual command
# under windows this is not an issue because executables end in .exe
cmd = shlex.split(cmd)[0]
cmd = os.path.basename(cmd.lower())
for player, possiblecmds in SUPPORTED_PLAYERS.items():
for possiblecmd in possiblecmds:
if cmd.startswith(possiblecmd):
return player
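A sketch, assuming this is the classmethod on the CLI's PlayerOutput class and "mpv" is a key in SUPPORTED_PLAYERS:
PlayerOutput.supported_player("/usr/bin/mpv --no-border")  # -> "mpv"
PlayerOutput.supported_player("unknown-player")            # -> None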
|
Login to the Schoolism account and return the user's email.
|
def login(self, email, password):
"""
Login to the Schoolism account and return the user's email
:param email: (str) email for account
:param password: (str) password for account
:return: (str) user's email
"""
if self.options.get("email") and self.options.get("password"):
res = self.session.http.post(self.login_url, data={"email": email,
"password": password,
"redirect": None,
"submit": "Login"})
if res.cookies.get("password") and res.cookies.get("email"):
return res.cookies.get("email")
else:
log.error("Failed to login to Schoolism, incorrect email/password combination")
else:
log.error("An email and password are required to access Schoolism streams")
|
Get the livestream videoid from a username. https://developer.dailymotion.com/tools/apiexplorer#/user/videos/list
|
def get_live_id(self, username):
"""Get the livestream videoid from a username.
https://developer.dailymotion.com/tools/apiexplorer#/user/videos/list
"""
params = {
"flags": "live_onair"
}
api_user_videos = USER_INFO_URL.format(username) + "/videos"
try:
res = self.session.http.get(api_user_videos,
params=params)
except Exception:
self.logger.error("invalid username")
raise NoStreamsError(self.url)
data = self.session.http.json(res, schema=_live_id_schema)
if data["total"] > 0:
media_id = data["list"][0]["id"]
return media_id
return False
|