Schema of the records below (one row per function; the rows shown are all from the `test` partition):

| column | type | size |
|---|---|---|
| partition | string | 3 classes |
| func_name | string | 1–134 chars |
| docstring | string | 1–46.9k chars |
| path | string | 4–223 chars |
| original_string | string | 75–104k chars |
| code | string | 75–104k chars |
| docstring_tokens | list | 1–1.97k items |
| repo | string | 7–55 chars |
| language | string | 1 class |
| url | string | 87–315 chars |
| code_tokens | list | 19–28.4k items |
| sha | string | 40 chars |

In every row shown here, `original_string` is identical to `code`, `docstring` is the docstring embedded in `code`, and `docstring_tokens`/`code_tokens` are token lists derived from those two fields, so each record below is rendered once: its metadata lines followed by a single code block.

---
partition: test · func_name: `WikiText.parameters` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L687-L693

    def parameters(self) -> List['Parameter']:
        """Return a list of parameter objects."""
        _lststr = self._lststr
        _type_to_spans = self._type_to_spans
        return [
            Parameter(_lststr, _type_to_spans, span, 'Parameter')
            for span in self._subspans('Parameter')]
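A minimal usage sketch for the record above, assuming the published `wikitextparser` package is installed and that, as in released versions of the library, accessors such as `parameters` are exposed as properties (the printed outputs are illustrative expectations, not captured runs):

```python
import wikitextparser as wtp

# {{{name|default}}} is a template parameter placeholder
param = wtp.parse('{{{width|100px}}}').parameters[0]
print(param.name)     # expected: 'width'
print(param.default)  # expected: '100px'
```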
---
partition: test · func_name: `WikiText.parser_functions` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L696-L702

    def parser_functions(self) -> List['ParserFunction']:
        """Return a list of parser function objects."""
        _lststr = self._lststr
        _type_to_spans = self._type_to_spans
        return [
            ParserFunction(_lststr, _type_to_spans, span, 'ParserFunction')
            for span in self._subspans('ParserFunction')]
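An illustrative sketch of how parser functions are kept separate from plain templates (same assumptions as the previous sketch):

```python
import wikitextparser as wtp

parsed = wtp.parse('{{#expr: 2 + 2 }} and {{echo|x}}')
print([pf.string for pf in parsed.parser_functions])  # expected: ['{{#expr: 2 + 2 }}']
print([t.string for t in parsed.templates])           # expected: ['{{echo|x}}']
```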
---
partition: test · func_name: `WikiText.templates` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L705-L711

    def templates(self) -> List['Template']:
        """Return a list of templates as template objects."""
        _lststr = self._lststr
        _type_to_spans = self._type_to_spans
        return [
            Template(_lststr, _type_to_spans, span, 'Template')
            for span in self._subspans('Template')]
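Because `_subspans` yields every `Template` span inside the current span, nested templates are returned too, in document order. A short sketch:

```python
import wikitextparser as wtp

parsed = wtp.parse('{{outer|arg={{inner}}}}')
print([t.name for t in parsed.templates])  # expected: ['outer', 'inner']
```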
---
partition: test · func_name: `WikiText.wikilinks` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L714-L720

    def wikilinks(self) -> List['WikiLink']:
        """Return a list of wikilink objects."""
        _lststr = self._lststr
        _type_to_spans = self._type_to_spans
        return [
            WikiLink(_lststr, _type_to_spans, span, 'WikiLink')
            for span in self._subspans('WikiLink')]
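A sketch of the wikilink accessor, using the library's documented `target` and `text` attributes:

```python
import wikitextparser as wtp

link = wtp.parse('[[Help:Links|the manual]]').wikilinks[0]
print(link.target)  # expected: 'Help:Links'
print(link.text)    # expected: 'the manual'
```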
---
partition: test · func_name: `WikiText.comments` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L723-L729

    def comments(self) -> List['Comment']:
        """Return a list of comment objects."""
        _lststr = self._lststr
        _type_to_spans = self._type_to_spans
        return [
            Comment(_lststr, _type_to_spans, span, 'Comment')
            for span in self._subspans('Comment')]
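An illustrative use of the comment accessor; `contents` should hold the text between `<!--` and `-->`:

```python
import wikitextparser as wtp

comment = wtp.parse('visible <!-- hidden note --> visible').comments[0]
print(comment.contents)  # expected: ' hidden note '
```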
---
partition: test · func_name: `WikiText.external_links` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L732-L778

    def external_links(self) -> List['ExternalLink']:
        """Return a list of found external link objects.

        Note:
            Templates adjacent to external links are considered part of the
            link. In reality, this depends on the contents of the template:

            >>> WikiText(
            ...     'http://example.com{{dead link}}'
            ... ).external_links[0].url
            'http://example.com{{dead link}}'
            >>> WikiText(
            ...     '[http://example.com{{space template}} text]'
            ... ).external_links[0].url
            'http://example.com{{space template}}'
        """
        external_links = []  # type: List['ExternalLink']
        external_links_append = external_links.append
        type_to_spans = self._type_to_spans
        lststr = self._lststr
        ss, se = self._span
        spans = type_to_spans.setdefault('ExternalLink', [])
        if not spans:
            # All the added spans will be new.
            spans_append = spans.append
            for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow):
                s, e = m.span()
                span = [ss + s, ss + e]
                spans_append(span)
                external_links_append(
                    ExternalLink(lststr, type_to_spans, span, 'ExternalLink'))
            return external_links
        # There are already some ExternalLink spans. Use the already existing
        # ones when the detected span is one of those.
        span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
        for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow):
            s, e = m.span()
            span = s, e = [s + ss, e + ss]
            old_span = span_tuple_to_span_get((s, e))
            if old_span is None:
                insort(spans, span)
            else:
                span = old_span
            external_links_append(
                ExternalLink(lststr, type_to_spans, span, 'ExternalLink'))
        return external_links
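Beyond the template-adjacency edge case shown in the docstring's doctests, basic usage looks like this (illustrative):

```python
import wikitextparser as wtp

link = wtp.parse('See [https://example.com the example site].').external_links[0]
print(link.url)   # expected: 'https://example.com'
print(link.text)  # expected: 'the example site'
```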
---
partition: test · func_name: `WikiText.sections` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L781-L839

    def sections(self) -> List['Section']:
        """Return a list of section in current wikitext.

        The first section will always be the lead section, even if it is an
        empty string.
        """
        sections = []  # type: List['Section']
        sections_append = sections.append
        type_to_spans = self._type_to_spans
        lststr = self._lststr
        ss, se = _span = self._span
        type_spans = type_to_spans.setdefault('Section', [])
        full_match = SECTIONS_FULLMATCH(self._shadow)
        section_spans = full_match.spans('section')
        levels = [len(eq) for eq in full_match.captures('equals')]
        if not type_spans:
            # All spans are new
            spans_append = type_spans.append
            for current_index, (current_level, (s, e)) in enumerate(
                zip(levels, section_spans), 1
            ):
                # Add text of the current_section to any parent section.
                # Note that section 0 is not a parent for any subsection.
                for section_index, section_level in enumerate(
                    levels[current_index:], current_index
                ):
                    if current_level and section_level > current_level:
                        e = section_spans[section_index][1]
                    else:
                        break
                span = [ss + s, ss + e]
                spans_append(span)
                sections_append(
                    Section(lststr, type_to_spans, span, 'Section'))
            return sections
        # There are already some spans. Instead of appending new spans
        # use them when the detected span already exists.
        span_tuple_to_span = {(s[0], s[1]): s for s in type_spans}.get
        for current_index, (current_level, (s, e)) in enumerate(
            zip(levels, section_spans), 1
        ):
            # Add text of the current_section to any parent section.
            # Note that section 0 is not a parent for any subsection.
            for section_index, section_level in enumerate(
                levels[current_index:], current_index
            ):
                if current_level and section_level > current_level:
                    e = section_spans[section_index][1]
                else:
                    break
            s, e = ss + s, ss + e
            old_span = span_tuple_to_span((s, e))
            if old_span is None:
                span = [s, e]
                insort(type_spans, span)
            else:
                span = old_span
            sections_append(Section(lststr, type_to_spans, span, 'Section'))
        return sections
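A sketch of the sectioning behavior described above: the lead section comes first, and a parent section's span is extended to cover its deeper subsections (the outputs are illustrative expectations):

```python
import wikitextparser as wtp

parsed = wtp.parse('lead\n== A ==\na-text\n=== A.1 ===\nsub\n== B ==\nb-text\n')
for section in parsed.sections:
    print(section.level, repr(section.title))
# expected: level 0 with no title (the lead), then A (2), A.1 (3), B (2);
# section A's string also contains all of A.1, per the inner loop above
```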
---
partition: test · func_name: `WikiText.tables` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L842-L882

    def tables(self) -> List['Table']:
        """Return a list of found table objects."""
        tables = []  # type: List['Table']
        tables_append = tables.append
        type_to_spans = self._type_to_spans
        lststr = self._lststr
        shadow = self._shadow[:]
        ss, se = self._span
        spans = type_to_spans.setdefault('Table', [])
        if not spans:
            # All the added spans will be new.
            m = True  # type: Any
            while m:
                m = False
                for m in TABLE_FINDITER(shadow):
                    ms, me = m.span()
                    # Ignore leading whitespace using len(m[1]).
                    span = [ss + ms + len(m[1]), ss + me]
                    spans.append(span)
                    tables_append(Table(lststr, type_to_spans, span, 'Table'))
                    shadow[ms:me] = b'_' * (me - ms)
            return tables
        # There are already exists some spans. Try to use the already existing
        # before appending new spans.
        span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
        m = True
        while m:
            m = False
            for m in TABLE_FINDITER(shadow):
                ms, me = m.span()
                # Ignore leading whitespace using len(m[1]).
                s, e = ss + ms + len(m[1]), ss + me
                old_span = span_tuple_to_span_get((s, e))
                if old_span is None:
                    span = [s, e]
                    insort(spans, span)
                else:
                    span = old_span
                tables_append(Table(lststr, type_to_spans, span, 'Table'))
                shadow[ms:me] = b'_' * (me - ms)
        return tables
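A minimal sketch of table extraction; `Table.data()` is the library's accessor for cell text (expected output illustrative):

```python
import wikitextparser as wtp

table = wtp.parse('{|\n! a !! b\n|-\n| 1 || 2\n|}').tables[0]
print(table.data())  # expected: [['a', 'b'], ['1', '2']]
```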
---
partition: test · func_name: `WikiText.lists` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L889-L939

    def lists(self, pattern: str = None) -> List['WikiList']:
        r"""Return a list of WikiList objects.

        :param pattern: The starting pattern for list items.
            Return all types of lists (ol, ul, and dl) if pattern is None.
            If pattern is not None, it will be passed to the regex engine,
            remember to escape the `*` character. Examples:
                - `\#` means top-level ordered lists
                - `\#\*` means unordred lists inside an ordered one
                - Currently definition lists are not well supported, but you
                  can use `[:;]` as their pattern.
            Tips and tricks:
                Be careful when using the following patterns as they will
                probably cause malfunction in the `sublists` method of the
                resultant List. (However don't worry about them if you are
                not going to use the `sublists` method.)
                - Use `\*+` as a pattern and nested unordered lists will be
                  treated as flat.
                - Use `\*\s*` as pattern to rtstrip `items` of the list.
            Although the pattern parameter is optional, but specifying it
            can improve the performance.
        """
        lists = []
        lists_append = lists.append
        lststr = self._lststr
        type_to_spans = self._type_to_spans
        spans = type_to_spans.setdefault('WikiList', [])
        span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
        shadow, ss = self._lists_shadow_ss
        for pattern in \
                (r'\#', r'\*', '[:;]') if pattern is None else (pattern,):
            for m in finditer(
                LIST_PATTERN_FORMAT.replace(b'{pattern}', pattern.encode()),
                shadow, MULTILINE
            ):
                ms, me = m.span()
                s, e = ss + ms, ss + me
                old_span = span_tuple_to_span_get((s, e))
                if old_span is None:
                    span = [s, e]
                    insort(spans, span)
                else:
                    span = old_span
                lists_append(WikiList(
                    lststr, pattern, m, type_to_spans, span, 'WikiList'))
        return lists
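An illustrative call with an explicit pattern, as the docstring recommends for performance (exact whitespace in `items` may differ from what is shown):

```python
import wikitextparser as wtp

ordered = wtp.parse('# first\n# second\n').lists(r'\#')[0]
print(ordered.items)  # expected: [' first', ' second']
```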
---
partition: test · func_name: `WikiText.tags` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L941-L1009

    def tags(self, name=None) -> List['Tag']:
        """Return all tags with the given name."""
        lststr = self._lststr
        type_to_spans = self._type_to_spans
        if name:
            if name in _tag_extensions:
                string = lststr[0]
                return [
                    Tag(lststr, type_to_spans, span, 'ExtensionTag')
                    for span in type_to_spans['ExtensionTag']
                    if string.startswith('<' + name, span[0])]
            tags = []  # type: List['Tag']
        else:
            # There is no name, add all extension tags. Before using shadow.
            tags = [
                Tag(lststr, type_to_spans, span, 'ExtensionTag')
                for span in type_to_spans['ExtensionTag']]
        tags_append = tags.append
        # Get the left-most start tag, match it to right-most end tag
        # and so on.
        ss = self._span[0]
        shadow = self._shadow
        if name:
            # There is a name but it is not in TAG_EXTENSIONS.
            reversed_start_matches = reversed([m for m in regex_compile(
                START_TAG_PATTERN.replace(
                    rb'{name}', rb'(?P<name>' + name.encode() + rb')')
            ).finditer(shadow)])
            end_search = regex_compile(END_TAG_PATTERN.replace(
                b'{name}', name.encode())).search
        else:
            reversed_start_matches = reversed(
                [m for m in START_TAG_FINDITER(shadow)])
        shadow_copy = shadow[:]
        spans = type_to_spans.setdefault('Tag', [])
        span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get
        spans_append = spans.append
        for start_match in reversed_start_matches:
            if start_match['self_closing']:
                # Don't look for the end tag
                s, e = start_match.span()
                span = [ss + s, ss + e]
            else:
                # look for the end-tag
                if name:
                    # the end_search is already available
                    # noinspection PyUnboundLocalVariable
                    end_match = end_search(shadow_copy, start_match.end())
                else:
                    # build end_search according to start tag name
                    end_match = search(
                        END_TAG_PATTERN.replace(
                            b'{name}', start_match['name']),
                        shadow_copy)
                if end_match:
                    s, e = end_match.span()
                    shadow_copy[s:e] = b'_' * (e - s)
                    span = [ss + start_match.start(), ss + e]
                else:
                    # Assume start-only tag.
                    s, e = start_match.span()
                    span = [ss + s, ss + e]
            old_span = span_tuple_to_span_get((span[0], span[1]))
            if old_span is None:
                spans_append(span)
            else:
                span = old_span
            tags_append(Tag(lststr, type_to_spans, span, 'Tag'))
        return sorted(tags, key=attrgetter('_span'))
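A sketch of looking up an extension tag by name; `ref` is in the extension-tag set, so the fast `ExtensionTag` path at the top of the method should apply (outputs illustrative):

```python
import wikitextparser as wtp

ref = wtp.parse('text<ref name="n">a note</ref>more').tags('ref')[0]
print(ref.name)      # expected: 'ref'
print(ref.contents)  # expected: 'a note'
```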
---
partition: test · func_name: `SubWikiText._subspans` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L1052-L1062

    def _subspans(self, _type: str) -> Generator[int, None, None]:
        """Yield all the sub-span indices excluding self._span."""
        ss, se = self._span
        spans = self._type_to_spans[_type]
        # Do not yield self._span by bisecting for s < ss.
        # The second bisect is an optimization and should be on [se + 1],
        # but empty spans are not desired thus [se] is used.
        b = bisect(spans, [ss])
        for span in spans[b:bisect(spans, [se], b)]:
            if span[1] <= se:
                yield span
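The two `bisect` calls narrow a start-sorted span list to spans inside `[ss, se]` without scanning the whole list; `bisect` in this source is the module-level `bisect`, i.e. `bisect_right`. A standalone sketch of the same trick on plain data:

```python
from bisect import bisect_right

spans = [[0, 30], [2, 4], [6, 9], [12, 18], [25, 40]]  # sorted [start, end] pairs
ss, se = 2, 20  # the enclosing span

b = bisect_right(spans, [ss])  # skip spans starting before ss ([ss] sorts before [ss, x])
candidates = spans[b:bisect_right(spans, [se], b)]  # stop at spans starting after se
print([s for s in candidates if s[1] <= se])  # [[2, 4], [6, 9], [12, 18]]
```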
---
partition: test · func_name: `SubWikiText.ancestors` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L1065-L1088

    def ancestors(self, type_: Optional[str] = None) -> List['WikiText']:
        """Return the ancestors of the current node.

        :param type_: the type of the desired ancestors as a string.
            Currently the following types are supported: {Template,
            ParserFunction, WikiLink, Comment, Parameter, ExtensionTag}.
            The default is None and means all the ancestors of any type above.
        """
        if type_ is None:
            types = SPAN_PARSER_TYPES
        else:
            types = type_,
        lststr = self._lststr
        type_to_spans = self._type_to_spans
        ss, se = self._span
        ancestors = []
        ancestors_append = ancestors.append
        for type_ in types:
            cls = globals()[type_]
            spans = type_to_spans[type_]
            for span in spans[:bisect(spans, [ss])]:
                if se < span[1]:
                    ancestors_append(cls(lststr, type_to_spans, span, type_))
        return sorted(ancestors, key=lambda i: ss - i._span[0])
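An illustrative walk up the tree; per the `sorted` key above, the nearest enclosing node comes first:

```python
import wikitextparser as wtp

innermost = wtp.parse('{{a|{{b|{{c}}}}}}').templates[2]  # the {{c}} template
print([t.name for t in innermost.ancestors()])  # expected: ['b', 'a']
```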
---
partition: test · func_name: `SubWikiText.parent` · path: `wikitextparser/_wikitext.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_wikitext.py#L1090-L1103

    def parent(self, type_: Optional[str] = None) -> Optional['WikiText']:
        """Return the parent node of the current object.

        :param type_: the type of the desired parent object.
            Currently the following types are supported: {Template,
            ParserFunction, WikiLink, Comment, Parameter, ExtensionTag}.
            The default is None and means the first parent, of any type above.
        :return: parent WikiText object or None if no parent with the desired
            `type_` is found.
        """
        ancestors = self.ancestors(type_)
        if ancestors:
            return ancestors[0]
        return None
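A matching sketch for `parent`, which simply takes the first (nearest) ancestor:

```python
import wikitextparser as wtp

inner = wtp.parse('{{outer|{{inner}}}}').templates[1]
print(inner.parent().name)             # expected: 'outer'
print(inner.parent(type_='WikiLink'))  # expected: None (no such ancestor)
```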
---
partition: test · func_name: `mode` · path: `wikitextparser/_template.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L323-L338

    def mode(list_: List[T]) -> T:
        """Return the most common item in the list.

        Return the first one if there are more than one most common items.

        Example:

        >>> mode([1,1,2,2,])
        1
        >>> mode([1,2,2])
        2
        >>> mode([])
        ...
        ValueError: max() arg is an empty sequence
        """
        return max(set(list_), key=list_.count)
---
partition: test · func_name: `get_arg` · path: `wikitextparser/_template.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L341-L353

    def get_arg(name: str, args: Iterable[Argument]) -> Optional[Argument]:
        """Return the first argument in the args that has the given name.

        Return None if no such argument is found.

        As the computation of self.arguments is a little costly, this
        function was created so that other methods that have already computed
        the arguments use it instead of calling self.get_arg directly.
        """
        for arg in args:
            if arg.name.strip(WS) == name.strip(WS):
                return arg
        return None
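The helper compares whitespace-stripped names, so an argument named `' year '` matches a lookup for `'year'`. An illustrative equivalent built on the public `arguments` accessor:

```python
import wikitextparser as wtp

args = wtp.parse('{{cite|title=Foo| year = 2020 }}').templates[0].arguments
print([a.name for a in args])  # expected: ['title', ' year ']
# same stripped-name comparison as the helper above:
year = next((a for a in args if a.name.strip() == 'year'), None)
print(year.value)  # expected: ' 2020 '
```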
---
partition: test · func_name: `Template.normal_name` · path: `wikitextparser/_template.py`
repo: 5j9/wikitextparser · language: python · sha: 1347425814361d7955342c53212edbb27f0ff4b5
url: https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L40-L113

    def normal_name(
            self,
            rm_namespaces=('Template',),
            capital_links=False,
            _code: str = None,
            *,
            code: str = None,
            capitalize=False
    ) -> str:
        """Return normal form of self.name.

        - Remove comments.
        - Remove language code.
        - Remove namespace ("template:" or any of `localized_namespaces`.
        - Use space instead of underscore.
        - Remove consecutive spaces.
        - Use uppercase for the first letter if `capitalize`.
        - Remove #anchor.

        :param rm_namespaces: is used to provide additional localized
            namespaces for the template namespace. They will be removed from
            the result. Default is ('Template',).
        :param capitalize: If True, convert the first letter of the
            template's name to a capital letter. See
            [[mw:Manual:$wgCapitalLinks]] for more info.
        :param code: is the language code.
        :param capital_links: deprecated.
        :param _code: deprecated.

        Example:
            >>> Template(
            ...     '{{ eN : tEmPlAtE : <!-- c --> t_1 # b | a }}'
            ... ).normal_name(code='en')
            'T 1'
        """
        if capital_links:
            warn('`capital_links` argument is deprecated,'
                 ' use `capitalize` instead', DeprecationWarning)
            capitalize = capital_links
        if _code:
            warn('`positional_code` argument is deprecated,'
                 ' use `code` instead', DeprecationWarning)
            code = _code
        # Remove comments
        name = COMMENT_SUB('', self.name).strip(WS)
        # Remove code
        if code:
            head, sep, tail = name.partition(':')
            if not head and sep:
                name = tail.strip(' ')
                head, sep, tail = name.partition(':')
            if code.lower() == head.strip(' ').lower():
                name = tail.strip(' ')
        # Remove namespace
        head, sep, tail = name.partition(':')
        if not head and sep:
            name = tail.strip(' ')
            head, sep, tail = name.partition(':')
        if head:
            ns = head.strip(' ').lower()
            for namespace in rm_namespaces:
                if namespace.lower() == ns:
                    name = tail.strip(' ')
                    break
        # Use space instead of underscore
        name = name.replace('_', ' ')
        if capitalize:
            # Use uppercase for the first letter
            n0 = name[0]
            if n0.islower():
                name = n0.upper() + name[1:]
        # Remove #anchor
        name, sep, tail = name.partition('#')
        return ' '.join(name.split())
test
|
Template.rm_first_of_dup_args
|
Eliminate duplicate arguments by removing the first occurrences.
Remove the first occurrences of duplicate arguments, regardless of
their value. Result of the rendered wikitext should remain the same.
Warning: Some meaningful data may be removed from wikitext.
Also see `rm_dup_args_safe` function.
|
wikitextparser/_template.py
|
def rm_first_of_dup_args(self) -> None:
"""Eliminate duplicate arguments by removing the first occurrences.
Remove the first occurrences of duplicate arguments, regardless of
their value. Result of the rendered wikitext should remain the same.
Warning: Some meaningful data may be removed from wikitext.
Also see `rm_dup_args_safe` function.
"""
names = set() # type: set
for a in reversed(self.arguments):
name = a.name.strip(WS)
if name in names:
del a[:len(a.string)]
else:
names.add(name)
|
def rm_first_of_dup_args(self) -> None:
"""Eliminate duplicate arguments by removing the first occurrences.
Remove the first occurrences of duplicate arguments, regardless of
their value. Result of the rendered wikitext should remain the same.
Warning: Some meaningful data may be removed from wikitext.
Also see `rm_dup_args_safe` function.
"""
names = set() # type: set
for a in reversed(self.arguments):
name = a.name.strip(WS)
if name in names:
del a[:len(a.string)]
else:
names.add(name)
|
[
"Eliminate",
"duplicate",
"arguments",
"by",
"removing",
"the",
"first",
"occurrences",
"."
] |
5j9/wikitextparser
|
python
|
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L115-L130
|
[
"def",
"rm_first_of_dup_args",
"(",
"self",
")",
"->",
"None",
":",
"names",
"=",
"set",
"(",
")",
"# type: set",
"for",
"a",
"in",
"reversed",
"(",
"self",
".",
"arguments",
")",
":",
"name",
"=",
"a",
".",
"name",
".",
"strip",
"(",
"WS",
")",
"if",
"name",
"in",
"names",
":",
"del",
"a",
"[",
":",
"len",
"(",
"a",
".",
"string",
")",
"]",
"else",
":",
"names",
".",
"add",
"(",
"name",
")"
] |
1347425814361d7955342c53212edbb27f0ff4b5
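A minimal usage sketch (assuming the package imports as wikitextparser): MediaWiki renders the last of duplicate arguments, so dropping the first occurrences keeps the rendered result unchanged.

import wikitextparser as wtp

template = wtp.parse('{{t|a=1|b=2|a=3}}').templates[0]
template.rm_first_of_dup_args()  # the earlier |a=1 is removed
print(template.string)  # expected: '{{t|b=2|a=3}}'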
|
test
|
Template.rm_dup_args_safe
|
Remove duplicate arguments in a safe manner.
Remove the duplicate arguments only in the following situations:
1. Both arguments have the same name AND value. (Remove one of
them.)
2. Arguments have the same name and one of them is empty. (Remove
the empty one.)
Warning: Although this is considered to be safe and no meaningful data
is removed from wikitext, the result of the rendered wikitext
may actually change if the second arg is empty and removed but
the first had had a value.
If `tag` is defined, it should be a string that will be appended to
the value of the remaining duplicate arguments.
Also see `rm_first_of_dup_args` function.
|
wikitextparser/_template.py
|
def rm_dup_args_safe(self, tag: str = None) -> None:
"""Remove duplicate arguments in a safe manner.
Remove the duplicate arguments only in the following situations:
1. Both arguments have the same name AND value. (Remove one of
them.)
2. Arguments have the same name and one of them is empty. (Remove
the empty one.)
Warning: Although this is considered to be safe and no meaningful data
is removed from wikitext, the result of the rendered wikitext
may actually change if the second arg is empty and removed but
the first had had a value.
If `tag` is defined, it should be a string that will be appended to
the value of the remaining duplicate arguments.
Also see `rm_first_of_dup_args` function.
"""
name_to_lastarg_vals = {} \
# type: Dict[str, Tuple[Argument, List[str]]]
# Removing positional args affects their name. By reversing the list
# we avoid encountering those kind of args.
for arg in reversed(self.arguments):
name = arg.name.strip(WS)
if arg.positional:
# Value of keyword arguments is automatically stripped by MW.
val = arg.value
else:
# But it's not OK to strip whitespace in positional arguments.
val = arg.value.strip(WS)
if name in name_to_lastarg_vals:
# This is a duplicate argument.
if not val:
# This duplicate argument is empty. It's safe to remove it.
del arg[0:len(arg.string)]
else:
# Try to remove any of the detected duplicates of this
# that are empty or their value equals to this one.
lastarg, dup_vals = name_to_lastarg_vals[name]
if val in dup_vals:
del arg[0:len(arg.string)]
elif '' in dup_vals:
# This happens only if the last occurrence of name has
# been an empty string; other empty values will
# be removed as they are seen.
# In other words index of the empty argument in
# dup_vals is always 0.
del lastarg[0:len(lastarg.string)]
dup_vals.pop(0)
else:
# It was not possible to remove any of the duplicates.
dup_vals.append(val)
if tag:
arg.value += tag
else:
name_to_lastarg_vals[name] = (arg, [val])
|
def rm_dup_args_safe(self, tag: str = None) -> None:
"""Remove duplicate arguments in a safe manner.
Remove the duplicate arguments only in the following situations:
1. Both arguments have the same name AND value. (Remove one of
them.)
2. Arguments have the same name and one of them is empty. (Remove
the empty one.)
Warning: Although this is considered to be safe and no meaningful data
is removed from wikitext, the result of the rendered wikitext
may actually change if the second arg is empty and removed but
the first had had a value.
If `tag` is defined, it should be a string that will be appended to
the value of the remaining duplicate arguments.
Also see `rm_first_of_dup_args` function.
"""
name_to_lastarg_vals = {} \
# type: Dict[str, Tuple[Argument, List[str]]]
# Removing positional args affects their name. By reversing the list
# we avoid encountering those kind of args.
for arg in reversed(self.arguments):
name = arg.name.strip(WS)
if arg.positional:
# Value of keyword arguments is automatically stripped by MW.
val = arg.value
else:
# But it's not OK to strip whitespace in positional arguments.
val = arg.value.strip(WS)
if name in name_to_lastarg_vals:
# This is a duplicate argument.
if not val:
# This duplicate argument is empty. It's safe to remove it.
del arg[0:len(arg.string)]
else:
# Try to remove any of the detected duplicates of this
# that are empty or their value equals to this one.
lastarg, dup_vals = name_to_lastarg_vals[name]
if val in dup_vals:
del arg[0:len(arg.string)]
elif '' in dup_vals:
# This happens only if the last occurrence of name has
# been an empty string; other empty values will
# be removed as they are seen.
# In other words index of the empty argument in
# dup_vals is always 0.
del lastarg[0:len(lastarg.string)]
dup_vals.pop(0)
else:
# It was not possible to remove any of the duplicates.
dup_vals.append(val)
if tag:
arg.value += tag
else:
name_to_lastarg_vals[name] = (arg, [val])
|
[
"Remove",
"duplicate",
"arguments",
"in",
"a",
"safe",
"manner",
"."
] |
5j9/wikitextparser
|
python
|
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L132-L188
|
[
"def",
"rm_dup_args_safe",
"(",
"self",
",",
"tag",
":",
"str",
"=",
"None",
")",
"->",
"None",
":",
"name_to_lastarg_vals",
"=",
"{",
"}",
"# type: Dict[str, Tuple[Argument, List[str]]]",
"# Removing positional args affects their name. By reversing the list",
"# we avoid encountering those kind of args.",
"for",
"arg",
"in",
"reversed",
"(",
"self",
".",
"arguments",
")",
":",
"name",
"=",
"arg",
".",
"name",
".",
"strip",
"(",
"WS",
")",
"if",
"arg",
".",
"positional",
":",
"# Value of keyword arguments is automatically stripped by MW.",
"val",
"=",
"arg",
".",
"value",
"else",
":",
"# But it's not OK to strip whitespace in positional arguments.",
"val",
"=",
"arg",
".",
"value",
".",
"strip",
"(",
"WS",
")",
"if",
"name",
"in",
"name_to_lastarg_vals",
":",
"# This is a duplicate argument.",
"if",
"not",
"val",
":",
"# This duplicate argument is empty. It's safe to remove it.",
"del",
"arg",
"[",
"0",
":",
"len",
"(",
"arg",
".",
"string",
")",
"]",
"else",
":",
"# Try to remove any of the detected duplicates of this",
"# that are empty or their value equals to this one.",
"lastarg",
",",
"dup_vals",
"=",
"name_to_lastarg_vals",
"[",
"name",
"]",
"if",
"val",
"in",
"dup_vals",
":",
"del",
"arg",
"[",
"0",
":",
"len",
"(",
"arg",
".",
"string",
")",
"]",
"elif",
"''",
"in",
"dup_vals",
":",
"# This happens only if the last occurrence of name has",
"# been an empty string; other empty values will",
"# be removed as they are seen.",
"# In other words index of the empty argument in",
"# dup_vals is always 0.",
"del",
"lastarg",
"[",
"0",
":",
"len",
"(",
"lastarg",
".",
"string",
")",
"]",
"dup_vals",
".",
"pop",
"(",
"0",
")",
"else",
":",
"# It was not possible to remove any of the duplicates.",
"dup_vals",
".",
"append",
"(",
"val",
")",
"if",
"tag",
":",
"arg",
".",
"value",
"+=",
"tag",
"else",
":",
"name_to_lastarg_vals",
"[",
"name",
"]",
"=",
"(",
"arg",
",",
"[",
"val",
"]",
")"
] |
1347425814361d7955342c53212edbb27f0ff4b5
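The two safe situations from the docstring, sketched with a hypothetical template t (assuming the package imports as wikitextparser):

import wikitextparser as wtp

# Case 1: same name AND value -- one copy is removed.
t1 = wtp.parse('{{t|a=1|a=1}}').templates[0]
t1.rm_dup_args_safe()
print(t1.string)  # expected: '{{t|a=1}}'

# Case 2: same name, one value empty -- the empty one is removed.
t2 = wtp.parse('{{t|a=|a=2}}').templates[0]
t2.rm_dup_args_safe()
print(t2.string)  # expected: '{{t|a=2}}'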
|
test
|
Template.set_arg
|
Set the value for `name` argument. Add it if it doesn't exist.
- Use `positional`, `before` and `after` keyword arguments only when
adding a new argument.
- If `before` is given, ignore `after`.
- If neither `before` nor `after` are given and it's needed to add a
new argument, then append the new argument to the end.
- If `positional` is True, try to add the given value as a positional
argument. Ignore `preserve_spacing` if positional is True.
If it's None, do what seems more appropriate.
|
wikitextparser/_template.py
|
def set_arg(
self, name: str,
value: str,
positional: bool = None,
before: str = None,
after: str = None,
preserve_spacing: bool = True
) -> None:
"""Set the value for `name` argument. Add it if it doesn't exist.
- Use `positional`, `before` and `after` keyword arguments only when
adding a new argument.
- If `before` is given, ignore `after`.
- If neither `before` nor `after` are given and it's needed to add a
new argument, then append the new argument to the end.
- If `positional` is True, try to add the given value as a positional
argument. Ignore `preserve_spacing` if positional is True.
If it's None, do what seems more appropriate.
"""
args = list(reversed(self.arguments))
arg = get_arg(name, args)
# Updating an existing argument.
if arg:
if positional:
arg.positional = positional
if preserve_spacing:
val = arg.value
arg.value = val.replace(val.strip(WS), value)
else:
arg.value = value
return
# Adding a new argument
if not name and positional is None:
positional = True
# Calculate the whitespace needed before arg-name and after arg-value.
if not positional and preserve_spacing and args:
before_names = []
name_lengths = []
before_values = []
after_values = []
for arg in args:
aname = arg.name
name_len = len(aname)
name_lengths.append(name_len)
before_names.append(STARTING_WS_MATCH(aname)[0])
arg_value = arg.value
before_values.append(STARTING_WS_MATCH(arg_value)[0])
after_values.append(ENDING_WS_MATCH(arg_value)[0])
pre_name_ws_mode = mode(before_names)
name_length_mode = mode(name_lengths)
post_value_ws_mode = mode(
[SPACE_AFTER_SEARCH(self.string)[0]] + after_values[1:]
)
pre_value_ws_mode = mode(before_values)
else:
preserve_spacing = False
# Calculate the string that needs to be added to the Template.
if positional:
# Ignore preserve_spacing for positional args.
addstring = '|' + value
else:
if preserve_spacing:
# noinspection PyUnboundLocalVariable
addstring = (
'|' + (pre_name_ws_mode + name.strip(WS)).
ljust(name_length_mode) +
'=' + pre_value_ws_mode + value + post_value_ws_mode
)
else:
addstring = '|' + name + '=' + value
# Place the addstring in the right position.
if before:
arg = get_arg(before, args)
arg.insert(0, addstring)
elif after:
arg = get_arg(after, args)
arg.insert(len(arg.string), addstring)
else:
if args and not positional:
arg = args[0]
arg_string = arg.string
if preserve_spacing:
# Insert after the last argument.
# The addstring needs to be recalculated because we don't
# want to change the whitespace before final braces.
# noinspection PyUnboundLocalVariable
arg[0:len(arg_string)] = (
arg.string.rstrip(WS) + post_value_ws_mode +
addstring.rstrip(WS) + after_values[0]
)
else:
arg.insert(len(arg_string), addstring)
else:
# The template has no arguments or the new arg is
# positional AND is to be added at the end of the template.
self.insert(-2, addstring)
|
def set_arg(
self, name: str,
value: str,
positional: bool = None,
before: str = None,
after: str = None,
preserve_spacing: bool = True
) -> None:
"""Set the value for `name` argument. Add it if it doesn't exist.
- Use `positional`, `before` and `after` keyword arguments only when
adding a new argument.
- If `before` is given, ignore `after`.
- If neither `before` nor `after` are given and it's needed to add a
new argument, then append the new argument to the end.
- If `positional` is True, try to add the given value as a positional
argument. Ignore `preserve_spacing` if positional is True.
If it's None, do what seems more appropriate.
"""
args = list(reversed(self.arguments))
arg = get_arg(name, args)
# Updating an existing argument.
if arg:
if positional:
arg.positional = positional
if preserve_spacing:
val = arg.value
arg.value = val.replace(val.strip(WS), value)
else:
arg.value = value
return
# Adding a new argument
if not name and positional is None:
positional = True
# Calculate the whitespace needed before arg-name and after arg-value.
if not positional and preserve_spacing and args:
before_names = []
name_lengths = []
before_values = []
after_values = []
for arg in args:
aname = arg.name
name_len = len(aname)
name_lengths.append(name_len)
before_names.append(STARTING_WS_MATCH(aname)[0])
arg_value = arg.value
before_values.append(STARTING_WS_MATCH(arg_value)[0])
after_values.append(ENDING_WS_MATCH(arg_value)[0])
pre_name_ws_mode = mode(before_names)
name_length_mode = mode(name_lengths)
post_value_ws_mode = mode(
[SPACE_AFTER_SEARCH(self.string)[0]] + after_values[1:]
)
pre_value_ws_mode = mode(before_values)
else:
preserve_spacing = False
# Calculate the string that needs to be added to the Template.
if positional:
# Ignore preserve_spacing for positional args.
addstring = '|' + value
else:
if preserve_spacing:
# noinspection PyUnboundLocalVariable
addstring = (
'|' + (pre_name_ws_mode + name.strip(WS)).
ljust(name_length_mode) +
'=' + pre_value_ws_mode + value + post_value_ws_mode
)
else:
addstring = '|' + name + '=' + value
# Place the addstring in the right position.
if before:
arg = get_arg(before, args)
arg.insert(0, addstring)
elif after:
arg = get_arg(after, args)
arg.insert(len(arg.string), addstring)
else:
if args and not positional:
arg = args[0]
arg_string = arg.string
if preserve_spacing:
# Insert after the last argument.
# The addstring needs to be recalculated because we don't
# want to change the whitespace before final braces.
# noinspection PyUnboundLocalVariable
arg[0:len(arg_string)] = (
arg.string.rstrip(WS) + post_value_ws_mode +
addstring.rstrip(WS) + after_values[0]
)
else:
arg.insert(len(arg_string), addstring)
else:
# The template has no arguments or the new arg is
# positional AND is to be added at the end of the template.
self.insert(-2, addstring)
|
[
"Set",
"the",
"value",
"for",
"name",
"argument",
".",
"Add",
"it",
"if",
"it",
"doesn",
"t",
"exist",
"."
] |
5j9/wikitextparser
|
python
|
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L190-L285
|
[
"def",
"set_arg",
"(",
"self",
",",
"name",
":",
"str",
",",
"value",
":",
"str",
",",
"positional",
":",
"bool",
"=",
"None",
",",
"before",
":",
"str",
"=",
"None",
",",
"after",
":",
"str",
"=",
"None",
",",
"preserve_spacing",
":",
"bool",
"=",
"True",
")",
"->",
"None",
":",
"args",
"=",
"list",
"(",
"reversed",
"(",
"self",
".",
"arguments",
")",
")",
"arg",
"=",
"get_arg",
"(",
"name",
",",
"args",
")",
"# Updating an existing argument.",
"if",
"arg",
":",
"if",
"positional",
":",
"arg",
".",
"positional",
"=",
"positional",
"if",
"preserve_spacing",
":",
"val",
"=",
"arg",
".",
"value",
"arg",
".",
"value",
"=",
"val",
".",
"replace",
"(",
"val",
".",
"strip",
"(",
"WS",
")",
",",
"value",
")",
"else",
":",
"arg",
".",
"value",
"=",
"value",
"return",
"# Adding a new argument",
"if",
"not",
"name",
"and",
"positional",
"is",
"None",
":",
"positional",
"=",
"True",
"# Calculate the whitespace needed before arg-name and after arg-value.",
"if",
"not",
"positional",
"and",
"preserve_spacing",
"and",
"args",
":",
"before_names",
"=",
"[",
"]",
"name_lengths",
"=",
"[",
"]",
"before_values",
"=",
"[",
"]",
"after_values",
"=",
"[",
"]",
"for",
"arg",
"in",
"args",
":",
"aname",
"=",
"arg",
".",
"name",
"name_len",
"=",
"len",
"(",
"aname",
")",
"name_lengths",
".",
"append",
"(",
"name_len",
")",
"before_names",
".",
"append",
"(",
"STARTING_WS_MATCH",
"(",
"aname",
")",
"[",
"0",
"]",
")",
"arg_value",
"=",
"arg",
".",
"value",
"before_values",
".",
"append",
"(",
"STARTING_WS_MATCH",
"(",
"arg_value",
")",
"[",
"0",
"]",
")",
"after_values",
".",
"append",
"(",
"ENDING_WS_MATCH",
"(",
"arg_value",
")",
"[",
"0",
"]",
")",
"pre_name_ws_mode",
"=",
"mode",
"(",
"before_names",
")",
"name_length_mode",
"=",
"mode",
"(",
"name_lengths",
")",
"post_value_ws_mode",
"=",
"mode",
"(",
"[",
"SPACE_AFTER_SEARCH",
"(",
"self",
".",
"string",
")",
"[",
"0",
"]",
"]",
"+",
"after_values",
"[",
"1",
":",
"]",
")",
"pre_value_ws_mode",
"=",
"mode",
"(",
"before_values",
")",
"else",
":",
"preserve_spacing",
"=",
"False",
"# Calculate the string that needs to be added to the Template.",
"if",
"positional",
":",
"# Ignore preserve_spacing for positional args.",
"addstring",
"=",
"'|'",
"+",
"value",
"else",
":",
"if",
"preserve_spacing",
":",
"# noinspection PyUnboundLocalVariable",
"addstring",
"=",
"(",
"'|'",
"+",
"(",
"pre_name_ws_mode",
"+",
"name",
".",
"strip",
"(",
"WS",
")",
")",
".",
"ljust",
"(",
"name_length_mode",
")",
"+",
"'='",
"+",
"pre_value_ws_mode",
"+",
"value",
"+",
"post_value_ws_mode",
")",
"else",
":",
"addstring",
"=",
"'|'",
"+",
"name",
"+",
"'='",
"+",
"value",
"# Place the addstring in the right position.",
"if",
"before",
":",
"arg",
"=",
"get_arg",
"(",
"before",
",",
"args",
")",
"arg",
".",
"insert",
"(",
"0",
",",
"addstring",
")",
"elif",
"after",
":",
"arg",
"=",
"get_arg",
"(",
"after",
",",
"args",
")",
"arg",
".",
"insert",
"(",
"len",
"(",
"arg",
".",
"string",
")",
",",
"addstring",
")",
"else",
":",
"if",
"args",
"and",
"not",
"positional",
":",
"arg",
"=",
"args",
"[",
"0",
"]",
"arg_string",
"=",
"arg",
".",
"string",
"if",
"preserve_spacing",
":",
"# Insert after the last argument.",
"# The addstring needs to be recalculated because we don't",
"# want to change the the whitespace before final braces.",
"# noinspection PyUnboundLocalVariable",
"arg",
"[",
"0",
":",
"len",
"(",
"arg_string",
")",
"]",
"=",
"(",
"arg",
".",
"string",
".",
"rstrip",
"(",
"WS",
")",
"+",
"post_value_ws_mode",
"+",
"addstring",
".",
"rstrip",
"(",
"WS",
")",
"+",
"after_values",
"[",
"0",
"]",
")",
"else",
":",
"arg",
".",
"insert",
"(",
"len",
"(",
"arg_string",
")",
",",
"addstring",
")",
"else",
":",
"# The template has no arguments or the new arg is",
"# positional AND is to be added at the end of the template.",
"self",
".",
"insert",
"(",
"-",
"2",
",",
"addstring",
")"
] |
1347425814361d7955342c53212edbb27f0ff4b5
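A short sketch of both code paths, update and append, with the default preserve_spacing (assuming the package imports as wikitextparser):

import wikitextparser as wtp

t = wtp.parse('{{t|a=1}}').templates[0]
t.set_arg('a', '2')  # existing argument: value is updated in place
t.set_arg('b', '3')  # new argument, no before/after: appended at the end
print(t.string)  # expected: '{{t|a=2|b=3}}'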
|
test
|
Template.get_arg
|
Return the last argument with the given name.
Return None if no argument with that name is found.
|
wikitextparser/_template.py
|
def get_arg(self, name: str) -> Optional[Argument]:
"""Return the last argument with the given name.
Return None if no argument with that name is found.
"""
return get_arg(name, reversed(self.arguments))
|
def get_arg(self, name: str) -> Optional[Argument]:
"""Return the last argument with the given name.
Return None if no argument with that name is found.
"""
return get_arg(name, reversed(self.arguments))
|
[
"Return",
"the",
"last",
"argument",
"with",
"the",
"given",
"name",
"."
] |
5j9/wikitextparser
|
python
|
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L287-L292
|
[
"def",
"get_arg",
"(",
"self",
",",
"name",
":",
"str",
")",
"->",
"Optional",
"[",
"Argument",
"]",
":",
"return",
"get_arg",
"(",
"name",
",",
"reversed",
"(",
"self",
".",
"arguments",
")",
")"
] |
1347425814361d7955342c53212edbb27f0ff4b5
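Because the arguments are scanned in reverse, the last occurrence wins; a minimal sketch (assuming the package imports as wikitextparser):

import wikitextparser as wtp

t = wtp.parse('{{t|a=1|a=2}}').templates[0]
arg = t.get_arg('a')
print(arg.value if arg is not None else 'missing')  # expected: '2'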
|
test
|
Template.has_arg
|
Return true if there is an arg named `name`.
Also check equality of values if `value` is provided.
Note: If you just need to get an argument and you want to LBYL, it's
better to call get_arg directly and then check if the returned value
is None.
|
wikitextparser/_template.py
|
def has_arg(self, name: str, value: str = None) -> bool:
"""Return true if the is an arg named `name`.
Also check equality of values if `value` is provided.
Note: If you just need to get an argument and you want to LBYL, it's
better to call get_arg directly and then check if the returned value
is None.
"""
for arg in reversed(self.arguments):
if arg.name.strip(WS) == name.strip(WS):
if value:
if arg.positional:
if arg.value == value:
return True
return False
if arg.value.strip(WS) == value.strip(WS):
return True
return False
return True
return False
|
def has_arg(self, name: str, value: str = None) -> bool:
"""Return true if the is an arg named `name`.
Also check equality of values if `value` is provided.
Note: If you just need to get an argument and you want to LBYL, it's
better to call get_arg directly and then check if the returned value
is None.
"""
for arg in reversed(self.arguments):
if arg.name.strip(WS) == name.strip(WS):
if value:
if arg.positional:
if arg.value == value:
return True
return False
if arg.value.strip(WS) == value.strip(WS):
return True
return False
return True
return False
|
[
"Return",
"true",
"if",
"the",
"is",
"an",
"arg",
"named",
"name",
"."
] |
5j9/wikitextparser
|
python
|
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L294-L314
|
[
"def",
"has_arg",
"(",
"self",
",",
"name",
":",
"str",
",",
"value",
":",
"str",
"=",
"None",
")",
"->",
"bool",
":",
"for",
"arg",
"in",
"reversed",
"(",
"self",
".",
"arguments",
")",
":",
"if",
"arg",
".",
"name",
".",
"strip",
"(",
"WS",
")",
"==",
"name",
".",
"strip",
"(",
"WS",
")",
":",
"if",
"value",
":",
"if",
"arg",
".",
"positional",
":",
"if",
"arg",
".",
"value",
"==",
"value",
":",
"return",
"True",
"return",
"False",
"if",
"arg",
".",
"value",
".",
"strip",
"(",
"WS",
")",
"==",
"value",
".",
"strip",
"(",
"WS",
")",
":",
"return",
"True",
"return",
"False",
"return",
"True",
"return",
"False"
] |
1347425814361d7955342c53212edbb27f0ff4b5
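A minimal sketch (assuming the package imports as wikitextparser); note that keyword values are compared after stripping whitespace:

import wikitextparser as wtp

t = wtp.parse('{{t|a= 1 }}').templates[0]
print(t.has_arg('a'))       # True
print(t.has_arg('a', '1'))  # True -- ' 1 ' matches '1' once stripped
print(t.has_arg('b'))       # False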
|
test
|
Template.del_arg
|
Delete all arguments with the given name.
|
wikitextparser/_template.py
|
def del_arg(self, name: str) -> None:
"""Delete all arguments with the given then."""
for arg in reversed(self.arguments):
if arg.name.strip(WS) == name.strip(WS):
del arg[:]
|
def del_arg(self, name: str) -> None:
"""Delete all arguments with the given then."""
for arg in reversed(self.arguments):
if arg.name.strip(WS) == name.strip(WS):
del arg[:]
|
[
"Delete",
"all",
"arguments",
"with",
"the",
"given",
"then",
"."
] |
5j9/wikitextparser
|
python
|
https://github.com/5j9/wikitextparser/blob/1347425814361d7955342c53212edbb27f0ff4b5/wikitextparser/_template.py#L316-L320
|
[
"def",
"del_arg",
"(",
"self",
",",
"name",
":",
"str",
")",
"->",
"None",
":",
"for",
"arg",
"in",
"reversed",
"(",
"self",
".",
"arguments",
")",
":",
"if",
"arg",
".",
"name",
".",
"strip",
"(",
"WS",
")",
"==",
"name",
".",
"strip",
"(",
"WS",
")",
":",
"del",
"arg",
"[",
":",
"]"
] |
1347425814361d7955342c53212edbb27f0ff4b5
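A minimal sketch (assuming the package imports as wikitextparser); unlike rm_first_of_dup_args, this removes every occurrence of the named argument:

import wikitextparser as wtp

t = wtp.parse('{{t|a=1|b=2|a=3}}').templates[0]
t.del_arg('a')
print(t.string)  # expected: '{{t|b=2}}'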
|
test
|
build_crs_table
|
Build crs table of all equivalent format variations by scraping spatialreference.org.
Saves table as tab-delimited text file.
NOTE: Might take a while.
Arguments:
- *savepath*: The absolute or relative filepath to which to save the crs table, including the ".txt" extension.
|
pycrs/utils.py
|
def build_crs_table(savepath):
"""
Build crs table of all equivalent format variations by scraping spatialreference.org.
Saves table as tab-delimited text file.
NOTE: Might take a while.
Arguments:
- *savepath*: The absolute or relative filepath to which to save the crs table, including the ".txt" extension.
"""
# create table
outfile = open(savepath, "wb")
# create fields
fields = ["codetype", "code", "proj4", "ogcwkt", "esriwkt"]
outfile.write("\t".join(fields) + "\n")
# make table from url requests
for codetype in ("epsg", "esri", "sr-org"):
print(codetype)
# collect existing proj list
print("fetching list of available codes")
codelist = []
page = 1
while True:
try:
link = 'http://spatialreference.org/ref/%s/?page=%s' %(codetype,page)
html = urllib2.urlopen(link).read()
codes = [match.groups()[0] for match in re.finditer(r'/ref/'+codetype+'/(\d+)', html) ]
if not codes: break
print("page",page)
codelist.extend(codes)
page += 1
except:
break
print("fetching string formats for each projection")
for i,code in enumerate(codelist):
# check if code exists
link = 'http://spatialreference.org/ref/%s/%s/' %(codetype,code)
urllib2.urlopen(link)
# collect each projection format in a table row
row = [codetype, code]
for resulttype in ("proj4", "ogcwkt", "esriwkt"):
try:
link = 'http://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,resulttype)
result = urllib2.urlopen(link).read()
row.append(result)
except:
pass
print("projection %i of %i added" %(i,len(codelist)) )
outfile.write("\t".join(row) + "\n")
# close the file
outfile.close()
|
def build_crs_table(savepath):
"""
Build crs table of all equivalent format variations by scraping spatialreference.org.
Saves table as tab-delimited text file.
NOTE: Might take a while.
Arguments:
- *savepath*: The absolute or relative filepath to which to save the crs table, including the ".txt" extension.
"""
# create table
outfile = open(savepath, "wb")
# create fields
fields = ["codetype", "code", "proj4", "ogcwkt", "esriwkt"]
outfile.write("\t".join(fields) + "\n")
# make table from url requests
for codetype in ("epsg", "esri", "sr-org"):
print(codetype)
# collect existing proj list
print("fetching list of available codes")
codelist = []
page = 1
while True:
try:
link = 'http://spatialreference.org/ref/%s/?page=%s' %(codetype,page)
html = urllib2.urlopen(link).read()
codes = [match.groups()[0] for match in re.finditer(r'/ref/'+codetype+'/(\d+)', html) ]
if not codes: break
print("page",page)
codelist.extend(codes)
page += 1
except:
break
print("fetching string formats for each projection")
for i,code in enumerate(codelist):
# check if code exists
link = 'http://spatialreference.org/ref/%s/%s/' %(codetype,code)
urllib2.urlopen(link)
# collect each projection format in a table row
row = [codetype, code]
for resulttype in ("proj4", "ogcwkt", "esriwkt"):
try:
link = 'http://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,resulttype)
result = urllib2.urlopen(link).read()
row.append(result)
except:
pass
print("projection %i of %i added" %(i,len(codelist)) )
outfile.write("\t".join(row) + "\n")
# close the file
outfile.close()
|
[
"Build",
"crs",
"table",
"of",
"all",
"equivalent",
"format",
"variations",
"by",
"scraping",
"spatialreference",
".",
"org",
".",
"Saves",
"table",
"as",
"tab",
"-",
"delimited",
"text",
"file",
".",
"NOTE",
":",
"Might",
"take",
"a",
"while",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/utils.py#L11-L69
|
[
"def",
"build_crs_table",
"(",
"savepath",
")",
":",
"# create table",
"outfile",
"=",
"open",
"(",
"savepath",
",",
"\"wb\"",
")",
"# create fields",
"fields",
"=",
"[",
"\"codetype\"",
",",
"\"code\"",
",",
"\"proj4\"",
",",
"\"ogcwkt\"",
",",
"\"esriwkt\"",
"]",
"outfile",
".",
"write",
"(",
"\"\\t\"",
".",
"join",
"(",
"fields",
")",
"+",
"\"\\n\"",
")",
"# make table from url requests",
"for",
"codetype",
"in",
"(",
"\"epsg\"",
",",
"\"esri\"",
",",
"\"sr-org\"",
")",
":",
"print",
"(",
"codetype",
")",
"# collect existing proj list",
"print",
"(",
"\"fetching list of available codes\"",
")",
"codelist",
"=",
"[",
"]",
"page",
"=",
"1",
"while",
"True",
":",
"try",
":",
"link",
"=",
"'http://spatialreference.org/ref/%s/?page=%s'",
"%",
"(",
"codetype",
",",
"page",
")",
"html",
"=",
"urllib2",
".",
"urlopen",
"(",
"link",
")",
".",
"read",
"(",
")",
"codes",
"=",
"[",
"match",
".",
"groups",
"(",
")",
"[",
"0",
"]",
"for",
"match",
"in",
"re",
".",
"finditer",
"(",
"r'/ref/'",
"+",
"codetype",
"+",
"'/(\\d+)'",
",",
"html",
")",
"]",
"if",
"not",
"codes",
":",
"break",
"print",
"(",
"\"page\"",
",",
"page",
")",
"codelist",
".",
"extend",
"(",
"codes",
")",
"page",
"+=",
"1",
"except",
":",
"break",
"print",
"(",
"\"fetching string formats for each projection\"",
")",
"for",
"i",
",",
"code",
"in",
"enumerate",
"(",
"codelist",
")",
":",
"# check if code exists",
"link",
"=",
"'http://spatialreference.org/ref/%s/%s/'",
"%",
"(",
"codetype",
",",
"code",
")",
"urllib2",
".",
"urlopen",
"(",
"link",
")",
"# collect each projection format in a table row",
"row",
"=",
"[",
"codetype",
",",
"code",
"]",
"for",
"resulttype",
"in",
"(",
"\"proj4\"",
",",
"\"ogcwkt\"",
",",
"\"esriwkt\"",
")",
":",
"try",
":",
"link",
"=",
"'http://spatialreference.org/ref/%s/%s/%s/'",
"%",
"(",
"codetype",
",",
"code",
",",
"resulttype",
")",
"result",
"=",
"urllib2",
".",
"urlopen",
"(",
"link",
")",
".",
"read",
"(",
")",
"row",
".",
"append",
"(",
"result",
")",
"except",
":",
"pass",
"print",
"(",
"\"projection %i of %i added\"",
"%",
"(",
"i",
",",
"len",
"(",
"codelist",
")",
")",
")",
"outfile",
".",
"write",
"(",
"\"\\t\"",
".",
"join",
"(",
"row",
")",
"+",
"\"\\n\"",
")",
"# close the file",
"outfile",
".",
"close",
"(",
")"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
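A call sketch; the output path is hypothetical, and the function needs network access to spatialreference.org and can take a long time since it issues one request per code and format:

from pycrs import utils

utils.build_crs_table('crs_table.txt')  # hypothetical output path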
|
test
|
crscode_to_string
|
Lookup crscode on spatialreference.org and return in specified format.
Arguments:
- *codetype*: "epsg", "esri", or "sr-org".
- *code*: The code.
- *format*: The crs format of the returned string. One of "ogcwkt", "esriwkt", or "proj4", but also several others...
Returns:
- Crs string in the specified format.
|
pycrs/utils.py
|
def crscode_to_string(codetype, code, format):
"""
Lookup crscode on spatialreference.org and return in specified format.
Arguments:
- *codetype*: "epsg", "esri", or "sr-org".
- *code*: The code.
- *format*: The crs format of the returned string. One of "ogcwkt", "esriwkt", or "proj4", but also several others...
Returns:
- Crs string in the specified format.
"""
link = 'http://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,format)
result = urllib2.urlopen(link).read()
if not isinstance(result, str):
result = result.decode()
return result
|
def crscode_to_string(codetype, code, format):
"""
Lookup crscode on spatialreference.org and return in specified format.
Arguments:
- *codetype*: "epsg", "esri", or "sr-org".
- *code*: The code.
- *format*: The crs format of the returned string. One of "ogcwkt", "esriwkt", or "proj4", but also several others...
Returns:
- Crs string in the specified format.
"""
link = 'http://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,format)
result = urllib2.urlopen(link).read()
if not isinstance(result, str):
result = result.decode()
return result
|
[
"Lookup",
"crscode",
"on",
"spatialreference",
".",
"org",
"and",
"return",
"in",
"specified",
"format",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/utils.py#L72-L90
|
[
"def",
"crscode_to_string",
"(",
"codetype",
",",
"code",
",",
"format",
")",
":",
"link",
"=",
"'http://spatialreference.org/ref/%s/%s/%s/'",
"%",
"(",
"codetype",
",",
"code",
",",
"format",
")",
"result",
"=",
"urllib2",
".",
"urlopen",
"(",
"link",
")",
".",
"read",
"(",
")",
"if",
"not",
"isinstance",
"(",
"result",
",",
"str",
")",
":",
"result",
"=",
"result",
".",
"decode",
"(",
")",
"return",
"result"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
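A minimal sketch, assuming network access and that spatialreference.org still serves these pages:

from pycrs import utils

proj4 = utils.crscode_to_string('epsg', 4326, 'proj4')
print(proj4)  # typically '+proj=longlat +datum=WGS84 +no_defs'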
|
test
|
GeogCS.to_proj4
|
Returns the CS as a proj4 formatted string or dict.
Arguments:
- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).
- **toplevel** (optional): If True, treats this CS as the final toplevel CS and adds the necessary proj4 elements (defaults to True).
|
pycrs/elements/cs.py
|
def to_proj4(self, as_dict=False, toplevel=True):
"""
Returns the CS as a proj4 formatted string or dict.
Arguments:
- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).
- **toplevel** (optional): If True, treats this CS as the final toplevel CS and adds the necessary proj4 elements (defaults to True).
"""
# dont parse axis to proj4, because in proj4, axis only applies to the cs, ie the projcs (not the geogcs, where wkt can specify with axis)
# also proj4 cannot specify angular units
if toplevel:
string = "+proj=longlat %s %s +nodef" % (self.datum.to_proj4(), self.prime_mer.to_proj4())
else:
string = "%s %s" % (self.datum.to_proj4(), self.prime_mer.to_proj4())
if as_dict:
return dict([
entry.lstrip('+').split('=')
for entry in string.split()
if entry != "+no_defs"
])
else:
return string
|
def to_proj4(self, as_dict=False, toplevel=True):
"""
Returns the CS as a proj4 formatted string or dict.
Arguments:
- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).
- **toplevel** (optional): If True, treats this CS as the final toplevel CS and adds the necessary proj4 elements (defaults to True).
"""
# dont parse axis to proj4, because in proj4, axis only applies to the cs, ie the projcs (not the geogcs, where wkt can specify with axis)
# also proj4 cannot specify angular units
if toplevel:
string = "+proj=longlat %s %s +nodef" % (self.datum.to_proj4(), self.prime_mer.to_proj4())
else:
string = "%s %s" % (self.datum.to_proj4(), self.prime_mer.to_proj4())
if as_dict:
return dict([
entry.lstrip('+').split('=')
for entry in string.split()
if entry != "+no_defs"
])
else:
return string
|
[
"Returns",
"the",
"CS",
"as",
"a",
"proj4",
"formatted",
"string",
"or",
"dict",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/elements/cs.py#L66-L88
|
[
"def",
"to_proj4",
"(",
"self",
",",
"as_dict",
"=",
"False",
",",
"toplevel",
"=",
"True",
")",
":",
"# dont parse axis to proj4, because in proj4, axis only applies to the cs, ie the projcs (not the geogcs, where wkt can specify with axis)",
"# also proj4 cannot specify angular units",
"if",
"toplevel",
":",
"string",
"=",
"\"+proj=longlat %s %s +nodef\"",
"%",
"(",
"self",
".",
"datum",
".",
"to_proj4",
"(",
")",
",",
"self",
".",
"prime_mer",
".",
"to_proj4",
"(",
")",
")",
"else",
":",
"string",
"=",
"\"%s %s\"",
"%",
"(",
"self",
".",
"datum",
".",
"to_proj4",
"(",
")",
",",
"self",
".",
"prime_mer",
".",
"to_proj4",
"(",
")",
")",
"if",
"as_dict",
":",
"return",
"dict",
"(",
"[",
"entry",
".",
"lstrip",
"(",
"'+'",
")",
".",
"split",
"(",
"'='",
")",
"for",
"entry",
"in",
"string",
".",
"split",
"(",
")",
"if",
"entry",
"!=",
"\"+no_defs\"",
"]",
")",
"else",
":",
"return",
"string"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
test
|
GeogCS.to_ogc_wkt
|
Returns the CS as an OGC WKT formatted string.
|
pycrs/elements/cs.py
|
def to_ogc_wkt(self):
"""
Returns the CS as an OGC WKT formatted string.
"""
return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_ogc_wkt(), self.prime_mer.to_ogc_wkt(), self.angunit.to_ogc_wkt(), self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt )
|
def to_ogc_wkt(self):
"""
Returns the CS as an OGC WKT formatted string.
"""
return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_ogc_wkt(), self.prime_mer.to_ogc_wkt(), self.angunit.to_ogc_wkt(), self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt )
|
[
"Returns",
"the",
"CS",
"as",
"a",
"OGC",
"WKT",
"formatted",
"string",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/elements/cs.py#L90-L94
|
[
"def",
"to_ogc_wkt",
"(",
"self",
")",
":",
"return",
"'GEOGCS[\"%s\", %s, %s, %s, AXIS[\"Lon\", %s], AXIS[\"Lat\", %s]]'",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"datum",
".",
"to_ogc_wkt",
"(",
")",
",",
"self",
".",
"prime_mer",
".",
"to_ogc_wkt",
"(",
")",
",",
"self",
".",
"angunit",
".",
"to_ogc_wkt",
"(",
")",
",",
"self",
".",
"twin_ax",
"[",
"0",
"]",
".",
"ogc_wkt",
",",
"self",
".",
"twin_ax",
"[",
"1",
"]",
".",
"ogc_wkt",
")"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
test
|
GeogCS.to_esri_wkt
|
Returns the CS as an ESRI WKT formatted string.
|
pycrs/elements/cs.py
|
def to_esri_wkt(self):
"""
Returns the CS as an ESRI WKT formatted string.
"""
return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_esri_wkt(), self.prime_mer.to_esri_wkt(), self.angunit.to_esri_wkt(), self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt )
|
def to_esri_wkt(self):
"""
Returns the CS as an ESRI WKT formatted string.
"""
return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_esri_wkt(), self.prime_mer.to_esri_wkt(), self.angunit.to_esri_wkt(), self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt )
|
[
"Returns",
"the",
"CS",
"as",
"a",
"ESRI",
"WKT",
"formatted",
"string",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/elements/cs.py#L96-L100
|
[
"def",
"to_esri_wkt",
"(",
"self",
")",
":",
"return",
"'GEOGCS[\"%s\", %s, %s, %s, AXIS[\"Lon\", %s], AXIS[\"Lat\", %s]]'",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"datum",
".",
"to_esri_wkt",
"(",
")",
",",
"self",
".",
"prime_mer",
".",
"to_esri_wkt",
"(",
")",
",",
"self",
".",
"angunit",
".",
"to_esri_wkt",
"(",
")",
",",
"self",
".",
"twin_ax",
"[",
"0",
"]",
".",
"esri_wkt",
",",
"self",
".",
"twin_ax",
"[",
"1",
"]",
".",
"esri_wkt",
")"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
test
|
ProjCS.to_proj4
|
Returns the CS as a proj4 formatted string or dict.
Arguments:
- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).
|
pycrs/elements/cs.py
|
def to_proj4(self, as_dict=False):
"""
Returns the CS as a proj4 formatted string or dict.
Arguments:
- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).
"""
string = "%s" % self.proj.to_proj4()
string += " %s" % self.geogcs.to_proj4(toplevel=False)
string += " " + " ".join(param.to_proj4() for param in self.params)
string += " %s" % self.unit.to_proj4()
string += " +axis=" + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + "u" # up set as default because only proj4 can set it I think...
string += " +no_defs"
if as_dict:
return dict([
entry.lstrip('+').split('=')
for entry in string.split()
if entry != "+no_defs"
])
else:
return string
|
def to_proj4(self, as_dict=False):
"""
Returns the CS as a proj4 formatted string or dict.
Arguments:
- **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False).
"""
string = "%s" % self.proj.to_proj4()
string += " %s" % self.geogcs.to_proj4(toplevel=False)
string += " " + " ".join(param.to_proj4() for param in self.params)
string += " %s" % self.unit.to_proj4()
string += " +axis=" + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + "u" # up set as default because only proj4 can set it I think...
string += " +no_defs"
if as_dict:
return dict([
entry.lstrip('+').split('=')
for entry in string.split()
if entry != "+no_defs"
])
else:
return string
|
[
"Returns",
"the",
"CS",
"as",
"a",
"proj4",
"formatted",
"string",
"or",
"dict",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/elements/cs.py#L147-L169
|
[
"def",
"to_proj4",
"(",
"self",
",",
"as_dict",
"=",
"False",
")",
":",
"string",
"=",
"\"%s\"",
"%",
"self",
".",
"proj",
".",
"to_proj4",
"(",
")",
"string",
"+=",
"\" %s\"",
"%",
"self",
".",
"geogcs",
".",
"to_proj4",
"(",
"toplevel",
"=",
"False",
")",
"string",
"+=",
"\" \"",
"+",
"\" \"",
".",
"join",
"(",
"param",
".",
"to_proj4",
"(",
")",
"for",
"param",
"in",
"self",
".",
"params",
")",
"string",
"+=",
"\" %s\"",
"%",
"self",
".",
"unit",
".",
"to_proj4",
"(",
")",
"string",
"+=",
"\" +axis=\"",
"+",
"self",
".",
"twin_ax",
"[",
"0",
"]",
".",
"proj4",
"+",
"self",
".",
"twin_ax",
"[",
"1",
"]",
".",
"proj4",
"+",
"\"u\"",
"# up set as default because only proj4 can set it I think...",
"string",
"+=",
"\" +no_defs\"",
"if",
"as_dict",
":",
"return",
"dict",
"(",
"[",
"entry",
".",
"lstrip",
"(",
"'+'",
")",
".",
"split",
"(",
"'='",
")",
"for",
"entry",
"in",
"string",
".",
"split",
"(",
")",
"if",
"entry",
"!=",
"\"+no_defs\"",
"]",
")",
"else",
":",
"return",
"string"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
test
|
ProjCS.to_ogc_wkt
|
Returns the CS as an OGC WKT formatted string.
|
pycrs/elements/cs.py
|
def to_ogc_wkt(self):
"""
Returns the CS as an OGC WKT formatted string.
"""
string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_ogc_wkt(), self.proj.to_ogc_wkt() )
string += ", ".join(param.to_ogc_wkt() for param in self.params)
string += ', %s' % self.unit.to_ogc_wkt()
string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt )
return string
|
def to_ogc_wkt(self):
"""
Returns the CS as an OGC WKT formatted string.
"""
string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_ogc_wkt(), self.proj.to_ogc_wkt() )
string += ", ".join(param.to_ogc_wkt() for param in self.params)
string += ', %s' % self.unit.to_ogc_wkt()
string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt )
return string
|
[
"Returns",
"the",
"CS",
"as",
"a",
"OGC",
"WKT",
"formatted",
"string",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/elements/cs.py#L171-L179
|
[
"def",
"to_ogc_wkt",
"(",
"self",
")",
":",
"string",
"=",
"'PROJCS[\"%s\", %s, %s, '",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"geogcs",
".",
"to_ogc_wkt",
"(",
")",
",",
"self",
".",
"proj",
".",
"to_ogc_wkt",
"(",
")",
")",
"string",
"+=",
"\", \"",
".",
"join",
"(",
"param",
".",
"to_ogc_wkt",
"(",
")",
"for",
"param",
"in",
"self",
".",
"params",
")",
"string",
"+=",
"', %s'",
"%",
"self",
".",
"unit",
".",
"to_ogc_wkt",
"(",
")",
"string",
"+=",
"', AXIS[\"X\", %s], AXIS[\"Y\", %s]]'",
"%",
"(",
"self",
".",
"twin_ax",
"[",
"0",
"]",
".",
"ogc_wkt",
",",
"self",
".",
"twin_ax",
"[",
"1",
"]",
".",
"ogc_wkt",
")",
"return",
"string"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
test
|
ProjCS.to_esri_wkt
|
Returns the CS as an ESRI WKT formatted string.
|
pycrs/elements/cs.py
|
def to_esri_wkt(self):
"""
Returns the CS as an ESRI WKT formatted string.
"""
string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_esri_wkt(), self.proj.to_esri_wkt() )
string += ", ".join(param.to_esri_wkt() for param in self.params)
string += ', %s' % self.unit.to_esri_wkt()
string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt )
return string
|
def to_esri_wkt(self):
"""
Returns the CS as an ESRI WKT formatted string.
"""
string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_esri_wkt(), self.proj.to_esri_wkt() )
string += ", ".join(param.to_esri_wkt() for param in self.params)
string += ', %s' % self.unit.to_esri_wkt()
string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt )
return string
|
[
"Returns",
"the",
"CS",
"as",
"a",
"ESRI",
"WKT",
"formatted",
"string",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/elements/cs.py#L181-L189
|
[
"def",
"to_esri_wkt",
"(",
"self",
")",
":",
"string",
"=",
"'PROJCS[\"%s\", %s, %s, '",
"%",
"(",
"self",
".",
"name",
",",
"self",
".",
"geogcs",
".",
"to_esri_wkt",
"(",
")",
",",
"self",
".",
"proj",
".",
"to_esri_wkt",
"(",
")",
")",
"string",
"+=",
"\", \"",
".",
"join",
"(",
"param",
".",
"to_esri_wkt",
"(",
")",
"for",
"param",
"in",
"self",
".",
"params",
")",
"string",
"+=",
"', %s'",
"%",
"self",
".",
"unit",
".",
"to_esri_wkt",
"(",
")",
"string",
"+=",
"', AXIS[\"X\", %s], AXIS[\"Y\", %s]]'",
"%",
"(",
"self",
".",
"twin_ax",
"[",
"0",
"]",
".",
"esri_wkt",
",",
"self",
".",
"twin_ax",
"[",
"1",
"]",
".",
"esri_wkt",
")",
"return",
"string"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
test
|
find
|
Search for an ellipsoid name located in this module.
Arguments:
- **ellipsname**: The ellipsoid name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same ellipsoid).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
|
pycrs/elements/ellipsoids.py
|
def find(ellipsname, crstype, strict=False):
"""
Search for an ellipsoid name located in this module.
Arguments:
- **ellipsname**: The ellipsoid name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same ellipsoid).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
"""
if not strict:
ellipsname = ellipsname.lower().replace(" ","_")
for itemname,item in globals().items():
if itemname.startswith("_") or itemname == 'Ellipsoid':
continue
try:
if hasattr(item.name, crstype):
itemname = getattr(item.name, crstype)
if not strict:
itemname = itemname.lower().replace(" ","_")
if ellipsname == itemname:
return item
except:
pass
else:
return None
|
def find(ellipsname, crstype, strict=False):
"""
Search for an ellipsoid name located in this module.
Arguments:
- **ellipsname**: The ellipsoid name to search for.
- **crstype**: Which CRS naming convention to search (different
CRS formats have different names for the same ellipsoid).
- **strict** (optional): If False, ignores minor name mismatches
such as underscore or character casing, otherwise must be exact
match (defaults to False).
"""
if not strict:
ellipsname = ellipsname.lower().replace(" ","_")
for itemname,item in globals().items():
if itemname.startswith("_") or itemname == 'Ellipsoid':
continue
try:
if hasattr(item.name, crstype):
itemname = getattr(item.name, crstype)
if not strict:
itemname = itemname.lower().replace(" ","_")
if ellipsname == itemname:
return item
except:
pass
else:
return None
|
[
"Search",
"for",
"a",
"ellipsoid",
"name",
"located",
"in",
"this",
"module",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/elements/ellipsoids.py#L8-L36
|
[
"def",
"find",
"(",
"ellipsname",
",",
"crstype",
",",
"strict",
"=",
"False",
")",
":",
"if",
"not",
"strict",
":",
"ellipsname",
"=",
"ellipsname",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"for",
"itemname",
",",
"item",
"in",
"globals",
"(",
")",
".",
"items",
"(",
")",
":",
"if",
"itemname",
".",
"startswith",
"(",
"\"_\"",
")",
"or",
"itemname",
"==",
"'Ellipsoid'",
":",
"continue",
"try",
":",
"if",
"hasattr",
"(",
"item",
".",
"name",
",",
"crstype",
")",
":",
"itemname",
"=",
"getattr",
"(",
"item",
".",
"name",
",",
"crstype",
")",
"if",
"not",
"strict",
":",
"itemname",
"=",
"itemname",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"if",
"ellipsname",
"==",
"itemname",
":",
"return",
"item",
"except",
":",
"pass",
"else",
":",
"return",
"None"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
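A lookup sketch, assuming the module defines a WGS84 ellipsoid whose proj4 name is 'WGS84'; with strict=False the comparison ignores casing and space/underscore differences:

from pycrs.elements import ellipsoids

ellps = ellipsoids.find('wgs84', 'proj4', strict=False)  # matches 'WGS84'
print(ellps)  # the matching ellipsoid class, or None if nothing matched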
|
test
|
from_url
|
Returns the crs object from a string interpreted as a specified format, located at a given URL.
Arguments:
- *url*: The url where the crs string is to be read from.
- *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4".
If None, tries to autodetect the format for you (default).
Returns:
- CRS object.
|
pycrs/load.py
|
def from_url(url, format=None):
"""
Returns the crs object from a string interpreted as a specified format, located at a given URL.
Arguments:
- *url*: The url where the crs string is to be read from.
- *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4".
If None, tries to autodetect the format for you (default).
Returns:
- CRS object.
"""
# first get string from url
string = urllib2.urlopen(url).read()
if PY3 is True:
# decode str into string
string = string.decode('utf-8')
# then determine parser
if format:
# user specified format
format = format.lower().replace(" ", "_")
func = parse.__getattr__("from_%s" % format)
else:
# unknown format
func = parse.from_unknown_text
# then load
crs = func(string)
return crs
|
def from_url(url, format=None):
"""
Returns the crs object from a string interpreted as a specified format, located at a given URL.
Arguments:
- *url*: The url where the crs string is to be read from.
- *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4".
If None, tries to autodetect the format for you (default).
Returns:
- CRS object.
"""
# first get string from url
string = urllib2.urlopen(url).read()
if PY3 is True:
# decode str into string
string = string.decode('utf-8')
# then determine parser
if format:
# user specified format
format = format.lower().replace(" ", "_")
func = parse.__getattr__("from_%s" % format)
else:
# unknown format
func = parse.from_unknown_text
# then load
crs = func(string)
return crs
|
[
"Returns",
"the",
"crs",
"object",
"from",
"a",
"string",
"interpreted",
"as",
"a",
"specified",
"format",
"located",
"at",
"a",
"given",
"url",
"site",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/load.py#L22-L54
|
[
"def",
"from_url",
"(",
"url",
",",
"format",
"=",
"None",
")",
":",
"# first get string from url",
"string",
"=",
"urllib2",
".",
"urlopen",
"(",
"url",
")",
".",
"read",
"(",
")",
"if",
"PY3",
"is",
"True",
":",
"# decode str into string",
"string",
"=",
"string",
".",
"decode",
"(",
"'utf-8'",
")",
"# then determine parser",
"if",
"format",
":",
"# user specified format",
"format",
"=",
"format",
".",
"lower",
"(",
")",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"func",
"=",
"parse",
".",
"__getattr__",
"(",
"\"from_%s\"",
"%",
"format",
")",
"else",
":",
"# unknown format",
"func",
"=",
"parse",
".",
"from_unknown_text",
"# then load",
"crs",
"=",
"func",
"(",
"string",
")",
"return",
"crs"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
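A sketch (network required; the URL pattern follows the spatialreference.org links used in pycrs/utils.py):

from pycrs import load

url = 'http://spatialreference.org/ref/epsg/4326/proj4/'
crs = load.from_url(url, format='proj4')
print(crs.to_proj4())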
|
test
|
from_file
|
Returns the crs object from a file, with the format determined from the filename extension.
Arguments:
- *filepath*: filepath to be loaded, including extension.
|
pycrs/load.py
|
def from_file(filepath):
"""
Returns the crs object from a file, with the format determined from the filename extension.
Arguments:
- *filepath*: filepath to be loaded, including extension.
"""
if filepath.endswith(".prj"):
string = open(filepath, "r").read()
return parse.from_unknown_wkt(string)
elif filepath.endswith((".geojson",".json")):
raw = open(filepath).read()
geoj = json.loads(raw)
if "crs" in geoj:
crsinfo = geoj["crs"]
if crsinfo["type"] == "name":
string = crsinfo["properties"]["name"]
return parse.from_unknown_text(string)
elif crsinfo["type"] == "link":
url = crsinfo["properties"]["name"]
type = crsinfo["properties"].get("type")
return from_url(url, format=type)
else: raise FormatError("Invalid GeoJSON crs type: must be either 'name' or 'link'")
else:
# assume default wgs84 as per the spec
return parse.from_epsg_code("4326")
|
def from_file(filepath):
"""
Returns the crs object from a file, with the format determined from the filename extension.
Arguments:
- *filepath*: filepath to be loaded, including extension.
"""
if filepath.endswith(".prj"):
string = open(filepath, "r").read()
return parse.from_unknown_wkt(string)
elif filepath.endswith((".geojson",".json")):
raw = open(filepath).read()
geoj = json.loads(raw)
if "crs" in geoj:
crsinfo = geoj["crs"]
if crsinfo["type"] == "name":
string = crsinfo["properties"]["name"]
return parse.from_unknown_text(string)
elif crsinfo["type"] == "link":
url = crsinfo["properties"]["name"]
type = crsinfo["properties"].get("type")
return from_url(url, format=type)
else: raise FormatError("Invalid GeoJSON crs type: must be either 'name' or 'link'")
else:
# assume default wgs84 as per the spec
return parse.from_epsg_code("4326")
|
[
"Returns",
"the",
"crs",
"object",
"from",
"a",
"file",
"with",
"the",
"format",
"determined",
"from",
"the",
"filename",
"extension",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/load.py#L56-L87
|
[
"def",
"from_file",
"(",
"filepath",
")",
":",
"if",
"filepath",
".",
"endswith",
"(",
"\".prj\"",
")",
":",
"string",
"=",
"open",
"(",
"filepath",
",",
"\"r\"",
")",
".",
"read",
"(",
")",
"return",
"parse",
".",
"from_unknown_wkt",
"(",
"string",
")",
"elif",
"filepath",
".",
"endswith",
"(",
"(",
"\".geojson\"",
",",
"\".json\"",
")",
")",
":",
"raw",
"=",
"open",
"(",
"filepath",
")",
".",
"read",
"(",
")",
"geoj",
"=",
"json",
".",
"loads",
"(",
"raw",
")",
"if",
"\"crs\"",
"in",
"geoj",
":",
"crsinfo",
"=",
"geoj",
"[",
"\"crs\"",
"]",
"if",
"crsinfo",
"[",
"\"type\"",
"]",
"==",
"\"name\"",
":",
"string",
"=",
"crsinfo",
"[",
"\"properties\"",
"]",
"[",
"\"name\"",
"]",
"return",
"parse",
".",
"from_unknown_text",
"(",
"string",
")",
"elif",
"crsinfo",
"[",
"\"type\"",
"]",
"==",
"\"link\"",
":",
"url",
"=",
"crsinfo",
"[",
"\"properties\"",
"]",
"[",
"\"name\"",
"]",
"type",
"=",
"crsinfo",
"[",
"\"properties\"",
"]",
".",
"get",
"(",
"\"type\"",
")",
"return",
"from_url",
"(",
"url",
",",
"format",
"=",
"type",
")",
"else",
":",
"raise",
"FormatError",
"(",
"\"Invalid GeoJSON crs type: must be either 'name' or 'link'\"",
")",
"else",
":",
"# assume default wgs84 as per the spec",
"return",
"parse",
".",
"from_epsg_code",
"(",
"\"4326\"",
")"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
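A sketch with a hypothetical .prj sidecar file; .geojson/.json files follow the GeoJSON crs branch shown above:

from pycrs import load

crs = load.from_file('roads.prj')  # 'roads.prj' is a hypothetical path
print(crs.to_ogc_wkt())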
|
test
|
from_epsg_code
|
Load crs object from epsg code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The EPSG code as an integer.
Returns:
- A CS instance of the indicated type.
|
pycrs/parse.py
|
def from_epsg_code(code):
"""
Load crs object from epsg code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The EPSG code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("epsg", code, "proj4")
crs = from_proj4(proj4)
return crs
|
def from_epsg_code(code):
"""
Load crs object from epsg code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The EPSG code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("epsg", code, "proj4")
crs = from_proj4(proj4)
return crs
|
[
"Load",
"crs",
"object",
"from",
"epsg",
"code",
"via",
"spatialreference",
".",
"org",
".",
"Parses",
"based",
"on",
"the",
"proj4",
"representation",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/parse.py#L24-L41
|
[
"def",
"from_epsg_code",
"(",
"code",
")",
":",
"# must go online (or look up local table) to get crs details",
"code",
"=",
"str",
"(",
"code",
")",
"proj4",
"=",
"utils",
".",
"crscode_to_string",
"(",
"\"epsg\"",
",",
"code",
",",
"\"proj4\"",
")",
"crs",
"=",
"from_proj4",
"(",
"proj4",
")",
"return",
"crs"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
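A minimal sketch (network access required, since the code is resolved via spatialreference.org):

from pycrs import parse

crs = parse.from_epsg_code(4326)  # WGS 84
print(crs.to_proj4())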
|
test
|
from_esri_code
|
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- A CS instance of the indicated type.
|
pycrs/parse.py
|
def from_esri_code(code):
"""
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("esri", code, "proj4")
crs = from_proj4(proj4)
return crs
|
def from_esri_code(code):
"""
Load crs object from esri code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The ESRI code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("esri", code, "proj4")
crs = from_proj4(proj4)
return crs
|
[
"Load",
"crs",
"object",
"from",
"esri",
"code",
"via",
"spatialreference",
".",
"org",
".",
"Parses",
"based",
"on",
"the",
"proj4",
"representation",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/parse.py#L43-L60
|
[
"def",
"from_esri_code",
"(",
"code",
")",
":",
"# must go online (or look up local table) to get crs details",
"code",
"=",
"str",
"(",
"code",
")",
"proj4",
"=",
"utils",
".",
"crscode_to_string",
"(",
"\"esri\"",
",",
"code",
",",
"\"proj4\"",
")",
"crs",
"=",
"from_proj4",
"(",
"proj4",
")",
"return",
"crs"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
test
|
from_sr_code
|
Load crs object from sr-org code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The SR-ORG code as an integer.
Returns:
- A CS instance of the indicated type.
|
pycrs/parse.py
|
def from_sr_code(code):
"""
Load crs object from sr-org code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The SR-ORG code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("sr-org", code, "proj4")
crs = from_proj4(proj4)
return crs
|
def from_sr_code(code):
"""
Load crs object from sr-org code, via spatialreference.org.
Parses based on the proj4 representation.
Arguments:
- *code*: The SR-ORG code as an integer.
Returns:
- A CS instance of the indicated type.
"""
# must go online (or look up local table) to get crs details
code = str(code)
proj4 = utils.crscode_to_string("sr-org", code, "proj4")
crs = from_proj4(proj4)
return crs
|
[
"Load",
"crs",
"object",
"from",
"sr",
"-",
"org",
"code",
"via",
"spatialreference",
".",
"org",
".",
"Parses",
"based",
"on",
"the",
"proj4",
"representation",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/parse.py#L62-L79
|
[
"def",
"from_sr_code",
"(",
"code",
")",
":",
"# must go online (or look up local table) to get crs details",
"code",
"=",
"str",
"(",
"code",
")",
"proj4",
"=",
"utils",
".",
"crscode_to_string",
"(",
"\"sr-org\"",
",",
"code",
",",
"\"proj4\"",
")",
"crs",
"=",
"from_proj4",
"(",
"proj4",
")",
"return",
"crs"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
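from_esri_code and from_sr_code differ from the EPSG loader only in the registry name passed to utils.crscode_to_string, so they are called the same way. A sketch; the specific codes below are illustrative only and not guaranteed to parse:

from pycrs import parse

esri_crs = parse.from_esri_code(54030)  # an ESRI-registry code
sr_crs = parse.from_sr_code(6842)       # an SR-ORG registry code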
test
|
_from_wkt
|
Internal method for parsing wkt, with minor differences depending on ogc or esri style.
Arguments:
- *string*: The OGC or ESRI WKT representation as a string.
- *wkttype* (optional): How to parse the WKT string, as either 'ogc', 'esri', or None. If None, tries to autodetect the wkt type before parsing (default).
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- A CS instance of the indicated type.
|
pycrs/parse.py
|
def _from_wkt(string, wkttype=None, strict=False):
"""
Internal method for parsing wkt, with minor differences depending on ogc or esri style.
Arguments:
- *string*: The OGC or ESRI WKT representation as a string.
- *wkttype* (optional): How to parse the WKT string, as either 'ogc', 'esri', or None. If None, tries to autodetect the wkt type before parsing (default).
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- A CS instance of the indicated type.
"""
# TODO
# - Make function for finding next element by name, instead of knowing its arg index position
# - Maybe verify elem arg name
# make sure valid wkttype
if wkttype: wkttype = wkttype.lower()
assert wkttype in ("ogc","esri",None)
# remove newlines and multi spaces
string = " ".join(string.split())
# parse arguments into components
def _consume_bracket(chars, char):
"char must be the opening bracket"
consumed = ""
depth = 1
while char and depth > 0:
consumed += char
char = next(chars, None)
# update depth level
if char == "[":
depth += 1
elif char == "]":
depth -= 1
consumed += char # consume the last closing char too
return consumed
def _consume_quote(chars, char, quotechar):
"char and quotechar must be the opening quote char"
consumed = ""
# consume the first opening char
consumed += char
char = next(chars, None)
# consume inside
while char and char != quotechar:
consumed += char
char = next(chars, None)
# consume the last closing char too
consumed += char
return consumed
def _next_elem(chars, char):
"char must be the first char of the text that precedes brackets"
header = ""
# skip until next header
while not char.isalpha():
char = next(chars, None)
# first consume the element text header
while char.isalpha():
header += char
char = next(chars, None)
# skip until next brackets (in case of spaces)
while char != "[":
char = next(chars, None)
# then consume the element bracket contents
if char == "[":
content = _consume_bracket(chars, char)
char = next(chars, None)
# split content into args list
content = content[1:-1] # remove enclosing brackets
content = _split_except(content)
# recursively load all subelems
for i,item in enumerate(content):
if isinstance(item, str) and "[" in item:
chars = (char for char in item)
char = next(chars)
item = _next_elem(chars, char)
content[i] = item
return header, content
def _clean_value(string):
string = string.strip()
try: string = float(string)
except: pass
return string
def _split_except(string):
"split the string on every comma, except not while inside quotes or square brackets"
chars = (char for char in string)
char = next(chars)
items = []
consumed = ""
while char:
# dont split on quotes, just consume it
if char in ("'", '"'):
consumed += _consume_quote(chars, char, char)
# dont split inside brackets, just consume it
elif char == "[":
consumed += _consume_bracket(chars, char)
# new splitchar found, add what has been consumed so far as an item, reset, and start consuming until next splitchar
elif char == ",":
consumed = _clean_value(consumed)
items.append(consumed)
consumed = ""
# consume normal char
elif char:
consumed += char
# next
char = next(chars, None)
# append last item too
consumed = _clean_value(consumed)
items.append(consumed)
return items
# load into nested tuples and arglists
crstuples = []
chars = (char for char in string)
char = next(chars)
while char:
header,content = _next_elem(chars, char)
crstuples.append((header, content))
char = next(chars, None)
# autodetect wkttype if not specified
if not wkttype:
topheader,topcontent = crstuples[0]
if topheader == "PROJCS":
geogcsheader,geogcscontent = topcontent[1]
elif topheader == "GEOGCS":
geogcsheader,geogcscontent = topheader,topcontent
# datum elem should be second under geogcs
datumheader, datumcontent = geogcscontent[1]
datumname = datumcontent[0].upper().strip('"')
# esri wkt datums all use "D_" before the datum name
if datumname.startswith("D_"):
wkttype = "esri"
else:
wkttype = "ogc"
# parse into actual crs objects
def _parse_top(header, content):
"procedure for parsing the toplevel crs element and all its children"
if header.upper() == "PROJCS":
# find name
csname = content[0].strip('"')
# find geogcs elem (by running parse again)
subheader, subcontent = content[1]
geogcs = _parse_top(subheader, subcontent)
# find projection elem
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "PROJECTION":
break
projname = subcontent[0].strip('"')
projclass = projections.find(projname, "%s_wkt" % wkttype, strict)
if projclass:
proj = projclass()
else:
raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname)
# find params
params = []
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "PARAMETER":
name, value = subcontent[0].strip('"'), subcontent[1]
itemclass = parameters.find(name, "%s_wkt" % wkttype, strict)
if itemclass:
item = itemclass(value)
params.append(item)
# find unit
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "UNIT":
break
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
else:
unit = units.Unknown()
unit.unitmultiplier.value = value # override default multiplier
linunit = unit
# find twin axis maybe
## if len(content) >= 6:
## twinax = (parameters.Axis(
## else:
## twinax = None
# put it all together
projcs = containers.ProjCS(csname, geogcs, proj, params, linunit) #, twinax)
return projcs
elif header.upper() == "GEOGCS":
# name
csname = content[0].strip('"')
# datum
subheader, subcontent = content[1]
## datum name
datumname = subcontent[0].strip('"')
datumclass = datums.find(datumname, "%s_wkt" % wkttype, strict)
if datumclass:
datum = datumclass()
else:
datum = datums.Unknown()
## datum ellipsoid
subsubheader, subsubcontent = subcontent[1]
ellipsname = subsubcontent[0].strip('"')
ellipsclass = ellipsoids.find(ellipsname, "%s_wkt" % wkttype, strict)
if ellipsclass:
ellipsoid = ellipsclass()
else:
ellipsoid = ellipsoids.Unknown()
ellipsoid.semimaj_ax = parameters.SemiMajorRadius(subsubcontent[1])
if subsubcontent[2] == 0:
# WKT falsely sets inverse flattening to 0 for spheroids
# but actually it cannot be 0, it is the flattening that is 0
ellipsoid.flat = parameters.Flattening(subsubcontent[2])
else:
ellipsoid.inv_flat = parameters.InverseFlattening(subsubcontent[2])
## datum shift
if wkttype == "ogc":
for subsubheader,subsubcontent in subcontent[1:]:
if subsubheader == "TOWGS84":
datumshift = parameters.DatumShift(subsubcontent)
break
else:
datumshift = None
elif wkttype == "esri":
# not used in esri wkt
datumshift = None
## put it all together
datum.ellips = ellipsoid
datum.datumshift = datumshift
# prime mer
subheader, subcontent = content[2]
prime_mer = parameters.PrimeMeridian(subcontent[1])
# angunit
subheader, subcontent = content[3]
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
else:
unit = units.Unknown()
unit.unitmultiplier.value = value # override default multiplier
angunit = unit
# twin axis
# ...
# put it all together
geogcs = containers.GeogCS(csname, datum, prime_mer, angunit, twin_ax=None)
return geogcs
# toplevel collection
header, content = crstuples[0]
crs = _parse_top(header, content)
# use args to create crs
return crs
|
def _from_wkt(string, wkttype=None, strict=False):
"""
Internal method for parsing wkt, with minor differences depending on ogc or esri style.
Arguments:
- *string*: The OGC or ESRI WKT representation as a string.
- *wkttype* (optional): How to parse the WKT string, as either 'ogc', 'esri', or None. If None, tries to autodetect the wkt type before parsing (default).
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- A CS instance of the indicated type.
"""
# TODO
# - Make function for finding next element by name, instead of knowing its arg index position
# - Maybe verify elem arg name
# make sure valid wkttype
if wkttype: wkttype = wkttype.lower()
assert wkttype in ("ogc","esri",None)
# remove newlines and multi spaces
string = " ".join(string.split())
# parse arguments into components
def _consume_bracket(chars, char):
"char must be the opening bracket"
consumed = ""
depth = 1
while char and depth > 0:
consumed += char
char = next(chars, None)
# update depth level
if char == "[":
depth += 1
elif char == "]":
depth -= 1
consumed += char # consume the last closing char too
return consumed
def _consume_quote(chars, char, quotechar):
"char and quotechar must be the opening quote char"
consumed = ""
# consume the first opening char
consumed += char
char = next(chars, None)
# consume inside
while char and char != quotechar:
consumed += char
char = next(chars, None)
# consume the last closing char too
consumed += char
return consumed
def _next_elem(chars, char):
"char must be the first char of the text that precedes brackets"
header = ""
# skip until next header
while not char.isalpha():
char = next(chars, None)
# first consume the element text header
while char.isalpha():
header += char
char = next(chars, None)
# skip until next brackets (in case of spaces)
while char != "[":
char = next(chars, None)
# then consume the element bracket contents
if char == "[":
content = _consume_bracket(chars, char)
char = next(chars, None)
# split content into args list
content = content[1:-1] # remove enclosing brackets
content = _split_except(content)
# recursively load all subelems
for i,item in enumerate(content):
if isinstance(item, str) and "[" in item:
chars = (char for char in item)
char = next(chars)
item = _next_elem(chars, char)
content[i] = item
return header, content
def _clean_value(string):
string = string.strip()
try: string = float(string)
except: pass
return string
def _split_except(string):
"split the string on every comma, except not while inside quotes or square brackets"
chars = (char for char in string)
char = next(chars)
items = []
consumed = ""
while char:
# dont split on quotes, just consume it
if char in ("'", '"'):
consumed += _consume_quote(chars, char, char)
# dont split inside brackets, just consume it
elif char == "[":
consumed += _consume_bracket(chars, char)
# new splitchar found, add what has been consumed so far as an item, reset, and start consuming until next splitchar
elif char == ",":
consumed = _clean_value(consumed)
items.append(consumed)
consumed = ""
# consume normal char
elif char:
consumed += char
# next
char = next(chars, None)
# append last item too
consumed = _clean_value(consumed)
items.append(consumed)
return items
# load into nested tuples and arglists
crstuples = []
chars = (char for char in string)
char = next(chars)
while char:
header,content = _next_elem(chars, char)
crstuples.append((header, content))
char = next(chars, None)
# autodetect wkttype if not specified
if not wkttype:
topheader,topcontent = crstuples[0]
if topheader == "PROJCS":
geogcsheader,geogcscontent = topcontent[1]
elif topheader == "GEOGCS":
geogcsheader,geogcscontent = topheader,topcontent
# datum elem should be second under geogcs
datumheader, datumcontent = geogcscontent[1]
datumname = datumcontent[0].upper().strip('"')
# esri wkt datums all use "D_" before the datum name
if datumname.startswith("D_"):
wkttype = "esri"
else:
wkttype = "ogc"
# parse into actual crs objects
def _parse_top(header, content):
"procedure for parsing the toplevel crs element and all its children"
if header.upper() == "PROJCS":
# find name
csname = content[0].strip('"')
# find geogcs elem (by running parse again)
subheader, subcontent = content[1]
geogcs = _parse_top(subheader, subcontent)
# find projection elem
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "PROJECTION":
break
projname = subcontent[0].strip('"')
projclass = projections.find(projname, "%s_wkt" % wkttype, strict)
if projclass:
proj = projclass()
else:
raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname)
# find params
params = []
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "PARAMETER":
name, value = subcontent[0].strip('"'), subcontent[1]
itemclass = parameters.find(name, "%s_wkt" % wkttype, strict)
if itemclass:
item = itemclass(value)
params.append(item)
# find unit
for part in content:
if isinstance(part, tuple):
subheader,subcontent = part
if subheader == "UNIT":
break
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
else:
unit = units.Unknown()
unit.unitmultiplier.value = value # override default multiplier
linunit = unit
# find twin axis maybe
## if len(content) >= 6:
## twinax = (parameters.Axis(
## else:
## twinax = None
# put it all together
projcs = containers.ProjCS(csname, geogcs, proj, params, linunit) #, twinax)
return projcs
elif header.upper() == "GEOGCS":
# name
csname = content[0].strip('"')
# datum
subheader, subcontent = content[1]
## datum name
datumname = subcontent[0].strip('"')
datumclass = datums.find(datumname, "%s_wkt" % wkttype, strict)
if datumclass:
datum = datumclass()
else:
datum = datums.Unknown()
## datum ellipsoid
subsubheader, subsubcontent = subcontent[1]
ellipsname = subsubcontent[0].strip('"')
ellipsclass = ellipsoids.find(ellipsname, "%s_wkt" % wkttype, strict)
if ellipsclass:
ellipsoid = ellipsclass()
else:
ellipsoid = ellipsoids.Unknown()
ellipsoid.semimaj_ax = parameters.SemiMajorRadius(subsubcontent[1])
if subsubcontent[2] == 0:
# WKT falsely sets inverse flattening to 0 for spheroids
# but actually it cannot be 0, it is the flattening that is 0
ellipsoid.flat = parameters.Flattening(subsubcontent[2])
else:
ellipsoid.inv_flat = parameters.InverseFlattening(subsubcontent[2])
## datum shift
if wkttype == "ogc":
for subsubheader,subsubcontent in subcontent[1:]:
if subsubheader == "TOWGS84":
datumshift = parameters.DatumShift(subsubcontent)
break
else:
datumshift = None
elif wkttype == "esri":
# not used in esri wkt
datumshift = None
## put it all together
datum.ellips = ellipsoid
datum.datumshift = datumshift
# prime mer
subheader, subcontent = content[2]
prime_mer = parameters.PrimeMeridian(subcontent[1])
# angunit
subheader, subcontent = content[3]
unitname,value = subcontent[0].strip('"'), subcontent[1]
unitclass = units.find(unitname, "%s_wkt" % wkttype, strict)
if unitclass:
unit = unitclass()
else:
unit = units.Unknown()
unit.unitmultiplier.value = value # override default multiplier
angunit = unit
# twin axis
# ...
# put it all together
geogcs = containers.GeogCS(csname, datum, prime_mer, angunit, twin_ax=None)
return geogcs
# toplevel collection
header, content = crstuples[0]
crs = _parse_top(header, content)
# use args to create crs
return crs
|
[
"Internal",
"method",
"for",
"parsing",
"wkt",
"with",
"minor",
"differences",
"depending",
"on",
"ogc",
"or",
"esri",
"style",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/parse.py#L134-L418
|
[
"def",
"_from_wkt",
"(",
"string",
",",
"wkttype",
"=",
"None",
",",
"strict",
"=",
"False",
")",
":",
"# TODO",
"# - Make function for finding next elemt by name, instead of knowing its arg index position",
"# - Maybe verify elem arg name",
"# make sure valid wkttype",
"if",
"wkttype",
":",
"wkttype",
"=",
"wkttype",
".",
"lower",
"(",
")",
"assert",
"wkttype",
"in",
"(",
"\"ogc\"",
",",
"\"esri\"",
",",
"None",
")",
"# remove newlines and multi spaces",
"string",
"=",
"\" \"",
".",
"join",
"(",
"string",
".",
"split",
"(",
")",
")",
"# parse arguments into components",
"def",
"_consume_bracket",
"(",
"chars",
",",
"char",
")",
":",
"\"char must be the opening bracket\"",
"consumed",
"=",
"\"\"",
"depth",
"=",
"1",
"while",
"char",
"and",
"depth",
">",
"0",
":",
"consumed",
"+=",
"char",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# update depth level",
"if",
"char",
"==",
"\"[\"",
":",
"depth",
"+=",
"1",
"elif",
"char",
"==",
"\"]\"",
":",
"depth",
"-=",
"1",
"consumed",
"+=",
"char",
"# consume the last closing char too",
"return",
"consumed",
"def",
"_consume_quote",
"(",
"chars",
",",
"char",
",",
"quotechar",
")",
":",
"\"char and quotechar must be the opening quote char\"",
"consumed",
"=",
"\"\"",
"# consume the first opening char",
"consumed",
"+=",
"char",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# consume inside",
"while",
"char",
"and",
"char",
"!=",
"quotechar",
":",
"consumed",
"+=",
"char",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# consume the last closing char too",
"consumed",
"+=",
"char",
"return",
"consumed",
"def",
"_next_elem",
"(",
"chars",
",",
"char",
")",
":",
"\"char must be the first char of the text that precedes brackets\"",
"header",
"=",
"\"\"",
"# skip until next header",
"while",
"not",
"char",
".",
"isalpha",
"(",
")",
":",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# first consume the element text header",
"while",
"char",
".",
"isalpha",
"(",
")",
":",
"header",
"+=",
"char",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# skip until next brackets (in case of spaces)",
"while",
"char",
"!=",
"\"[\"",
":",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# then consume the element bracket contents",
"if",
"char",
"==",
"\"[\"",
":",
"content",
"=",
"_consume_bracket",
"(",
"chars",
",",
"char",
")",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# split content into args list",
"content",
"=",
"content",
"[",
"1",
":",
"-",
"1",
"]",
"# remove enclosing brackets",
"content",
"=",
"_split_except",
"(",
"content",
")",
"# recursively load all subelems",
"for",
"i",
",",
"item",
"in",
"enumerate",
"(",
"content",
")",
":",
"if",
"isinstance",
"(",
"item",
",",
"str",
")",
"and",
"\"[\"",
"in",
"item",
":",
"chars",
"=",
"(",
"char",
"for",
"char",
"in",
"item",
")",
"char",
"=",
"next",
"(",
"chars",
")",
"item",
"=",
"_next_elem",
"(",
"chars",
",",
"char",
")",
"content",
"[",
"i",
"]",
"=",
"item",
"return",
"header",
",",
"content",
"def",
"_clean_value",
"(",
"string",
")",
":",
"string",
"=",
"string",
".",
"strip",
"(",
")",
"try",
":",
"string",
"=",
"float",
"(",
"string",
")",
"except",
":",
"pass",
"return",
"string",
"def",
"_split_except",
"(",
"string",
")",
":",
"\"split the string on every comma, except not while inside quotes or square brackets\"",
"chars",
"=",
"(",
"char",
"for",
"char",
"in",
"string",
")",
"char",
"=",
"next",
"(",
"chars",
")",
"items",
"=",
"[",
"]",
"consumed",
"=",
"\"\"",
"while",
"char",
":",
"# dont split on quotes, just consume it",
"if",
"char",
"in",
"(",
"\"'\"",
",",
"'\"'",
")",
":",
"consumed",
"+=",
"_consume_quote",
"(",
"chars",
",",
"char",
",",
"char",
")",
"# dont split inside brackets, just consume it",
"elif",
"char",
"==",
"\"[\"",
":",
"consumed",
"+=",
"_consume_bracket",
"(",
"chars",
",",
"char",
")",
"# new splitchar found, add what has been consumed so far as an item, reset, and start consuming until next splitchar",
"elif",
"char",
"==",
"\",\"",
":",
"consumed",
"=",
"_clean_value",
"(",
"consumed",
")",
"items",
".",
"append",
"(",
"consumed",
")",
"consumed",
"=",
"\"\"",
"# consume normal char",
"elif",
"char",
":",
"consumed",
"+=",
"char",
"# next",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# append last item too",
"consumed",
"=",
"_clean_value",
"(",
"consumed",
")",
"items",
".",
"append",
"(",
"consumed",
")",
"return",
"items",
"# load into nested tuples and arglists",
"crstuples",
"=",
"[",
"]",
"chars",
"=",
"(",
"char",
"for",
"char",
"in",
"string",
")",
"char",
"=",
"next",
"(",
"chars",
")",
"while",
"char",
":",
"header",
",",
"content",
"=",
"_next_elem",
"(",
"chars",
",",
"char",
")",
"crstuples",
".",
"append",
"(",
"(",
"header",
",",
"content",
")",
")",
"char",
"=",
"next",
"(",
"chars",
",",
"None",
")",
"# autodetect wkttype if not specified",
"if",
"not",
"wkttype",
":",
"topheader",
",",
"topcontent",
"=",
"crstuples",
"[",
"0",
"]",
"if",
"topheader",
"==",
"\"PROJCS\"",
":",
"geogcsheader",
",",
"geogcscontent",
"=",
"topcontent",
"[",
"1",
"]",
"elif",
"topheader",
"==",
"\"GEOGCS\"",
":",
"geogcsheader",
",",
"geogcscontent",
"=",
"topheader",
",",
"topcontent",
"# datum elem should be second under geogcs",
"datumheader",
",",
"datumcontent",
"=",
"geogcscontent",
"[",
"1",
"]",
"datumname",
"=",
"datumcontent",
"[",
"0",
"]",
".",
"upper",
"(",
")",
".",
"strip",
"(",
"'\"'",
")",
"# esri wkt datums all use \"D_\" before the datum name",
"if",
"datumname",
".",
"startswith",
"(",
"\"D_\"",
")",
":",
"wkttype",
"=",
"\"esri\"",
"else",
":",
"wkttype",
"=",
"\"ogc\"",
"# parse into actual crs objects",
"def",
"_parse_top",
"(",
"header",
",",
"content",
")",
":",
"\"procedure for parsing the toplevel crs element and all its children\"",
"if",
"header",
".",
"upper",
"(",
")",
"==",
"\"PROJCS\"",
":",
"# find name",
"csname",
"=",
"content",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
"# find geogcs elem (by running parse again)",
"subheader",
",",
"subcontent",
"=",
"content",
"[",
"1",
"]",
"geogcs",
"=",
"_parse_top",
"(",
"subheader",
",",
"subcontent",
")",
"# find projection elem",
"for",
"part",
"in",
"content",
":",
"if",
"isinstance",
"(",
"part",
",",
"tuple",
")",
":",
"subheader",
",",
"subcontent",
"=",
"part",
"if",
"subheader",
"==",
"\"PROJECTION\"",
":",
"break",
"projname",
"=",
"subcontent",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
"projclass",
"=",
"projections",
".",
"find",
"(",
"projname",
",",
"\"%s_wkt\"",
"%",
"wkttype",
",",
"strict",
")",
"if",
"projclass",
":",
"proj",
"=",
"projclass",
"(",
")",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Unsupported projection: The specified projection name %r could not be found in the list of supported projections\"",
"%",
"projname",
")",
"# find params",
"params",
"=",
"[",
"]",
"for",
"part",
"in",
"content",
":",
"if",
"isinstance",
"(",
"part",
",",
"tuple",
")",
":",
"subheader",
",",
"subcontent",
"=",
"part",
"if",
"subheader",
"==",
"\"PARAMETER\"",
":",
"name",
",",
"value",
"=",
"subcontent",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
",",
"subcontent",
"[",
"1",
"]",
"itemclass",
"=",
"parameters",
".",
"find",
"(",
"name",
",",
"\"%s_wkt\"",
"%",
"wkttype",
",",
"strict",
")",
"if",
"itemclass",
":",
"item",
"=",
"itemclass",
"(",
"value",
")",
"params",
".",
"append",
"(",
"item",
")",
"# find unit",
"for",
"part",
"in",
"content",
":",
"if",
"isinstance",
"(",
"part",
",",
"tuple",
")",
":",
"subheader",
",",
"subcontent",
"=",
"part",
"if",
"subheader",
"==",
"\"UNIT\"",
":",
"break",
"unitname",
",",
"value",
"=",
"subcontent",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
",",
"subcontent",
"[",
"1",
"]",
"unitclass",
"=",
"units",
".",
"find",
"(",
"unitname",
",",
"\"%s_wkt\"",
"%",
"wkttype",
",",
"strict",
")",
"if",
"unitclass",
":",
"unit",
"=",
"unitclass",
"(",
")",
"else",
":",
"unit",
"=",
"units",
".",
"Unknown",
"(",
")",
"unit",
".",
"unitmultiplier",
".",
"value",
"=",
"value",
"# override default multiplier",
"linunit",
"=",
"unit",
"# find twin axis maybe",
"## if len(content) >= 6:",
"## twinax = (parameters.Axis(",
"## else:",
"## twinax = None",
"# put it all together",
"projcs",
"=",
"containers",
".",
"ProjCS",
"(",
"csname",
",",
"geogcs",
",",
"proj",
",",
"params",
",",
"linunit",
")",
"#, twinax)",
"return",
"projcs",
"elif",
"header",
".",
"upper",
"(",
")",
"==",
"\"GEOGCS\"",
":",
"# name",
"csname",
"=",
"content",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
"# datum",
"subheader",
",",
"subcontent",
"=",
"content",
"[",
"1",
"]",
"## datum name",
"datumname",
"=",
"subcontent",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
"datumclass",
"=",
"datums",
".",
"find",
"(",
"datumname",
",",
"\"%s_wkt\"",
"%",
"wkttype",
",",
"strict",
")",
"if",
"datumclass",
":",
"datum",
"=",
"datumclass",
"(",
")",
"else",
":",
"datum",
"=",
"datums",
".",
"Unknown",
"(",
")",
"## datum ellipsoid",
"subsubheader",
",",
"subsubcontent",
"=",
"subcontent",
"[",
"1",
"]",
"ellipsname",
"=",
"subsubcontent",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
"ellipsclass",
"=",
"ellipsoids",
".",
"find",
"(",
"ellipsname",
",",
"\"%s_wkt\"",
"%",
"wkttype",
",",
"strict",
")",
"if",
"ellipsclass",
":",
"ellipsoid",
"=",
"ellipsclass",
"(",
")",
"else",
":",
"ellipsoid",
"=",
"ellipsoids",
".",
"Unknown",
"(",
")",
"ellipsoid",
".",
"semimaj_ax",
"=",
"parameters",
".",
"SemiMajorRadius",
"(",
"subsubcontent",
"[",
"1",
"]",
")",
"if",
"subsubcontent",
"[",
"2",
"]",
"==",
"0",
":",
"# WKT falsely sets inverse flattening to 0 for spheroids",
"# but actually it cannot be 0, it is the flattening that is 0",
"ellipsoid",
".",
"flat",
"=",
"parameters",
".",
"Flattening",
"(",
"subsubcontent",
"[",
"2",
"]",
")",
"else",
":",
"ellipsoid",
".",
"inv_flat",
"=",
"parameters",
".",
"InverseFlattening",
"(",
"subsubcontent",
"[",
"2",
"]",
")",
"## datum shift",
"if",
"wkttype",
"==",
"\"ogc\"",
":",
"for",
"subsubheader",
",",
"subsubcontent",
"in",
"subcontent",
"[",
"1",
":",
"]",
":",
"if",
"subsubheader",
"==",
"\"TOWGS84\"",
":",
"datumshift",
"=",
"parameters",
".",
"DatumShift",
"(",
"subsubcontent",
")",
"break",
"else",
":",
"datumshift",
"=",
"None",
"elif",
"wkttype",
"==",
"\"esri\"",
":",
"# not used in esri wkt",
"datumshift",
"=",
"None",
"## put it all togehter",
"datum",
".",
"ellips",
"=",
"ellipsoid",
"datum",
".",
"datumshift",
"=",
"datumshift",
"# prime mer",
"subheader",
",",
"subcontent",
"=",
"content",
"[",
"2",
"]",
"prime_mer",
"=",
"parameters",
".",
"PrimeMeridian",
"(",
"subcontent",
"[",
"1",
"]",
")",
"# angunit",
"subheader",
",",
"subcontent",
"=",
"content",
"[",
"3",
"]",
"unitname",
",",
"value",
"=",
"subcontent",
"[",
"0",
"]",
".",
"strip",
"(",
"'\"'",
")",
",",
"subcontent",
"[",
"1",
"]",
"unitclass",
"=",
"units",
".",
"find",
"(",
"unitname",
",",
"\"%s_wkt\"",
"%",
"wkttype",
",",
"strict",
")",
"if",
"unitclass",
":",
"unit",
"=",
"unitclass",
"(",
")",
"else",
":",
"unit",
"=",
"units",
".",
"Unknown",
"(",
")",
"unit",
".",
"unitmultiplier",
".",
"value",
"=",
"value",
"# override default multiplier",
"angunit",
"=",
"unit",
"# twin axis",
"# ...",
"# put it all together",
"geogcs",
"=",
"containers",
".",
"GeogCS",
"(",
"csname",
",",
"datum",
",",
"prime_mer",
",",
"angunit",
",",
"twin_ax",
"=",
"None",
")",
"return",
"geogcs",
"# toplevel collection",
"header",
",",
"content",
"=",
"crstuples",
"[",
"0",
"]",
"crs",
"=",
"_parse_top",
"(",
"header",
",",
"content",
")",
"# use args to create crs",
"return",
"crs"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
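The only signal _from_wkt uses to autodetect the dialect is the datum name: ESRI-style WKT prefixes it with "D_", anything else is treated as OGC. A sketch using the public wrapper from_unknown_wkt (referenced by from_unknown_text further below), assuming that wrapper delegates to _from_wkt with wkttype left as None so the autodetection runs:

from pycrs import parse

ogc_wkt = ('GEOGCS["WGS 84",DATUM["WGS_1984",'
           'SPHEROID["WGS 84",6378137,298.257223563]],'
           'PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]]')

# datum name "WGS_1984" does not start with "D_", so this parses as OGC WKT
crs = parse.from_unknown_wkt(ogc_wkt)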
test
|
from_proj4
|
Parse crs as proj4 formatted string or dict and return the resulting crs object.
Arguments:
- *proj4*: The proj4 representation as a string or dict.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- A CS instance of the indicated type.
|
pycrs/parse.py
|
def from_proj4(proj4, strict=False):
"""
Parse crs as proj4 formatted string or dict and return the resulting crs object.
Arguments:
- *proj4*: The proj4 representation as a string or dict.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- A CS instance of the indicated type.
"""
# parse arguments into components
# use args to create crs
# TODO: SLIGHTLY MESSY STILL, CLEANUP..
params = []
if isinstance(proj4, dict):
# add leading + sign as expected below, proj4 dicts do not have that
partdict = dict([('+'+k,v) for k,v in proj4.items()])
else:
partdict = dict([part.split("=") for part in proj4.split()
if len(part.split("=")) == 2 ])
# INIT CODES
# eg, +init=EPSG:1234
if "+init" in partdict:
# first, get the default proj4 string of the +init code
codetype, code = partdict["+init"].split(":")
if codetype == "EPSG":
initproj4 = utils.crscode_to_string("epsg", code, "proj4")
elif codetype == "ESRI":
initproj4 = utils.crscode_to_string("esri", code, "proj4")
# make the default into param dict
initpartdict = dict([part.split("=") for part in initproj4.split()
if len(part.split("=")) == 2 ])
# override the default with any custom params specified along with the +init code
initpartdict.update(partdict)
# rerun from_proj4() again on the derived proj4 params as if it was not made with the +init code
del initpartdict["+init"]
string = " ".join("%s=%s" % (key,val) for key,val in initpartdict.items())
return from_proj4(string)
# DATUM
# datum param is required
if "+datum" in partdict:
# get predefined datum def
datumname = partdict["+datum"]
datumclass = datums.find(datumname, "proj4", strict)
if datumclass:
datum = datumclass()
else:
datum = datums.Unknown()
else:
datum = datums.Unknown()
# ELLIPS
# ellipse param is required
ellips = None
if "+ellps" in partdict:
# get predefined ellips def
ellipsname = partdict["+ellps"]
ellipsclass = ellipsoids.find(ellipsname, "proj4", strict)
if ellipsclass:
ellips = ellipsclass()
if not ellips:
ellips = ellipsoids.Unknown()
# TO WGS 84 COEFFS
if "+towgs84" in partdict:
coeffs = partdict["+towgs84"].split(",")
datumshift = parameters.DatumShift(coeffs)
# TODO: if no datum, use ellips + towgs84 params to create the correct datum
# ...??
# COMBINE DATUM AND ELLIPS
## create datum and ellips param objs
# +ellps loads all the required ellipsoid parameters
# here we set or overwrite the parameters manually
if "+a" in partdict:
# semimajor radius
ellips.semimaj_ax = parameters.SemiMajorRadius(partdict["+a"])
if "+b" in partdict:
# semiminor radius
ellips.semimin_ax = parameters.SemiMinorRadius(partdict["+b"])
if "+f" in partdict:
# flattening
ellips.flat = parameters.Flattening(partdict["+f"])
if "+rf" in partdict:
# inverse flattening
ellips.inv_flat = parameters.InverseFlattening(partdict["+rf"])
# check that ellipsoid is sufficiently defined
if ellips.semimaj_ax and ellips.semimin_ax:
# +a (semimajor radius) and +b (semiminor radius) are enough and can be used to calculate flattening
# see https://en.wikipedia.org/wiki/Flattening
pass
elif ellips.semimaj_ax and ellips.inv_flat:
# alternatively, it is okay if +a (semimajor) and +rf (inverse flattening) are specified
pass
elif ellips.semimaj_ax and ellips.flat:
# alternatively, semimajor and +f (flattening, the reciprocal/inverse of +rf) is also acceptable
pass
else:
raise FormatError("The format string is missing the required +ellps element, or the alternative manual specification of the +a with +b or +f/+rf elements: \n\t %s" % partdict)
if "+datum" in partdict:
datum.ellips = ellips
elif "+towgs84" in partdict:
datum.ellips = ellips
datum.datumshift = datumshift
else:
datum.ellips = ellips
# PRIME MERIDIAN
# set default
prime_mer = parameters.PrimeMeridian(0)
# overwrite with user input
if "+pm" in partdict:
prime_mer = parameters.PrimeMeridian(partdict["+pm"])
# ANGULAR UNIT
## proj4 cannot set angular unit, so just set to default
angunit = units.Degree()
# GEOGCS (note, currently does not load axes)
geogcs = containers.GeogCS("Unknown", datum, prime_mer, angunit) #, twin_ax)
# PROJECTION
if "+proj" in partdict:
# get predefined proj def
projname = partdict["+proj"]
projclass = projections.find(projname, "proj4", strict)
if projclass:
proj = projclass()
elif projname == "longlat":
# proj4 special case, longlat as projection name means unprojected geogcs
proj = None
else:
raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname)
else:
raise FormatError("The format string is missing the required +proj element")
if proj:
# Because proj4 has no element hierarchy, using automatic element find()
# ...would not be very effective, as that would need a try-fail approach for each
# ...element type (parameter, projection, datum, ellipsoid, unit).
# ...Instead load each element individually.
# CENTRAL MERIDIAN
if "+lon_0" in partdict:
val = partdict["+lon_0"]
obj = parameters.CentralMeridian(val)
params.append(obj)
# FALSE EASTING
if "+x_0" in partdict:
val = partdict["+x_0"]
obj = parameters.FalseEasting(val)
params.append(obj)
# FALSE NORTHING
if "+y_0" in partdict:
val = partdict["+y_0"]
obj = parameters.FalseNorthing(val)
params.append(obj)
# SCALING FACTOR
if "+k_0" in partdict or "+k" in partdict:
if "+k_0" in partdict: val = partdict["+k_0"]
elif "+k" in partdict: val = partdict["+k"]
obj = parameters.ScalingFactor(val)
params.append(obj)
# LATITUDE ORIGIN
if "+lat_0" in partdict:
val = partdict["+lat_0"]
obj = parameters.LatitudeOrigin(val)
params.append(obj)
# LATITUDE TRUE SCALE
if "+lat_ts" in partdict:
val = partdict["+lat_ts"]
obj = parameters.LatitudeTrueScale(val)
params.append(obj)
# LONGITUDE CENTER
if "+lonc" in partdict:
val = partdict["+lonc"]
obj = parameters.LongitudeCenter(val)
params.append(obj)
# AZIMUTH
if "+alpha" in partdict:
val = partdict["+alpha"]
obj = parameters.Azimuth(val)
params.append(obj)
# STD PARALLEL 1
if "+lat_1" in partdict:
val = partdict["+lat_1"]
obj = parameters.LatitudeFirstStndParallel(val)
params.append(obj)
# STD PARALLEL 2
if "+lat_2" in partdict:
val = partdict["+lat_2"]
obj = parameters.LatitudeSecondStndParallel(val)
params.append(obj)
# SATELLITE HEIGHT
if "+h" in partdict:
val = partdict["+h"]
obj = parameters.SatelliteHeight(val)
params.append(obj)
# TILT ANGLE
if "+tilt" in partdict:
val = partdict["+tilt"]
obj = parameters.TiltAngle(val)
params.append(obj)
# UNIT
# get values
if "+units" in partdict:
# unit name takes precedence over to_meter
unitname = partdict["+units"]
unitclass = units.find(unitname, "proj4", strict)
if unitclass:
unit = unitclass() # takes meter multiplier from name, ignoring any custom meter multiplier
else:
raise FormatError("The specified unit name %r does not appear to be a valid unit name" % unitname)
elif "+to_meter" in partdict:
# no unit name specified, only to_meter conversion factor
unit = units.Unknown()
unit.metermultiplier.value = partdict["+to_meter"]
else:
# if nothing specified, defaults to meter
unit = units.Meter()
# PROJCS
projcs = containers.ProjCS("Unknown", geogcs, proj, params, unit)
return projcs
else:
# means projdef was None, ie unprojected longlat geogcs
return geogcs
|
def from_proj4(proj4, strict=False):
"""
Parse crs as proj4 formatted string or dict and return the resulting crs object.
Arguments:
- *proj4*: The proj4 representation as a string or dict.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- A CS instance of the indicated type.
"""
# parse arguments into components
# use args to create crs
# TODO: SLIGHTLY MESSY STILL, CLEANUP..
params = []
if isinstance(proj4, dict):
# add leading + sign as expected below, proj4 dicts do not have that
partdict = dict([('+'+k,v) for k,v in proj4.items()])
else:
partdict = dict([part.split("=") for part in proj4.split()
if len(part.split("=")) == 2 ])
# INIT CODES
# eg, +init=EPSG:1234
if "+init" in partdict:
# first, get the default proj4 string of the +init code
codetype, code = partdict["+init"].split(":")
if codetype == "EPSG":
initproj4 = utils.crscode_to_string("epsg", code, "proj4")
elif codetype == "ESRI":
initproj4 = utils.crscode_to_string("esri", code, "proj4")
# make the default into param dict
initpartdict = dict([part.split("=") for part in initproj4.split()
if len(part.split("=")) == 2 ])
# override the default with any custom params specified along with the +init code
initpartdict.update(partdict)
# rerun from_proj4() again on the derived proj4 params as if it was not made with the +init code
del initpartdict["+init"]
string = " ".join("%s=%s" % (key,val) for key,val in initpartdict.items())
return from_proj4(string)
# DATUM
# datum param is required
if "+datum" in partdict:
# get predefined datum def
datumname = partdict["+datum"]
datumclass = datums.find(datumname, "proj4", strict)
if datumclass:
datum = datumclass()
else:
datum = datums.Unknown()
else:
datum = datums.Unknown()
# ELLIPS
# ellipse param is required
ellips = None
if "+ellps" in partdict:
# get predefined ellips def
ellipsname = partdict["+ellps"]
ellipsclass = ellipsoids.find(ellipsname, "proj4", strict)
if ellipsclass:
ellips = ellipsclass()
if not ellips:
ellips = ellipsoids.Unknown()
# TO WGS 84 COEFFS
if "+towgs84" in partdict:
coeffs = partdict["+towgs84"].split(",")
datumshift = parameters.DatumShift(coeffs)
# TODO: if no datum, use ellips + towgs84 params to create the correct datum
# ...??
# COMBINE DATUM AND ELLIPS
## create datum and ellips param objs
# +ellps loads all the required ellipsoid parameters
# here we set or overwrite the parameters manually
if "+a" in partdict:
# semimajor radius
ellips.semimaj_ax = parameters.SemiMajorRadius(partdict["+a"])
if "+b" in partdict:
# semiminor radius
ellips.semimin_ax = parameters.SemiMinorRadius(partdict["+b"])
if "+f" in partdict:
# flattening
ellips.flat = parameters.Flattening(partdict["+f"])
if "+rf" in partdict:
# inverse flattening
ellips.inv_flat = parameters.InverseFlattening(partdict["+rf"])
# check that ellipsoid is sufficiently defined
if ellips.semimaj_ax and ellips.semimin_ax:
# +a (semimajor radius) and +b (semiminor radius) are enough and can be used to calculate flattening
# see https://en.wikipedia.org/wiki/Flattening
pass
elif ellips.semimaj_ax and ellips.inv_flat:
# alternatively, it is okay if +a (semimajor) and +rf (inverse flattening) are specified
pass
elif ellips.semimaj_ax and ellips.flat:
# alternatively, semimajor and +f (flattening, the reciprocal/inverse of +rf) is also acceptable
pass
else:
raise FormatError("The format string is missing the required +ellps element, or the alternative manual specification of the +a with +b or +f/+rf elements: \n\t %s" % partdict)
if "+datum" in partdict:
datum.ellips = ellips
elif "+towgs84" in partdict:
datum.ellips = ellips
datum.datumshift = datumshift
else:
datum.ellips = ellips
# PRIME MERIDIAN
# set default
prime_mer = parameters.PrimeMeridian(0)
# overwrite with user input
if "+pm" in partdict:
prime_mer = parameters.PrimeMeridian(partdict["+pm"])
# ANGULAR UNIT
## proj4 cannot set angular unit, so just set to default
angunit = units.Degree()
# GEOGCS (note, currently does not load axes)
geogcs = containers.GeogCS("Unknown", datum, prime_mer, angunit) #, twin_ax)
# PROJECTION
if "+proj" in partdict:
# get predefined proj def
projname = partdict["+proj"]
projclass = projections.find(projname, "proj4", strict)
if projclass:
proj = projclass()
elif projname == "longlat":
# proj4 special case, longlat as projection name means unprojected geogcs
proj = None
else:
raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname)
else:
raise FormatError("The format string is missing the required +proj element")
if proj:
# Because proj4 has no element hierarchy, using automatic element find()
# ...would not be very effective, as that would need a try-fail approach for each
# ...element type (parameter, projection, datum, ellipsoid, unit).
# ...Instead load each element individually.
# CENTRAL MERIDIAN
if "+lon_0" in partdict:
val = partdict["+lon_0"]
obj = parameters.CentralMeridian(val)
params.append(obj)
# FALSE EASTING
if "+x_0" in partdict:
val = partdict["+x_0"]
obj = parameters.FalseEasting(val)
params.append(obj)
# FALSE NORTHING
if "+y_0" in partdict:
val = partdict["+y_0"]
obj = parameters.FalseNorthing(val)
params.append(obj)
# SCALING FACTOR
if "+k_0" in partdict or "+k" in partdict:
if "+k_0" in partdict: val = partdict["+k_0"]
elif "+k" in partdict: val = partdict["+k"]
obj = parameters.ScalingFactor(val)
params.append(obj)
# LATITUDE ORIGIN
if "+lat_0" in partdict:
val = partdict["+lat_0"]
obj = parameters.LatitudeOrigin(val)
params.append(obj)
# LATITUDE TRUE SCALE
if "+lat_ts" in partdict:
val = partdict["+lat_ts"]
obj = parameters.LatitudeTrueScale(val)
params.append(obj)
# LONGITUDE CENTER
if "+lonc" in partdict:
val = partdict["+lonc"]
obj = parameters.LongitudeCenter(val)
params.append(obj)
# AZIMUTH
if "+alpha" in partdict:
val = partdict["+alpha"]
obj = parameters.Azimuth(val)
params.append(obj)
# STD PARALLEL 1
if "+lat_1" in partdict:
val = partdict["+lat_1"]
obj = parameters.LatitudeFirstStndParallel(val)
params.append(obj)
# STD PARALLEL 2
if "+lat_2" in partdict:
val = partdict["+lat_2"]
obj = parameters.LatitudeSecondStndParallel(val)
params.append(obj)
# SATELLITE HEIGHT
if "+h" in partdict:
val = partdict["+h"]
obj = parameters.SatelliteHeight(val)
params.append(obj)
# TILT ANGLE
if "+tilt" in partdict:
val = partdict["+tilt"]
obj = parameters.TiltAngle(val)
params.append(obj)
# UNIT
# get values
if "+units" in partdict:
# unit name takes precedence over to_meter
unitname = partdict["+units"]
unitclass = units.find(unitname, "proj4", strict)
if unitclass:
unit = unitclass() # takes meter multiplier from name, ignoring any custom meter multiplier
else:
raise FormatError("The specified unit name %r does not appear to be a valid unit name" % unitname)
elif "+to_meter" in partdict:
# no unit name specified, only to_meter conversion factor
unit = units.Unknown()
unit.metermultiplier.value = partdict["+to_meter"]
else:
# if nothing specified, defaults to meter
unit = units.Meter()
# PROJCS
projcs = containers.ProjCS("Unknown", geogcs, proj, params, unit)
return projcs
else:
# means projdef was None, ie unprojected longlat geogcs
return geogcs
|
[
"Parse",
"crs",
"as",
"proj4",
"formatted",
"string",
"or",
"dict",
"and",
"return",
"the",
"resulting",
"crs",
"object",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/parse.py#L420-L708
|
[
"def",
"from_proj4",
"(",
"proj4",
",",
"strict",
"=",
"False",
")",
":",
"# parse arguments into components",
"# use args to create crs",
"# TODO: SLIGTHLY MESSY STILL, CLEANUP..",
"params",
"=",
"[",
"]",
"if",
"isinstance",
"(",
"proj4",
",",
"dict",
")",
":",
"# add leading + sign as expected below, proj4 dicts do not have that",
"partdict",
"=",
"dict",
"(",
"[",
"(",
"'+'",
"+",
"k",
",",
"v",
")",
"for",
"k",
",",
"v",
"in",
"proj4",
".",
"items",
"(",
")",
"]",
")",
"else",
":",
"partdict",
"=",
"dict",
"(",
"[",
"part",
".",
"split",
"(",
"\"=\"",
")",
"for",
"part",
"in",
"proj4",
".",
"split",
"(",
")",
"if",
"len",
"(",
"part",
".",
"split",
"(",
"\"=\"",
")",
")",
"==",
"2",
"]",
")",
"# INIT CODES",
"# eg, +init=EPSG:1234",
"if",
"\"+init\"",
"in",
"partdict",
":",
"# first, get the default proj4 string of the +init code",
"codetype",
",",
"code",
"=",
"partdict",
"[",
"\"+init\"",
"]",
".",
"split",
"(",
"\":\"",
")",
"if",
"codetype",
"==",
"\"EPSG\"",
":",
"initproj4",
"=",
"utils",
".",
"crscode_to_string",
"(",
"\"epsg\"",
",",
"code",
",",
"\"proj4\"",
")",
"elif",
"codetype",
"==",
"\"ESRI\"",
":",
"initproj4",
"=",
"utils",
".",
"crscode_to_string",
"(",
"\"esri\"",
",",
"code",
",",
"\"proj4\"",
")",
"# make the default into param dict",
"initpartdict",
"=",
"dict",
"(",
"[",
"part",
".",
"split",
"(",
"\"=\"",
")",
"for",
"part",
"in",
"initproj4",
".",
"split",
"(",
")",
"if",
"len",
"(",
"part",
".",
"split",
"(",
"\"=\"",
")",
")",
"==",
"2",
"]",
")",
"# override the default with any custom params specified along with the +init code",
"initpartdict",
".",
"update",
"(",
"partdict",
")",
"# rerun from_proj4() again on the derived proj4 params as if it was not made with the +init code",
"del",
"initpartdict",
"[",
"\"+init\"",
"]",
"string",
"=",
"\" \"",
".",
"join",
"(",
"\"%s=%s\"",
"%",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"initpartdict",
".",
"items",
"(",
")",
")",
"return",
"from_proj4",
"(",
"string",
")",
"# DATUM",
"# datum param is required",
"if",
"\"+datum\"",
"in",
"partdict",
":",
"# get predefined datum def",
"datumname",
"=",
"partdict",
"[",
"\"+datum\"",
"]",
"datumclass",
"=",
"datums",
".",
"find",
"(",
"datumname",
",",
"\"proj4\"",
",",
"strict",
")",
"if",
"datumclass",
":",
"datum",
"=",
"datumclass",
"(",
")",
"else",
":",
"datum",
"=",
"datums",
".",
"Unknown",
"(",
")",
"else",
":",
"datum",
"=",
"datums",
".",
"Unknown",
"(",
")",
"# ELLIPS",
"# ellipse param is required",
"ellips",
"=",
"None",
"if",
"\"+ellps\"",
"in",
"partdict",
":",
"# get predefined ellips def",
"ellipsname",
"=",
"partdict",
"[",
"\"+ellps\"",
"]",
"ellipsclass",
"=",
"ellipsoids",
".",
"find",
"(",
"ellipsname",
",",
"\"proj4\"",
",",
"strict",
")",
"if",
"ellipsclass",
":",
"ellips",
"=",
"ellipsclass",
"(",
")",
"if",
"not",
"ellips",
":",
"ellips",
"=",
"ellipsoids",
".",
"Unknown",
"(",
")",
"# TO WGS 84 COEFFS",
"if",
"\"+towgs84\"",
"in",
"partdict",
":",
"coeffs",
"=",
"partdict",
"[",
"\"+towgs84\"",
"]",
".",
"split",
"(",
"\",\"",
")",
"datumshift",
"=",
"parameters",
".",
"DatumShift",
"(",
"coeffs",
")",
"# TODO: if no datum, use ellips + towgs84 params to create the correct datum",
"# ...??",
"# COMBINE DATUM AND ELLIPS",
"## create datum and ellips param objs",
"# +ellps loads all the required ellipsoid parameters",
"# here we set or overwrite the parameters manually",
"if",
"\"+a\"",
"in",
"partdict",
":",
"# semimajor radius",
"ellips",
".",
"semimaj_ax",
"=",
"parameters",
".",
"SemiMajorRadius",
"(",
"partdict",
"[",
"\"+a\"",
"]",
")",
"if",
"\"+b\"",
"in",
"partdict",
":",
"# semiminor radius",
"ellips",
".",
"semimin_ax",
"=",
"parameters",
".",
"SemiMinorRadius",
"(",
"partdict",
"[",
"\"+b\"",
"]",
")",
"if",
"\"+f\"",
"in",
"partdict",
":",
"# flattening",
"ellips",
".",
"flat",
"=",
"parameters",
".",
"Flattening",
"(",
"partdict",
"[",
"\"+f\"",
"]",
")",
"if",
"\"+rf\"",
"in",
"partdict",
":",
"# inverse flattening",
"ellips",
".",
"inv_flat",
"=",
"parameters",
".",
"InverseFlattening",
"(",
"partdict",
"[",
"\"+rf\"",
"]",
")",
"# check that ellipsoid is sufficiently defined",
"if",
"ellips",
".",
"semimaj_ax",
"and",
"ellips",
".",
"semimin_ax",
":",
"# +a (semimajor radius) and +b (semiminor radius) is enough and can be used to calculate flattening",
"# see https://en.wikipedia.org/wiki/Flattening",
"pass",
"elif",
"ellips",
".",
"semimaj_ax",
"and",
"ellips",
".",
"inv_flat",
":",
"# alternatively, it is okay with if +a (semimajor) and +f (flattening) are specified",
"pass",
"elif",
"ellips",
".",
"semimaj_ax",
"and",
"ellips",
".",
"flat",
":",
"# alternatively, semimajor and +rf is also acceptable (the reciprocal/inverse of +f)",
"pass",
"else",
":",
"raise",
"FormatError",
"(",
"\"The format string is missing the required +ellps element, or the alternative manual specification of the +a with +b or +f/+rf elements: \\n\\t %s\"",
"%",
"partdict",
")",
"if",
"\"+datum\"",
"in",
"partdict",
":",
"datum",
".",
"ellips",
"=",
"ellips",
"elif",
"\"+towgs84\"",
"in",
"partdict",
":",
"datum",
".",
"ellips",
"=",
"ellips",
"datum",
".",
"datumshift",
"=",
"datumshift",
"else",
":",
"datum",
".",
"ellips",
"=",
"ellips",
"# PRIME MERIDIAN",
"# set default",
"prime_mer",
"=",
"parameters",
".",
"PrimeMeridian",
"(",
"0",
")",
"# overwrite with user input",
"if",
"\"+pm\"",
"in",
"partdict",
":",
"prime_mer",
"=",
"parameters",
".",
"PrimeMeridian",
"(",
"partdict",
"[",
"\"+pm\"",
"]",
")",
"# ANGULAR UNIT ",
"## proj4 cannot set angular unit, so just set to default",
"angunit",
"=",
"units",
".",
"Degree",
"(",
")",
"# GEOGCS (note, currently does not load axes)",
"geogcs",
"=",
"containers",
".",
"GeogCS",
"(",
"\"Unknown\"",
",",
"datum",
",",
"prime_mer",
",",
"angunit",
")",
"#, twin_ax)",
"# PROJECTION",
"if",
"\"+proj\"",
"in",
"partdict",
":",
"# get predefined proj def",
"projname",
"=",
"partdict",
"[",
"\"+proj\"",
"]",
"projclass",
"=",
"projections",
".",
"find",
"(",
"projname",
",",
"\"proj4\"",
",",
"strict",
")",
"if",
"projclass",
":",
"proj",
"=",
"projclass",
"(",
")",
"elif",
"projname",
"==",
"\"longlat\"",
":",
"# proj4 special case, longlat as projection name means unprojected geogcs",
"proj",
"=",
"None",
"else",
":",
"raise",
"NotImplementedError",
"(",
"\"Unsupported projection: The specified projection name %r could not be found in the list of supported projections\"",
"%",
"projname",
")",
"else",
":",
"raise",
"FormatError",
"(",
"\"The format string is missing the required +proj element\"",
")",
"if",
"proj",
":",
"# Because proj4 has no element hierarchy, using automatic element find() would",
"# ...would not be very effective, as that would need a try-fail approach for each",
"# ...element type (parameter, projection, datum, ellipsoid, unit).",
"# ...Instead load each element individually.",
"# CENTRAL MERIDIAN",
"if",
"\"+lon_0\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+lon_0\"",
"]",
"obj",
"=",
"parameters",
".",
"CentralMeridian",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# FALSE EASTING",
"if",
"\"+x_0\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+x_0\"",
"]",
"obj",
"=",
"parameters",
".",
"FalseEasting",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# FALSE NORTHING",
"if",
"\"+y_0\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+y_0\"",
"]",
"obj",
"=",
"parameters",
".",
"FalseNorthing",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# SCALING FACTOR",
"if",
"\"+k_0\"",
"in",
"partdict",
"or",
"\"+k\"",
"in",
"partdict",
":",
"if",
"\"+k_0\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+k_0\"",
"]",
"elif",
"\"+k\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+k\"",
"]",
"obj",
"=",
"parameters",
".",
"ScalingFactor",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# LATITUDE ORIGIN",
"if",
"\"+lat_0\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+lat_0\"",
"]",
"obj",
"=",
"parameters",
".",
"LatitudeOrigin",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# LATITUDE TRUE SCALE",
"if",
"\"+lat_ts\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+lat_ts\"",
"]",
"obj",
"=",
"parameters",
".",
"LatitudeTrueScale",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# LONGITUDE CENTER",
"if",
"\"+lonc\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+lonc\"",
"]",
"obj",
"=",
"parameters",
".",
"LongitudeCenter",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# AZIMUTH",
"if",
"\"+alpha\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+alpha\"",
"]",
"obj",
"=",
"parameters",
".",
"Azimuth",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# STD PARALLEL 1",
"if",
"\"+lat_1\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+lat_1\"",
"]",
"obj",
"=",
"parameters",
".",
"LatitudeFirstStndParallel",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# STD PARALLEL 2",
"if",
"\"+lat_2\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+lat_2\"",
"]",
"obj",
"=",
"parameters",
".",
"LatitudeSecondStndParallel",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# SATELLITE HEIGHT",
"if",
"\"+h\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+h\"",
"]",
"obj",
"=",
"parameters",
".",
"SatelliteHeight",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# TILT ANGLE",
"if",
"\"+tilt\"",
"in",
"partdict",
":",
"val",
"=",
"partdict",
"[",
"\"+tilt\"",
"]",
"obj",
"=",
"parameters",
".",
"TiltAngle",
"(",
"val",
")",
"params",
".",
"append",
"(",
"obj",
")",
"# UNIT",
"# get values",
"if",
"\"+units\"",
"in",
"partdict",
":",
"# unit name takes precedence over to_meter",
"unitname",
"=",
"partdict",
"[",
"\"+units\"",
"]",
"unitclass",
"=",
"units",
".",
"find",
"(",
"unitname",
",",
"\"proj4\"",
",",
"strict",
")",
"if",
"unitclass",
":",
"unit",
"=",
"unitclass",
"(",
")",
"# takes meter multiplier from name, ignoring any custom meter multiplier",
"else",
":",
"raise",
"FormatError",
"(",
"\"The specified unit name %r does not appear to be a valid unit name\"",
"%",
"unitname",
")",
"elif",
"\"+to_meter\"",
"in",
"partdict",
":",
"# no unit name specified, only to_meter conversion factor",
"unit",
"=",
"units",
".",
"Unknown",
"(",
")",
"unit",
".",
"metermultiplier",
".",
"value",
"=",
"partdict",
"[",
"\"+to_meter\"",
"]",
"else",
":",
"# if nothing specified, defaults to meter",
"unit",
"=",
"units",
".",
"Meter",
"(",
")",
"# PROJCS",
"projcs",
"=",
"containers",
".",
"ProjCS",
"(",
"\"Unknown\"",
",",
"geogcs",
",",
"proj",
",",
"params",
",",
"unit",
")",
"return",
"projcs",
"else",
":",
"# means projdef was None, ie unprojected longlat geogcs",
"return",
"geogcs"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
|
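from_proj4 accepts either form named in the docstring: a raw proj4 string, or a dict whose keys carry no leading '+' (the parser prepends it). A minimal sketch; the projection, ellipsoid, and unit names assume the corresponding entries exist in the package's proj4 registries:

from pycrs import parse

# string form: a transverse-mercator definition with illustrative values
crs = parse.from_proj4("+proj=tmerc +lat_0=0 +lon_0=9 +k=0.9996 "
                       "+x_0=500000 +y_0=0 +ellps=GRS80 +units=m")

# dict form: 'proj=longlat' is the special case that returns an unprojected GeogCS
geogcs = parse.from_proj4({"proj": "longlat", "ellps": "WGS84"})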
test
|
from_unknown_text
|
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
|
pycrs/parse.py
|
def from_unknown_text(text, strict=False):
"""
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
if text.startswith("+"):
crs = from_proj4(text, strict)
elif text.startswith(("PROJCS[","GEOGCS[")):
crs = from_unknown_wkt(text, strict)
#elif text.startswith("urn:"):
# crs = from_ogc_urn(text, strict)
elif text.startswith("EPSG:"):
crs = from_epsg_code(text.split(":")[1])
elif text.startswith("ESRI:"):
crs = from_esri_code(text.split(":")[1])
elif text.startswith("SR-ORG:"):
crs = from_sr_code(text.split(":")[1])
else: raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats")
return crs
|
def from_unknown_text(text, strict=False):
"""
Detect crs string format and parse into crs object with appropriate function.
Arguments:
- *text*: The crs text representation of unknown type.
- *strict* (optional): When True, the parser is strict about names having to match
exactly with upper and lowercases. Default is not strict (False).
Returns:
- CRS object.
"""
if text.startswith("+"):
crs = from_proj4(text, strict)
elif text.startswith(("PROJCS[","GEOGCS[")):
crs = from_unknown_wkt(text, strict)
#elif text.startswith("urn:"):
# crs = from_ogc_urn(text, strict)
elif text.startswith("EPSG:"):
crs = from_epsg_code(text.split(":")[1])
elif text.startswith("ESRI:"):
crs = from_esri_code(text.split(":")[1])
elif text.startswith("SR-ORG:"):
crs = from_sr_code(text.split(":")[1])
else: raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats")
return crs
|
[
"Detect",
"crs",
"string",
"format",
"and",
"parse",
"into",
"crs",
"object",
"with",
"appropriate",
"function",
"."
] |
karimbahgat/PyCRS
|
python
|
https://github.com/karimbahgat/PyCRS/blob/d6a8bb9c28787a25b4a1d59a7e4603db3221eaef/pycrs/parse.py#L736-L771
|
[
"def",
"from_unknown_text",
"(",
"text",
",",
"strict",
"=",
"False",
")",
":",
"if",
"text",
".",
"startswith",
"(",
"\"+\"",
")",
":",
"crs",
"=",
"from_proj4",
"(",
"text",
",",
"strict",
")",
"elif",
"text",
".",
"startswith",
"(",
"(",
"\"PROJCS[\"",
",",
"\"GEOGCS[\"",
")",
")",
":",
"crs",
"=",
"from_unknown_wkt",
"(",
"text",
",",
"strict",
")",
"#elif text.startswith(\"urn:\"):",
"# crs = from_ogc_urn(text, strict)",
"elif",
"text",
".",
"startswith",
"(",
"\"EPSG:\"",
")",
":",
"crs",
"=",
"from_epsg_code",
"(",
"text",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
")",
"elif",
"text",
".",
"startswith",
"(",
"\"ESRI:\"",
")",
":",
"crs",
"=",
"from_esri_code",
"(",
"text",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
")",
"elif",
"text",
".",
"startswith",
"(",
"\"SR-ORG:\"",
")",
":",
"crs",
"=",
"from_sr_code",
"(",
"text",
".",
"split",
"(",
"\":\"",
")",
"[",
"1",
"]",
")",
"else",
":",
"raise",
"FormatError",
"(",
"\"Could not auto-detect the type of crs format, make sure it is one of the supported formats\"",
")",
"return",
"crs"
] |
d6a8bb9c28787a25b4a1d59a7e4603db3221eaef
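A short usage sketch of the prefix dispatch above, assuming the function is importable as pycrs.parse.from_unknown_text per the path field (the input strings are illustrative):

from pycrs.parse import from_unknown_text

crs_proj4 = from_unknown_text("+proj=longlat +datum=WGS84 +no_defs")  # "+" -> from_proj4
crs_epsg = from_unknown_text("EPSG:4326")  # "EPSG:" -> from_epsg_code("4326")
# A string with no recognized prefix raises FormatError.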
|
test
|
RawVLR.write_to
|
Write the raw header and record data to the out stream
Parameters:
----------
out : {file object}
The output stream
|
pylas/vlrs/rawvlr.py
|
def write_to(self, out):
""" Write the raw header content to the out stream
Parameters:
----------
out : {file object}
The output stream
"""
out.write(bytes(self.header))
out.write(self.record_data)
|
def write_to(self, out):
""" Write the raw header content to the out stream
Parameters:
----------
out : {file object}
The output stream
"""
out.write(bytes(self.header))
out.write(self.record_data)
|
[
"Write",
"the",
"raw",
"header",
"content",
"to",
"the",
"out",
"stream"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/rawvlr.py#L60-L70
|
[
"def",
"write_to",
"(",
"self",
",",
"out",
")",
":",
"out",
".",
"write",
"(",
"bytes",
"(",
"self",
".",
"header",
")",
")",
"out",
".",
"write",
"(",
"self",
".",
"record_data",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
RawVLR.read_from
|
Instantiate a RawVLR by reading the content from the
data stream
Parameters:
----------
data_stream : {file object}
The input stream
Returns
-------
RawVLR
The RawVLR read
|
pylas/vlrs/rawvlr.py
|
def read_from(cls, data_stream):
""" Instantiate a RawVLR by reading the content from the
data stream
Parameters:
----------
data_stream : {file object}
The input stream
Returns
-------
RawVLR
The RawVLR read
"""
raw_vlr = cls()
header = RawVLRHeader.from_stream(data_stream)
raw_vlr.header = header
raw_vlr.record_data = data_stream.read(header.record_length_after_header)
return raw_vlr
|
def read_from(cls, data_stream):
""" Instantiate a RawVLR by reading the content from the
data stream
Parameters:
----------
data_stream : {file object}
The input stream
Returns
-------
RawVLR
The RawVLR read
"""
raw_vlr = cls()
header = RawVLRHeader.from_stream(data_stream)
raw_vlr.header = header
raw_vlr.record_data = data_stream.read(header.record_length_after_header)
return raw_vlr
|
[
"Instantiate",
"a",
"RawVLR",
"by",
"reading",
"the",
"content",
"from",
"the",
"data",
"stream"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/rawvlr.py#L73-L91
|
[
"def",
"read_from",
"(",
"cls",
",",
"data_stream",
")",
":",
"raw_vlr",
"=",
"cls",
"(",
")",
"header",
"=",
"RawVLRHeader",
".",
"from_stream",
"(",
"data_stream",
")",
"raw_vlr",
".",
"header",
"=",
"header",
"raw_vlr",
".",
"record_data",
"=",
"data_stream",
".",
"read",
"(",
"header",
".",
"record_length_after_header",
")",
"return",
"raw_vlr"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
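write_to and read_from above are mirror operations. A minimal round-trip sketch over an in-memory stream, assuming raw_vlr is an existing RawVLR whose header.record_length_after_header matches len(record_data):

import io

buffer = io.BytesIO()
raw_vlr.write_to(buffer)  # header bytes followed by record_data
buffer.seek(0)
same_vlr = RawVLR.read_from(buffer)  # header first, then record_length_after_header bytes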
|
test
|
parse_geo_tiff_keys_from_vlrs
|
Gets the 3 GeoTiff vlrs from the vlr_list and parses them into
a nicer structure
Parameters
----------
vlr_list: pylas.vrls.vlrslist.VLRList list of vlrs from a las file
Raises
------
IndexError if any of the needed GeoTiffVLR is not found in the list
Returns
-------
List of GeoTiff keys parsed from the VLRs
|
pylas/vlrs/geotiff.py
|
def parse_geo_tiff_keys_from_vlrs(vlr_list: vlrlist.VLRList) -> List[GeoTiffKey]:
""" Gets the 3 GeoTiff vlrs from the vlr_list and parse them into
a nicer structure
Parameters
----------
vlr_list: pylas.vrls.vlrslist.VLRList list of vlrs from a las file
Raises
------
IndexError if any of the needed GeoTiffVLR is not found in the list
Returns
-------
List of GeoTiff keys parsed from the VLRs
"""
geo_key_dir = vlr_list.get_by_id(
GeoKeyDirectoryVlr.official_user_id(), GeoKeyDirectoryVlr.official_record_ids()
)[0]
geo_doubles = vlr_list.get_by_id(
GeoDoubleParamsVlr.official_user_id(), GeoDoubleParamsVlr.official_record_ids()
)[0]
geo_ascii = vlr_list.get_by_id(
GeoAsciiParamsVlr.official_user_id(), GeoAsciiParamsVlr.official_record_ids()
)[0]
return parse_geo_tiff(geo_key_dir, geo_doubles, geo_ascii)
|
def parse_geo_tiff_keys_from_vlrs(vlr_list: vlrlist.VLRList) -> List[GeoTiffKey]:
""" Gets the 3 GeoTiff vlrs from the vlr_list and parse them into
a nicer structure
Parameters
----------
vlr_list: pylas.vrls.vlrslist.VLRList list of vlrs from a las file
Raises
------
IndexError if any of the needed GeoTiffVLR is not found in the list
Returns
-------
List of GeoTiff keys parsed from the VLRs
"""
geo_key_dir = vlr_list.get_by_id(
GeoKeyDirectoryVlr.official_user_id(), GeoKeyDirectoryVlr.official_record_ids()
)[0]
geo_doubles = vlr_list.get_by_id(
GeoDoubleParamsVlr.official_user_id(), GeoDoubleParamsVlr.official_record_ids()
)[0]
geo_ascii = vlr_list.get_by_id(
GeoAsciiParamsVlr.official_user_id(), GeoAsciiParamsVlr.official_record_ids()
)[0]
return parse_geo_tiff(geo_key_dir, geo_doubles, geo_ascii)
|
[
"Gets",
"the",
"3",
"GeoTiff",
"vlrs",
"from",
"the",
"vlr_list",
"and",
"parse",
"them",
"into",
"a",
"nicer",
"structure"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/geotiff.py#L23-L49
|
[
"def",
"parse_geo_tiff_keys_from_vlrs",
"(",
"vlr_list",
":",
"vlrlist",
".",
"VLRList",
")",
"->",
"List",
"[",
"GeoTiffKey",
"]",
":",
"geo_key_dir",
"=",
"vlr_list",
".",
"get_by_id",
"(",
"GeoKeyDirectoryVlr",
".",
"official_user_id",
"(",
")",
",",
"GeoKeyDirectoryVlr",
".",
"official_record_ids",
"(",
")",
")",
"[",
"0",
"]",
"geo_doubles",
"=",
"vlr_list",
".",
"get_by_id",
"(",
"GeoDoubleParamsVlr",
".",
"official_user_id",
"(",
")",
",",
"GeoDoubleParamsVlr",
".",
"official_record_ids",
"(",
")",
")",
"[",
"0",
"]",
"geo_ascii",
"=",
"vlr_list",
".",
"get_by_id",
"(",
"GeoAsciiParamsVlr",
".",
"official_user_id",
"(",
")",
",",
"GeoAsciiParamsVlr",
".",
"official_record_ids",
"(",
")",
")",
"[",
"0",
"]",
"return",
"parse_geo_tiff",
"(",
"geo_key_dir",
",",
"geo_doubles",
",",
"geo_ascii",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
parse_geo_tiff
|
Parses the GeoTiff VLRs information into nicer structs
|
pylas/vlrs/geotiff.py
|
def parse_geo_tiff(
key_dir_vlr: GeoKeyDirectoryVlr,
double_vlr: GeoDoubleParamsVlr,
ascii_vlr: GeoAsciiParamsVlr,
) -> List[GeoTiffKey]:
""" Parses the GeoTiff VLRs information into nicer structs
"""
geotiff_keys = []
for k in key_dir_vlr.geo_keys:
if k.tiff_tag_location == 0:
value = k.value_offset
elif k.tiff_tag_location == 34736:
value = double_vlr.doubles[k.value_offset]
elif k.tiff_tag_location == 34737:
try:
value = ascii_vlr.strings[k.value_offset][k.count :]
except IndexError:
# Maybe I'm just misunderstanding the specification :thinking:
value = ascii_vlr.strings[0][k.value_offset : k.value_offset + k.count]
else:
logger.warning(
"GeoTiffKey with unknown tiff tag location ({})".format(
k.tiff_tag_location
)
)
continue
geotiff_keys.append(GeoTiffKey(k.id, value))
return geotiff_keys
|
def parse_geo_tiff(
key_dir_vlr: GeoKeyDirectoryVlr,
double_vlr: GeoDoubleParamsVlr,
ascii_vlr: GeoAsciiParamsVlr,
) -> List[GeoTiffKey]:
""" Parses the GeoTiff VLRs information into nicer structs
"""
geotiff_keys = []
for k in key_dir_vlr.geo_keys:
if k.tiff_tag_location == 0:
value = k.value_offset
elif k.tiff_tag_location == 34736:
value = double_vlr.doubles[k.value_offset]
elif k.tiff_tag_location == 34737:
try:
value = ascii_vlr.strings[k.value_offset][k.count :]
except IndexError:
# Maybe I'm just misunderstanding the specification :thinking:
value = ascii_vlr.strings[0][k.value_offset : k.value_offset + k.count]
else:
logger.warning(
"GeoTiffKey with unknown tiff tag location ({})".format(
k.tiff_tag_location
)
)
continue
geotiff_keys.append(GeoTiffKey(k.id, value))
return geotiff_keys
|
[
"Parses",
"the",
"GeoTiff",
"VLRs",
"information",
"into",
"nicer",
"structs"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/geotiff.py#L52-L81
|
[
"def",
"parse_geo_tiff",
"(",
"key_dir_vlr",
":",
"GeoKeyDirectoryVlr",
",",
"double_vlr",
":",
"GeoDoubleParamsVlr",
",",
"ascii_vlr",
":",
"GeoAsciiParamsVlr",
",",
")",
"->",
"List",
"[",
"GeoTiffKey",
"]",
":",
"geotiff_keys",
"=",
"[",
"]",
"for",
"k",
"in",
"key_dir_vlr",
".",
"geo_keys",
":",
"if",
"k",
".",
"tiff_tag_location",
"==",
"0",
":",
"value",
"=",
"k",
".",
"value_offset",
"elif",
"k",
".",
"tiff_tag_location",
"==",
"34736",
":",
"value",
"=",
"double_vlr",
".",
"doubles",
"[",
"k",
".",
"value_offset",
"]",
"elif",
"k",
".",
"tiff_tag_location",
"==",
"34737",
":",
"try",
":",
"value",
"=",
"ascii_vlr",
".",
"strings",
"[",
"k",
".",
"value_offset",
"]",
"[",
"k",
".",
"count",
":",
"]",
"except",
"IndexError",
":",
"# Maybe I'm just misunderstanding the specification :thinking:",
"value",
"=",
"ascii_vlr",
".",
"strings",
"[",
"0",
"]",
"[",
"k",
".",
"value_offset",
":",
"k",
".",
"value_offset",
"+",
"k",
".",
"count",
"]",
"else",
":",
"logger",
".",
"warning",
"(",
"\"GeoTiffKey with unknown tiff tag location ({})\"",
".",
"format",
"(",
"k",
".",
"tiff_tag_location",
")",
")",
"continue",
"geotiff_keys",
".",
"append",
"(",
"GeoTiffKey",
"(",
"k",
".",
"id",
",",
"value",
")",
")",
"return",
"geotiff_keys"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
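The tiff_tag_location switch above follows the GeoTIFF key-entry convention: 0 means the short value is stored inline in value_offset, 34736 indexes into the GeoDoubleParamsVlr doubles, and 34737 addresses characters of the GeoAsciiParamsVlr. Hypothetical key entries mirroring the three branches (ids and values are illustrative only):

# (key id, tiff_tag_location, count, value_offset)
entries = [
    (1024, 0, 1, 1),      # inline short -> GeoTiffKey(1024, 1)
    (3078, 34736, 1, 0),  # double -> value taken from the doubles VLR at index 0
    (1026, 34737, 20, 0), # ascii -> 20 characters sliced from the ascii VLR
]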
|
test
|
get_signedness_for_extra_dim
|
Returns the signedness for the given type index
Parameters
----------
type_index: int
index of the type as defined in the LAS Specification
Returns
-------
DimensionSignedness,
the enum variant
|
pylas/extradims.py
|
def get_signedness_for_extra_dim(type_index):
""" Returns the signedness foe the given type index
Parameters
----------
type_index: int
index of the type as defined in the LAS Specification
Returns
-------
DimensionSignedness,
the enum variant
"""
try:
t = _extra_dims_style_2[type_index]
if "uint" in t:
return DimensionSignedness.UNSIGNED
elif "int" in t:
return DimensionSignedness.SIGNED
else:
return DimensionSignedness.FLOATING
except IndexError:
raise errors.UnknownExtraType(type_index)
|
def get_signedness_for_extra_dim(type_index):
""" Returns the signedness foe the given type index
Parameters
----------
type_index: int
index of the type as defined in the LAS Specification
Returns
-------
DimensionSignedness,
the enum variant
"""
try:
t = _extra_dims_style_2[type_index]
if "uint" in t:
return DimensionSignedness.UNSIGNED
elif "int" in t:
return DimensionSignedness.SIGNED
else:
return DimensionSignedness.FLOATING
except IndexError:
raise errors.UnknownExtraType(type_index)
|
[
"Returns",
"the",
"signedness",
"foe",
"the",
"given",
"type",
"index"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/extradims.py#L67-L89
|
[
"def",
"get_signedness_for_extra_dim",
"(",
"type_index",
")",
":",
"try",
":",
"t",
"=",
"_extra_dims_style_2",
"[",
"type_index",
"]",
"if",
"\"uint\"",
"in",
"t",
":",
"return",
"DimensionSignedness",
".",
"UNSIGNED",
"elif",
"\"int\"",
"in",
"t",
":",
"return",
"DimensionSignedness",
".",
"SIGNED",
"else",
":",
"return",
"DimensionSignedness",
".",
"FLOATING",
"except",
"IndexError",
":",
"raise",
"errors",
".",
"UnknownExtraType",
"(",
"type_index",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
get_id_for_extra_dim_type
|
Returns the index of the type as defined in the LAS Specification
Parameters
----------
type_str: str
Returns
-------
int
index of the type
|
pylas/extradims.py
|
def get_id_for_extra_dim_type(type_str):
""" Returns the index of the type as defined in the LAS Specification
Parameters
----------
type_str: str
Returns
-------
int
index of the type
"""
try:
return _type_to_extra_dim_id_style_1[type_str]
except KeyError:
try:
return _type_to_extra_dim_id_style_2[type_str]
except KeyError:
raise errors.UnknownExtraType(type_str)
|
def get_id_for_extra_dim_type(type_str):
""" Returns the index of the type as defined in the LAS Specification
Parameters
----------
type_str: str
Returns
-------
int
index of the type
"""
try:
return _type_to_extra_dim_id_style_1[type_str]
except KeyError:
try:
return _type_to_extra_dim_id_style_2[type_str]
except KeyError:
raise errors.UnknownExtraType(type_str)
|
[
"Returns",
"the",
"index",
"of",
"the",
"type",
"as",
"defined",
"in",
"the",
"LAS",
"Specification"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/extradims.py#L111-L130
|
[
"def",
"get_id_for_extra_dim_type",
"(",
"type_str",
")",
":",
"try",
":",
"return",
"_type_to_extra_dim_id_style_1",
"[",
"type_str",
"]",
"except",
"KeyError",
":",
"try",
":",
"return",
"_type_to_extra_dim_id_style_2",
"[",
"type_str",
"]",
"except",
"KeyError",
":",
"raise",
"errors",
".",
"UnknownExtraType",
"(",
"type_str",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
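A usage sketch of the two-level lookup above; the concrete type name follows the 'uint8' example in the docstring:

type_id = get_id_for_extra_dim_type("uint8")  # resolved via the style 1 or style 2 table
# A name missing from both tables falls through both KeyError handlers:
# get_id_for_extra_dim_type("uint128") raises errors.UnknownExtraType("uint128")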
|
test
|
PointRecord.from_point_record
|
Construct a new PackedPointRecord from an existing one with the ability to change
the point format while doing so
|
pylas/point/record.py
|
def from_point_record(cls, other_point_record, new_point_format):
""" Construct a new PackedPointRecord from an existing one with the ability to change
the point format while doing so
"""
array = np.zeros_like(other_point_record.array, dtype=new_point_format.dtype)
new_record = cls(array, new_point_format)
new_record.copy_fields_from(other_point_record)
return new_record
|
def from_point_record(cls, other_point_record, new_point_format):
""" Construct a new PackedPointRecord from an existing one with the ability to change
the point format while doing so
"""
array = np.zeros_like(other_point_record.array, dtype=new_point_format.dtype)
new_record = cls(array, new_point_format)
new_record.copy_fields_from(other_point_record)
return new_record
|
[
"Construct",
"a",
"new",
"PackedPointRecord",
"from",
"an",
"existing",
"one",
"with",
"the",
"ability",
"to",
"change",
"to",
"point",
"format",
"while",
"doing",
"so"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L121-L128
|
[
"def",
"from_point_record",
"(",
"cls",
",",
"other_point_record",
",",
"new_point_format",
")",
":",
"array",
"=",
"np",
".",
"zeros_like",
"(",
"other_point_record",
".",
"array",
",",
"dtype",
"=",
"new_point_format",
".",
"dtype",
")",
"new_record",
"=",
"cls",
"(",
"array",
",",
"new_point_format",
")",
"new_record",
".",
"copy_fields_from",
"(",
"other_point_record",
")",
"return",
"new_record"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
PointRecord.copy_fields_from
|
Tries to copy the values of the current dimensions from other_record
|
pylas/point/record.py
|
def copy_fields_from(self, other_record):
""" Tries to copy the values of the current dimensions from other_record
"""
for dim_name in self.dimensions_names:
try:
self[dim_name] = other_record[dim_name]
except ValueError:
pass
|
def copy_fields_from(self, other_record):
""" Tries to copy the values of the current dimensions from other_record
"""
for dim_name in self.dimensions_names:
try:
self[dim_name] = other_record[dim_name]
except ValueError:
pass
|
[
"Tries",
"to",
"copy",
"the",
"values",
"of",
"the",
"current",
"dimensions",
"from",
"other_record"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L130-L137
|
[
"def",
"copy_fields_from",
"(",
"self",
",",
"other_record",
")",
":",
"for",
"dim_name",
"in",
"self",
".",
"dimensions_names",
":",
"try",
":",
"self",
"[",
"dim_name",
"]",
"=",
"other_record",
"[",
"dim_name",
"]",
"except",
"ValueError",
":",
"pass"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
PointRecord._append_zeros_if_too_small
|
Appends zeros to the points stored if the value we are trying to
fit is bigger
|
pylas/point/record.py
|
def _append_zeros_if_too_small(self, value):
""" Appends zeros to the points stored if the value we are trying to
fit is bigger
"""
size_diff = len(value) - len(self.array)
if size_diff:
self.array = np.append(
self.array, np.zeros(size_diff, dtype=self.array.dtype)
)
|
def _append_zeros_if_too_small(self, value):
""" Appends zeros to the points stored if the value we are trying to
fit is bigger
"""
size_diff = len(value) - len(self.array)
if size_diff:
self.array = np.append(
self.array, np.zeros(size_diff, dtype=self.array.dtype)
)
|
[
"Appends",
"zeros",
"to",
"the",
"points",
"stored",
"if",
"the",
"value",
"we",
"are",
"trying",
"to",
"fit",
"is",
"bigger"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L155-L163
|
[
"def",
"_append_zeros_if_too_small",
"(",
"self",
",",
"value",
")",
":",
"size_diff",
"=",
"len",
"(",
"value",
")",
"-",
"len",
"(",
"self",
".",
"array",
")",
"if",
"size_diff",
":",
"self",
".",
"array",
"=",
"np",
".",
"append",
"(",
"self",
".",
"array",
",",
"np",
".",
"zeros",
"(",
"size_diff",
",",
"dtype",
"=",
"self",
".",
"array",
".",
"dtype",
")",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
PackedPointRecord.all_dimensions_names
|
Returns all the dimensions names, including the names of sub_fields
and their corresponding packed fields
|
pylas/point/record.py
|
def all_dimensions_names(self):
""" Returns all the dimensions names, including the names of sub_fields
and their corresponding packed fields
"""
return frozenset(self.array.dtype.names + tuple(self.sub_fields_dict.keys()))
|
def all_dimensions_names(self):
""" Returns all the dimensions names, including the names of sub_fields
and their corresponding packed fields
"""
return frozenset(self.array.dtype.names + tuple(self.sub_fields_dict.keys()))
|
[
"Returns",
"all",
"the",
"dimensions",
"names",
"including",
"the",
"names",
"of",
"sub_fields",
"and",
"their",
"corresponding",
"packed",
"fields"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L199-L203
|
[
"def",
"all_dimensions_names",
"(",
"self",
")",
":",
"return",
"frozenset",
"(",
"self",
".",
"array",
".",
"dtype",
".",
"names",
"+",
"tuple",
"(",
"self",
".",
"sub_fields_dict",
".",
"keys",
"(",
")",
")",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
PackedPointRecord.zeros
|
Creates a new point record with all dimensions initialized to zero
Parameters
----------
point_format
The point format the point record should have
point_count : int
The number of points the point record should have
Returns
-------
PackedPointRecord
|
pylas/point/record.py
|
def zeros(cls, point_format, point_count):
""" Creates a new point record with all dimensions initialized to zero
Parameters
----------
point_format
The point format the point record should have
point_count : int
The number of points the point record should have
Returns
-------
PackedPointRecord
"""
data = np.zeros(point_count, point_format.dtype)
return cls(data, point_format)
|
def zeros(cls, point_format, point_count):
""" Creates a new point record with all dimensions initialized to zero
Parameters
----------
point_format
The point format the point record should have
point_count : int
The number of points the point record should have
Returns
-------
PackedPointRecord
"""
data = np.zeros(point_count, point_format.dtype)
return cls(data, point_format)
|
[
"Creates",
"a",
"new",
"point",
"record",
"with",
"all",
"dimensions",
"initialized",
"to",
"zero"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L218-L234
|
[
"def",
"zeros",
"(",
"cls",
",",
"point_format",
",",
"point_count",
")",
":",
"data",
"=",
"np",
".",
"zeros",
"(",
"point_count",
",",
"point_format",
".",
"dtype",
")",
"return",
"cls",
"(",
"data",
",",
"point_format",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
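A minimal allocation sketch for zeros; it assumes, as the methods shown in these records suggest, that the constructor simply stores the (array, point_format) pair, and it uses a toy stand-in exposing only the .dtype the classmethod reads (the real PointFormat type is not shown here):

import numpy as np

class ToyPointFormat:
    # simplified stand-in: zeros() only reads .dtype
    dtype = np.dtype([("X", "i4"), ("Y", "i4"), ("Z", "i4")])

record = PackedPointRecord.zeros(ToyPointFormat(), 100)
assert len(record.array) == 100  # every dimension starts zero-filled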
|
test
|
PackedPointRecord.from_stream
|
Construct the point record by reading the points from the stream
|
pylas/point/record.py
|
def from_stream(cls, stream, point_format, count):
""" Construct the point record by reading the points from the stream
"""
points_dtype = point_format.dtype
point_data_buffer = bytearray(stream.read(count * points_dtype.itemsize))
try:
data = np.frombuffer(point_data_buffer, dtype=points_dtype, count=count)
except ValueError:
expected_bytes_len = count * points_dtype.itemsize
if len(point_data_buffer) % points_dtype.itemsize != 0:
missing_bytes_len = expected_bytes_len - len(point_data_buffer)
raise_not_enough_bytes_error(
expected_bytes_len,
missing_bytes_len,
len(point_data_buffer),
points_dtype,
)
else:
actual_count = len(point_data_buffer) // points_dtype.itemsize
logger.critical(
"Expected {} points, there are {} ({} missing)".format(
count, actual_count, count - actual_count
)
)
data = np.frombuffer(
point_data_buffer, dtype=points_dtype, count=actual_count
)
return cls(data, point_format)
|
def from_stream(cls, stream, point_format, count):
""" Construct the point record by reading the points from the stream
"""
points_dtype = point_format.dtype
point_data_buffer = bytearray(stream.read(count * points_dtype.itemsize))
try:
data = np.frombuffer(point_data_buffer, dtype=points_dtype, count=count)
except ValueError:
expected_bytes_len = count * points_dtype.itemsize
if len(point_data_buffer) % points_dtype.itemsize != 0:
missing_bytes_len = expected_bytes_len - len(point_data_buffer)
raise_not_enough_bytes_error(
expected_bytes_len,
missing_bytes_len,
len(point_data_buffer),
points_dtype,
)
else:
actual_count = len(point_data_buffer) // points_dtype.itemsize
logger.critical(
"Expected {} points, there are {} ({} missing)".format(
count, actual_count, count - actual_count
)
)
data = np.frombuffer(
point_data_buffer, dtype=points_dtype, count=actual_count
)
return cls(data, point_format)
|
[
"Construct",
"the",
"point",
"record",
"by",
"reading",
"the",
"points",
"from",
"the",
"stream"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L253-L282
|
[
"def",
"from_stream",
"(",
"cls",
",",
"stream",
",",
"point_format",
",",
"count",
")",
":",
"points_dtype",
"=",
"point_format",
".",
"dtype",
"point_data_buffer",
"=",
"bytearray",
"(",
"stream",
".",
"read",
"(",
"count",
"*",
"points_dtype",
".",
"itemsize",
")",
")",
"try",
":",
"data",
"=",
"np",
".",
"frombuffer",
"(",
"point_data_buffer",
",",
"dtype",
"=",
"points_dtype",
",",
"count",
"=",
"count",
")",
"except",
"ValueError",
":",
"expected_bytes_len",
"=",
"count",
"*",
"points_dtype",
".",
"itemsize",
"if",
"len",
"(",
"point_data_buffer",
")",
"%",
"points_dtype",
".",
"itemsize",
"!=",
"0",
":",
"missing_bytes_len",
"=",
"expected_bytes_len",
"-",
"len",
"(",
"point_data_buffer",
")",
"raise_not_enough_bytes_error",
"(",
"expected_bytes_len",
",",
"missing_bytes_len",
",",
"len",
"(",
"point_data_buffer",
")",
",",
"points_dtype",
",",
")",
"else",
":",
"actual_count",
"=",
"len",
"(",
"point_data_buffer",
")",
"//",
"points_dtype",
".",
"itemsize",
"logger",
".",
"critical",
"(",
"\"Expected {} points, there are {} ({} missing)\"",
".",
"format",
"(",
"count",
",",
"actual_count",
",",
"count",
"-",
"actual_count",
")",
")",
"data",
"=",
"np",
".",
"frombuffer",
"(",
"point_data_buffer",
",",
"dtype",
"=",
"points_dtype",
",",
"count",
"=",
"actual_count",
")",
"return",
"cls",
"(",
"data",
",",
"point_format",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
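The ValueError branch above separates a torn final point from whole missing points. A worked pass through the arithmetic with illustrative numbers (itemsize 20 bytes, count 100, so 2000 bytes expected):

# stream returned 1980 bytes: 1980 % 20 == 0 -> actual_count = 1980 // 20 == 99,
# a critical log reports 1 missing point and the 99 complete points are parsed.
# stream returned 1985 bytes: 1985 % 20 != 0
# -> raise_not_enough_bytes_error(2000, 15, 1985, points_dtype) aborts the read.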
|
test
|
PackedPointRecord.from_compressed_buffer
|
Construct the point record by reading and decompressing the points data from
the input buffer
|
pylas/point/record.py
|
def from_compressed_buffer(cls, compressed_buffer, point_format, count, laszip_vlr):
""" Construct the point record by reading and decompressing the points data from
the input buffer
"""
point_dtype = point_format.dtype
uncompressed = decompress_buffer(
compressed_buffer, point_dtype, count, laszip_vlr
)
return cls(uncompressed, point_format)
|
def from_compressed_buffer(cls, compressed_buffer, point_format, count, laszip_vlr):
""" Construct the point record by reading and decompressing the points data from
the input buffer
"""
point_dtype = point_format.dtype
uncompressed = decompress_buffer(
compressed_buffer, point_dtype, count, laszip_vlr
)
return cls(uncompressed, point_format)
|
[
"Construct",
"the",
"point",
"record",
"by",
"reading",
"and",
"decompressing",
"the",
"points",
"data",
"from",
"the",
"input",
"buffer"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/record.py#L292-L300
|
[
"def",
"from_compressed_buffer",
"(",
"cls",
",",
"compressed_buffer",
",",
"point_format",
",",
"count",
",",
"laszip_vlr",
")",
":",
"point_dtype",
"=",
"point_format",
".",
"dtype",
"uncompressed",
"=",
"decompress_buffer",
"(",
"compressed_buffer",
",",
"point_dtype",
",",
"count",
",",
"laszip_vlr",
")",
"return",
"cls",
"(",
"uncompressed",
",",
"point_format",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasBase.x
|
Returns the scaled x positions of the points as doubles
|
pylas/lasdatas/base.py
|
def x(self):
""" Returns the scaled x positions of the points as doubles
"""
return scale_dimension(self.X, self.header.x_scale, self.header.x_offset)
|
def x(self):
""" Returns the scaled x positions of the points as doubles
"""
return scale_dimension(self.X, self.header.x_scale, self.header.x_offset)
|
[
"Returns",
"the",
"scaled",
"x",
"positions",
"of",
"the",
"points",
"as",
"doubles"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L60-L63
|
[
"def",
"x",
"(",
"self",
")",
":",
"return",
"scale_dimension",
"(",
"self",
".",
"X",
",",
"self",
".",
"header",
".",
"x_scale",
",",
"self",
".",
"header",
".",
"x_offset",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasBase.y
|
Returns the scaled y positions of the points as doubles
|
pylas/lasdatas/base.py
|
def y(self):
""" Returns the scaled y positions of the points as doubles
"""
return scale_dimension(self.Y, self.header.y_scale, self.header.y_offset)
|
def y(self):
""" Returns the scaled y positions of the points as doubles
"""
return scale_dimension(self.Y, self.header.y_scale, self.header.y_offset)
|
[
"Returns",
"the",
"scaled",
"y",
"positions",
"of",
"the",
"points",
"as",
"doubles"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L66-L69
|
[
"def",
"y",
"(",
"self",
")",
":",
"return",
"scale_dimension",
"(",
"self",
".",
"Y",
",",
"self",
".",
"header",
".",
"y_scale",
",",
"self",
".",
"header",
".",
"y_offset",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasBase.z
|
Returns the scaled z positions of the points as doubles
|
pylas/lasdatas/base.py
|
def z(self):
""" Returns the scaled z positions of the points as doubles
"""
return scale_dimension(self.Z, self.header.z_scale, self.header.z_offset)
|
def z(self):
""" Returns the scaled z positions of the points as doubles
"""
return scale_dimension(self.Z, self.header.z_scale, self.header.z_offset)
|
[
"Returns",
"the",
"scaled",
"z",
"positions",
"of",
"the",
"points",
"as",
"doubles"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L72-L75
|
[
"def",
"z",
"(",
"self",
")",
":",
"return",
"scale_dimension",
"(",
"self",
".",
"Z",
",",
"self",
".",
"header",
".",
"z_scale",
",",
"self",
".",
"header",
".",
"z_offset",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
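The x, y and z properties above apply the same affine map to the raw integer coordinates. Assuming scale_dimension follows the usual LAS convention, a one-line sketch of it:

def scale_dimension(raw, scale, offset):
    # stored integer coordinate -> real-world double
    return raw * scale + offset

so that x == X * x_scale + x_offset, and likewise for y and z.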
|
test
|
LasBase.points
|
Setter for the points property,
Takes care of changing the point_format of the file
(as long as the point format of the new points is compatible with the file version)
Parameters
----------
value: numpy.array of the new points
|
pylas/lasdatas/base.py
|
def points(self, value):
""" Setter for the points property,
Takes care of changing the point_format of the file
(as long as the point format of the new points is compatible with the file version)
Parameters
----------
value: numpy.array of the new points
"""
if value.dtype != self.points.dtype:
raise errors.IncompatibleDataFormat('Cannot set points with a different point format, convert first')
new_point_record = record.PackedPointRecord(value, self.points_data.point_format)
dims.raise_if_version_not_compatible_with_fmt(
new_point_record.point_format.id, self.header.version
)
self.points_data = new_point_record
self.update_header()
|
def points(self, value):
""" Setter for the points property,
Takes care of changing the point_format of the file
(as long as the point format of the new points is compatible with the file version)
Parameters
----------
value: numpy.array of the new points
"""
if value.dtype != self.points.dtype:
raise errors.IncompatibleDataFormat('Cannot set points with a different point format, convert first')
new_point_record = record.PackedPointRecord(value, self.points_data.point_format)
dims.raise_if_version_not_compatible_with_fmt(
new_point_record.point_format.id, self.header.version
)
self.points_data = new_point_record
self.update_header()
|
[
"Setter",
"for",
"the",
"points",
"property",
"Takes",
"care",
"of",
"changing",
"the",
"point_format",
"of",
"the",
"file",
"(",
"as",
"long",
"as",
"the",
"point",
"format",
"of",
"the",
"new",
"points",
"it",
"compatible",
"with",
"the",
"file",
"version",
")"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L126-L143
|
[
"def",
"points",
"(",
"self",
",",
"value",
")",
":",
"if",
"value",
".",
"dtype",
"!=",
"self",
".",
"points",
".",
"dtype",
":",
"raise",
"errors",
".",
"IncompatibleDataFormat",
"(",
"'Cannot set points with a different point format, convert first'",
")",
"new_point_record",
"=",
"record",
".",
"PackedPointRecord",
"(",
"value",
",",
"self",
".",
"points_data",
".",
"point_format",
")",
"dims",
".",
"raise_if_version_not_compatible_with_fmt",
"(",
"new_point_record",
".",
"point_format",
".",
"id",
",",
"self",
".",
"header",
".",
"version",
")",
"self",
".",
"points_data",
"=",
"new_point_record",
"self",
".",
"update_header",
"(",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasBase.add_extra_dim
|
Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension
|
pylas/lasdatas/base.py
|
def add_extra_dim(self, name, type, description=""):
""" Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension
"""
name = name.replace(" ", "_")
type_id = extradims.get_id_for_extra_dim_type(type)
extra_byte = ExtraBytesStruct(
data_type=type_id, name=name.encode(), description=description.encode()
)
try:
extra_bytes_vlr = self.vlrs.get("ExtraBytesVlr")[0]
except IndexError:
extra_bytes_vlr = ExtraBytesVlr()
self.vlrs.append(extra_bytes_vlr)
finally:
extra_bytes_vlr.extra_bytes_structs.append(extra_byte)
self.points_data.add_extra_dims([(name, type)])
|
def add_extra_dim(self, name, type, description=""):
""" Adds a new extra dimension to the point record
Parameters
----------
name: str
the name of the dimension
type: str
type of the dimension (eg 'uint8')
description: str, optional
a small description of the dimension
"""
name = name.replace(" ", "_")
type_id = extradims.get_id_for_extra_dim_type(type)
extra_byte = ExtraBytesStruct(
data_type=type_id, name=name.encode(), description=description.encode()
)
try:
extra_bytes_vlr = self.vlrs.get("ExtraBytesVlr")[0]
except IndexError:
extra_bytes_vlr = ExtraBytesVlr()
self.vlrs.append(extra_bytes_vlr)
finally:
extra_bytes_vlr.extra_bytes_structs.append(extra_byte)
self.points_data.add_extra_dims([(name, type)])
|
[
"Adds",
"a",
"new",
"extra",
"dimension",
"to",
"the",
"point",
"record"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L185-L210
|
[
"def",
"add_extra_dim",
"(",
"self",
",",
"name",
",",
"type",
",",
"description",
"=",
"\"\"",
")",
":",
"name",
"=",
"name",
".",
"replace",
"(",
"\" \"",
",",
"\"_\"",
")",
"type_id",
"=",
"extradims",
".",
"get_id_for_extra_dim_type",
"(",
"type",
")",
"extra_byte",
"=",
"ExtraBytesStruct",
"(",
"data_type",
"=",
"type_id",
",",
"name",
"=",
"name",
".",
"encode",
"(",
")",
",",
"description",
"=",
"description",
".",
"encode",
"(",
")",
")",
"try",
":",
"extra_bytes_vlr",
"=",
"self",
".",
"vlrs",
".",
"get",
"(",
"\"ExtraBytesVlr\"",
")",
"[",
"0",
"]",
"except",
"IndexError",
":",
"extra_bytes_vlr",
"=",
"ExtraBytesVlr",
"(",
")",
"self",
".",
"vlrs",
".",
"append",
"(",
"extra_bytes_vlr",
")",
"finally",
":",
"extra_bytes_vlr",
".",
"extra_bytes_structs",
".",
"append",
"(",
"extra_byte",
")",
"self",
".",
"points_data",
".",
"add_extra_dims",
"(",
"[",
"(",
"name",
",",
"type",
")",
"]",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
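A usage sketch for add_extra_dim, assuming las is an opened LasData object:

las.add_extra_dim(name="confidence level", type="uint8", description="per-point score")
# Spaces are replaced, so the dimension is registered as "confidence_level";
# the try/except/finally above reuses an existing ExtraBytesVlr or appends a new one.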
|
test
|
LasBase.write_to
|
writes the data to a stream
Parameters
----------
out_stream: file object
the destination stream, implementing the write method
do_compress: bool, optional, default False
Flag to indicate if you want the data to be compressed
|
pylas/lasdatas/base.py
|
def write_to(self, out_stream, do_compress=False):
""" writes the data to a stream
Parameters
----------
out_stream: file object
the destination stream, implementing the write method
do_compress: bool, optional, default False
Flag to indicate if you want the data to be compressed
"""
self.update_header()
if (
self.vlrs.get("ExtraBytesVlr")
and not self.points_data.extra_dimensions_names
):
logger.error(
"Las contains an ExtraBytesVlr, but no extra bytes were found in the point_record, "
"removing the vlr"
)
self.vlrs.extract("ExtraBytesVlr")
if do_compress:
laz_vrl = create_laz_vlr(self.points_data)
self.vlrs.append(known.LasZipVlr(laz_vrl.data()))
raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs)
self.header.offset_to_point_data = (
self.header.size + raw_vlrs.total_size_in_bytes()
)
self.header.point_format_id = uncompressed_id_to_compressed(
self.header.point_format_id
)
self.header.number_of_vlr = len(raw_vlrs)
points_bytes = compress_buffer(
np.frombuffer(self.points_data.array, np.uint8),
laz_vrl.schema,
self.header.offset_to_point_data,
).tobytes()
else:
raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs)
self.header.number_of_vlr = len(raw_vlrs)
self.header.offset_to_point_data = (
self.header.size + raw_vlrs.total_size_in_bytes()
)
points_bytes = self.points_data.raw_bytes()
self.header.write_to(out_stream)
self._raise_if_not_expected_pos(out_stream, self.header.size)
raw_vlrs.write_to(out_stream)
self._raise_if_not_expected_pos(out_stream, self.header.offset_to_point_data)
out_stream.write(points_bytes)
|
def write_to(self, out_stream, do_compress=False):
""" writes the data to a stream
Parameters
----------
out_stream: file object
the destination stream, implementing the write method
do_compress: bool, optional, default False
Flag to indicate if you want the data to be compressed
"""
self.update_header()
if (
self.vlrs.get("ExtraBytesVlr")
and not self.points_data.extra_dimensions_names
):
logger.error(
"Las contains an ExtraBytesVlr, but no extra bytes were found in the point_record, "
"removing the vlr"
)
self.vlrs.extract("ExtraBytesVlr")
if do_compress:
laz_vrl = create_laz_vlr(self.points_data)
self.vlrs.append(known.LasZipVlr(laz_vrl.data()))
raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs)
self.header.offset_to_point_data = (
self.header.size + raw_vlrs.total_size_in_bytes()
)
self.header.point_format_id = uncompressed_id_to_compressed(
self.header.point_format_id
)
self.header.number_of_vlr = len(raw_vlrs)
points_bytes = compress_buffer(
np.frombuffer(self.points_data.array, np.uint8),
laz_vrl.schema,
self.header.offset_to_point_data,
).tobytes()
else:
raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs)
self.header.number_of_vlr = len(raw_vlrs)
self.header.offset_to_point_data = (
self.header.size + raw_vlrs.total_size_in_bytes()
)
points_bytes = self.points_data.raw_bytes()
self.header.write_to(out_stream)
self._raise_if_not_expected_pos(out_stream, self.header.size)
raw_vlrs.write_to(out_stream)
self._raise_if_not_expected_pos(out_stream, self.header.offset_to_point_data)
out_stream.write(points_bytes)
|
[
"writes",
"the",
"data",
"to",
"a",
"stream"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L229-L283
|
[
"def",
"write_to",
"(",
"self",
",",
"out_stream",
",",
"do_compress",
"=",
"False",
")",
":",
"self",
".",
"update_header",
"(",
")",
"if",
"(",
"self",
".",
"vlrs",
".",
"get",
"(",
"\"ExtraBytesVlr\"",
")",
"and",
"not",
"self",
".",
"points_data",
".",
"extra_dimensions_names",
")",
":",
"logger",
".",
"error",
"(",
"\"Las contains an ExtraBytesVlr, but no extra bytes were found in the point_record, \"",
"\"removing the vlr\"",
")",
"self",
".",
"vlrs",
".",
"extract",
"(",
"\"ExtraBytesVlr\"",
")",
"if",
"do_compress",
":",
"laz_vrl",
"=",
"create_laz_vlr",
"(",
"self",
".",
"points_data",
")",
"self",
".",
"vlrs",
".",
"append",
"(",
"known",
".",
"LasZipVlr",
"(",
"laz_vrl",
".",
"data",
"(",
")",
")",
")",
"raw_vlrs",
"=",
"vlrlist",
".",
"RawVLRList",
".",
"from_list",
"(",
"self",
".",
"vlrs",
")",
"self",
".",
"header",
".",
"offset_to_point_data",
"=",
"(",
"self",
".",
"header",
".",
"size",
"+",
"raw_vlrs",
".",
"total_size_in_bytes",
"(",
")",
")",
"self",
".",
"header",
".",
"point_format_id",
"=",
"uncompressed_id_to_compressed",
"(",
"self",
".",
"header",
".",
"point_format_id",
")",
"self",
".",
"header",
".",
"number_of_vlr",
"=",
"len",
"(",
"raw_vlrs",
")",
"points_bytes",
"=",
"compress_buffer",
"(",
"np",
".",
"frombuffer",
"(",
"self",
".",
"points_data",
".",
"array",
",",
"np",
".",
"uint8",
")",
",",
"laz_vrl",
".",
"schema",
",",
"self",
".",
"header",
".",
"offset_to_point_data",
",",
")",
".",
"tobytes",
"(",
")",
"else",
":",
"raw_vlrs",
"=",
"vlrlist",
".",
"RawVLRList",
".",
"from_list",
"(",
"self",
".",
"vlrs",
")",
"self",
".",
"header",
".",
"number_of_vlr",
"=",
"len",
"(",
"raw_vlrs",
")",
"self",
".",
"header",
".",
"offset_to_point_data",
"=",
"(",
"self",
".",
"header",
".",
"size",
"+",
"raw_vlrs",
".",
"total_size_in_bytes",
"(",
")",
")",
"points_bytes",
"=",
"self",
".",
"points_data",
".",
"raw_bytes",
"(",
")",
"self",
".",
"header",
".",
"write_to",
"(",
"out_stream",
")",
"self",
".",
"_raise_if_not_expected_pos",
"(",
"out_stream",
",",
"self",
".",
"header",
".",
"size",
")",
"raw_vlrs",
".",
"write_to",
"(",
"out_stream",
")",
"self",
".",
"_raise_if_not_expected_pos",
"(",
"out_stream",
",",
"self",
".",
"header",
".",
"offset_to_point_data",
")",
"out_stream",
".",
"write",
"(",
"points_bytes",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasBase.write_to_file
|
Writes the las data into a file
Parameters
----------
filename : str
The file where the data should be written.
do_compress: bool, optional, default None
if None the extension of the filename will be used
to determine if the data should be compressed
otherwise the do_compress flag indicates if the data should be compressed
|
pylas/lasdatas/base.py
|
def write_to_file(self, filename, do_compress=None):
""" Writes the las data into a file
Parameters
----------
filename : str
The file where the data should be written.
do_compress: bool, optional, default None
if None the extension of the filename will be used
to determine if the data should be compressed
otherwise the do_compress flag indicates if the data should be compressed
"""
is_ext_laz = filename.split(".")[-1] == "laz"
if is_ext_laz and do_compress is None:
do_compress = True
with open(filename, mode="wb") as out:
self.write_to(out, do_compress=do_compress)
|
def write_to_file(self, filename, do_compress=None):
""" Writes the las data into a file
Parameters
----------
filename : str
The file where the data should be written.
do_compress: bool, optional, default None
if None the extension of the filename will be used
to determine if the data should be compressed
otherwise the do_compress flag indicates if the data should be compressed
"""
is_ext_laz = filename.split(".")[-1] == "laz"
if is_ext_laz and do_compress is None:
do_compress = True
with open(filename, mode="wb") as out:
self.write_to(out, do_compress=do_compress)
|
[
"Writes",
"the",
"las",
"data",
"into",
"a",
"file"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L294-L310
|
[
"def",
"write_to_file",
"(",
"self",
",",
"filename",
",",
"do_compress",
"=",
"None",
")",
":",
"is_ext_laz",
"=",
"filename",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"==",
"\"laz\"",
"if",
"is_ext_laz",
"and",
"do_compress",
"is",
"None",
":",
"do_compress",
"=",
"True",
"with",
"open",
"(",
"filename",
",",
"mode",
"=",
"\"wb\"",
")",
"as",
"out",
":",
"self",
".",
"write_to",
"(",
"out",
",",
"do_compress",
"=",
"do_compress",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasBase.write
|
Writes to a stream or file
When destination is a string, it will be interpreted as the path where the file should be written to,
also if do_compress is None, the compression will be guessed from the file extension:
- .laz -> compressed
- .las -> uncompressed
.. note::
This means that you could do something like:
# Create .laz but not compressed
las.write('out.laz', do_compress=False)
# Create .las but compressed
las.write('out.las', do_compress=True)
While it should not confuse Las/Laz readers, it will confuse humans so avoid doing it
Parameters
----------
destination: str or file object
filename or stream to write to
do_compress: bool, optional
Flag to indicate if you want to compress the data
|
pylas/lasdatas/base.py
|
def write(self, destination, do_compress=None):
""" Writes to a stream or file
When destination is a string, it will be interpreted as the path where the file should be written to,
also if do_compress is None, the compression will be guessed from the file extension:
- .laz -> compressed
- .las -> uncompressed
.. note::
This means that you could do something like:
# Create .laz but not compressed
las.write('out.laz', do_compress=False)
# Create .las but compressed
las.write('out.las', do_compress=True)
While it should not confuse Las/Laz readers, it will confuse humans so avoid doing it
Parameters
----------
destination: str or file object
filename or stream to write to
do_compress: bool, optional
Flag to indicate if you want to compress the data
"""
if isinstance(destination, str):
self.write_to_file(destination)
else:
if do_compress is None:
do_compress = False
self.write_to(destination, do_compress=do_compress)
|
def write(self, destination, do_compress=None):
""" Writes to a stream or file
When destination is a string, it will be interpreted as the path where the file should be written to,
also if do_compress is None, the compression will be guessed from the file extension:
- .laz -> compressed
- .las -> uncompressed
.. note::
This means that you could do something like:
# Create .laz but not compressed
las.write('out.laz', do_compress=False)
# Create .las but compressed
las.write('out.las', do_compress=True)
While it should not confuse Las/Laz readers, it will confuse humans so avoid doing it
Parameters
----------
destination: str or file object
filename or stream to write to
do_compress: bool, optional
Flag to indicate if you want to compress the data
"""
if isinstance(destination, str):
self.write_to_file(destination)
else:
if do_compress is None:
do_compress = False
self.write_to(destination, do_compress=do_compress)
|
[
"Writes",
"to",
"a",
"stream",
"or",
"file"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasdatas/base.py#L312-L347
|
[
"def",
"write",
"(",
"self",
",",
"destination",
",",
"do_compress",
"=",
"None",
")",
":",
"if",
"isinstance",
"(",
"destination",
",",
"str",
")",
":",
"self",
".",
"write_to_file",
"(",
"destination",
")",
"else",
":",
"if",
"do_compress",
"is",
"None",
":",
"do_compress",
"=",
"False",
"self",
".",
"write_to",
"(",
"destination",
",",
"do_compress",
"=",
"do_compress",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
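A dispatch sketch for write, assuming las is an existing LasData object. Note that in the code above the string branch calls write_to_file without forwarding do_compress, so for a filename the compression is decided by the extension alone:

las.write("out.laz")  # str destination -> write_to_file; ".laz" extension implies compression
with open("out.las", "wb") as f:
    las.write(f)  # stream destination -> write_to; do_compress defaults to False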
|
test
|
_build_point_formats_dtypes
|
Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are still packed, and need to be unpacked each time
you want to access them
|
pylas/point/dims.py
|
def _build_point_formats_dtypes(point_format_dimensions, dimensions_dict):
""" Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are still packed, and need to be unpacked each time
you want to access them
"""
return {
fmt_id: _point_format_to_dtype(point_fmt, dimensions_dict)
for fmt_id, point_fmt in point_format_dimensions.items()
}
|
def _build_point_formats_dtypes(point_format_dimensions, dimensions_dict):
""" Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are still packed, and need to be unpacked each time
you want to access them
"""
return {
fmt_id: _point_format_to_dtype(point_fmt, dimensions_dict)
for fmt_id, point_fmt in point_format_dimensions.items()
}
|
[
"Builds",
"the",
"dict",
"mapping",
"point",
"format",
"id",
"to",
"numpy",
".",
"dtype",
"In",
"the",
"dtypes",
"bit",
"fields",
"are",
"still",
"packed",
"and",
"need",
"to",
"be",
"unpacked",
"each",
"time",
"you",
"want",
"to",
"access",
"them"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/dims.py#L29-L37
|
[
"def",
"_build_point_formats_dtypes",
"(",
"point_format_dimensions",
",",
"dimensions_dict",
")",
":",
"return",
"{",
"fmt_id",
":",
"_point_format_to_dtype",
"(",
"point_fmt",
",",
"dimensions_dict",
")",
"for",
"fmt_id",
",",
"point_fmt",
"in",
"point_format_dimensions",
".",
"items",
"(",
")",
"}"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
_build_unpacked_point_formats_dtypes
|
Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are unpacked and can be accessed directly
|
pylas/point/dims.py
|
def _build_unpacked_point_formats_dtypes(
point_formats_dimensions, composed_fields_dict, dimensions_dict
):
""" Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are unpacked and can be accessed directly
"""
unpacked_dtypes = {}
for fmt_id, dim_names in point_formats_dimensions.items():
composed_dims, dtype = composed_fields_dict[fmt_id], []
for dim_name in dim_names:
if dim_name in composed_dims:
dtype.extend((f.name, f.type) for f in composed_dims[dim_name])
else:
dtype.append(dimensions_dict[dim_name])
unpacked_dtypes[fmt_id] = np.dtype(dtype)
return unpacked_dtypes
|
def _build_unpacked_point_formats_dtypes(
point_formats_dimensions, composed_fields_dict, dimensions_dict
):
""" Builds the dict mapping point format id to numpy.dtype
In the dtypes, bit fields are unpacked and can be accessed directly
"""
unpacked_dtypes = {}
for fmt_id, dim_names in point_formats_dimensions.items():
composed_dims, dtype = composed_fields_dict[fmt_id], []
for dim_name in dim_names:
if dim_name in composed_dims:
dtype.extend((f.name, f.type) for f in composed_dims[dim_name])
else:
dtype.append(dimensions_dict[dim_name])
unpacked_dtypes[fmt_id] = np.dtype(dtype)
return unpacked_dtypes
|
[
"Builds",
"the",
"dict",
"mapping",
"point",
"format",
"id",
"to",
"numpy",
".",
"dtype",
"In",
"the",
"dtypes",
"bit",
"fields",
"are",
"unpacked",
"and",
"can",
"be",
"accessed",
"directly"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/dims.py#L40-L55
|
[
"def",
"_build_unpacked_point_formats_dtypes",
"(",
"point_formats_dimensions",
",",
"composed_fields_dict",
",",
"dimensions_dict",
")",
":",
"unpacked_dtypes",
"=",
"{",
"}",
"for",
"fmt_id",
",",
"dim_names",
"in",
"point_formats_dimensions",
".",
"items",
"(",
")",
":",
"composed_dims",
",",
"dtype",
"=",
"composed_fields_dict",
"[",
"fmt_id",
"]",
",",
"[",
"]",
"for",
"dim_name",
"in",
"dim_names",
":",
"if",
"dim_name",
"in",
"composed_dims",
":",
"dtype",
".",
"extend",
"(",
"(",
"f",
".",
"name",
",",
"f",
".",
"type",
")",
"for",
"f",
"in",
"composed_dims",
"[",
"dim_name",
"]",
")",
"else",
":",
"dtype",
".",
"append",
"(",
"dimensions_dict",
"[",
"dim_name",
"]",
")",
"unpacked_dtypes",
"[",
"fmt_id",
"]",
"=",
"np",
".",
"dtype",
"(",
"dtype",
")",
"return",
"unpacked_dtypes"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
np_dtype_to_point_format
|
Tries to find a matching point format id for the input numpy dtype
To match, the input dtype has to be 100% equal to a point format dtype
so all names & dimensions types must match
Parameters:
----------
dtype : numpy.dtype
The input dtype
unpacked : bool, optional
When True, match against the unpacked point format dtypes instead of the packed ones (the default is False)
Raises
------
errors.IncompatibleDataFormat
If no compatible point format was found
Returns
-------
int
The compatible point format found
|
pylas/point/dims.py
|
def np_dtype_to_point_format(dtype, unpacked=False):
""" Tries to find a matching point format id for the input numpy dtype
To match, the input dtype has to be 100% equal to a point format dtype
so all names & dimensions types must match
Parameters:
----------
dtype : numpy.dtype
The input dtype
unpacked : bool, optional
When True, match against the unpacked point format dtypes instead of the packed ones (the default is False)
Raises
------
errors.IncompatibleDataFormat
If no compatible point format was found
Returns
-------
int
The compatible point format found
"""
all_dtypes = (
ALL_POINT_FORMATS_DTYPE if not unpacked else UNPACKED_POINT_FORMATS_DTYPES
)
for format_id, fmt_dtype in all_dtypes.items():
if fmt_dtype == dtype:
return format_id
else:
raise errors.IncompatibleDataFormat(
"Data type of array is not compatible with any point format (array dtype: {})".format(
dtype
)
)
|
def np_dtype_to_point_format(dtype, unpacked=False):
""" Tries to find a matching point format id for the input numpy dtype
To match, the input dtype has to be 100% equal to a point format dtype
so all names & dimensions types must match
Parameters:
----------
dtype : numpy.dtype
The input dtype
unpacked : bool, optional
When True, match against the unpacked point format dtypes instead of the packed ones (the default is False)
Raises
------
errors.IncompatibleDataFormat
If no compatible point format was found
Returns
-------
int
The compatible point format found
"""
all_dtypes = (
ALL_POINT_FORMATS_DTYPE if not unpacked else UNPACKED_POINT_FORMATS_DTYPES
)
for format_id, fmt_dtype in all_dtypes.items():
if fmt_dtype == dtype:
return format_id
else:
raise errors.IncompatibleDataFormat(
"Data type of array is not compatible with any point format (array dtype: {})".format(
dtype
)
)
|
[
"Tries",
"to",
"find",
"a",
"matching",
"point",
"format",
"id",
"for",
"the",
"input",
"numpy",
"dtype",
"To",
"match",
"the",
"input",
"dtype",
"has",
"to",
"be",
"100%",
"equal",
"to",
"a",
"point",
"format",
"dtype",
"so",
"all",
"names",
"&",
"dimensions",
"types",
"must",
"match"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/dims.py#L234-L268
|
[
"def",
"np_dtype_to_point_format",
"(",
"dtype",
",",
"unpacked",
"=",
"False",
")",
":",
"all_dtypes",
"=",
"(",
"ALL_POINT_FORMATS_DTYPE",
"if",
"not",
"unpacked",
"else",
"UNPACKED_POINT_FORMATS_DTYPES",
")",
"for",
"format_id",
",",
"fmt_dtype",
"in",
"all_dtypes",
".",
"items",
"(",
")",
":",
"if",
"fmt_dtype",
"==",
"dtype",
":",
"return",
"format_id",
"else",
":",
"raise",
"errors",
".",
"IncompatibleDataFormat",
"(",
"\"Data type of array is not compatible with any point format (array dtype: {})\"",
".",
"format",
"(",
"dtype",
")",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
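The raise above lives in a for/else: the else clause of a Python for loop runs only when the loop completes without being left early, and a successful match returns out of the function first, so the error fires only when no dtype compared equal. A tiny self-contained illustration of the construct (min_file_version_for_point_format below uses the same pattern):

for candidate in (1, 2, 3):
    if candidate == 2:
        break  # leaving the loop early skips the else clause
else:
    print("exhausted without a match")  # runs only if no break occurred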
|
test
|
min_file_version_for_point_format
|
Returns the minimum file version that supports the given point_format_id
|
pylas/point/dims.py
|
def min_file_version_for_point_format(point_format_id):
""" Returns the minimum file version that supports the given point_format_id
"""
for version, point_formats in sorted(VERSION_TO_POINT_FMT.items()):
if point_format_id in point_formats:
return version
else:
raise errors.PointFormatNotSupported(point_format_id)
|
def min_file_version_for_point_format(point_format_id):
""" Returns the minimum file version that supports the given point_format_id
"""
for version, point_formats in sorted(VERSION_TO_POINT_FMT.items()):
if point_format_id in point_formats:
return version
else:
raise errors.PointFormatNotSupported(point_format_id)
|
[
"Returns",
"the",
"minimum",
"file",
"version",
"that",
"supports",
"the",
"given",
"point_format_id"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/dims.py#L271-L278
|
[
"def",
"min_file_version_for_point_format",
"(",
"point_format_id",
")",
":",
"for",
"version",
",",
"point_formats",
"in",
"sorted",
"(",
"VERSION_TO_POINT_FMT",
".",
"items",
"(",
")",
")",
":",
"if",
"point_format_id",
"in",
"point_formats",
":",
"return",
"version",
"else",
":",
"raise",
"errors",
".",
"PointFormatNotSupported",
"(",
"point_format_id",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
is_point_fmt_compatible_with_version
|
Returns true if the file version supports the point_format_id
|
pylas/point/dims.py
|
def is_point_fmt_compatible_with_version(point_format_id, file_version):
""" Returns true if the file version support the point_format_id
"""
try:
return point_format_id in VERSION_TO_POINT_FMT[str(file_version)]
except KeyError:
raise errors.FileVersionNotSupported(file_version)
|
def is_point_fmt_compatible_with_version(point_format_id, file_version):
""" Returns true if the file version support the point_format_id
"""
try:
return point_format_id in VERSION_TO_POINT_FMT[str(file_version)]
except KeyError:
raise errors.FileVersionNotSupported(file_version)
|
[
"Returns",
"true",
"if",
"the",
"file",
"version",
"support",
"the",
"point_format_id"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/dims.py#L293-L299
|
[
"def",
"is_point_fmt_compatible_with_version",
"(",
"point_format_id",
",",
"file_version",
")",
":",
"try",
":",
"return",
"point_format_id",
"in",
"VERSION_TO_POINT_FMT",
"[",
"str",
"(",
"file_version",
")",
"]",
"except",
"KeyError",
":",
"raise",
"errors",
".",
"FileVersionNotSupported",
"(",
"file_version",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
VLRList.get_by_id
|
Function to get vlrs by user_id and/or record_ids.
Always returns a list even if only one vlr matches the user_id and record_id
>>> import pylas
>>> from pylas.vlrs.known import ExtraBytesVlr, WktCoordinateSystemVlr
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())
[]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
user_id: str, optional
the user id
record_ids: iterable of int, optional
The record ids of the vlr(s) you wish to get
Returns
-------
:py:class:`list`
a list of vlrs matching the user_id and record_ids
|
pylas/vlrs/vlrlist.py
|
def get_by_id(self, user_id="", record_ids=(None,)):
""" Function to get vlrs by user_id and/or record_ids.
Always returns a list even if only one vlr matches the user_id and record_id
>>> import pylas
>>> from pylas.vlrs.known import ExtraBytesVlr, WktCoordinateSystemVlr
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())
[]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
user_id: str, optional
the user id
record_ids: iterable of int, optional
The record ids of the vlr(s) you wish to get
Returns
-------
:py:class:`list`
a list of vlrs matching the user_id and record_ids
"""
if user_id != "" and record_ids != (None,):
return [
vlr
for vlr in self.vlrs
if vlr.user_id == user_id and vlr.record_id in record_ids
]
else:
return [
vlr
for vlr in self.vlrs
if vlr.user_id == user_id or vlr.record_id in record_ids
]
|
def get_by_id(self, user_id="", record_ids=(None,)):
""" Function to get vlrs by user_id and/or record_ids.
Always returns a list even if only one vlr matches the user_id and record_id
>>> import pylas
>>> from pylas.vlrs.known import ExtraBytesVlr, WktCoordinateSystemVlr
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())
[]
>>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
user_id: str, optional
the user id
record_ids: iterable of int, optional
The record ids of the vlr(s) you wish to get
Returns
-------
:py:class:`list`
a list of vlrs matching the user_id and record_ids
"""
if user_id != "" and record_ids != (None,):
return [
vlr
for vlr in self.vlrs
if vlr.user_id == user_id and vlr.record_id in record_ids
]
else:
return [
vlr
for vlr in self.vlrs
if vlr.user_id == user_id or vlr.record_id in record_ids
]
|
[
"Function",
"to",
"get",
"vlrs",
"by",
"user_id",
"and",
"/",
"or",
"record_ids",
".",
"Always",
"returns",
"a",
"list",
"even",
"if",
"only",
"one",
"vlr",
"matches",
"the",
"user_id",
"and",
"record_id"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/vlrlist.py#L95-L138
|
[
"def",
"get_by_id",
"(",
"self",
",",
"user_id",
"=",
"\"\"",
",",
"record_ids",
"=",
"(",
"None",
",",
")",
")",
":",
"if",
"user_id",
"!=",
"\"\"",
"and",
"record_ids",
"!=",
"(",
"None",
",",
")",
":",
"return",
"[",
"vlr",
"for",
"vlr",
"in",
"self",
".",
"vlrs",
"if",
"vlr",
".",
"user_id",
"==",
"user_id",
"and",
"vlr",
".",
"record_id",
"in",
"record_ids",
"]",
"else",
":",
"return",
"[",
"vlr",
"for",
"vlr",
"in",
"self",
".",
"vlrs",
"if",
"vlr",
".",
"user_id",
"==",
"user_id",
"or",
"vlr",
".",
"record_id",
"in",
"record_ids",
"]"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
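A hedged usage sketch of the filtering logic in get_by_id, using SimpleNamespace stand-ins that carry only the two attributes the filter reads (no real LAS file or pylas VLR objects involved):

from types import SimpleNamespace

# Hypothetical stand-ins, not real pylas VLRs.
vlrs = [
    SimpleNamespace(user_id="LASF_Spec", record_id=4),
    SimpleNamespace(user_id="LASF_Projection", record_id=2112),
]

# Equivalent of get_by_id(user_id="LASF_Spec", record_ids=(4,)):
# when both filters are given, both conditions must hold.
matches = [v for v in vlrs if v.user_id == "LASF_Spec" and v.record_id in (4,)]
assert [v.record_id for v in matches] == [4]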
test
|
VLRList.get
|
Returns the list of vlrs of the requested type
Always returns a list even if there is only one VLR of type vlr_type.
>>> import pylas
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get("WktCoordinateSystemVlr")
[]
>>> las.vlrs.get("WktCoordinateSystemVlr")[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get('ExtraBytesVlr')
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get('ExtraBytesVlr')[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
:py:class:`list`
a list of vlrs matching the given vlr_type
|
pylas/vlrs/vlrlist.py
|
def get(self, vlr_type):
""" Returns the list of vlrs of the requested type
Always returns a list even if there is only one VLR of type vlr_type.
>>> import pylas
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get("WktCoordinateSystemVlr")
[]
>>> las.vlrs.get("WktCoordinateSystemVlr")[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get('ExtraBytesVlr')
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get('ExtraBytesVlr')[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
:py:class:`list`
a list of vlrs matching the given vlr_type
"""
return [v for v in self.vlrs if v.__class__.__name__ == vlr_type]
|
def get(self, vlr_type):
""" Returns the list of vlrs of the requested type
Always returns a list even if there is only one VLR of type vlr_type.
>>> import pylas
>>> las = pylas.read("pylastests/extrabytes.las")
>>> las.vlrs
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get("WktCoordinateSystemVlr")
[]
>>> las.vlrs.get("WktCoordinateSystemVlr")[0]
Traceback (most recent call last):
IndexError: list index out of range
>>> las.vlrs.get('ExtraBytesVlr')
[<ExtraBytesVlr(extra bytes structs: 5)>]
>>> las.vlrs.get('ExtraBytesVlr')[0]
<ExtraBytesVlr(extra bytes structs: 5)>
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
:py:class:`list`
a list of vlrs matching the given vlr_type
"""
return [v for v in self.vlrs if v.__class__.__name__ == vlr_type]
|
[
"Returns",
"the",
"list",
"of",
"vlrs",
"of",
"the",
"requested",
"type",
"Always",
"returns",
"a",
"list",
"even",
"if",
"there",
"is",
"only",
"one",
"VLR",
"of",
"type",
"vlr_type",
"."
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/vlrlist.py#L140-L170
|
[
"def",
"get",
"(",
"self",
",",
"vlr_type",
")",
":",
"return",
"[",
"v",
"for",
"v",
"in",
"self",
".",
"vlrs",
"if",
"v",
".",
"__class__",
".",
"__name__",
"==",
"vlr_type",
"]"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
VLRList.extract
|
Returns the list of vlrs of the requested type
The difference with get is that the returned vlrs will be removed from the list
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
list
a list of vlrs matching the given vlr_type
|
pylas/vlrs/vlrlist.py
|
def extract(self, vlr_type):
""" Returns the list of vlrs of the requested type
The difference with get is that the returned vlrs will be removed from the list
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
list
a list of vlrs matching the given vlr_type
"""
kept_vlrs, extracted_vlrs = [], []
for vlr in self.vlrs:
if vlr.__class__.__name__ == vlr_type:
extracted_vlrs.append(vlr)
else:
kept_vlrs.append(vlr)
self.vlrs = kept_vlrs
return extracted_vlrs
|
def extract(self, vlr_type):
""" Returns the list of vlrs of the requested type
The difference with get is that the returned vlrs will be removed from the list
Parameters
----------
vlr_type: str
the class name of the vlr
Returns
-------
list
a list of vlrs matching the given vlr_type
"""
kept_vlrs, extracted_vlrs = [], []
for vlr in self.vlrs:
if vlr.__class__.__name__ == vlr_type:
extracted_vlrs.append(vlr)
else:
kept_vlrs.append(vlr)
self.vlrs = kept_vlrs
return extracted_vlrs
|
[
"Returns",
"the",
"list",
"of",
"vlrs",
"of",
"the",
"requested",
"type",
"The",
"difference",
"with",
"get",
"is",
"that",
"the",
"returned",
"vlrs",
"will",
"be",
"removed",
"from",
"the",
"list"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/vlrlist.py#L172-L194
|
[
"def",
"extract",
"(",
"self",
",",
"vlr_type",
")",
":",
"kept_vlrs",
",",
"extracted_vlrs",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"vlr",
"in",
"self",
".",
"vlrs",
":",
"if",
"vlr",
".",
"__class__",
".",
"__name__",
"==",
"vlr_type",
":",
"extracted_vlrs",
".",
"append",
"(",
"vlr",
")",
"else",
":",
"kept_vlrs",
".",
"append",
"(",
"vlr",
")",
"self",
".",
"vlrs",
"=",
"kept_vlrs",
"return",
"extracted_vlrs"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
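The keep/extract split in extract above is a single-pass partition of one list into two. A minimal sketch of that pattern, with made-up class names in place of real VLRs:

items = ["ExtraBytesVlr", "WktCoordinateSystemVlr", "ExtraBytesVlr"]
kept, extracted = [], []
for name in items:
    # route each element to exactly one of the two output lists
    (extracted if name == "ExtraBytesVlr" else kept).append(name)
assert extracted == ["ExtraBytesVlr", "ExtraBytesVlr"]
assert kept == ["WktCoordinateSystemVlr"]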
test
|
VLRList.read_from
|
Reads vlrs and parses them if possible from the stream
Parameters
----------
data_stream : io.BytesIO
stream to read from
num_to_read : int
number of vlrs to be read
Returns
-------
pylas.vlrs.vlrlist.VLRList
List of vlrs
|
pylas/vlrs/vlrlist.py
|
def read_from(cls, data_stream, num_to_read):
""" Reads vlrs and parse them if possible from the stream
Parameters
----------
data_stream : io.BytesIO
stream to read from
num_to_read : int
number of vlrs to be read
Returns
-------
pylas.vlrs.vlrlist.VLRList
List of vlrs
"""
vlrlist = cls()
for _ in range(num_to_read):
raw = RawVLR.read_from(data_stream)
try:
vlrlist.append(vlr_factory(raw))
except UnicodeDecodeError:
logger.error("Failed to decode VLR: {}".format(raw))
return vlrlist
|
def read_from(cls, data_stream, num_to_read):
""" Reads vlrs and parse them if possible from the stream
Parameters
----------
data_stream : io.BytesIO
stream to read from
num_to_read : int
number of vlrs to be read
Returns
-------
pylas.vlrs.vlrlist.VLRList
List of vlrs
"""
vlrlist = cls()
for _ in range(num_to_read):
raw = RawVLR.read_from(data_stream)
try:
vlrlist.append(vlr_factory(raw))
except UnicodeDecodeError:
logger.error("Failed to decode VLR: {}".format(raw))
return vlrlist
|
[
"Reads",
"vlrs",
"and",
"parse",
"them",
"if",
"possible",
"from",
"the",
"stream"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/vlrlist.py#L223-L247
|
[
"def",
"read_from",
"(",
"cls",
",",
"data_stream",
",",
"num_to_read",
")",
":",
"vlrlist",
"=",
"cls",
"(",
")",
"for",
"_",
"in",
"range",
"(",
"num_to_read",
")",
":",
"raw",
"=",
"RawVLR",
".",
"read_from",
"(",
"data_stream",
")",
"try",
":",
"vlrlist",
".",
"append",
"(",
"vlr_factory",
"(",
"raw",
")",
")",
"except",
"UnicodeDecodeError",
":",
"logger",
".",
"error",
"(",
"\"Failed to decode VLR: {}\"",
".",
"format",
"(",
"raw",
")",
")",
"return",
"vlrlist"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
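read_from deliberately logs and skips undecodable records instead of aborting the whole read. A self-contained sketch of that "parse what you can, log the rest" loop, with fabricated byte payloads standing in for RawVLR/vlr_factory (b"\xff" is not valid UTF-8, so it triggers the handler):

import logging

logger = logging.getLogger(__name__)

parsed = []
for raw in [b"ok", b"\xff broken", b"also ok"]:
    try:
        parsed.append(raw.decode())  # stands in for vlr_factory(RawVLR...)
    except UnicodeDecodeError:
        logger.error("Failed to decode VLR: %r", raw)
assert parsed == ["ok", "also ok"]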
test
|
files_have_same_point_format_id
|
Returns true if all the files have the same points format id
|
pylas/utils.py
|
def files_have_same_point_format_id(las_files):
""" Returns true if all the files have the same points format id
"""
point_format_found = {las.header.point_format_id for las in las_files}
return len(point_format_found) == 1
|
def files_have_same_point_format_id(las_files):
""" Returns true if all the files have the same points format id
"""
point_format_found = {las.header.point_format_id for las in las_files}
return len(point_format_found) == 1
|
[
"Returns",
"true",
"if",
"all",
"the",
"files",
"have",
"the",
"same",
"points",
"format",
"id"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/utils.py#L6-L10
|
[
"def",
"files_have_same_point_format_id",
"(",
"las_files",
")",
":",
"point_format_found",
"=",
"{",
"las",
".",
"header",
".",
"point_format_id",
"for",
"las",
"in",
"las_files",
"}",
"return",
"len",
"(",
"point_format_found",
")",
"==",
"1"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
files_have_same_dtype
|
Returns true if all the files have the same numpy datatype
|
pylas/utils.py
|
def files_have_same_dtype(las_files):
""" Returns true if all the files have the same numpy datatype
"""
dtypes = {las.points.dtype for las in las_files}
return len(dtypes) == 1
|
def files_have_same_dtype(las_files):
""" Returns true if all the files have the same numpy datatype
"""
dtypes = {las.points.dtype for las in las_files}
return len(dtypes) == 1
|
[
"Returns",
"true",
"if",
"all",
"the",
"files",
"have",
"the",
"same",
"numpy",
"datatype"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/utils.py#L13-L17
|
[
"def",
"files_have_same_dtype",
"(",
"las_files",
")",
":",
"dtypes",
"=",
"{",
"las",
".",
"points",
".",
"dtype",
"for",
"las",
"in",
"las_files",
"}",
"return",
"len",
"(",
"dtypes",
")",
"==",
"1"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
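Both helpers above reduce to the same set-comprehension trick: collapse the values into a set and check that exactly one distinct value remains. A sketch; note the edge case that an empty input also fails the check:

def all_equal(values):
    # a set keeps one copy of each distinct value
    return len(set(values)) == 1

assert all_equal([3, 3, 3])
assert not all_equal([0, 3])
assert not all_equal([])  # empty input yields a set of size 0, not 1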
test
|
_raise_if_wrong_file_signature
|
Reads the first 4 bytes of the stream to check that it is LASF
|
pylas/lasreader.py
|
def _raise_if_wrong_file_signature(stream):
""" Reads the 4 first bytes of the stream to check that is LASF"""
file_sig = stream.read(len(headers.LAS_FILE_SIGNATURE))
if file_sig != headers.LAS_FILE_SIGNATURE:
raise errors.PylasError(
"File Signature ({}) is not {}".format(file_sig, headers.LAS_FILE_SIGNATURE)
)
|
def _raise_if_wrong_file_signature(stream):
""" Reads the 4 first bytes of the stream to check that is LASF"""
file_sig = stream.read(len(headers.LAS_FILE_SIGNATURE))
if file_sig != headers.LAS_FILE_SIGNATURE:
raise errors.PylasError(
"File Signature ({}) is not {}".format(file_sig, headers.LAS_FILE_SIGNATURE)
)
|
[
"Reads",
"the",
"4",
"first",
"bytes",
"of",
"the",
"stream",
"to",
"check",
"that",
"is",
"LASF"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L15-L21
|
[
"def",
"_raise_if_wrong_file_signature",
"(",
"stream",
")",
":",
"file_sig",
"=",
"stream",
".",
"read",
"(",
"len",
"(",
"headers",
".",
"LAS_FILE_SIGNATURE",
")",
")",
"if",
"file_sig",
"!=",
"headers",
".",
"LAS_FILE_SIGNATURE",
":",
"raise",
"errors",
".",
"PylasError",
"(",
"\"File Signature ({}) is not {}\"",
".",
"format",
"(",
"file_sig",
",",
"headers",
".",
"LAS_FILE_SIGNATURE",
")",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
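The check above is a standard magic-bytes probe: read exactly len(signature) bytes and compare. A self-contained sketch, using ValueError in place of errors.PylasError (b"LASF" is the actual LAS file signature):

import io

LAS_FILE_SIGNATURE = b"LASF"

def check_signature(stream):
    # read exactly as many bytes as the expected signature
    file_sig = stream.read(len(LAS_FILE_SIGNATURE))
    if file_sig != LAS_FILE_SIGNATURE:
        raise ValueError(
            "File Signature ({}) is not {}".format(file_sig, LAS_FILE_SIGNATURE)
        )

check_signature(io.BytesIO(b"LASF...rest of header"))  # passes silently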
test
|
LasReader.read_header
|
Reads the header of the las file and returns it
|
pylas/lasreader.py
|
def read_header(self):
""" Reads the head of the las file and returns it
"""
self.stream.seek(self.start_pos)
return headers.HeaderFactory().read_from_stream(self.stream)
|
def read_header(self):
""" Reads the head of the las file and returns it
"""
self.stream.seek(self.start_pos)
return headers.HeaderFactory().read_from_stream(self.stream)
|
[
"Reads",
"the",
"head",
"of",
"the",
"las",
"file",
"and",
"returns",
"it"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L39-L43
|
[
"def",
"read_header",
"(",
"self",
")",
":",
"self",
".",
"stream",
".",
"seek",
"(",
"self",
".",
"start_pos",
")",
"return",
"headers",
".",
"HeaderFactory",
"(",
")",
".",
"read_from_stream",
"(",
"self",
".",
"stream",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasReader.read_vlrs
|
Reads and returns the vlrs of the file
|
pylas/lasreader.py
|
def read_vlrs(self):
""" Reads and return the vlrs of the file
"""
self.stream.seek(self.start_pos + self.header.size)
return VLRList.read_from(self.stream, num_to_read=self.header.number_of_vlr)
|
def read_vlrs(self):
""" Reads and return the vlrs of the file
"""
self.stream.seek(self.start_pos + self.header.size)
return VLRList.read_from(self.stream, num_to_read=self.header.number_of_vlr)
|
[
"Reads",
"and",
"return",
"the",
"vlrs",
"of",
"the",
"file"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L45-L49
|
[
"def",
"read_vlrs",
"(",
"self",
")",
":",
"self",
".",
"stream",
".",
"seek",
"(",
"self",
".",
"start_pos",
"+",
"self",
".",
"header",
".",
"size",
")",
"return",
"VLRList",
".",
"read_from",
"(",
"self",
".",
"stream",
",",
"num_to_read",
"=",
"self",
".",
"header",
".",
"number_of_vlr",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasReader.read
|
Reads the whole las data (header, vlrs, points, etc) and returns a LasData
object
|
pylas/lasreader.py
|
def read(self):
""" Reads the whole las data (header, vlrs ,points, etc) and returns a LasData
object
"""
vlrs = self.read_vlrs()
self._warn_if_not_at_expected_pos(
self.header.offset_to_point_data, "end of vlrs", "start of points"
)
self.stream.seek(self.start_pos + self.header.offset_to_point_data)
try:
points = self._read_points(vlrs)
except (RuntimeError, errors.LazPerfNotFound) as e:
logger.error("LazPerf failed to decompress ({}), trying laszip.".format(e))
self.stream.seek(self.start_pos)
self.__init__(io.BytesIO(laszip_decompress(self.stream)))
return self.read()
if points.point_format.has_waveform_packet:
self.stream.seek(
self.start_pos + self.header.start_of_waveform_data_packet_record
)
if self.header.global_encoding.are_waveform_flag_equal():
raise errors.PylasError(
"Incoherent values for internal and external waveform flags, both are {})".format(
"set"
if self.header.global_encoding.waveform_internal
else "unset"
)
)
if self.header.global_encoding.waveform_internal:
# TODO: Find out what to do with these
_, _ = self._read_internal_waveform_packet()
elif self.header.global_encoding.waveform_external:
logger.info(
"Waveform data is in an external file, you'll have to load it yourself"
)
if self.header.version >= "1.4":
evlrs = self.read_evlrs()
return las14.LasData(
header=self.header, vlrs=vlrs, points=points, evlrs=evlrs
)
return las12.LasData(header=self.header, vlrs=vlrs, points=points)
|
def read(self):
""" Reads the whole las data (header, vlrs ,points, etc) and returns a LasData
object
"""
vlrs = self.read_vlrs()
self._warn_if_not_at_expected_pos(
self.header.offset_to_point_data, "end of vlrs", "start of points"
)
self.stream.seek(self.start_pos + self.header.offset_to_point_data)
try:
points = self._read_points(vlrs)
except (RuntimeError, errors.LazPerfNotFound) as e:
logger.error("LazPerf failed to decompress ({}), trying laszip.".format(e))
self.stream.seek(self.start_pos)
self.__init__(io.BytesIO(laszip_decompress(self.stream)))
return self.read()
if points.point_format.has_waveform_packet:
self.stream.seek(
self.start_pos + self.header.start_of_waveform_data_packet_record
)
if self.header.global_encoding.are_waveform_flag_equal():
raise errors.PylasError(
"Incoherent values for internal and external waveform flags, both are {})".format(
"set"
if self.header.global_encoding.waveform_internal
else "unset"
)
)
if self.header.global_encoding.waveform_internal:
# TODO: Find out what to do with these
_, _ = self._read_internal_waveform_packet()
elif self.header.global_encoding.waveform_external:
logger.info(
"Waveform data is in an external file, you'll have to load it yourself"
)
if self.header.version >= "1.4":
evlrs = self.read_evlrs()
return las14.LasData(
header=self.header, vlrs=vlrs, points=points, evlrs=evlrs
)
return las12.LasData(header=self.header, vlrs=vlrs, points=points)
|
[
"Reads",
"the",
"whole",
"las",
"data",
"(",
"header",
"vlrs",
"points",
"etc",
")",
"and",
"returns",
"a",
"LasData",
"object"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L51-L95
|
[
"def",
"read",
"(",
"self",
")",
":",
"vlrs",
"=",
"self",
".",
"read_vlrs",
"(",
")",
"self",
".",
"_warn_if_not_at_expected_pos",
"(",
"self",
".",
"header",
".",
"offset_to_point_data",
",",
"\"end of vlrs\"",
",",
"\"start of points\"",
")",
"self",
".",
"stream",
".",
"seek",
"(",
"self",
".",
"start_pos",
"+",
"self",
".",
"header",
".",
"offset_to_point_data",
")",
"try",
":",
"points",
"=",
"self",
".",
"_read_points",
"(",
"vlrs",
")",
"except",
"(",
"RuntimeError",
",",
"errors",
".",
"LazPerfNotFound",
")",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"LazPerf failed to decompress ({}), trying laszip.\"",
".",
"format",
"(",
"e",
")",
")",
"self",
".",
"stream",
".",
"seek",
"(",
"self",
".",
"start_pos",
")",
"self",
".",
"__init__",
"(",
"io",
".",
"BytesIO",
"(",
"laszip_decompress",
"(",
"self",
".",
"stream",
")",
")",
")",
"return",
"self",
".",
"read",
"(",
")",
"if",
"points",
".",
"point_format",
".",
"has_waveform_packet",
":",
"self",
".",
"stream",
".",
"seek",
"(",
"self",
".",
"start_pos",
"+",
"self",
".",
"header",
".",
"start_of_waveform_data_packet_record",
")",
"if",
"self",
".",
"header",
".",
"global_encoding",
".",
"are_waveform_flag_equal",
"(",
")",
":",
"raise",
"errors",
".",
"PylasError",
"(",
"\"Incoherent values for internal and external waveform flags, both are {})\"",
".",
"format",
"(",
"\"set\"",
"if",
"self",
".",
"header",
".",
"global_encoding",
".",
"waveform_internal",
"else",
"\"unset\"",
")",
")",
"if",
"self",
".",
"header",
".",
"global_encoding",
".",
"waveform_internal",
":",
"# TODO: Find out what to do with these",
"_",
",",
"_",
"=",
"self",
".",
"_read_internal_waveform_packet",
"(",
")",
"elif",
"self",
".",
"header",
".",
"global_encoding",
".",
"waveform_external",
":",
"logger",
".",
"info",
"(",
"\"Waveform data is in an external file, you'll have to load it yourself\"",
")",
"if",
"self",
".",
"header",
".",
"version",
">=",
"\"1.4\"",
":",
"evlrs",
"=",
"self",
".",
"read_evlrs",
"(",
")",
"return",
"las14",
".",
"LasData",
"(",
"header",
"=",
"self",
".",
"header",
",",
"vlrs",
"=",
"vlrs",
",",
"points",
"=",
"points",
",",
"evlrs",
"=",
"evlrs",
")",
"return",
"las12",
".",
"LasData",
"(",
"header",
"=",
"self",
".",
"header",
",",
"vlrs",
"=",
"vlrs",
",",
"points",
"=",
"points",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
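The interesting control flow in read() is the decompression fallback: on failure it rewinds the stream and retries with laszip via re-initialisation. A reduced sketch of that try/rewind/retry shape, with hypothetical primary and fallback callables instead of the real lazperf/laszip backends:

import io

def read_points(stream, primary, fallback):
    start = stream.tell()
    try:
        return primary(stream)
    except RuntimeError:
        stream.seek(start)  # rewind before handing the stream to the fallback
        return fallback(stream)

def failing_backend(stream):
    raise RuntimeError("lazperf failed")

data = io.BytesIO(b"payload")
assert read_points(data, failing_backend, lambda s: s.read()) == b"payload"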
test
|
LasReader._read_points
|
private function to handle reading of the points record parts
of the las file.
the header is needed for the point format and number of points
the vlrs are needed to get the potential laszip vlr as well as the extra bytes vlr
|
pylas/lasreader.py
|
def _read_points(self, vlrs):
""" private function to handle reading of the points record parts
of the las file.
the header is needed for the point format and number of points
the vlrs are needed to get the potential laszip vlr as well as the extra bytes vlr
"""
try:
extra_dims = vlrs.get("ExtraBytesVlr")[0].type_of_extra_dims()
except IndexError:
extra_dims = None
point_format = PointFormat(self.header.point_format_id, extra_dims=extra_dims)
if self.header.are_points_compressed:
laszip_vlr = vlrs.pop(vlrs.index("LasZipVlr"))
points = self._read_compressed_points_data(laszip_vlr, point_format)
else:
points = record.PackedPointRecord.from_stream(
self.stream, point_format, self.header.point_count
)
return points
|
def _read_points(self, vlrs):
""" private function to handle reading of the points record parts
of the las file.
the header is needed for the point format and number of points
the vlrs are needed to get the potential laszip vlr as well as the extra bytes vlr
"""
try:
extra_dims = vlrs.get("ExtraBytesVlr")[0].type_of_extra_dims()
except IndexError:
extra_dims = None
point_format = PointFormat(self.header.point_format_id, extra_dims=extra_dims)
if self.header.are_points_compressed:
laszip_vlr = vlrs.pop(vlrs.index("LasZipVlr"))
points = self._read_compressed_points_data(laszip_vlr, point_format)
else:
points = record.PackedPointRecord.from_stream(
self.stream, point_format, self.header.point_count
)
return points
|
[
"private",
"function",
"to",
"handle",
"reading",
"of",
"the",
"points",
"record",
"parts",
"of",
"the",
"las",
"file",
"."
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L97-L117
|
[
"def",
"_read_points",
"(",
"self",
",",
"vlrs",
")",
":",
"try",
":",
"extra_dims",
"=",
"vlrs",
".",
"get",
"(",
"\"ExtraBytesVlr\"",
")",
"[",
"0",
"]",
".",
"type_of_extra_dims",
"(",
")",
"except",
"IndexError",
":",
"extra_dims",
"=",
"None",
"point_format",
"=",
"PointFormat",
"(",
"self",
".",
"header",
".",
"point_format_id",
",",
"extra_dims",
"=",
"extra_dims",
")",
"if",
"self",
".",
"header",
".",
"are_points_compressed",
":",
"laszip_vlr",
"=",
"vlrs",
".",
"pop",
"(",
"vlrs",
".",
"index",
"(",
"\"LasZipVlr\"",
")",
")",
"points",
"=",
"self",
".",
"_read_compressed_points_data",
"(",
"laszip_vlr",
",",
"point_format",
")",
"else",
":",
"points",
"=",
"record",
".",
"PackedPointRecord",
".",
"from_stream",
"(",
"self",
".",
"stream",
",",
"point_format",
",",
"self",
".",
"header",
".",
"point_count",
")",
"return",
"points"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasReader._read_compressed_points_data
|
reads the compressed point record
|
pylas/lasreader.py
|
def _read_compressed_points_data(self, laszip_vlr, point_format):
""" reads the compressed point record
"""
offset_to_chunk_table = struct.unpack("<q", self.stream.read(8))[0]
size_of_point_data = offset_to_chunk_table - self.stream.tell()
if offset_to_chunk_table <= 0:
logger.warning(
"Strange offset to chunk table: {}, ignoring it..".format(
offset_to_chunk_table
)
)
size_of_point_data = -1 # Read everything
points = record.PackedPointRecord.from_compressed_buffer(
self.stream.read(size_of_point_data),
point_format,
self.header.point_count,
laszip_vlr,
)
return points
|
def _read_compressed_points_data(self, laszip_vlr, point_format):
""" reads the compressed point record
"""
offset_to_chunk_table = struct.unpack("<q", self.stream.read(8))[0]
size_of_point_data = offset_to_chunk_table - self.stream.tell()
if offset_to_chunk_table <= 0:
logger.warning(
"Strange offset to chunk table: {}, ignoring it..".format(
offset_to_chunk_table
)
)
size_of_point_data = -1 # Read everything
points = record.PackedPointRecord.from_compressed_buffer(
self.stream.read(size_of_point_data),
point_format,
self.header.point_count,
laszip_vlr,
)
return points
|
[
"reads",
"the",
"compressed",
"point",
"record"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L119-L139
|
[
"def",
"_read_compressed_points_data",
"(",
"self",
",",
"laszip_vlr",
",",
"point_format",
")",
":",
"offset_to_chunk_table",
"=",
"struct",
".",
"unpack",
"(",
"\"<q\"",
",",
"self",
".",
"stream",
".",
"read",
"(",
"8",
")",
")",
"[",
"0",
"]",
"size_of_point_data",
"=",
"offset_to_chunk_table",
"-",
"self",
".",
"stream",
".",
"tell",
"(",
")",
"if",
"offset_to_chunk_table",
"<=",
"0",
":",
"logger",
".",
"warning",
"(",
"\"Strange offset to chunk table: {}, ignoring it..\"",
".",
"format",
"(",
"offset_to_chunk_table",
")",
")",
"size_of_point_data",
"=",
"-",
"1",
"# Read everything",
"points",
"=",
"record",
".",
"PackedPointRecord",
".",
"from_compressed_buffer",
"(",
"self",
".",
"stream",
".",
"read",
"(",
"size_of_point_data",
")",
",",
"point_format",
",",
"self",
".",
"header",
".",
"point_count",
",",
"laszip_vlr",
",",
")",
"return",
"points"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
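The first thing the method above does is unpack the 8-byte chunk-table offset that LAZ streams place ahead of the point data. A sketch of just that step, on a fabricated buffer ("<q" is a little-endian signed 64-bit integer):

import io
import struct

# fabricated stream: an 8-byte offset followed by dummy payload bytes
stream = io.BytesIO(struct.pack("<q", 1024) + b"compressed bytes...")
offset_to_chunk_table = struct.unpack("<q", stream.read(8))[0]
assert offset_to_chunk_table == 1024
# everything between the current position and the chunk table is point data
size_of_point_data = offset_to_chunk_table - stream.tell()
assert size_of_point_data == 1024 - 8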
test
|
LasReader._read_internal_waveform_packet
|
reads and returns the waveform vlr header, waveform record
|
pylas/lasreader.py
|
def _read_internal_waveform_packet(self):
""" reads and returns the waveform vlr header, waveform record
"""
# This is strange, the spec says the waveform data packet is in an EVLR
# but in the 2 samples I have it's a VLR
# but also the 2 samples have a wrong user_id (LAS_Spec instead of LASF_Spec)
b = bytearray(self.stream.read(rawvlr.VLR_HEADER_SIZE))
waveform_header = rawvlr.RawVLRHeader.from_buffer(b)
waveform_record = self.stream.read()
logger.debug(
"Read: {} MBytes of waveform_record".format(len(waveform_record) / 10 ** 6)
)
return waveform_header, waveform_record
|
def _read_internal_waveform_packet(self):
""" reads and returns the waveform vlr header, waveform record
"""
# This is strange, the spec says the waveform data packet is in an EVLR
# but in the 2 samples I have it's a VLR
# but also the 2 samples have a wrong user_id (LAS_Spec instead of LASF_Spec)
b = bytearray(self.stream.read(rawvlr.VLR_HEADER_SIZE))
waveform_header = rawvlr.RawVLRHeader.from_buffer(b)
waveform_record = self.stream.read()
logger.debug(
"Read: {} MBytes of waveform_record".format(len(waveform_record) / 10 ** 6)
)
return waveform_header, waveform_record
|
[
"reads",
"and",
"returns",
"the",
"waveform",
"vlr",
"header",
"waveform",
"record"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L141-L154
|
[
"def",
"_read_internal_waveform_packet",
"(",
"self",
")",
":",
"# This is strange, the spec says, waveform data packet is in a EVLR",
"# but in the 2 samples I have its a VLR",
"# but also the 2 samples have a wrong user_id (LAS_Spec instead of LASF_Spec)",
"b",
"=",
"bytearray",
"(",
"self",
".",
"stream",
".",
"read",
"(",
"rawvlr",
".",
"VLR_HEADER_SIZE",
")",
")",
"waveform_header",
"=",
"rawvlr",
".",
"RawVLRHeader",
".",
"from_buffer",
"(",
"b",
")",
"waveform_record",
"=",
"self",
".",
"stream",
".",
"read",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Read: {} MBytes of waveform_record\"",
".",
"format",
"(",
"len",
"(",
"waveform_record",
")",
"/",
"10",
"**",
"6",
")",
")",
"return",
"waveform_header",
",",
"waveform_record"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasReader.read_evlrs
|
Reads the EVLRs of the file, will fail if the file version
does not support evlrs
|
pylas/lasreader.py
|
def read_evlrs(self):
""" Reads the EVLRs of the file, will fail if the file version
does not support evlrs
"""
self.stream.seek(self.start_pos + self.header.start_of_first_evlr)
return evlrs.EVLRList.read_from(self.stream, self.header.number_of_evlr)
|
def read_evlrs(self):
""" Reads the EVLRs of the file, will fail if the file version
does not support evlrs
"""
self.stream.seek(self.start_pos + self.header.start_of_first_evlr)
return evlrs.EVLRList.read_from(self.stream, self.header.number_of_evlr)
|
[
"Reads",
"the",
"EVLRs",
"of",
"the",
"file",
"will",
"fail",
"if",
"the",
"file",
"version",
"does",
"not",
"support",
"evlrs"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L156-L161
|
[
"def",
"read_evlrs",
"(",
"self",
")",
":",
"self",
".",
"stream",
".",
"seek",
"(",
"self",
".",
"start_pos",
"+",
"self",
".",
"header",
".",
"start_of_first_evlr",
")",
"return",
"evlrs",
".",
"EVLRList",
".",
"read_from",
"(",
"self",
".",
"stream",
",",
"self",
".",
"header",
".",
"number_of_evlr",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
LasReader._warn_if_not_at_expected_pos
|
Helper function to warn about unknown bytes found in the file
|
pylas/lasreader.py
|
def _warn_if_not_at_expected_pos(self, expected_pos, end_of, start_of):
""" Helper function to warn about unknown bytes found in the file"""
diff = expected_pos - self.stream.tell()
if diff != 0:
logger.warning(
"There are {} bytes between {} and {}".format(diff, end_of, start_of)
)
|
def _warn_if_not_at_expected_pos(self, expected_pos, end_of, start_of):
""" Helper function to warn about unknown bytes found in the file"""
diff = expected_pos - self.stream.tell()
if diff != 0:
logger.warning(
"There are {} bytes between {} and {}".format(diff, end_of, start_of)
)
|
[
"Helper",
"function",
"to",
"warn",
"about",
"unknown",
"bytes",
"found",
"in",
"the",
"file"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lasreader.py#L163-L169
|
[
"def",
"_warn_if_not_at_expected_pos",
"(",
"self",
",",
"expected_pos",
",",
"end_of",
",",
"start_of",
")",
":",
"diff",
"=",
"expected_pos",
"-",
"self",
".",
"stream",
".",
"tell",
"(",
")",
"if",
"diff",
"!=",
"0",
":",
"logger",
".",
"warning",
"(",
"\"There are {} bytes between {} and {}\"",
".",
"format",
"(",
"diff",
",",
"end_of",
",",
"start_of",
")",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
vlr_factory
|
Given a raw_vlr, tries to find its corresponding KnownVLR class
that can parse its data.
If no KnownVLR implementation is found, returns a VLR (record_data will still be bytes)
|
pylas/vlrs/known.py
|
def vlr_factory(raw_vlr):
""" Given a raw_vlr tries to find its corresponding KnownVLR class
that can parse its data.
If no KnownVLR implementation is found, returns a VLR (record_data will still be bytes)
"""
user_id = raw_vlr.header.user_id.rstrip(NULL_BYTE).decode()
known_vlrs = BaseKnownVLR.__subclasses__()
for known_vlr in known_vlrs:
if (
known_vlr.official_user_id() == user_id
and raw_vlr.header.record_id in known_vlr.official_record_ids()
):
return known_vlr.from_raw(raw_vlr)
else:
return VLR.from_raw(raw_vlr)
|
def vlr_factory(raw_vlr):
""" Given a raw_vlr tries to find its corresponding KnownVLR class
that can parse its data.
If no KnownVLR implementation is found, returns a VLR (record_data will still be bytes)
"""
user_id = raw_vlr.header.user_id.rstrip(NULL_BYTE).decode()
known_vlrs = BaseKnownVLR.__subclasses__()
for known_vlr in known_vlrs:
if (
known_vlr.official_user_id() == user_id
and raw_vlr.header.record_id in known_vlr.official_record_ids()
):
return known_vlr.from_raw(raw_vlr)
else:
return VLR.from_raw(raw_vlr)
|
[
"Given",
"a",
"raw_vlr",
"tries",
"to",
"find",
"its",
"corresponding",
"KnownVLR",
"class",
"that",
"can",
"parse",
"its",
"data",
".",
"If",
"no",
"KnownVLR",
"implementation",
"is",
"found",
"returns",
"a",
"VLR",
"(",
"record_data",
"will",
"still",
"be",
"bytes",
")"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/vlrs/known.py#L547-L561
|
[
"def",
"vlr_factory",
"(",
"raw_vlr",
")",
":",
"user_id",
"=",
"raw_vlr",
".",
"header",
".",
"user_id",
".",
"rstrip",
"(",
"NULL_BYTE",
")",
".",
"decode",
"(",
")",
"known_vlrs",
"=",
"BaseKnownVLR",
".",
"__subclasses__",
"(",
")",
"for",
"known_vlr",
"in",
"known_vlrs",
":",
"if",
"(",
"known_vlr",
".",
"official_user_id",
"(",
")",
"==",
"user_id",
"and",
"raw_vlr",
".",
"header",
".",
"record_id",
"in",
"known_vlr",
".",
"official_record_ids",
"(",
")",
")",
":",
"return",
"known_vlr",
".",
"from_raw",
"(",
"raw_vlr",
")",
"else",
":",
"return",
"VLR",
".",
"from_raw",
"(",
"raw_vlr",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
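vlr_factory relies on BaseKnownVLR.__subclasses__() as an implicit registry: any subclass defined at import time is a dispatch candidate, with no explicit registration step. A toy sketch of that mechanism (Base/KnownA/KnownB are made up; only the lookup mechanics mirror vlr_factory):

class Base:
    pass

class KnownA(Base):
    official_id = "A"

class KnownB(Base):
    official_id = "B"

def factory(record_id):
    # __subclasses__() enumerates direct subclasses at call time
    for cls in Base.__subclasses__():
        if cls.official_id == record_id:
            return cls()
    return Base()  # generic fallback, like returning a plain VLR

assert isinstance(factory("B"), KnownB)
assert type(factory("?")) is Base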
test
|
open_las
|
Opens and reads the header of the las content in the source
>>> with open_las('pylastests/simple.las') as f:
... print(f.header.point_format_id)
3
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f, closefd=False) as flas:
... print(flas.header)
<LasHeader(1.2)>
>>> f.closed
False
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f) as flas:
... las = flas.read()
>>> f.closed
True
Parameters
----------
source : str or io.BytesIO
if source is a str it must be a filename
otherwise a stream, i.e. a file object with the methods read, seek, tell
closefd: bool
Whether the stream/file object shall be closed; this only works
when using open_las in a with statement. An exception is raised if
closefd is set to False and the source is a filename
Returns
-------
pylas.lasreader.LasReader
|
pylas/lib.py
|
def open_las(source, closefd=True):
""" Opens and reads the header of the las content in the source
>>> with open_las('pylastests/simple.las') as f:
... print(f.header.point_format_id)
3
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f, closefd=False) as flas:
... print(flas.header)
<LasHeader(1.2)>
>>> f.closed
False
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f) as flas:
... las = flas.read()
>>> f.closed
True
Parameters
----------
source : str or io.BytesIO
if source is a str it must be a filename
otherwise a stream, i.e. a file object with the methods read, seek, tell
closefd: bool
Whether the stream/file object shall be closed; this only works
when using open_las in a with statement. An exception is raised if
closefd is set to False and the source is a filename
Returns
-------
pylas.lasreader.LasReader
"""
if isinstance(source, str):
stream = open(source, mode="rb")
if not closefd:
raise ValueError("Cannot use closefd with filename")
elif isinstance(source, bytes):
stream = io.BytesIO(source)
else:
stream = source
return LasReader(stream, closefd=closefd)
|
def open_las(source, closefd=True):
""" Opens and reads the header of the las content in the source
>>> with open_las('pylastests/simple.las') as f:
... print(f.header.point_format_id)
3
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f, closefd=False) as flas:
... print(flas.header)
<LasHeader(1.2)>
>>> f.closed
False
>>> f = open('pylastests/simple.las', mode='rb')
>>> with open_las(f) as flas:
... las = flas.read()
>>> f.closed
True
Parameters
----------
source : str or io.BytesIO
if source is a str it must be a filename
otherwise a stream, i.e. a file object with the methods read, seek, tell
closefd: bool
Whether the stream/file object shall be closed; this only works
when using open_las in a with statement. An exception is raised if
closefd is set to False and the source is a filename
Returns
-------
pylas.lasreader.LasReader
"""
if isinstance(source, str):
stream = open(source, mode="rb")
if not closefd:
raise ValueError("Cannot use closefd with filename")
elif isinstance(source, bytes):
stream = io.BytesIO(source)
else:
stream = source
return LasReader(stream, closefd=closefd)
|
[
"Opens",
"and",
"reads",
"the",
"header",
"of",
"the",
"las",
"content",
"in",
"the",
"source"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lib.py#L20-L66
|
[
"def",
"open_las",
"(",
"source",
",",
"closefd",
"=",
"True",
")",
":",
"if",
"isinstance",
"(",
"source",
",",
"str",
")",
":",
"stream",
"=",
"open",
"(",
"source",
",",
"mode",
"=",
"\"rb\"",
")",
"if",
"not",
"closefd",
":",
"raise",
"ValueError",
"(",
"\"Cannot use closefd with filename\"",
")",
"elif",
"isinstance",
"(",
"source",
",",
"bytes",
")",
":",
"stream",
"=",
"io",
".",
"BytesIO",
"(",
"source",
")",
"else",
":",
"stream",
"=",
"source",
"return",
"LasReader",
"(",
"stream",
",",
"closefd",
"=",
"closefd",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
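Stripped of the closefd bookkeeping, open_las is a three-way normalisation of its source argument: filenames are opened, raw bytes wrapped, anything else assumed to already be a readable binary stream. A minimal sketch of just that branch:

import io

def normalize_source(source):
    if isinstance(source, str):
        return open(source, mode="rb")
    if isinstance(source, bytes):
        return io.BytesIO(source)
    return source  # assume a stream with read/seek/tell

assert normalize_source(b"LASF").read(4) == b"LASF"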
test
|
read_las
|
Entry point for reading las data in pylas
Reads the whole file into memory.
>>> las = read_las("pylastests/simple.las")
>>> las.classification
array([1, 1, 1, ..., 1, 1, 1], dtype=uint8)
Parameters
----------
source : str or io.BytesIO
The source to read data from
closefd: bool
if True and the source is a stream, the function will close it
after it is done reading
Returns
-------
pylas.lasdatas.base.LasBase
The object you can interact with to get access to the LAS points & VLRs
|
pylas/lib.py
|
def read_las(source, closefd=True):
""" Entry point for reading las data in pylas
Reads the whole file into memory.
>>> las = read_las("pylastests/simple.las")
>>> las.classification
array([1, 1, 1, ..., 1, 1, 1], dtype=uint8)
Parameters
----------
source : str or io.BytesIO
The source to read data from
closefd: bool
if True and the source is a stream, the function will close it
after it is done reading
Returns
-------
pylas.lasdatas.base.LasBase
The object you can interact with to get access to the LAS points & VLRs
"""
with open_las(source, closefd=closefd) as reader:
return reader.read()
|
def read_las(source, closefd=True):
""" Entry point for reading las data in pylas
Reads the whole file into memory.
>>> las = read_las("pylastests/simple.las")
>>> las.classification
array([1, 1, 1, ..., 1, 1, 1], dtype=uint8)
Parameters
----------
source : str or io.BytesIO
The source to read data from
closefd: bool
if True and the source is a stream, the function will close it
after it is done reading
Returns
-------
pylas.lasdatas.base.LasBase
The object you can interact with to get access to the LAS points & VLRs
"""
with open_las(source, closefd=closefd) as reader:
return reader.read()
|
[
"Entry",
"point",
"for",
"reading",
"las",
"data",
"in",
"pylas"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lib.py#L69-L94
|
[
"def",
"read_las",
"(",
"source",
",",
"closefd",
"=",
"True",
")",
":",
"with",
"open_las",
"(",
"source",
",",
"closefd",
"=",
"closefd",
")",
"as",
"reader",
":",
"return",
"reader",
".",
"read",
"(",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
create_from_header
|
Creates a File from an existing header,
allocating the array of points according to the provided header.
The input header is copied.
Parameters
----------
header : existing header to be used to create the file
Returns
-------
pylas.lasdatas.base.LasBase
|
pylas/lib.py
|
def create_from_header(header):
""" Creates a File from an existing header,
allocating the array of points according to the provided header.
The input header is copied.
Parameters
----------
header : existing header to be used to create the file
Returns
-------
pylas.lasdatas.base.LasBase
"""
header = copy.copy(header)
header.point_count = 0
points = record.PackedPointRecord.empty(PointFormat(header.point_format_id))
if header.version >= "1.4":
return las14.LasData(header=header, points=points)
return las12.LasData(header=header, points=points)
|
def create_from_header(header):
""" Creates a File from an existing header,
allocating the array of points according to the provided header.
The input header is copied.
Parameters
----------
header : existing header to be used to create the file
Returns
-------
pylas.lasdatas.base.LasBase
"""
header = copy.copy(header)
header.point_count = 0
points = record.PackedPointRecord.empty(PointFormat(header.point_format_id))
if header.version >= "1.4":
return las14.LasData(header=header, points=points)
return las12.LasData(header=header, points=points)
|
[
"Creates",
"a",
"File",
"from",
"an",
"existing",
"header",
"allocating",
"the",
"array",
"of",
"point",
"according",
"to",
"the",
"provided",
"header",
".",
"The",
"input",
"header",
"is",
"copied",
"."
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lib.py#L104-L123
|
[
"def",
"create_from_header",
"(",
"header",
")",
":",
"header",
"=",
"copy",
".",
"copy",
"(",
"header",
")",
"header",
".",
"point_count",
"=",
"0",
"points",
"=",
"record",
".",
"PackedPointRecord",
".",
"empty",
"(",
"PointFormat",
"(",
"header",
".",
"point_format_id",
")",
")",
"if",
"header",
".",
"version",
">=",
"\"1.4\"",
":",
"return",
"las14",
".",
"LasData",
"(",
"header",
"=",
"header",
",",
"points",
"=",
"points",
")",
"return",
"las12",
".",
"LasData",
"(",
"header",
"=",
"header",
",",
"points",
"=",
"points",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
create_las
|
Function to create a new empty las data object
.. note::
If you provide both point_format and file_version
an exception will be raised if they are not compatible
>>> las = create_las(point_format_id=6,file_version="1.2")
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
If you provide only the point_format the file_version will automatically
be selected for you.
>>> las = create_las(point_format_id=0)
>>> las.header.version == '1.2'
True
>>> las = create_las(point_format_id=6)
>>> las.header.version == '1.4'
True
Parameters
----------
point_format_id: int
The point format you want the created file to have
file_version: str, optional, default=None
The las version you want the created las to have
Returns
-------
pylas.lasdatas.base.LasBase
A new las data object
|
pylas/lib.py
|
def create_las(*, point_format_id=0, file_version=None):
""" Function to create a new empty las data object
.. note::
If you provide both point_format and file_version
an exception will be raised if they are not compatible
>>> las = create_las(point_format_id=6,file_version="1.2")
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
If you provide only the point_format the file_version will automatically
be selected for you.
>>> las = create_las(point_format_id=0)
>>> las.header.version == '1.2'
True
>>> las = create_las(point_format_id=6)
>>> las.header.version == '1.4'
True
Parameters
----------
point_format_id: int
The point format you want the created file to have
file_version: str, optional, default=None
The las version you want the created las to have
Returns
-------
pylas.lasdatas.base.LasBase
A new las data object
"""
if file_version is not None:
dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)
else:
file_version = dims.min_file_version_for_point_format(point_format_id)
header = headers.HeaderFactory.new(file_version)
header.point_format_id = point_format_id
if file_version >= "1.4":
return las14.LasData(header=header)
return las12.LasData(header=header)
|
def create_las(*, point_format_id=0, file_version=None):
""" Function to create a new empty las data object
.. note::
If you provide both point_format and file_version
an exception will be raised if they are not compatible
>>> las = create_las(point_format_id=6,file_version="1.2")
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
If you provide only the point_format the file_version will automatically
be selected for you.
>>> las = create_las(point_format_id=0)
>>> las.header.version == '1.2'
True
>>> las = create_las(point_format_id=6)
>>> las.header.version == '1.4'
True
Parameters
----------
point_format_id: int
The point format you want the created file to have
file_version: str, optional, default=None
The las version you want the created las to have
Returns
-------
pylas.lasdatas.base.LasBase
A new las data object
"""
if file_version is not None:
dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)
else:
file_version = dims.min_file_version_for_point_format(point_format_id)
header = headers.HeaderFactory.new(file_version)
header.point_format_id = point_format_id
if file_version >= "1.4":
return las14.LasData(header=header)
return las12.LasData(header=header)
|
[
"Function",
"to",
"create",
"a",
"new",
"empty",
"las",
"data",
"object"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lib.py#L126-L176
|
[
"def",
"create_las",
"(",
"*",
",",
"point_format_id",
"=",
"0",
",",
"file_version",
"=",
"None",
")",
":",
"if",
"file_version",
"is",
"not",
"None",
":",
"dims",
".",
"raise_if_version_not_compatible_with_fmt",
"(",
"point_format_id",
",",
"file_version",
")",
"else",
":",
"file_version",
"=",
"dims",
".",
"min_file_version_for_point_format",
"(",
"point_format_id",
")",
"header",
"=",
"headers",
".",
"HeaderFactory",
".",
"new",
"(",
"file_version",
")",
"header",
".",
"point_format_id",
"=",
"point_format_id",
"if",
"file_version",
">=",
"\"1.4\"",
":",
"return",
"las14",
".",
"LasData",
"(",
"header",
"=",
"header",
")",
"return",
"las12",
".",
"LasData",
"(",
"header",
"=",
"header",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
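A short usage sketch mirroring the doctests above. It assumes pylas is installed; the import path follows the file shown in the record (pylas/lib.py):

from pylas.lib import create_las

las = create_las(point_format_id=6)  # file version auto-selected
assert las.header.version == "1.4"

las = create_las(point_format_id=0, file_version="1.2")
assert las.header.version == "1.2"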
test
|
convert
|
Converts a Las from one point format to another
Automatically upgrades the file version if source file version is not compatible with
the new point_format_id
convert to point format 0
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=0)
>>> las.header.point_format_id
0
>>> las.header.version
'1.2'
convert to point format 6, which needs version >= 1.4
then convert back to point format 0, version is not downgraded
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=6)
>>> las.header.point_format_id
6
>>> las.header.version
'1.4'
>>> las = convert(las, point_format_id=0)
>>> las.header.version
'1.4'
an exception is raised if the requested point format is not compatible
with the file version
>>> las = read_las('pylastests/simple.las')
>>> convert(las, point_format_id=6, file_version='1.2')
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
Parameters
----------
source_las : pylas.lasdatas.base.LasBase
The source data to be converted
point_format_id : int, optional
The new point format id (the default is None, which won't change the source format id)
file_version : str, optional,
The new file version. None by default which means that the file_version
may be upgraded for compatibility with the new point_format. The file version will not
be downgraded.
Returns
-------
pylas.lasdatas.base.LasBase
|
pylas/lib.py
|
def convert(source_las, *, point_format_id=None, file_version=None):
""" Converts a Las from one point format to another
Automatically upgrades the file version if source file version is not compatible with
the new point_format_id
convert to point format 0
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=0)
>>> las.header.point_format_id
0
>>> las.header.version
'1.2'
convert to point format 6, which needs version >= 1.4
then convert back to point format 0, version is not downgraded
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=6)
>>> las.header.point_format_id
6
>>> las.header.version
'1.4'
>>> las = convert(las, point_format_id=0)
>>> las.header.version
'1.4'
an exception is raised if the requested point format is not compatible
with the file version
>>> las = read_las('pylastests/simple.las')
>>> convert(las, point_format_id=6, file_version='1.2')
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
Parameters
----------
source_las : pylas.lasdatas.base.LasBase
The source data to be converted
point_format_id : int, optional
The new point format id (the default is None, which won't change the source format id)
file_version : str, optional,
The new file version. None by default which means that the file_version
may be upgraded for compatibility with the new point_format. The file version will not
be downgraded.
Returns
-------
pylas.lasdatas.base.LasBase
"""
if point_format_id is None:
point_format_id = source_las.points_data.point_format.id
if file_version is None:
file_version = max(
source_las.header.version,
dims.min_file_version_for_point_format(point_format_id),
)
else:
file_version = str(file_version)
dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)
header = headers.HeaderFactory.convert_header(source_las.header, file_version)
header.point_format_id = point_format_id
point_format = PointFormat(
point_format_id, source_las.points_data.point_format.extra_dims
)
points = record.PackedPointRecord.from_point_record(
source_las.points_data, point_format
)
try:
evlrs = source_las.evlrs
except ValueError:
evlrs = []
if file_version >= "1.4":
las = las14.LasData(
header=header, vlrs=source_las.vlrs, points=points, evlrs=evlrs
)
else:
if evlrs:
logger.warning(
"The source contained {} EVLRs,"
" they will be lost as version {} doest not support them".format(
len(evlrs), file_version
)
)
las = las12.LasData(header=header, vlrs=source_las.vlrs, points=points)
return las
|
def convert(source_las, *, point_format_id=None, file_version=None):
""" Converts a Las from one point format to another
Automatically upgrades the file version if source file version is not compatible with
the new point_format_id
convert to point format 0
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=0)
>>> las.header.point_format_id
0
>>> las.header.version
'1.2'
convert to point format 6, which needs version >= 1.4
then convert back to point format 0, version is not downgraded
>>> las = read_las('pylastests/simple.las')
>>> las.header.version
'1.2'
>>> las = convert(las, point_format_id=6)
>>> las.header.point_format_id
6
>>> las.header.version
'1.4'
>>> las = convert(las, point_format_id=0)
>>> las.header.version
'1.4'
an exception is raised if the requested point format is not compatible
with the file version
>>> las = read_las('pylastests/simple.las')
>>> convert(las, point_format_id=6, file_version='1.2')
Traceback (most recent call last):
...
pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2
Parameters
----------
source_las : pylas.lasdatas.base.LasBase
The source data to be converted
point_format_id : int, optional
The new point format id (the default is None, which won't change the source format id)
file_version : str, optional,
The new file version. None by default which means that the file_version
may be upgraded for compatibility with the new point_format. The file version will not
be downgraded.
Returns
-------
pylas.lasdatas.base.LasBase
"""
if point_format_id is None:
point_format_id = source_las.points_data.point_format.id
if file_version is None:
file_version = max(
source_las.header.version,
dims.min_file_version_for_point_format(point_format_id),
)
else:
file_version = str(file_version)
dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)
header = headers.HeaderFactory.convert_header(source_las.header, file_version)
header.point_format_id = point_format_id
point_format = PointFormat(
point_format_id, source_las.points_data.point_format.extra_dims
)
points = record.PackedPointRecord.from_point_record(
source_las.points_data, point_format
)
try:
evlrs = source_las.evlrs
except ValueError:
evlrs = []
if file_version >= "1.4":
las = las14.LasData(
header=header, vlrs=source_las.vlrs, points=points, evlrs=evlrs
)
else:
if evlrs:
logger.warning(
"The source contained {} EVLRs,"
" they will be lost as version {} doest not support them".format(
len(evlrs), file_version
)
)
las = las12.LasData(header=header, vlrs=source_las.vlrs, points=points)
return las
|
[
"Converts",
"a",
"Las",
"from",
"one",
"point",
"format",
"to",
"another",
"Automatically",
"upgrades",
"the",
"file",
"version",
"if",
"source",
"file",
"version",
"is",
"not",
"compatible",
"with",
"the",
"new",
"point_format_id"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lib.py#L179-L279
|
[
"def",
"convert",
"(",
"source_las",
",",
"*",
",",
"point_format_id",
"=",
"None",
",",
"file_version",
"=",
"None",
")",
":",
"if",
"point_format_id",
"is",
"None",
":",
"point_format_id",
"=",
"source_las",
".",
"points_data",
".",
"point_format",
".",
"id",
"if",
"file_version",
"is",
"None",
":",
"file_version",
"=",
"max",
"(",
"source_las",
".",
"header",
".",
"version",
",",
"dims",
".",
"min_file_version_for_point_format",
"(",
"point_format_id",
")",
",",
")",
"else",
":",
"file_version",
"=",
"str",
"(",
"file_version",
")",
"dims",
".",
"raise_if_version_not_compatible_with_fmt",
"(",
"point_format_id",
",",
"file_version",
")",
"header",
"=",
"headers",
".",
"HeaderFactory",
".",
"convert_header",
"(",
"source_las",
".",
"header",
",",
"file_version",
")",
"header",
".",
"point_format_id",
"=",
"point_format_id",
"point_format",
"=",
"PointFormat",
"(",
"point_format_id",
",",
"source_las",
".",
"points_data",
".",
"point_format",
".",
"extra_dims",
")",
"points",
"=",
"record",
".",
"PackedPointRecord",
".",
"from_point_record",
"(",
"source_las",
".",
"points_data",
",",
"point_format",
")",
"try",
":",
"evlrs",
"=",
"source_las",
".",
"evlrs",
"except",
"ValueError",
":",
"evlrs",
"=",
"[",
"]",
"if",
"file_version",
">=",
"\"1.4\"",
":",
"las",
"=",
"las14",
".",
"LasData",
"(",
"header",
"=",
"header",
",",
"vlrs",
"=",
"source_las",
".",
"vlrs",
",",
"points",
"=",
"points",
",",
"evlrs",
"=",
"evlrs",
")",
"else",
":",
"if",
"evlrs",
":",
"logger",
".",
"warning",
"(",
"\"The source contained {} EVLRs,\"",
"\" they will be lost as version {} doest not support them\"",
".",
"format",
"(",
"len",
"(",
"evlrs",
")",
",",
"file_version",
")",
")",
"las",
"=",
"las12",
".",
"LasData",
"(",
"header",
"=",
"header",
",",
"vlrs",
"=",
"source_las",
".",
"vlrs",
",",
"points",
"=",
"points",
")",
"return",
"las"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
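The "upgrade but never downgrade" rule in convert comes from a plain max() over version strings. A sketch of that selection step, with the caveat that lexicographic string comparison is only safe while minor versions stay single-digit ("1.0" through "1.4"):

def pick_version(current, required):
    # max() compares the strings lexicographically
    return max(current, required)

assert pick_version("1.2", "1.4") == "1.4"  # upgraded when needed
assert pick_version("1.4", "1.2") == "1.4"  # never downgraded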
test
|
merge_las
|
Merges multiple las files into one
merged = merge_las(las_1, las_2)
merged = merge_las([las_1, las_2, las_3])
Parameters
----------
las_files: Iterable of LasData or LasData
Returns
-------
pylas.lasdatas.base.LasBase
The result of the merging
|
pylas/lib.py
|
def merge_las(*las_files):
""" Merges multiple las files into one
merged = merge_las(las_1, las_2)
merged = merge_las([las_1, las_2, las_3])
Parameters
----------
las_files: Iterable of LasData or LasData
Returns
-------
pylas.lasdatas.base.LasBase
The result of the merging
"""
if len(las_files) == 1:
las_files = las_files[0]
if not las_files:
raise ValueError("No files to merge")
if not utils.files_have_same_dtype(las_files):
raise ValueError("All files must have the same point format")
header = las_files[0].header
num_pts_merged = sum(len(las.points) for las in las_files)
# scaled x, y, z have to be set manually
# to be sure to have a good offset in the header
merged = create_from_header(header)
    # TODO extra dimensions should be managed better here
for dim_name, dim_type in las_files[0].points_data.point_format.extra_dims:
merged.add_extra_dim(dim_name, dim_type)
merged.points = np.zeros(num_pts_merged, merged.points.dtype)
merged_x = np.zeros(num_pts_merged, np.float64)
merged_y = np.zeros(num_pts_merged, np.float64)
merged_z = np.zeros(num_pts_merged, np.float64)
offset = 0
for i, las in enumerate(las_files, start=1):
slc = slice(offset, offset + len(las.points))
merged.points[slc] = las.points
merged_x[slc] = las.x
merged_y[slc] = las.y
merged_z[slc] = las.z
merged['point_source_id'][slc] = i
offset += len(las.points)
merged.x = merged_x
merged.y = merged_y
merged.z = merged_z
return merged
|
def merge_las(*las_files):
""" Merges multiple las files into one
merged = merge_las(las_1, las_2)
merged = merge_las([las_1, las_2, las_3])
Parameters
----------
las_files: Iterable of LasData or LasData
Returns
-------
pylas.lasdatas.base.LasBase
The result of the merging
"""
if len(las_files) == 1:
las_files = las_files[0]
if not las_files:
raise ValueError("No files to merge")
if not utils.files_have_same_dtype(las_files):
raise ValueError("All files must have the same point format")
header = las_files[0].header
num_pts_merged = sum(len(las.points) for las in las_files)
# scaled x, y, z have to be set manually
# to be sure to have a good offset in the header
merged = create_from_header(header)
    # TODO extra dimensions should be managed better here
for dim_name, dim_type in las_files[0].points_data.point_format.extra_dims:
merged.add_extra_dim(dim_name, dim_type)
merged.points = np.zeros(num_pts_merged, merged.points.dtype)
merged_x = np.zeros(num_pts_merged, np.float64)
merged_y = np.zeros(num_pts_merged, np.float64)
merged_z = np.zeros(num_pts_merged, np.float64)
offset = 0
for i, las in enumerate(las_files, start=1):
slc = slice(offset, offset + len(las.points))
merged.points[slc] = las.points
merged_x[slc] = las.x
merged_y[slc] = las.y
merged_z[slc] = las.z
merged['point_source_id'][slc] = i
offset += len(las.points)
merged.x = merged_x
merged.y = merged_y
merged.z = merged_z
return merged
|
[
"Merges",
"multiple",
"las",
"files",
"into",
"one"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lib.py#L282-L337
|
[
"def",
"merge_las",
"(",
"*",
"las_files",
")",
":",
"if",
"len",
"(",
"las_files",
")",
"==",
"1",
":",
"las_files",
"=",
"las_files",
"[",
"0",
"]",
"if",
"not",
"las_files",
":",
"raise",
"ValueError",
"(",
"\"No files to merge\"",
")",
"if",
"not",
"utils",
".",
"files_have_same_dtype",
"(",
"las_files",
")",
":",
"raise",
"ValueError",
"(",
"\"All files must have the same point format\"",
")",
"header",
"=",
"las_files",
"[",
"0",
"]",
".",
"header",
"num_pts_merged",
"=",
"sum",
"(",
"len",
"(",
"las",
".",
"points",
")",
"for",
"las",
"in",
"las_files",
")",
"# scaled x, y, z have to be set manually",
"# to be sure to have a good offset in the header",
"merged",
"=",
"create_from_header",
"(",
"header",
")",
"# TODO extra dimensions should be manged better here",
"for",
"dim_name",
",",
"dim_type",
"in",
"las_files",
"[",
"0",
"]",
".",
"points_data",
".",
"point_format",
".",
"extra_dims",
":",
"merged",
".",
"add_extra_dim",
"(",
"dim_name",
",",
"dim_type",
")",
"merged",
".",
"points",
"=",
"np",
".",
"zeros",
"(",
"num_pts_merged",
",",
"merged",
".",
"points",
".",
"dtype",
")",
"merged_x",
"=",
"np",
".",
"zeros",
"(",
"num_pts_merged",
",",
"np",
".",
"float64",
")",
"merged_y",
"=",
"np",
".",
"zeros",
"(",
"num_pts_merged",
",",
"np",
".",
"float64",
")",
"merged_z",
"=",
"np",
".",
"zeros",
"(",
"num_pts_merged",
",",
"np",
".",
"float64",
")",
"offset",
"=",
"0",
"for",
"i",
",",
"las",
"in",
"enumerate",
"(",
"las_files",
",",
"start",
"=",
"1",
")",
":",
"slc",
"=",
"slice",
"(",
"offset",
",",
"offset",
"+",
"len",
"(",
"las",
".",
"points",
")",
")",
"merged",
".",
"points",
"[",
"slc",
"]",
"=",
"las",
".",
"points",
"merged_x",
"[",
"slc",
"]",
"=",
"las",
".",
"x",
"merged_y",
"[",
"slc",
"]",
"=",
"las",
".",
"y",
"merged_z",
"[",
"slc",
"]",
"=",
"las",
".",
"z",
"merged",
"[",
"'point_source_id'",
"]",
"[",
"slc",
"]",
"=",
"i",
"offset",
"+=",
"len",
"(",
"las",
".",
"points",
")",
"merged",
".",
"x",
"=",
"merged_x",
"merged",
".",
"y",
"=",
"merged_y",
"merged",
".",
"z",
"=",
"merged_z",
"return",
"merged"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
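A usage sketch for merge_las, assuming it is re-exported at package level; the tile paths are hypothetical:

import pylas

# All inputs must share the same point format, or ValueError is raised.
tiles = [pylas.read("tile_a.las"), pylas.read("tile_b.las")]
merged = pylas.merge_las(tiles)     # pylas.merge_las(*tiles) also works
# point_source_id records which input each merged point came from:
# points from tile_a.las get 1, points from tile_b.las get 2.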
|
test
|
write_then_read_again
|
writes the given las into memory using BytesIO and
reads it again, returning the newly read file.
Mostly used for testing purposes, without having to write to disk
|
pylas/lib.py
|
def write_then_read_again(las, do_compress=False):
""" writes the given las into memory using BytesIO and
reads it again, returning the newly read file.
Mostly used for testing purposes, without having to write to disk
"""
out = io.BytesIO()
las.write(out, do_compress=do_compress)
out.seek(0)
return read_las(out)
|
def write_then_read_again(las, do_compress=False):
""" writes the given las into memory using BytesIO and
reads it again, returning the newly read file.
Mostly used for testing purposes, without having to write to disk
"""
out = io.BytesIO()
las.write(out, do_compress=do_compress)
out.seek(0)
return read_las(out)
|
[
"writes",
"the",
"given",
"las",
"into",
"memory",
"using",
"BytesIO",
"and",
"reads",
"it",
"again",
"returning",
"the",
"newly",
"read",
"file",
"."
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/lib.py#L340-L349
|
[
"def",
"write_then_read_again",
"(",
"las",
",",
"do_compress",
"=",
"False",
")",
":",
"out",
"=",
"io",
".",
"BytesIO",
"(",
")",
"las",
".",
"write",
"(",
"out",
",",
"do_compress",
"=",
"do_compress",
")",
"out",
".",
"seek",
"(",
"0",
")",
"return",
"read_las",
"(",
"out",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
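A round-trip sketch built on write_then_read_again; the input path is hypothetical, and the import path follows the record's pylas/lib.py location:

import numpy as np
import pylas
from pylas.lib import write_then_read_again

las = pylas.read("input.las")
again = write_then_read_again(las)
# The in-memory round trip should preserve the scaled coordinates.
assert np.allclose(las.x, again.x)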
|
test
|
RawHeader1_1.date
|
Returns the creation date stored in the las file
Returns
-------
datetime.date
|
pylas/headers/rawheader.py
|
def date(self):
""" Returns the creation date stored in the las file
Returns
-------
datetime.date
"""
try:
return datetime.date(self.creation_year, 1, 1) + datetime.timedelta(
self.creation_day_of_year - 1
)
except ValueError:
return None
|
def date(self):
""" Returns the creation date stored in the las file
Returns
-------
datetime.date
"""
try:
return datetime.date(self.creation_year, 1, 1) + datetime.timedelta(
self.creation_day_of_year - 1
)
except ValueError:
return None
|
[
"Returns",
"the",
"creation",
"date",
"stored",
"in",
"the",
"las",
"file"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L143-L156
|
[
"def",
"date",
"(",
"self",
")",
":",
"try",
":",
"return",
"datetime",
".",
"date",
"(",
"self",
".",
"creation_year",
",",
"1",
",",
"1",
")",
"+",
"datetime",
".",
"timedelta",
"(",
"self",
".",
"creation_day_of_year",
"-",
"1",
")",
"except",
"ValueError",
":",
"return",
"None"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
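The getter's day-of-year arithmetic in isolation, as a self-contained sketch:

import datetime

creation_year, creation_day_of_year = 2018, 60
date = (datetime.date(creation_year, 1, 1)
        + datetime.timedelta(creation_day_of_year - 1))
print(date)  # 2018-03-01, day 60 of a non-leap year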
|
test
|
RawHeader1_1.date
|
Sets the date of file creation from a python date object
|
pylas/headers/rawheader.py
|
def date(self, date):
""" Returns the date of file creation as a python date object
"""
self.creation_year = date.year
self.creation_day_of_year = date.timetuple().tm_yday
|
def date(self, date):
""" Returns the date of file creation as a python date object
"""
self.creation_year = date.year
self.creation_day_of_year = date.timetuple().tm_yday
|
[
"Returns",
"the",
"date",
"of",
"file",
"creation",
"as",
"a",
"python",
"date",
"object"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L159-L163
|
[
"def",
"date",
"(",
"self",
",",
"date",
")",
":",
"self",
".",
"creation_year",
"=",
"date",
".",
"year",
"self",
".",
"creation_day_of_year",
"=",
"date",
".",
"timetuple",
"(",
")",
".",
"tm_yday"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
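And the setter's inverse mapping, again self-contained:

import datetime

d = datetime.date(2018, 3, 1)
print(d.year, d.timetuple().tm_yday)  # 2018 60, matching the getter above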
|
test
|
RawHeader1_1.mins
|
Returns the minimum values of x, y, z as a numpy array
|
pylas/headers/rawheader.py
|
def mins(self):
""" Returns de minimum values of x, y, z as a numpy array
"""
return np.array([self.x_min, self.y_min, self.z_min])
|
def mins(self):
""" Returns de minimum values of x, y, z as a numpy array
"""
return np.array([self.x_min, self.y_min, self.z_min])
|
[
"Returns",
"de",
"minimum",
"values",
"of",
"x",
"y",
"z",
"as",
"a",
"numpy",
"array"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L200-L203
|
[
"def",
"mins",
"(",
"self",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"x_min",
",",
"self",
".",
"y_min",
",",
"self",
".",
"z_min",
"]",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
RawHeader1_1.mins
|
Sets the minimum values of x, y, z as a numpy array
|
pylas/headers/rawheader.py
|
def mins(self, value):
""" Sets de minimum values of x, y, z as a numpy array
"""
self.x_min, self.y_min, self.z_min = value
|
def mins(self, value):
""" Sets de minimum values of x, y, z as a numpy array
"""
self.x_min, self.y_min, self.z_min = value
|
[
"Sets",
"de",
"minimum",
"values",
"of",
"x",
"y",
"z",
"as",
"a",
"numpy",
"array"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L206-L209
|
[
"def",
"mins",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"x_min",
",",
"self",
".",
"y_min",
",",
"self",
".",
"z_min",
"=",
"value"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
RawHeader1_1.maxs
|
Returns the maximum values of x, y, z as a numpy array
|
pylas/headers/rawheader.py
|
def maxs(self):
""" Returns de maximum values of x, y, z as a numpy array
"""
return np.array([self.x_max, self.y_max, self.z_max])
|
def maxs(self):
""" Returns de maximum values of x, y, z as a numpy array
"""
return np.array([self.x_max, self.y_max, self.z_max])
|
[
"Returns",
"de",
"maximum",
"values",
"of",
"x",
"y",
"z",
"as",
"a",
"numpy",
"array"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L212-L215
|
[
"def",
"maxs",
"(",
"self",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"x_max",
",",
"self",
".",
"y_max",
",",
"self",
".",
"z_max",
"]",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
RawHeader1_1.maxs
|
Sets the maximum values of x, y, z as a numpy array
|
pylas/headers/rawheader.py
|
def maxs(self, value):
""" Sets de maximum values of x, y, z as a numpy array
"""
self.x_max, self.y_max, self.z_max = value
|
def maxs(self, value):
""" Sets de maximum values of x, y, z as a numpy array
"""
self.x_max, self.y_max, self.z_max = value
|
[
"Sets",
"de",
"maximum",
"values",
"of",
"x",
"y",
"z",
"as",
"a",
"numpy",
"array"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L218-L221
|
[
"def",
"maxs",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"x_max",
",",
"self",
".",
"y_max",
",",
"self",
".",
"z_max",
"=",
"value"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
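A sketch tying the four properties above together. HeaderFactory.new is taken from the doctests in the convert_header record further down:

import numpy as np
from pylas.headers.rawheader import HeaderFactory

header = HeaderFactory.new(1.2)
header.mins = [0.0, 0.0, 0.0]                 # any 3-item sequence unpacks
header.maxs = np.array([100.0, 200.0, 50.0])
print(header.maxs - header.mins)              # getters return numpy arrays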
|
test
|
RawHeader1_1.scales
|
Returns the scaling values of x, y, z as a numpy array
|
pylas/headers/rawheader.py
|
def scales(self):
""" Returns the scaling values of x, y, z as a numpy array
"""
return np.array([self.x_scale, self.y_scale, self.z_scale])
|
def scales(self):
""" Returns the scaling values of x, y, z as a numpy array
"""
return np.array([self.x_scale, self.y_scale, self.z_scale])
|
[
"Returns",
"the",
"scaling",
"values",
"of",
"x",
"y",
"z",
"as",
"a",
"numpy",
"array"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L224-L227
|
[
"def",
"scales",
"(",
"self",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"x_scale",
",",
"self",
".",
"y_scale",
",",
"self",
".",
"z_scale",
"]",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
RawHeader1_1.offsets
|
Returns the offset values of x, y, z as a numpy array
|
pylas/headers/rawheader.py
|
def offsets(self):
""" Returns the offsets values of x, y, z as a numpy array
"""
return np.array([self.x_offset, self.y_offset, self.z_offset])
|
def offsets(self):
""" Returns the offsets values of x, y, z as a numpy array
"""
return np.array([self.x_offset, self.y_offset, self.z_offset])
|
[
"Returns",
"the",
"offsets",
"values",
"of",
"x",
"y",
"z",
"as",
"a",
"numpy",
"array"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L234-L237
|
[
"def",
"offsets",
"(",
"self",
")",
":",
"return",
"np",
".",
"array",
"(",
"[",
"self",
".",
"x_offset",
",",
"self",
".",
"y_offset",
",",
"self",
".",
"z_offset",
"]",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
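In the LAS format, stored integer coordinates map to real-world values as raw * scale + offset; the two array properties make that a one-liner. A sketch, assuming scales and offsets also have setters symmetric to mins/maxs:

import numpy as np
from pylas.headers.rawheader import HeaderFactory

header = HeaderFactory.new(1.2)
header.scales = [0.001, 0.001, 0.001]         # assumed symmetric setters
header.offsets = [600000.0, 4500000.0, 0.0]
raw_xyz = np.array([123456, 789012, 3456])
print(raw_xyz * header.scales + header.offsets)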
|
test
|
HeaderFactory.header_class_for_version
|
>>> HeaderFactory.header_class_for_version(2.0)
Traceback (most recent call last):
...
pylas.errors.FileVersionNotSupported: 2.0
>>> HeaderFactory.header_class_for_version(1.2)
<class 'pylas.headers.rawheader.RawHeader1_2'>
>>> header_class = HeaderFactory.header_class_for_version(1.4)
>>> header_class()
<LasHeader(1.4)>
|
pylas/headers/rawheader.py
|
def header_class_for_version(cls, version):
"""
>>> HeaderFactory.header_class_for_version(2.0)
Traceback (most recent call last):
...
pylas.errors.FileVersionNotSupported: 2.0
>>> HeaderFactory.header_class_for_version(1.2)
<class 'pylas.headers.rawheader.RawHeader1_2'>
>>> header_class = HeaderFactory.header_class_for_version(1.4)
>>> header_class()
<LasHeader(1.4)>
"""
try:
return cls._version_to_header[str(version)]
except KeyError:
raise errors.FileVersionNotSupported(version)
|
def header_class_for_version(cls, version):
"""
>>> HeaderFactory.header_class_for_version(2.0)
Traceback (most recent call last):
...
pylas.errors.FileVersionNotSupported: 2.0
>>> HeaderFactory.header_class_for_version(1.2)
<class 'pylas.headers.rawheader.RawHeader1_2'>
>>> header_class = HeaderFactory.header_class_for_version(1.4)
>>> header_class()
<LasHeader(1.4)>
"""
try:
return cls._version_to_header[str(version)]
except KeyError:
raise errors.FileVersionNotSupported(version)
|
[
">>>",
"HeaderFactory",
".",
"header_class_for_version",
"(",
"2",
".",
"0",
")",
"Traceback",
"(",
"most",
"recent",
"call",
"last",
")",
":",
"...",
"pylas",
".",
"errors",
".",
"FileVersionNotSupported",
":",
"2",
".",
"0"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L300-L318
|
[
"def",
"header_class_for_version",
"(",
"cls",
",",
"version",
")",
":",
"try",
":",
"return",
"cls",
".",
"_version_to_header",
"[",
"str",
"(",
"version",
")",
"]",
"except",
"KeyError",
":",
"raise",
"errors",
".",
"FileVersionNotSupported",
"(",
"version",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
|
test
|
HeaderFactory.peek_file_version
|
seeks to the position of the las version header fields
in the stream and returns it as a str
Parameters
----------
stream : io.BytesIO
Returns
-------
str
file version read from the stream
|
pylas/headers/rawheader.py
|
def peek_file_version(cls, stream):
""" seeks to the position of the las version header fields
in the stream and returns it as a str
Parameters
----------
    stream : io.BytesIO
Returns
-------
str
file version read from the stream
"""
old_pos = stream.tell()
stream.seek(cls._offset_to_major_version)
major = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
minor = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
stream.seek(old_pos)
return "{}.{}".format(major, minor)
|
def peek_file_version(cls, stream):
""" seeks to the position of the las version header fields
in the stream and returns it as a str
Parameters
----------
    stream : io.BytesIO
Returns
-------
str
file version read from the stream
"""
old_pos = stream.tell()
stream.seek(cls._offset_to_major_version)
major = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
minor = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
stream.seek(old_pos)
return "{}.{}".format(major, minor)
|
[
"seeks",
"to",
"the",
"position",
"of",
"the",
"las",
"version",
"header",
"fields",
"in",
"the",
"stream",
"and",
"returns",
"it",
"as",
"a",
"str"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L353-L372
|
[
"def",
"peek_file_version",
"(",
"cls",
",",
"stream",
")",
":",
"old_pos",
"=",
"stream",
".",
"tell",
"(",
")",
"stream",
".",
"seek",
"(",
"cls",
".",
"_offset_to_major_version",
")",
"major",
"=",
"int",
".",
"from_bytes",
"(",
"stream",
".",
"read",
"(",
"ctypes",
".",
"sizeof",
"(",
"ctypes",
".",
"c_uint8",
")",
")",
",",
"\"little\"",
")",
"minor",
"=",
"int",
".",
"from_bytes",
"(",
"stream",
".",
"read",
"(",
"ctypes",
".",
"sizeof",
"(",
"ctypes",
".",
"c_uint8",
")",
")",
",",
"\"little\"",
")",
"stream",
".",
"seek",
"(",
"old_pos",
")",
"return",
"\"{}.{}\"",
".",
"format",
"(",
"major",
",",
"minor",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
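A usage sketch for peek_file_version; the path is hypothetical:

import io
from pylas.headers.rawheader import HeaderFactory

with open("input.las", "rb") as f:
    stream = io.BytesIO(f.read())
version = HeaderFactory.peek_file_version(stream)
assert stream.tell() == 0     # the stream position is restored
print(version)                # e.g. "1.2"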
|
test
|
HeaderFactory.convert_header
|
Converts a header to another version
Parameters
----------
old_header: the old header instance
new_version: float or str
Returns
-------
The converted header
>>> old_header = HeaderFactory.new(1.2)
>>> HeaderFactory.convert_header(old_header, 1.4)
<LasHeader(1.4)>
>>> old_header = HeaderFactory.new('1.4')
>>> HeaderFactory.convert_header(old_header, '1.2')
<LasHeader(1.2)>
|
pylas/headers/rawheader.py
|
def convert_header(cls, old_header, new_version):
""" Converts a header to a another version
Parameters
----------
old_header: the old header instance
new_version: float or str
Returns
-------
The converted header
>>> old_header = HeaderFactory.new(1.2)
>>> HeaderFactory.convert_header(old_header, 1.4)
<LasHeader(1.4)>
>>> old_header = HeaderFactory.new('1.4')
>>> HeaderFactory.convert_header(old_header, '1.2')
<LasHeader(1.2)>
"""
new_header_class = cls.header_class_for_version(new_version)
b = bytearray(old_header)
b += b"\x00" * (ctypes.sizeof(new_header_class) - len(b))
new_header = new_header_class.from_buffer(b)
new_header.version = str(new_version)
return new_header
|
def convert_header(cls, old_header, new_version):
""" Converts a header to a another version
Parameters
----------
old_header: the old header instance
new_version: float or str
Returns
-------
The converted header
>>> old_header = HeaderFactory.new(1.2)
>>> HeaderFactory.convert_header(old_header, 1.4)
<LasHeader(1.4)>
>>> old_header = HeaderFactory.new('1.4')
>>> HeaderFactory.convert_header(old_header, '1.2')
<LasHeader(1.2)>
"""
new_header_class = cls.header_class_for_version(new_version)
b = bytearray(old_header)
b += b"\x00" * (ctypes.sizeof(new_header_class) - len(b))
new_header = new_header_class.from_buffer(b)
new_header.version = str(new_version)
return new_header
|
[
"Converts",
"a",
"header",
"to",
"a",
"another",
"version"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/headers/rawheader.py#L375-L404
|
[
"def",
"convert_header",
"(",
"cls",
",",
"old_header",
",",
"new_version",
")",
":",
"new_header_class",
"=",
"cls",
".",
"header_class_for_version",
"(",
"new_version",
")",
"b",
"=",
"bytearray",
"(",
"old_header",
")",
"b",
"+=",
"b\"\\x00\"",
"*",
"(",
"ctypes",
".",
"sizeof",
"(",
"new_header_class",
")",
"-",
"len",
"(",
"b",
")",
")",
"new_header",
"=",
"new_header_class",
".",
"from_buffer",
"(",
"b",
")",
"new_header",
".",
"version",
"=",
"str",
"(",
"new_version",
")",
"return",
"new_header"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
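The zero-padding step is the key move: bytearray(old_header) copies the struct's bytes, and the buffer is grown so from_buffer can reinterpret it under the (possibly larger) target layout. The same trick in isolation, with hypothetical structs:

import ctypes

class Small(ctypes.Structure):
    _fields_ = [("a", ctypes.c_uint32)]

class Big(ctypes.Structure):
    _fields_ = [("a", ctypes.c_uint32), ("b", ctypes.c_uint64)]

buf = bytearray(Small(a=7))               # copy the smaller struct's bytes
buf += b"\x00" * (ctypes.sizeof(Big) - len(buf))
big = Big.from_buffer(buf)                # shared fields survive,
assert big.a == 7 and big.b == 0          # new fields read as zero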
|
test
|
unpack
|
Unpack sub field using its mask
Parameters
----------
source_array : numpy.ndarray
The source array
mask : int (e.g. 0b00001111)
Mask of the sub field to be extracted from the source array
Returns
-------
numpy.ndarray
The sub field array
|
pylas/point/packing.py
|
def unpack(source_array, mask, dtype=np.uint8):
""" Unpack sub field using its mask
    Parameters
----------
source_array : numpy.ndarray
The source array
    mask : int (e.g. 0b00001111)
Mask of the sub field to be extracted from the source array
Returns
-------
numpy.ndarray
The sub field array
"""
lsb = least_significant_bit(mask)
return ((source_array & mask) >> lsb).astype(dtype)
|
def unpack(source_array, mask, dtype=np.uint8):
""" Unpack sub field using its mask
    Parameters
----------
source_array : numpy.ndarray
The source array
    mask : int (e.g. 0b00001111)
Mask of the sub field to be extracted from the source array
Returns
-------
numpy.ndarray
The sub field array
"""
lsb = least_significant_bit(mask)
return ((source_array & mask) >> lsb).astype(dtype)
|
[
"Unpack",
"sub",
"field",
"using",
"its",
"mask"
] |
tmontaigu/pylas
|
python
|
https://github.com/tmontaigu/pylas/blob/8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06/pylas/point/packing.py#L12-L27
|
[
"def",
"unpack",
"(",
"source_array",
",",
"mask",
",",
"dtype",
"=",
"np",
".",
"uint8",
")",
":",
"lsb",
"=",
"least_significant_bit",
"(",
"mask",
")",
"return",
"(",
"(",
"source_array",
"&",
"mask",
")",
">>",
"lsb",
")",
".",
"astype",
"(",
"dtype",
")"
] |
8335a1a7d7677f0e4bc391bb6fa3c75b42ed5b06
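A usage sketch for unpack, importing it from the record's own module path:

import numpy as np
from pylas.point.packing import unpack

packed = np.array([0b10110101, 0b11110000], dtype=np.uint8)
print(unpack(packed, 0b00001111))   # [5 0]   low-nibble sub field
print(unpack(packed, 0b11110000))   # [11 15] high-nibble sub field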
|