Return a list of parameter objects.
def parameters(self) -> List['Parameter']: """Return a list of parameter objects.""" _lststr = self._lststr _type_to_spans = self._type_to_spans return [ Parameter(_lststr, _type_to_spans, span, 'Parameter') for span in self._subspans('Parameter')]
Return a list of parser function objects.
def parser_functions(self) -> List['ParserFunction']: """Return a list of parser function objects.""" _lststr = self._lststr _type_to_spans = self._type_to_spans return [ ParserFunction(_lststr, _type_to_spans, span, 'ParserFunction') for span in self._subspans('ParserFunction')]
Return a list of templates as template objects.
def templates(self) -> List['Template']: """Return a list of templates as template objects.""" _lststr = self._lststr _type_to_spans = self._type_to_spans return [ Template(_lststr, _type_to_spans, span, 'Template') for span in self._subspans('Template')]
Return a list of wikilink objects.
def wikilinks(self) -> List['WikiLink']: """Return a list of wikilink objects.""" _lststr = self._lststr _type_to_spans = self._type_to_spans return [ WikiLink(_lststr, _type_to_spans, span, 'WikiLink') for span in self._subspans('WikiLink')]
Return a list of comment objects.
def comments(self) -> List['Comment']: """Return a list of comment objects.""" _lststr = self._lststr _type_to_spans = self._type_to_spans return [ Comment(_lststr, _type_to_spans, span, 'Comment') for span in self._subspans('Comment')]
Return a list of found external link objects.
def external_links(self) -> List['ExternalLink']: """Return a list of found external link objects. Note: Templates adjacent to external links are considered part of the link. In reality, this depends on the contents of the template: >>> WikiText( ... 'http://example.com{{dead link}}' ...).external_links[0].url 'http://example.com{{dead link}}' >>> WikiText( ... '[http://example.com{{space template}} text]' ...).external_links[0].url 'http://example.com{{space template}}' """ external_links = [] # type: List['ExternalLink'] external_links_append = external_links.append type_to_spans = self._type_to_spans lststr = self._lststr ss, se = self._span spans = type_to_spans.setdefault('ExternalLink', []) if not spans: # All the added spans will be new. spans_append = spans.append for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow): s, e = m.span() span = [ss + s, ss + e] spans_append(span) external_links_append( ExternalLink(lststr, type_to_spans, span, 'ExternalLink')) return external_links # There are already some ExternalLink spans. Use the already existing # ones when the detected span is one of those. span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get for m in EXTERNAL_LINK_FINDITER(self._ext_link_shadow): s, e = m.span() span = s, e = [s + ss, e + ss] old_span = span_tuple_to_span_get((s, e)) if old_span is None: insort(spans, span) else: span = old_span external_links_append( ExternalLink(lststr, type_to_spans, span, 'ExternalLink')) return external_links
Return a list of sections in the current wikitext.
def sections(self) -> List['Section']: """Return a list of sections in the current wikitext. The first section will always be the lead section, even if it is an empty string. """ sections = [] # type: List['Section'] sections_append = sections.append type_to_spans = self._type_to_spans lststr = self._lststr ss, se = _span = self._span type_spans = type_to_spans.setdefault('Section', []) full_match = SECTIONS_FULLMATCH(self._shadow) section_spans = full_match.spans('section') levels = [len(eq) for eq in full_match.captures('equals')] if not type_spans: # All spans are new spans_append = type_spans.append for current_index, (current_level, (s, e)) in enumerate( zip(levels, section_spans), 1 ): # Add text of the current_section to any parent section. # Note that section 0 is not a parent for any subsection. for section_index, section_level in enumerate( levels[current_index:], current_index ): if current_level and section_level > current_level: e = section_spans[section_index][1] else: break span = [ss + s, ss + e] spans_append(span) sections_append( Section(lststr, type_to_spans, span, 'Section')) return sections # There are already some spans. Instead of appending new spans # use them when the detected span already exists. span_tuple_to_span = {(s[0], s[1]): s for s in type_spans}.get for current_index, (current_level, (s, e)) in enumerate( zip(levels, section_spans), 1 ): # Add text of the current_section to any parent section. # Note that section 0 is not a parent for any subsection. for section_index, section_level in enumerate( levels[current_index:], current_index ): if current_level and section_level > current_level: e = section_spans[section_index][1] else: break s, e = ss + s, ss + e old_span = span_tuple_to_span((s, e)) if old_span is None: span = [s, e] insort(type_spans, span) else: span = old_span sections_append(Section(lststr, type_to_spans, span, 'Section')) return sections
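A minimal usage sketch, assuming the usual wikitextparser entry point `wtp.parse` (the sample wikitext is hypothetical):

import wikitextparser as wtp

parsed = wtp.parse('lead text\n== History ==\nbody\n=== Details ===\nmore')
# The first section is always the lead; its title is None.
print([section.title for section in parsed.sections])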
Return a list of found table objects.
def tables(self) -> List['Table']: """Return a list of found table objects.""" tables = [] # type: List['Table'] tables_append = tables.append type_to_spans = self._type_to_spans lststr = self._lststr shadow = self._shadow[:] ss, se = self._span spans = type_to_spans.setdefault('Table', []) if not spans: # All the added spans will be new. m = True # type: Any while m: m = False for m in TABLE_FINDITER(shadow): ms, me = m.span() # Ignore leading whitespace using len(m[1]). span = [ss + ms + len(m[1]), ss + me] spans.append(span) tables_append(Table(lststr, type_to_spans, span, 'Table')) shadow[ms:me] = b'_' * (me - ms) return tables # There already exist some spans. Try to use the existing ones before appending new spans. span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get m = True while m: m = False for m in TABLE_FINDITER(shadow): ms, me = m.span() # Ignore leading whitespace using len(m[1]). s, e = ss + ms + len(m[1]), ss + me old_span = span_tuple_to_span_get((s, e)) if old_span is None: span = [s, e] insort(spans, span) else: span = old_span tables_append(Table(lststr, type_to_spans, span, 'Table')) shadow[ms:me] = b'_' * (me - ms) return tables
Return a list of WikiList objects.
def lists(self, pattern: str = None) -> List['WikiList']: r"""Return a list of WikiList objects. :param pattern: The starting pattern for list items. Return all types of lists (ol, ul, and dl) if pattern is None. If pattern is not None, it will be passed to the regex engine, remember to escape the `*` character. Examples: - `\#` means top-level ordered lists - `\#\*` means unordered lists inside an ordered one - Currently definition lists are not well supported, but you can use `[:;]` as their pattern. Tips and tricks: Be careful when using the following patterns as they will probably cause malfunction in the `sublists` method of the resultant List. (However don't worry about them if you are not going to use the `sublists` method.) - Use `\*+` as a pattern and nested unordered lists will be treated as flat. - Use `\*\s*` as pattern to rstrip `items` of the list. Although the pattern parameter is optional, specifying it can improve the performance. """ lists = [] lists_append = lists.append lststr = self._lststr type_to_spans = self._type_to_spans spans = type_to_spans.setdefault('WikiList', []) span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get shadow, ss = self._lists_shadow_ss for pattern in \ (r'\#', r'\*', '[:;]') if pattern is None else (pattern,): for m in finditer( LIST_PATTERN_FORMAT.replace(b'{pattern}', pattern.encode()), shadow, MULTILINE ): ms, me = m.span() s, e = ss + ms, ss + me old_span = span_tuple_to_span_get((s, e)) if old_span is None: span = [s, e] insort(spans, span) else: span = old_span lists_append(WikiList( lststr, pattern, m, type_to_spans, span, 'WikiList')) return lists
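A short sketch of the pattern parameter in use, under the same `wtp.parse` assumption:

import wikitextparser as wtp

parsed = wtp.parse('# one\n# two\n#* nested under two\n')
ordered = parsed.lists(pattern=r'\#')    # top-level ordered items
nested = parsed.lists(pattern=r'\#\*')   # unordered items inside them
print(ordered[0].items, nested[0].items)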
Return all tags with the given name.
def tags(self, name=None) -> List['Tag']: """Return all tags with the given name.""" lststr = self._lststr type_to_spans = self._type_to_spans if name: if name in _tag_extensions: string = lststr[0] return [ Tag(lststr, type_to_spans, span, 'ExtensionTag') for span in type_to_spans['ExtensionTag'] if string.startswith('<' + name, span[0])] tags = [] # type: List['Tag'] else: # There is no name, add all extension tags. Before using shadow. tags = [ Tag(lststr, type_to_spans, span, 'ExtensionTag') for span in type_to_spans['ExtensionTag']] tags_append = tags.append # Get the left-most start tag, match it to right-most end tag # and so on. ss = self._span[0] shadow = self._shadow if name: # There is a name but it is not in TAG_EXTENSIONS. reversed_start_matches = reversed([m for m in regex_compile( START_TAG_PATTERN.replace( rb'{name}', rb'(?P<name>' + name.encode() + rb')') ).finditer(shadow)]) end_search = regex_compile(END_TAG_PATTERN .replace( b'{name}', name.encode())).search else: reversed_start_matches = reversed( [m for m in START_TAG_FINDITER(shadow)]) shadow_copy = shadow[:] spans = type_to_spans.setdefault('Tag', []) span_tuple_to_span_get = {(s[0], s[1]): s for s in spans}.get spans_append = spans.append for start_match in reversed_start_matches: if start_match['self_closing']: # Don't look for the end tag s, e = start_match.span() span = [ss + s, ss + e] else: # look for the end-tag if name: # the end_search is already available # noinspection PyUnboundLocalVariable end_match = end_search(shadow_copy, start_match.end()) else: # build end_search according to start tag name end_match = search( END_TAG_PATTERN.replace( b'{name}', start_match['name']), shadow_copy) if end_match: s, e = end_match.span() shadow_copy[s:e] = b'_' * (e - s) span = [ss + start_match.start(), ss + e] else: # Assume start-only tag. s, e = start_match.span() span = [ss + s, ss + e] old_span = span_tuple_to_span_get((span[0], span[1])) if old_span is None: spans_append(span) else: span = old_span tags_append(Tag(lststr, type_to_spans, span, 'Tag')) return sorted(tags, key=attrgetter('_span'))
Yield all the sub-span indices excluding self._span.
def _subspans(self, _type: str) -> Generator[int, None, None]: """Yield all the sub-span indices excluding self._span.""" ss, se = self._span spans = self._type_to_spans[_type] # Do not yield self._span by bisecting for s < ss. # The second bisect is an optimization and should be on [se + 1], # but empty spans are not desired thus [se] is used. b = bisect(spans, [ss]) for span in spans[b:bisect(spans, [se], b)]: if span[1] <= se: yield span
Return the ancestors of the current node.
def ancestors(self, type_: Optional[str] = None) -> List['WikiText']: """Return the ancestors of the current node. :param type_: the type of the desired ancestors as a string. Currently the following types are supported: {Template, ParserFunction, WikiLink, Comment, Parameter, ExtensionTag}. The default is None and means all the ancestors of any type above. """ if type_ is None: types = SPAN_PARSER_TYPES else: types = type_, lststr = self._lststr type_to_spans = self._type_to_spans ss, se = self._span ancestors = [] ancestors_append = ancestors.append for type_ in types: cls = globals()[type_] spans = type_to_spans[type_] for span in spans[:bisect(spans, [ss])]: if se < span[1]: ancestors_append(cls(lststr, type_to_spans, span, type_)) return sorted(ancestors, key=lambda i: ss - i._span[0])
Return the parent node of the current object.
def parent(self, type_: Optional[str] = None) -> Optional['WikiText']: """Return the parent node of the current object. :param type_: the type of the desired parent object. Currently the following types are supported: {Template, ParserFunction, WikiLink, Comment, Parameter, ExtensionTag}. The default is None and means the first parent, of any type above. :return: parent WikiText object or None if no parent with the desired `type_` is found. """ ancestors = self.ancestors(type_) if ancestors: return ancestors[0] return None
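To illustrate how `parent` relates to `ancestors`, a sketch (the nesting and the index order are assumptions based on the span ordering described above):

import wikitextparser as wtp

parsed = wtp.parse('{{outer|{{inner|x}}}}')
inner = parsed.templates[1]  # spans are sorted by start, so the outer template comes first
print(inner.parent().name)                                  # nearest enclosing node
print([a.name for a in inner.ancestors(type_='Template')])  # all enclosing templates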
Return the most common item in the list.
def mode(list_: List[T]) -> T: """Return the most common item in the list. Return the first one if there is more than one most common item. Example: >>> mode([1,1,2,2,]) 1 >>> mode([1,2,2]) 2 >>> mode([]) ... ValueError: max() arg is an empty sequence """ return max(set(list_), key=list_.count)
Return the first argument in the args that has the given name.
def get_arg(name: str, args: Iterable[Argument]) -> Optional[Argument]: """Return the first argument in the args that has the given name. Return None if no such argument is found. As the computation of self.arguments is a little costly, this function was created so that other methods that have already computed the arguments use it instead of calling self.get_arg directly. """ for arg in args: if arg.name.strip(WS) == name.strip(WS): return arg return None
Return normal form of self.name.
def normal_name( self, rm_namespaces=('Template',), capital_links=False, _code: str = None, *, code: str = None, capitalize=False ) -> str: """Return normal form of self.name. - Remove comments. - Remove language code. - Remove namespace ("template:" or any of `rm_namespaces`). - Use space instead of underscore. - Remove consecutive spaces. - Use uppercase for the first letter if `capitalize`. - Remove #anchor. :param rm_namespaces: is used to provide additional localized namespaces for the template namespace. They will be removed from the result. Default is ('Template',). :param capitalize: If True, convert the first letter of the template's name to a capital letter. See [[mw:Manual:$wgCapitalLinks]] for more info. :param code: is the language code. :param capital_links: deprecated. :param _code: deprecated. Example: >>> Template( ... '{{ eN : tEmPlAtE : <!-- c --> t_1 # b | a }}' ... ).normal_name(code='en') 'T 1' """ if capital_links: warn('`capital_links` argument is deprecated,' ' use `capitalize` instead', DeprecationWarning) capitalize = capital_links if _code: warn('`_code` argument is deprecated,' ' use `code` instead', DeprecationWarning) code = _code # Remove comments name = COMMENT_SUB('', self.name).strip(WS) # Remove code if code: head, sep, tail = name.partition(':') if not head and sep: name = tail.strip(' ') head, sep, tail = name.partition(':') if code.lower() == head.strip(' ').lower(): name = tail.strip(' ') # Remove namespace head, sep, tail = name.partition(':') if not head and sep: name = tail.strip(' ') head, sep, tail = name.partition(':') if head: ns = head.strip(' ').lower() for namespace in rm_namespaces: if namespace.lower() == ns: name = tail.strip(' ') break # Use space instead of underscore name = name.replace('_', ' ') if capitalize: # Use uppercase for the first letter n0 = name[0] if n0.islower(): name = n0.upper() + name[1:] # Remove #anchor name, sep, tail = name.partition('#') return ' '.join(name.split())
Eliminate duplicate arguments by removing the first occurrences.
def rm_first_of_dup_args(self) -> None: """Eliminate duplicate arguments by removing the first occurrences. Remove the first occurrences of duplicate arguments, regardless of their value. Result of the rendered wikitext should remain the same. Warning: Some meaningful data may be removed from wikitext. Also see `rm_dup_args_safe` function. """ names = set() # type: set for a in reversed(self.arguments): name = a.name.strip(WS) if name in names: del a[:len(a.string)] else: names.add(name)
Remove duplicate arguments in a safe manner.
def rm_dup_args_safe(self, tag: str = None) -> None: """Remove duplicate arguments in a safe manner. Remove the duplicate arguments only in the following situations: 1. Both arguments have the same name AND value. (Remove one of them.) 2. Arguments have the same name and one of them is empty. (Remove the empty one.) Warning: Although this is considered safe and no meaningful data is removed from the wikitext, the result of the rendered wikitext may actually change if the second arg is empty and removed but the first had a value. If `tag` is defined, it should be a string that will be appended to the value of the remaining duplicate arguments. Also see `rm_first_of_dup_args` function. """ name_to_lastarg_vals = {} # type: Dict[str, Tuple[Argument, List[str]]] # Removing positional args affects their name. By reversing the list # we avoid encountering those kind of args. for arg in reversed(self.arguments): name = arg.name.strip(WS) if arg.positional: # It's not OK to strip whitespace in positional arguments. val = arg.value else: # Value of keyword arguments is automatically stripped by MW. val = arg.value.strip(WS) if name in name_to_lastarg_vals: # This is a duplicate argument. if not val: # This duplicate argument is empty. It's safe to remove it. del arg[0:len(arg.string)] else: # Try to remove any of the detected duplicates of this # that are empty or their value equals to this one. lastarg, dup_vals = name_to_lastarg_vals[name] if val in dup_vals: del arg[0:len(arg.string)] elif '' in dup_vals: # This happens only if the last occurrence of name has # been an empty string; other empty values will # be removed as they are seen. # In other words index of the empty argument in # dup_vals is always 0. del lastarg[0:len(lastarg.string)] dup_vals.pop(0) else: # It was not possible to remove any of the duplicates. dup_vals.append(val) if tag: arg.value += tag else: name_to_lastarg_vals[name] = (arg, [val])
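A sketch contrasting the two removal strategies (the template text is hypothetical):

import wikitextparser as wtp

t1 = wtp.Template('{{t|a=1|a=}}')
t1.rm_dup_args_safe()         # only the empty duplicate is dropped
print(t1.string)              # {{t|a=1}}

t2 = wtp.Template('{{t|a=1|a=2}}')
t2.rm_first_of_dup_args()     # keeps the last occurrence, whatever its value
print(t2.string)              # {{t|a=2}}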
Set the value for the `name` argument. Add it if it doesn't exist.
def set_arg( self, name: str, value: str, positional: bool = None, before: str = None, after: str = None, preserve_spacing: bool = True ) -> None: """Set the value for `name` argument. Add it if it doesn't exist. - Use `positional`, `before` and `after` keyword arguments only when adding a new argument. - If `before` is given, ignore `after`. - If neither `before` nor `after` are given and it's needed to add a new argument, then append the new argument to the end. - If `positional` is True, try to add the given value as a positional argument. Ignore `preserve_spacing` if positional is True. If it's None, do what seems more appropriate. """ args = list(reversed(self.arguments)) arg = get_arg(name, args) # Updating an existing argument. if arg: if positional: arg.positional = positional if preserve_spacing: val = arg.value arg.value = val.replace(val.strip(WS), value) else: arg.value = value return # Adding a new argument if not name and positional is None: positional = True # Calculate the whitespace needed before arg-name and after arg-value. if not positional and preserve_spacing and args: before_names = [] name_lengths = [] before_values = [] after_values = [] for arg in args: aname = arg.name name_len = len(aname) name_lengths.append(name_len) before_names.append(STARTING_WS_MATCH(aname)[0]) arg_value = arg.value before_values.append(STARTING_WS_MATCH(arg_value)[0]) after_values.append(ENDING_WS_MATCH(arg_value)[0]) pre_name_ws_mode = mode(before_names) name_length_mode = mode(name_lengths) post_value_ws_mode = mode( [SPACE_AFTER_SEARCH(self.string)[0]] + after_values[1:] ) pre_value_ws_mode = mode(before_values) else: preserve_spacing = False # Calculate the string that needs to be added to the Template. if positional: # Ignore preserve_spacing for positional args. addstring = '|' + value else: if preserve_spacing: # noinspection PyUnboundLocalVariable addstring = ( '|' + (pre_name_ws_mode + name.strip(WS)). ljust(name_length_mode) + '=' + pre_value_ws_mode + value + post_value_ws_mode ) else: addstring = '|' + name + '=' + value # Place the addstring in the right position. if before: arg = get_arg(before, args) arg.insert(0, addstring) elif after: arg = get_arg(after, args) arg.insert(len(arg.string), addstring) else: if args and not positional: arg = args[0] arg_string = arg.string if preserve_spacing: # Insert after the last argument. # The addstring needs to be recalculated because we don't # want to change the whitespace before final braces. # noinspection PyUnboundLocalVariable arg[0:len(arg_string)] = ( arg.string.rstrip(WS) + post_value_ws_mode + addstring.rstrip(WS) + after_values[0] ) else: arg.insert(len(arg_string), addstring) else: # The template has no arguments or the new arg is # positional AND is to be added at the end of the template. self.insert(-2, addstring)
Return the last argument with the given name.
def get_arg(self, name: str) -> Optional[Argument]: """Return the last argument with the given name. Return None if no argument with that name is found. """ return get_arg(name, reversed(self.arguments))
Return True if there is an argument named `name`.
def has_arg(self, name: str, value: str = None) -> bool: """Return True if there is an argument named `name`. Also check equality of values if `value` is provided. Note: If you just need to get an argument and you want to LBYL, it's better to call get_arg directly and then check if the returned value is None. """ for arg in reversed(self.arguments): if arg.name.strip(WS) == name.strip(WS): if value: if arg.positional: if arg.value == value: return True return False if arg.value.strip(WS) == value.strip(WS): return True return False return True return False
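As the note suggests, when the argument is needed afterwards anyway, one `get_arg` call plus a None check avoids scanning the arguments twice:

import wikitextparser as wtp

t = wtp.Template('{{t|a=1}}')
arg = t.get_arg('a')       # single scan over the arguments
if arg is not None:
    print(arg.value)       # 1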
Delete all arguments with the given name.
def del_arg(self, name: str) -> None: """Delete all arguments with the given name.""" for arg in reversed(self.arguments): if arg.name.strip(WS) == name.strip(WS): del arg[:]
Build crs table of all equivalent format variations by scraping spatialreference.org. Saves table as tab-delimited text file. NOTE: Might take a while.
def build_crs_table(savepath): """ Build crs table of all equivalent format variations by scraping spatialreference.org. Saves table as tab-delimited text file. NOTE: Might take a while. Arguments: - *savepath*: The absolute or relative filepath to which to save the crs table, including the ".txt" extension. """ # create table (text mode, since rows are written as strings) outfile = open(savepath, "w") # create fields fields = ["codetype", "code", "proj4", "ogcwkt", "esriwkt"] outfile.write("\t".join(fields) + "\n") # make table from url requests for codetype in ("epsg", "esri", "sr-org"): print(codetype) # collect existing proj list print("fetching list of available codes") codelist = [] page = 1 while True: try: link = 'http://spatialreference.org/ref/%s/?page=%s' %(codetype,page) html = urllib2.urlopen(link).read() if not isinstance(html, str): html = html.decode() codes = [match.groups()[0] for match in re.finditer(r'/ref/'+codetype+r'/(\d+)', html) ] if not codes: break print("page",page) codelist.extend(codes) page += 1 except Exception: break print("fetching string formats for each projection") for i,code in enumerate(codelist): # check if code exists link = 'http://spatialreference.org/ref/%s/%s/' %(codetype,code) urllib2.urlopen(link) # collect each projection format in a table row row = [codetype, code] for resulttype in ("proj4", "ogcwkt", "esriwkt"): try: link = 'http://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,resulttype) result = urllib2.urlopen(link).read() if not isinstance(result, str): result = result.decode() row.append(result) except Exception: pass print("projection %i of %i added" %(i,len(codelist)) ) outfile.write("\t".join(row) + "\n") # close the file outfile.close()
Look up a CRS code on spatialreference.org and return it in the specified format.
def crscode_to_string(codetype, code, format): """ Lookup crscode on spatialreference.org and return in specified format. Arguments: - *codetype*: "epsg", "esri", or "sr-org". - *code*: The code. - *format*: The crs format of the returned string. One of "ogcwkt", "esriwkt", or "proj4", but also several others... Returns: - Crs string in the specified format. """ link = 'http://spatialreference.org/ref/%s/%s/%s/' %(codetype,code,format) result = urllib2.urlopen(link).read() if not isinstance(result, str): result = result.decode() return result
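A usage sketch; this performs a live HTTP request to spatialreference.org, so it requires network access and the site being reachable:

proj4 = crscode_to_string("epsg", 4326, "proj4")
print(proj4)   # e.g. '+proj=longlat +datum=WGS84 +no_defs'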
Returns the CS as a proj4 formatted string or dict.
def to_proj4(self, as_dict=False, toplevel=True): """ Returns the CS as a proj4 formatted string or dict. Arguments: - **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False). - **toplevel** (optional): If True, treats this CS as the final toplevel CS and adds the necessary proj4 elements (defaults to True). """ # don't parse axis to proj4, because in proj4, axis only applies to the cs, ie the projcs (not the geogcs, where wkt can specify with axis) # also proj4 cannot specify angular units if toplevel: string = "+proj=longlat %s %s +no_defs" % (self.datum.to_proj4(), self.prime_mer.to_proj4()) else: string = "%s %s" % (self.datum.to_proj4(), self.prime_mer.to_proj4()) if as_dict: return dict([ entry.lstrip('+').split('=') for entry in string.split() if entry != "+no_defs" ]) else: return string
Returns the CS as an OGC WKT formatted string.
def to_ogc_wkt(self): """ Returns the CS as a OGC WKT formatted string. """ return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_ogc_wkt(), self.prime_mer.to_ogc_wkt(), self.angunit.to_ogc_wkt(), self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt )
Returns the CS as an ESRI WKT formatted string.
def to_esri_wkt(self): """ Returns the CS as a ESRI WKT formatted string. """ return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_esri_wkt(), self.prime_mer.to_esri_wkt(), self.angunit.to_esri_wkt(), self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt )
Returns the CS as a proj4 formatted string or dict.
def to_proj4(self, as_dict=False): """ Returns the CS as a proj4 formatted string or dict. Arguments: - **as_dict** (optional): If True, returns the proj4 string as a dict (defaults to False). """ string = "%s" % self.proj.to_proj4() string += " %s" % self.geogcs.to_proj4(toplevel=False) string += " " + " ".join(param.to_proj4() for param in self.params) string += " %s" % self.unit.to_proj4() string += " +axis=" + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + "u" # up set as default because only proj4 can set it I think... string += " +no_defs" if as_dict: return dict([ entry.lstrip('+').split('=') for entry in string.split() if entry != "+no_defs" ]) else: return string
Returns the CS as an OGC WKT formatted string.
def to_ogc_wkt(self): """ Returns the CS as a OGC WKT formatted string. """ string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_ogc_wkt(), self.proj.to_ogc_wkt() ) string += ", ".join(param.to_ogc_wkt() for param in self.params) string += ', %s' % self.unit.to_ogc_wkt() string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt ) return string
Returns the CS as an ESRI WKT formatted string.
def to_esri_wkt(self): """ Returns the CS as a ESRI WKT formatted string. """ string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_esri_wkt(), self.proj.to_esri_wkt() ) string += ", ".join(param.to_esri_wkt() for param in self.params) string += ', %s' % self.unit.to_esri_wkt() string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt ) return string
Search for an ellipsoid name located in this module.
def find(ellipsname, crstype, strict=False): """ Search for a ellipsoid name located in this module. Arguments: - **ellipsname**: The ellipsoid name to search for. - **crstype**: Which CRS naming convention to search (different CRS formats have different names for the same ellipsoid). - **strict** (optional): If False, ignores minor name mismatches such as underscore or character casing, otherwise must be exact match (defaults to False). """ if not strict: ellipsname = ellipsname.lower().replace(" ","_") for itemname,item in globals().items(): if itemname.startswith("_") or itemname == 'Ellipsoid': continue try: if hasattr(item.name, crstype): itemname = getattr(item.name, crstype) if not strict: itemname = itemname.lower().replace(" ","_") if ellipsname == itemname: return item except: pass else: return None
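A lookup sketch; the module path and the ellipsoid name are illustrative and depend on your PyCRS version:

from pycrs.elements import ellipsoids

# Non-strict matching ignores case and spaces vs. underscores.
ellips = ellipsoids.find("wgs 84", "ogc_wkt")
print(ellips)   # an Ellipsoid instance, or None if nothing matched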
Returns the crs object from a string, interpreted as a specified format, located at a given URL.
def from_url(url, format=None): """ Returns the crs object from a string interpreted as a specified format, located at a given url site. Arguments: - *url*: The url where the crs string is to be read from. - *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4". If None, tries to autodetect the format for you (default). Returns: - CRS object. """ # first get string from url string = urllib2.urlopen(url).read() if PY3 is True: # decode str into string string = string.decode('utf-8') # then determine parser if format: # user specified format format = format.lower().replace(" ", "_") func = parse.__getattr__("from_%s" % format) else: # unknown format func = parse.from_unknown_text # then load crs = func(string) return crs
Returns the crs object from a file with the format determined from the filename extension.
def from_file(filepath): """ Returns the crs object from a file, with the format determined from the filename extension. Arguments: - *filepath*: filepath to be loaded, including extension. """ if filepath.endswith(".prj"): string = open(filepath, "r").read() return parse.from_unknown_wkt(string) elif filepath.endswith((".geojson",".json")): raw = open(filepath).read() geoj = json.loads(raw) if "crs" in geoj: crsinfo = geoj["crs"] if crsinfo["type"] == "name": string = crsinfo["properties"]["name"] return parse.from_unknown_text(string) elif crsinfo["type"] == "link": url = crsinfo["properties"]["name"] type = crsinfo["properties"].get("type") return from_url(url, format=type) else: raise FormatError("Invalid GeoJSON crs type: must be either 'name' or 'link'") else: # assume default wgs84 as per the spec return parse.from_epsg_code("4326")
Load crs object from epsg code, via spatialreference.org. Parses based on the proj4 representation.
def from_epsg_code(code): """ Load crs object from epsg code, via spatialreference.org. Parses based on the proj4 representation. Arguments: - *code*: The EPSG code as an integer. Returns: - A CS instance of the indicated type. """ # must go online (or look up local table) to get crs details code = str(code) proj4 = utils.crscode_to_string("epsg", code, "proj4") crs = from_proj4(proj4) return crs
Load crs object from esri code, via spatialreference.org. Parses based on the proj4 representation.
def from_esri_code(code): """ Load crs object from esri code, via spatialreference.org. Parses based on the proj4 representation. Arguments: - *code*: The ESRI code as an integer. Returns: - A CS instance of the indicated type. """ # must go online (or look up local table) to get crs details code = str(code) proj4 = utils.crscode_to_string("esri", code, "proj4") crs = from_proj4(proj4) return crs
Load crs object from sr-org code, via spatialreference.org. Parses based on the proj4 representation.
def from_sr_code(code): """ Load crs object from sr-org code, via spatialreference.org. Parses based on the proj4 representation. Arguments: - *code*: The SR-ORG code as an integer. Returns: - A CS instance of the indicated type. """ # must go online (or look up local table) to get crs details code = str(code) proj4 = utils.crscode_to_string("sr-org", code, "proj4") crs = from_proj4(proj4) return crs
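All three loaders follow the same pattern, differing only in the code registry they query; each one performs a live spatialreference.org lookup:

crs = from_epsg_code(4326)    # WGS 84 geographic CRS
crs = from_esri_code(54030)   # World Robinson
crs = from_sr_code(6842)      # an SR-ORG entry (code illustrative)
print(crs.to_proj4())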
Internal method for parsing wkt with minor differences depending on ogc or esri style.
def _from_wkt(string, wkttype=None, strict=False): """ Internal method for parsing wkt, with minor differences depending on ogc or esri style. Arguments: - *string*: The OGC or ESRI WKT representation as a string. - *wkttype* (optional): How to parse the WKT string, as either 'ogc', 'esri', or None. If None, tries to autodetect the wkt type before parsing (default). - *strict* (optional): When True, the parser is strict about names having to match exactly with upper and lowercases. Default is not strict (False). Returns: - A CS instance of the indicated type. """ # TODO # - Make function for finding next element by name, instead of knowing its arg index position # - Maybe verify elem arg name # make sure valid wkttype if wkttype: wkttype = wkttype.lower() assert wkttype in ("ogc","esri",None) # remove newlines and multi spaces string = " ".join(string.split()) # parse arguments into components def _consume_bracket(chars, char): "char must be the opening bracket" consumed = "" depth = 1 while char and depth > 0: consumed += char char = next(chars, None) # update depth level if char == "[": depth += 1 elif char == "]": depth -= 1 consumed += char # consume the last closing char too return consumed def _consume_quote(chars, char, quotechar): "char and quotechar must be the opening quote char" consumed = "" # consume the first opening char consumed += char char = next(chars, None) # consume inside while char and char != quotechar: consumed += char char = next(chars, None) # consume the last closing char too consumed += char return consumed def _next_elem(chars, char): "char must be the first char of the text that precedes brackets" header = "" # skip until next header while not char.isalpha(): char = next(chars, None) # first consume the element text header while char.isalpha(): header += char char = next(chars, None) # skip until next brackets (in case of spaces) while char != "[": char = next(chars, None) # then consume the element bracket contents if char == "[": content = _consume_bracket(chars, char) char = next(chars, None) # split content into args list content = content[1:-1] # remove enclosing brackets content = _split_except(content) # recursively load all subelems for i,item in enumerate(content): if isinstance(item, str) and "[" in item: chars = (char for char in item) char = next(chars) item = _next_elem(chars, char) content[i] = item return header, content def _clean_value(string): string = string.strip() try: string = float(string) except ValueError: pass return string def _split_except(string): "split the string on every comma, except not while inside quotes or square brackets" chars = (char for char in string) char = next(chars) items = [] consumed = "" while char: # don't split on quotes, just consume it if char in ("'", '"'): consumed += _consume_quote(chars, char, char) # don't split inside brackets, just consume it elif char == "[": consumed += _consume_bracket(chars, char) # new splitchar found, add what has been consumed so far as an item, reset, and start consuming until next splitchar elif char == ",": consumed = _clean_value(consumed) items.append(consumed) consumed = "" # consume normal char elif char: consumed += char # next char = next(chars, None) # append last item too consumed = _clean_value(consumed) items.append(consumed) return items # load into nested tuples and arglists crstuples = [] chars = (char for char in string) char = next(chars) while char: header,content = _next_elem(chars, char) crstuples.append((header, content)) char = next(chars, None) # autodetect wkttype if not specified if not wkttype: topheader,topcontent = crstuples[0] if topheader == "PROJCS": geogcsheader,geogcscontent = topcontent[1] elif topheader == "GEOGCS": geogcsheader,geogcscontent = topheader,topcontent # datum elem should be second under geogcs datumheader, datumcontent = geogcscontent[1] datumname = datumcontent[0].upper().strip('"') # esri wkt datums all use "D_" before the datum name if datumname.startswith("D_"): wkttype = "esri" else: wkttype = "ogc" # parse into actual crs objects def _parse_top(header, content): "procedure for parsing the toplevel crs element and all its children" if header.upper() == "PROJCS": # find name csname = content[0].strip('"') # find geogcs elem (by running parse again) subheader, subcontent = content[1] geogcs = _parse_top(subheader, subcontent) # find projection elem for part in content: if isinstance(part, tuple): subheader,subcontent = part if subheader == "PROJECTION": break projname = subcontent[0].strip('"') projclass = projections.find(projname, "%s_wkt" % wkttype, strict) if projclass: proj = projclass() else: raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname) # find params params = [] for part in content: if isinstance(part, tuple): subheader,subcontent = part if subheader == "PARAMETER": name, value = subcontent[0].strip('"'), subcontent[1] itemclass = parameters.find(name, "%s_wkt" % wkttype, strict) if itemclass: item = itemclass(value) params.append(item) # find unit for part in content: if isinstance(part, tuple): subheader,subcontent = part if subheader == "UNIT": break unitname,value = subcontent[0].strip('"'), subcontent[1] unitclass = units.find(unitname, "%s_wkt" % wkttype, strict) if unitclass: unit = unitclass() else: unit = units.Unknown() unit.unitmultiplier.value = value # override default multiplier linunit = unit # find twin axis maybe ## if len(content) >= 6: ## twinax = (parameters.Axis( ## else: ## twinax = None # put it all together projcs = containers.ProjCS(csname, geogcs, proj, params, linunit) #, twinax) return projcs elif header.upper() == "GEOGCS": # name csname = content[0].strip('"') # datum subheader, subcontent = content[1] ## datum name datumname = subcontent[0].strip('"') datumclass = datums.find(datumname, "%s_wkt" % wkttype, strict) if datumclass: datum = datumclass() else: datum = datums.Unknown() ## datum ellipsoid subsubheader, subsubcontent = subcontent[1] ellipsname = subsubcontent[0].strip('"') ellipsclass = ellipsoids.find(ellipsname, "%s_wkt" % wkttype, strict) if ellipsclass: ellipsoid = ellipsclass() else: ellipsoid = ellipsoids.Unknown() ellipsoid.semimaj_ax = parameters.SemiMajorRadius(subsubcontent[1]) if subsubcontent[2] == 0: # WKT falsely sets inverse flattening to 0 for spheroids # but actually it cannot be 0, it is the flattening that is 0 ellipsoid.flat = parameters.Flattening(subsubcontent[2]) else: ellipsoid.inv_flat = parameters.InverseFlattening(subsubcontent[2]) ## datum shift if wkttype == "ogc": for subsubheader,subsubcontent in subcontent[1:]: if subsubheader == "TOWGS84": datumshift = parameters.DatumShift(subsubcontent) break else: datumshift = None elif wkttype == "esri": # not used in esri wkt datumshift = None ## put it all together datum.ellips = ellipsoid datum.datumshift = datumshift # prime mer subheader, subcontent = content[2] prime_mer = parameters.PrimeMeridian(subcontent[1]) # angunit subheader, subcontent = content[3] unitname,value = subcontent[0].strip('"'), subcontent[1] unitclass = units.find(unitname, "%s_wkt" % wkttype, strict) if unitclass: unit = unitclass() else: unit = units.Unknown() unit.unitmultiplier.value = value # override default multiplier angunit = unit # twin axis # ... # put it all together geogcs = containers.GeogCS(csname, datum, prime_mer, angunit, twin_ax=None) return geogcs # toplevel collection header, content = crstuples[0] crs = _parse_top(header, content) # use args to create crs return crs
Parse crs as proj4 formatted string or dict and return the resulting crs object.
def from_proj4(proj4, strict=False): """ Parse crs as proj4 formatted string or dict and return the resulting crs object. Arguments: - *proj4*: The proj4 representation as a string or dict. - *strict* (optional): When True, the parser is strict about names having to match exactly with upper and lowercases. Default is not strict (False). Returns: - A CS instance of the indicated type. """ # parse arguments into components # use args to create crs # TODO: SLIGHTLY MESSY STILL, CLEANUP.. params = [] if isinstance(proj4, dict): # add leading + sign as expected below, proj4 dicts do not have that partdict = dict([('+'+k,v) for k,v in proj4.items()]) else: partdict = dict([part.split("=") for part in proj4.split() if len(part.split("=")) == 2 ]) # INIT CODES # eg, +init=EPSG:1234 if "+init" in partdict: # first, get the default proj4 string of the +init code codetype, code = partdict["+init"].split(":") if codetype == "EPSG": initproj4 = utils.crscode_to_string("epsg", code, "proj4") elif codetype == "ESRI": initproj4 = utils.crscode_to_string("esri", code, "proj4") # make the default into param dict initpartdict = dict([part.split("=") for part in initproj4.split() if len(part.split("=")) == 2 ]) # override the default with any custom params specified along with the +init code initpartdict.update(partdict) # rerun from_proj4() again on the derived proj4 params as if it was not made with the +init code del initpartdict["+init"] string = " ".join("%s=%s" % (key,val) for key,val in initpartdict.items()) return from_proj4(string) # DATUM # datum param is required if "+datum" in partdict: # get predefined datum def datumname = partdict["+datum"] datumclass = datums.find(datumname, "proj4", strict) if datumclass: datum = datumclass() else: datum = datums.Unknown() else: datum = datums.Unknown() # ELLIPS # ellipse param is required ellips = None if "+ellps" in partdict: # get predefined ellips def ellipsname = partdict["+ellps"] ellipsclass = ellipsoids.find(ellipsname, "proj4", strict) if ellipsclass: ellips = ellipsclass() if not ellips: ellips = ellipsoids.Unknown() # TO WGS 84 COEFFS if "+towgs84" in partdict: coeffs = partdict["+towgs84"].split(",") datumshift = parameters.DatumShift(coeffs) # TODO: if no datum, use ellips + towgs84 params to create the correct datum # ...?? # COMBINE DATUM AND ELLIPS ## create datum and ellips param objs # +ellps loads all the required ellipsoid parameters # here we set or overwrite the parameters manually if "+a" in partdict: # semimajor radius ellips.semimaj_ax = parameters.SemiMajorRadius(partdict["+a"]) if "+b" in partdict: # semiminor radius ellips.semimin_ax = parameters.SemiMinorRadius(partdict["+b"]) if "+f" in partdict: # flattening ellips.flat = parameters.Flattening(partdict["+f"]) if "+rf" in partdict: # inverse flattening ellips.inv_flat = parameters.InverseFlattening(partdict["+rf"]) # check that ellipsoid is sufficiently defined if ellips.semimaj_ax and ellips.semimin_ax: # +a (semimajor radius) and +b (semiminor radius) is enough and can be used to calculate flattening # see https://en.wikipedia.org/wiki/Flattening pass elif ellips.semimaj_ax and ellips.inv_flat: # alternatively, +a (semimajor) and +rf (inverse flattening) are acceptable pass elif ellips.semimaj_ax and ellips.flat: # alternatively, +a (semimajor) and +f (flattening) are acceptable pass else: raise FormatError("The format string is missing the required +ellps element, or the alternative manual specification of the +a with +b or +f/+rf elements: \n\t %s" % partdict) if "+datum" in partdict: datum.ellips = ellips elif "+towgs84" in partdict: datum.ellips = ellips datum.datumshift = datumshift else: datum.ellips = ellips # PRIME MERIDIAN # set default prime_mer = parameters.PrimeMeridian(0) # overwrite with user input if "+pm" in partdict: prime_mer = parameters.PrimeMeridian(partdict["+pm"]) # ANGULAR UNIT ## proj4 cannot set angular unit, so just set to default angunit = units.Degree() # GEOGCS (note, currently does not load axes) geogcs = containers.GeogCS("Unknown", datum, prime_mer, angunit) #, twin_ax) # PROJECTION if "+proj" in partdict: # get predefined proj def projname = partdict["+proj"] projclass = projections.find(projname, "proj4", strict) if projclass: proj = projclass() elif projname == "longlat": # proj4 special case, longlat as projection name means unprojected geogcs proj = None else: raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname) else: raise FormatError("The format string is missing the required +proj element") if proj: # Because proj4 has no element hierarchy, using automatic element find() # ...would not be very effective, as that would need a try-fail approach for each # ...element type (parameter, projection, datum, ellipsoid, unit). # ...Instead load each element individually. # CENTRAL MERIDIAN if "+lon_0" in partdict: val = partdict["+lon_0"] obj = parameters.CentralMeridian(val) params.append(obj) # FALSE EASTING if "+x_0" in partdict: val = partdict["+x_0"] obj = parameters.FalseEasting(val) params.append(obj) # FALSE NORTHING if "+y_0" in partdict: val = partdict["+y_0"] obj = parameters.FalseNorthing(val) params.append(obj) # SCALING FACTOR if "+k_0" in partdict or "+k" in partdict: if "+k_0" in partdict: val = partdict["+k_0"] elif "+k" in partdict: val = partdict["+k"] obj = parameters.ScalingFactor(val) params.append(obj) # LATITUDE ORIGIN if "+lat_0" in partdict: val = partdict["+lat_0"] obj = parameters.LatitudeOrigin(val) params.append(obj) # LATITUDE TRUE SCALE if "+lat_ts" in partdict: val = partdict["+lat_ts"] obj = parameters.LatitudeTrueScale(val) params.append(obj) # LONGITUDE CENTER if "+lonc" in partdict: val = partdict["+lonc"] obj = parameters.LongitudeCenter(val) params.append(obj) # AZIMUTH if "+alpha" in partdict: val = partdict["+alpha"] obj = parameters.Azimuth(val) params.append(obj) # STD PARALLEL 1 if "+lat_1" in partdict: val = partdict["+lat_1"] obj = parameters.LatitudeFirstStndParallel(val) params.append(obj) # STD PARALLEL 2 if "+lat_2" in partdict: val = partdict["+lat_2"] obj = parameters.LatitudeSecondStndParallel(val) params.append(obj) # SATELLITE HEIGHT if "+h" in partdict: val = partdict["+h"] obj = parameters.SatelliteHeight(val) params.append(obj) # TILT ANGLE if "+tilt" in partdict: val = partdict["+tilt"] obj = parameters.TiltAngle(val) params.append(obj) # UNIT # get values if "+units" in partdict: # unit name takes precedence over to_meter unitname = partdict["+units"] unitclass = units.find(unitname, "proj4", strict) if unitclass: unit = unitclass() # takes meter multiplier from name, ignoring any custom meter multiplier else: raise FormatError("The specified unit name %r does not appear to be a valid unit name" % unitname) elif "+to_meter" in partdict: # no unit name specified, only to_meter conversion factor unit = units.Unknown() unit.metermultiplier.value = partdict["+to_meter"] else: # if nothing specified, defaults to meter unit = units.Meter() # PROJCS projcs = containers.ProjCS("Unknown", geogcs, proj, params, unit) return projcs else: # means projdef was None, ie unprojected longlat geogcs return geogcs
Detect crs string format and parse into crs object with appropriate function.
def from_unknown_text(text, strict=False): """ Detect crs string format and parse into crs object with appropriate function. Arguments: - *text*: The crs text representation of unknown type. - *strict* (optional): When True, the parser is strict about names having to match exactly with upper and lowercases. Default is not strict (False). Returns: - CRS object. """ if text.startswith("+"): crs = from_proj4(text, strict) elif text.startswith(("PROJCS[","GEOGCS[")): crs = from_unknown_wkt(text, strict) #elif text.startswith("urn:"): # crs = from_ogc_urn(text, strict) elif text.startswith("EPSG:"): crs = from_epsg_code(text.split(":")[1]) elif text.startswith("ESRI:"): crs = from_esri_code(text.split(":")[1]) elif text.startswith("SR-ORG:"): crs = from_sr_code(text.split(":")[1]) else: raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats") return crs
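A dispatch sketch; the first call parses locally, the second goes through the spatialreference.org lookup:

crs = from_unknown_text("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
crs = from_unknown_text("EPSG:4326")   # requires network access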
Write the raw header content to the out stream
def write_to(self, out): """ Write the raw header content to the out stream Parameters: ---------- out : {file object} The output stream """ out.write(bytes(self.header)) out.write(self.record_data)
Instantiate a RawVLR by reading the content from the data stream
def read_from(cls, data_stream): """ Instantiate a RawVLR by reading the content from the data stream Parameters: ---------- data_stream : {file object} The input stream Returns ------- RawVLR The RawVLR read """ raw_vlr = cls() header = RawVLRHeader.from_stream(data_stream) raw_vlr.header = header raw_vlr.record_data = data_stream.read(header.record_length_after_header) return raw_vlr
Gets the 3 GeoTiff vlrs from the vlr_list and parses them into a nicer structure
def parse_geo_tiff_keys_from_vlrs(vlr_list: vlrlist.VLRList) -> List[GeoTiffKey]: """ Gets the 3 GeoTiff vlrs from the vlr_list and parses them into a nicer structure Parameters ---------- vlr_list: pylas.vlrs.vlrlist.VLRList list of vlrs from a las file Raises ------ IndexError if any of the needed GeoTiffVLR is not found in the list Returns ------- List of GeoTiff keys parsed from the VLRs """ geo_key_dir = vlr_list.get_by_id( GeoKeyDirectoryVlr.official_user_id(), GeoKeyDirectoryVlr.official_record_ids() )[0] geo_doubles = vlr_list.get_by_id( GeoDoubleParamsVlr.official_user_id(), GeoDoubleParamsVlr.official_record_ids() )[0] geo_ascii = vlr_list.get_by_id( GeoAsciiParamsVlr.official_user_id(), GeoAsciiParamsVlr.official_record_ids() )[0] return parse_geo_tiff(geo_key_dir, geo_doubles, geo_ascii)
Parses the GeoTiff VLRs information into nicer structs
def parse_geo_tiff( key_dir_vlr: GeoKeyDirectoryVlr, double_vlr: GeoDoubleParamsVlr, ascii_vlr: GeoAsciiParamsVlr, ) -> List[GeoTiffKey]: """ Parses the GeoTiff VLRs information into nicer structs """ geotiff_keys = [] for k in key_dir_vlr.geo_keys: if k.tiff_tag_location == 0: value = k.value_offset elif k.tiff_tag_location == 34736: value = double_vlr.doubles[k.value_offset] elif k.tiff_tag_location == 34737: try: value = ascii_vlr.strings[k.value_offset][k.count :] except IndexError: # Maybe I'm just misunderstanding the specification :thinking: value = ascii_vlr.strings[0][k.value_offset : k.value_offset + k.count] else: logger.warning( "GeoTiffKey with unknown tiff tag location ({})".format( k.tiff_tag_location ) ) continue geotiff_keys.append(GeoTiffKey(k.id, value)) return geotiff_keys
Returns the signedness for the given type index
def get_signedness_for_extra_dim(type_index): """ Returns the signedness for the given type index Parameters ---------- type_index: int index of the type as defined in the LAS Specification Returns ------- DimensionSignedness, the enum variant """ try: t = _extra_dims_style_2[type_index] if "uint" in t: return DimensionSignedness.UNSIGNED elif "int" in t: return DimensionSignedness.SIGNED else: return DimensionSignedness.FLOATING except IndexError: raise errors.UnknownExtraType(type_index)
Returns the index of the type as defined in the LAS Specification
def get_id_for_extra_dim_type(type_str): """ Returns the index of the type as defined in the LAS Specification Parameters ---------- type_str: str Returns ------- int index of the type """ try: return _type_to_extra_dim_id_style_1[type_str] except KeyError: try: return _type_to_extra_dim_id_style_2[type_str] except KeyError: raise errors.UnknownExtraType(type_str)
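The two helpers can be composed; a small sketch, assuming 'uint8' is registered in the style tables used above:

type_id = get_id_for_extra_dim_type("uint8")
print(type_id, get_signedness_for_extra_dim(type_id))  # e.g. DimensionSignedness.UNSIGNED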
Construct a new PackedPointRecord from an existing one, with the ability to change the point format while doing so
def from_point_record(cls, other_point_record, new_point_format): """ Construct a new PackedPointRecord from an existing one, with the ability to change the point format while doing so """ array = np.zeros_like(other_point_record.array, dtype=new_point_format.dtype) new_record = cls(array, new_point_format) new_record.copy_fields_from(other_point_record) return new_record
Tries to copy the values of the current dimensions from other_record
def copy_fields_from(self, other_record): """ Tries to copy the values of the current dimensions from other_record """ for dim_name in self.dimensions_names: try: self[dim_name] = other_record[dim_name] except ValueError: pass
Appends zeros to the points stored if the value we are trying to fit is bigger
def _append_zeros_if_too_small(self, value): """ Appends zeros to the points stored if the value we are trying to fit is bigger """ size_diff = len(value) - len(self.array) if size_diff > 0: # only grow, never truncate self.array = np.append( self.array, np.zeros(size_diff, dtype=self.array.dtype) )
Returns all the dimension names, including the names of sub_fields and their corresponding packed fields
def all_dimensions_names(self): """ Returns all the dimension names, including the names of sub_fields and their corresponding packed fields """ return frozenset(self.array.dtype.names + tuple(self.sub_fields_dict.keys()))
Creates a new point record with all dimensions initialized to zero
def zeros(cls, point_format, point_count): """ Creates a new point record with all dimensions initialized to zero Parameters ---------- point_format : PointFormat The point format the point record should have point_count : int The number of points the point record should have Returns ------- PackedPointRecord """ data = np.zeros(point_count, point_format.dtype) return cls(data, point_format)
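Typical construction, assuming a PointFormat object like the one used elsewhere in these snippets (the import path is a guess):

from pylas.point.format import PointFormat

fmt = PointFormat(3)
record = PackedPointRecord.zeros(fmt, point_count=100)
print(len(record.array), record.array["X"][:3])   # 100 points, X all zero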
Construct the point record by reading the points from the stream
def from_stream(cls, stream, point_format, count): """ Construct the point record by reading the points from the stream """ points_dtype = point_format.dtype point_data_buffer = bytearray(stream.read(count * points_dtype.itemsize)) try: data = np.frombuffer(point_data_buffer, dtype=points_dtype, count=count) except ValueError: expected_bytes_len = count * points_dtype.itemsize if len(point_data_buffer) % points_dtype.itemsize != 0: missing_bytes_len = expected_bytes_len - len(point_data_buffer) raise_not_enough_bytes_error( expected_bytes_len, missing_bytes_len, len(point_data_buffer), points_dtype, ) else: actual_count = len(point_data_buffer) // points_dtype.itemsize logger.critical( "Expected {} points, there are {} ({} missing)".format( count, actual_count, count - actual_count ) ) data = np.frombuffer( point_data_buffer, dtype=points_dtype, count=actual_count ) return cls(data, point_format)
Construct the point record by reading and decompressing the points data from the input buffer
def from_compressed_buffer(cls, compressed_buffer, point_format, count, laszip_vlr): """ Construct the point record by reading and decompressing the points data from the input buffer """ point_dtype = point_format.dtype uncompressed = decompress_buffer( compressed_buffer, point_dtype, count, laszip_vlr ) return cls(uncompressed, point_format)
Returns the scaled x positions of the points as doubles
def x(self): """ Returns the scaled x positions of the points as doubles """ return scale_dimension(self.X, self.header.x_scale, self.header.x_offset)
Returns the scaled y positions of the points as doubles
def y(self): """ Returns the scaled y positions of the points as doubles """ return scale_dimension(self.Y, self.header.y_scale, self.header.y_offset)
Returns the scaled z positions of the points as doubles
def z(self): """ Returns the scaled z positions of the points as doubles """ return scale_dimension(self.Z, self.header.z_scale, self.header.z_offset)
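The scale_dimension helper used by all three properties is presumably the standard LAS affine transform; a minimal sketch of that assumption:

import numpy as np

def scale_dimension(array, scale, offset):
    # integer grid coordinates -> real-world units
    return array * scale + offset

X = np.array([1000, 2000, 3000])
print(scale_dimension(X, scale=0.01, offset=100.0))   # [110. 120. 130.]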
Setter for the points property. Takes care of changing the point_format of the file (as long as the point format of the new points is compatible with the file version)
def points(self, value): """ Setter for the points property. Takes care of changing the point_format of the file (as long as the point format of the new points is compatible with the file version) Parameters ---------- value: numpy.array of the new points """ if value.dtype != self.points.dtype: raise errors.IncompatibleDataFormat('Cannot set points with a different point format, convert first') new_point_record = record.PackedPointRecord(value, self.points_data.point_format) dims.raise_if_version_not_compatible_with_fmt( new_point_record.point_format.id, self.header.version ) self.points_data = new_point_record self.update_header()
Adds a new extra dimension to the point record
def add_extra_dim(self, name, type, description=""): """ Adds a new extra dimension to the point record Parameters ---------- name: str the name of the dimension type: str type of the dimension (eg 'uint8') description: str, optional a small description of the dimension """ name = name.replace(" ", "_") type_id = extradims.get_id_for_extra_dim_type(type) extra_byte = ExtraBytesStruct( data_type=type_id, name=name.encode(), description=description.encode() ) try: extra_bytes_vlr = self.vlrs.get("ExtraBytesVlr")[0] except IndexError: extra_bytes_vlr = ExtraBytesVlr() self.vlrs.append(extra_bytes_vlr) finally: extra_bytes_vlr.extra_bytes_structs.append(extra_byte) self.points_data.add_extra_dims([(name, type)])
Writes the data to a stream
def write_to(self, out_stream, do_compress=False): """ Writes the data to a stream Parameters ---------- out_stream: file object the destination stream, implementing the write method do_compress: bool, optional, default False Flag to indicate if you want the data to be compressed """ self.update_header() if ( self.vlrs.get("ExtraBytesVlr") and not self.points_data.extra_dimensions_names ): logger.error( "Las contains an ExtraBytesVlr, but no extra bytes were found in the point_record, " "removing the vlr" ) self.vlrs.extract("ExtraBytesVlr") if do_compress: laz_vrl = create_laz_vlr(self.points_data) self.vlrs.append(known.LasZipVlr(laz_vrl.data())) raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs) self.header.offset_to_point_data = ( self.header.size + raw_vlrs.total_size_in_bytes() ) self.header.point_format_id = uncompressed_id_to_compressed( self.header.point_format_id ) self.header.number_of_vlr = len(raw_vlrs) points_bytes = compress_buffer( np.frombuffer(self.points_data.array, np.uint8), laz_vrl.schema, self.header.offset_to_point_data, ).tobytes() else: raw_vlrs = vlrlist.RawVLRList.from_list(self.vlrs) self.header.number_of_vlr = len(raw_vlrs) self.header.offset_to_point_data = ( self.header.size + raw_vlrs.total_size_in_bytes() ) points_bytes = self.points_data.raw_bytes() self.header.write_to(out_stream) self._raise_if_not_expected_pos(out_stream, self.header.size) raw_vlrs.write_to(out_stream) self._raise_if_not_expected_pos(out_stream, self.header.offset_to_point_data) out_stream.write(points_bytes)
Writes the las data into a file
def write_to_file(self, filename, do_compress=None):
    """ Writes the las data into a file

    Parameters
    ----------
    filename : str
        The file where the data should be written.
    do_compress: bool, optional, default None
        if None the extension of the filename will be used
        to determine if the data should be compressed,
        otherwise the do_compress flag indicates if the data should be compressed
    """
    is_ext_laz = filename.split(".")[-1] == "laz"
    if is_ext_laz and do_compress is None:
        do_compress = True
    with open(filename, mode="wb") as out:
        self.write_to(out, do_compress=do_compress)
Writes to a stream or file
def write(self, destination, do_compress=None):
    """ Writes to a stream or file

    When destination is a string, it will be interpreted as the path where
    the file should be written to; also, if do_compress is None,
    the compression will be guessed from the file extension:

    - .laz -> compressed
    - .las -> uncompressed

    .. note::

        This means that you could do something like:

            # Create .laz but not compressed
            las.write('out.laz', do_compress=False)

            # Create .las but compressed
            las.write('out.las', do_compress=True)

        While it should not confuse Las/Laz readers, it will
        confuse humans, so avoid doing it

    Parameters
    ----------
    destination: str or file object
        filename or stream to write to
    do_compress: bool, optional
        Flag to indicate if you want to compress the data
    """
    if isinstance(destination, str):
        # pass do_compress through so an explicit flag overrides the
        # extension-based guess, as the note above describes
        self.write_to_file(destination, do_compress=do_compress)
    else:
        if do_compress is None:
            do_compress = False
        self.write_to(destination, do_compress=do_compress)
Builds the dict mapping point format id to numpy.dtype. In the dtypes, bit fields are still packed and need to be unpacked each time you want to access them
def _build_point_formats_dtypes(point_format_dimensions, dimensions_dict):
    """ Builds the dict mapping point format id to numpy.dtype
    In the dtypes, bit fields are still packed, and need to be unpacked each time
    you want to access them
    """
    return {
        fmt_id: _point_format_to_dtype(point_fmt, dimensions_dict)
        for fmt_id, point_fmt in point_format_dimensions.items()
    }
Builds the dict mapping point format id to numpy.dtype. In the dtypes, bit fields are unpacked and can be accessed directly
def _build_unpacked_point_formats_dtypes(
    point_formats_dimensions, composed_fields_dict, dimensions_dict
):
    """ Builds the dict mapping point format id to numpy.dtype
    In the dtypes, bit fields are unpacked and can be accessed directly
    """
    unpacked_dtypes = {}
    for fmt_id, dim_names in point_formats_dimensions.items():
        composed_dims, dtype = composed_fields_dict[fmt_id], []
        for dim_name in dim_names:
            if dim_name in composed_dims:
                dtype.extend((f.name, f.type) for f in composed_dims[dim_name])
            else:
                dtype.append(dimensions_dict[dim_name])
        unpacked_dtypes[fmt_id] = np.dtype(dtype)
    return unpacked_dtypes
Tries to find a matching point format id for the input numpy dtype. To match, the input dtype has to be 100% equal to a point format dtype, so all names & dimension types must match
def np_dtype_to_point_format(dtype, unpacked=False):
    """ Tries to find a matching point format id for the input numpy dtype
    To match, the input dtype has to be 100% equal to a point format dtype
    so all names & dimension types must match

    Parameters
    ----------
    dtype : numpy.dtype
        The input dtype
    unpacked : bool, optional
        if True, compare against the unpacked dtypes, in which bit fields
        are accessible directly (the default is False)

    Raises
    ------
    errors.IncompatibleDataFormat
        If no compatible point format was found

    Returns
    -------
    int
        The compatible point format found
    """
    all_dtypes = (
        ALL_POINT_FORMATS_DTYPE if not unpacked else UNPACKED_POINT_FORMATS_DTYPES
    )
    for format_id, fmt_dtype in all_dtypes.items():
        if fmt_dtype == dtype:
            return format_id
    else:
        raise errors.IncompatibleDataFormat(
            "Data type of array is not compatible with any point format (array dtype: {})".format(
                dtype
            )
        )
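A usage sketch (assuming np_dtype_to_point_format is in scope; in pylas it lives in the dims module): the dtype of an existing point record should map back to that record's format id, while an unrelated dtype raises IncompatibleDataFormat:
import numpy as np
import pylas

las = pylas.read("pylastests/simple.las")
# Round trip: packed record dtype -> format id
assert np_dtype_to_point_format(las.points.dtype) == las.header.point_format_id

try:
    np_dtype_to_point_format(np.dtype(np.float32))
except pylas.errors.IncompatibleDataFormat as exc:
    print(exc)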
Returns the minimum file version that supports the given point_format_id
def min_file_version_for_point_format(point_format_id):
    """ Returns the minimum file version that supports the given point_format_id """
    for version, point_formats in sorted(VERSION_TO_POINT_FMT.items()):
        if point_format_id in point_formats:
            return version
    else:
        raise errors.PointFormatNotSupported(point_format_id)
Returns true if the file version supports the point_format_id
def is_point_fmt_compatible_with_version(point_format_id, file_version):
    """ Returns true if the file version supports the point_format_id """
    try:
        return point_format_id in VERSION_TO_POINT_FMT[str(file_version)]
    except KeyError:
        raise errors.FileVersionNotSupported(file_version)
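Together, these helpers support a version-selection pattern like the sketch below (pick_file_version is a hypothetical helper, mirroring what create_las and convert, further down, do):
def pick_file_version(point_format_id, requested_version=None):
    # Honour the requested version when it supports the point format,
    # otherwise fall back to the minimum version that does.
    if requested_version is not None and is_point_fmt_compatible_with_version(
        point_format_id, requested_version
    ):
        return requested_version
    return min_file_version_for_point_format(point_format_id)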
Function to get vlrs by user_id and/or record_ids. Always returns a list even if only one vlr matches the user_id and record_id
def get_by_id(self, user_id="", record_ids=(None,)):
    """ Function to get vlrs by user_id and/or record_ids.
    Always returns a list even if only one vlr matches the
    user_id and record_id

    >>> import pylas
    >>> from pylas.vlrs.known import ExtraBytesVlr, WktCoordinateSystemVlr
    >>> las = pylas.read("pylastests/extrabytes.las")
    >>> las.vlrs
    [<ExtraBytesVlr(extra bytes structs: 5)>]
    >>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())
    []
    >>> las.vlrs.get(WktCoordinateSystemVlr.official_user_id())[0]
    Traceback (most recent call last):
    IndexError: list index out of range
    >>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())
    [<ExtraBytesVlr(extra bytes structs: 5)>]
    >>> las.vlrs.get_by_id(ExtraBytesVlr.official_user_id())[0]
    <ExtraBytesVlr(extra bytes structs: 5)>

    Parameters
    ----------
    user_id: str, optional
        the user id
    record_ids: iterable of int, optional
        The record ids of the vlr(s) you wish to get

    Returns
    -------
    :py:class:`list`
        a list of vlrs matching the user_id and record_ids
    """
    if user_id != "" and record_ids != (None,):
        return [
            vlr
            for vlr in self.vlrs
            if vlr.user_id == user_id and vlr.record_id in record_ids
        ]
    else:
        return [
            vlr
            for vlr in self.vlrs
            if vlr.user_id == user_id or vlr.record_id in record_ids
        ]
Returns the list of vlrs of the requested type Always returns a list even if there is only one VLR of type vlr_type.
def get(self, vlr_type):
    """ Returns the list of vlrs of the requested type
    Always returns a list even if there is only one VLR of type vlr_type.

    >>> import pylas
    >>> las = pylas.read("pylastests/extrabytes.las")
    >>> las.vlrs
    [<ExtraBytesVlr(extra bytes structs: 5)>]
    >>> las.vlrs.get("WktCoordinateSystemVlr")
    []
    >>> las.vlrs.get("WktCoordinateSystemVlr")[0]
    Traceback (most recent call last):
    IndexError: list index out of range
    >>> las.vlrs.get('ExtraBytesVlr')
    [<ExtraBytesVlr(extra bytes structs: 5)>]
    >>> las.vlrs.get('ExtraBytesVlr')[0]
    <ExtraBytesVlr(extra bytes structs: 5)>

    Parameters
    ----------
    vlr_type: str
        the class name of the vlr

    Returns
    -------
    :py:class:`list`
        a list of vlrs of the requested type
    """
    return [v for v in self.vlrs if v.__class__.__name__ == vlr_type]
Returns the list of vlrs of the requested type The difference with get is that the returned vlrs will be removed from the list
def extract(self, vlr_type):
    """ Returns the list of vlrs of the requested type
    The difference with get is that the returned vlrs will be removed from the list

    Parameters
    ----------
    vlr_type: str
        the class name of the vlr

    Returns
    -------
    list
        a list of vlrs of the requested type
    """
    kept_vlrs, extracted_vlrs = [], []
    for vlr in self.vlrs:
        if vlr.__class__.__name__ == vlr_type:
            extracted_vlrs.append(vlr)
        else:
            kept_vlrs.append(vlr)
    self.vlrs = kept_vlrs
    return extracted_vlrs
Reads vlrs from the stream and parses them if possible
def read_from(cls, data_stream, num_to_read):
    """ Reads vlrs from the stream and parses them if possible

    Parameters
    ----------
    data_stream : io.BytesIO
        stream to read from
    num_to_read : int
        number of vlrs to be read

    Returns
    -------
    pylas.vlrs.vlrlist.VLRList
        List of vlrs
    """
    vlrlist = cls()
    for _ in range(num_to_read):
        raw = RawVLR.read_from(data_stream)
        try:
            vlrlist.append(vlr_factory(raw))
        except UnicodeDecodeError:
            logger.error("Failed to decode VLR: {}".format(raw))

    return vlrlist
Returns true if all the files have the same point format id
def files_have_same_point_format_id(las_files):
    """ Returns true if all the files have the same point format id """
    point_format_found = {las.header.point_format_id for las in las_files}
    return len(point_format_found) == 1
Returns true if all the files have the same numpy datatype
def files_have_same_dtype(las_files):
    """ Returns true if all the files have the same numpy datatype """
    dtypes = {las.points.dtype for las in las_files}
    return len(dtypes) == 1
Reads the first 4 bytes of the stream to check that it is LASF
def _raise_if_wrong_file_signature(stream):
    """ Reads the first 4 bytes of the stream to check that it is LASF """
    file_sig = stream.read(len(headers.LAS_FILE_SIGNATURE))
    if file_sig != headers.LAS_FILE_SIGNATURE:
        raise errors.PylasError(
            "File Signature ({}) is not {}".format(file_sig, headers.LAS_FILE_SIGNATURE)
        )
Reads the header of the las file and returns it
def read_header(self):
    """ Reads the header of the las file and returns it """
    self.stream.seek(self.start_pos)
    return headers.HeaderFactory().read_from_stream(self.stream)
Reads and returns the vlrs of the file
def read_vlrs(self):
    """ Reads and returns the vlrs of the file """
    self.stream.seek(self.start_pos + self.header.size)
    return VLRList.read_from(self.stream, num_to_read=self.header.number_of_vlr)
Reads the whole las data (header, vlrs, points, etc.) and returns a LasData object
def read(self):
    """ Reads the whole las data (header, vlrs, points, etc.) and returns
    a LasData object
    """
    vlrs = self.read_vlrs()
    self._warn_if_not_at_expected_pos(
        self.header.offset_to_point_data, "end of vlrs", "start of points"
    )
    self.stream.seek(self.start_pos + self.header.offset_to_point_data)
    try:
        points = self._read_points(vlrs)
    except (RuntimeError, errors.LazPerfNotFound) as e:
        logger.error("LazPerf failed to decompress ({}), trying laszip.".format(e))
        self.stream.seek(self.start_pos)
        self.__init__(io.BytesIO(laszip_decompress(self.stream)))
        return self.read()

    if points.point_format.has_waveform_packet:
        self.stream.seek(
            self.start_pos + self.header.start_of_waveform_data_packet_record
        )
        if self.header.global_encoding.are_waveform_flag_equal():
            raise errors.PylasError(
                "Incoherent values for internal and external waveform flags, both are {}".format(
                    "set" if self.header.global_encoding.waveform_internal else "unset"
                )
            )
        if self.header.global_encoding.waveform_internal:
            # TODO: Find out what to do with these
            _, _ = self._read_internal_waveform_packet()
        elif self.header.global_encoding.waveform_external:
            logger.info(
                "Waveform data is in an external file, you'll have to load it yourself"
            )

    if self.header.version >= "1.4":
        evlrs = self.read_evlrs()
        return las14.LasData(
            header=self.header, vlrs=vlrs, points=points, evlrs=evlrs
        )

    return las12.LasData(header=self.header, vlrs=vlrs, points=points)
private function to handle reading of the points record parts of the las file.
def _read_points(self, vlrs):
    """ private function to handle reading of the points record parts
    of the las file.

    the header is needed for the point format and number of points;
    the vlrs are needed to get the potential laszip vlr as well
    as the extra bytes vlr
    """
    try:
        extra_dims = vlrs.get("ExtraBytesVlr")[0].type_of_extra_dims()
    except IndexError:
        extra_dims = None
    point_format = PointFormat(self.header.point_format_id, extra_dims=extra_dims)
    if self.header.are_points_compressed:
        laszip_vlr = vlrs.pop(vlrs.index("LasZipVlr"))
        points = self._read_compressed_points_data(laszip_vlr, point_format)
    else:
        points = record.PackedPointRecord.from_stream(
            self.stream, point_format, self.header.point_count
        )
    return points
reads the compressed point record
def _read_compressed_points_data(self, laszip_vlr, point_format):
    """ reads the compressed point record """
    offset_to_chunk_table = struct.unpack("<q", self.stream.read(8))[0]
    size_of_point_data = offset_to_chunk_table - self.stream.tell()
    if offset_to_chunk_table <= 0:
        logger.warning(
            "Strange offset to chunk table: {}, ignoring it..".format(
                offset_to_chunk_table
            )
        )
        size_of_point_data = -1  # Read everything
    points = record.PackedPointRecord.from_compressed_buffer(
        self.stream.read(size_of_point_data),
        point_format,
        self.header.point_count,
        laszip_vlr,
    )
    return points
reads and returns the waveform vlr header and waveform record
def _read_internal_waveform_packet(self):
    """ reads and returns the waveform vlr header, waveform record """
    # This is strange: the spec says the waveform data packet is in an EVLR,
    # but in the 2 samples I have it's a VLR.
    # Also, the 2 samples have a wrong user_id (LAS_Spec instead of LASF_Spec)
    b = bytearray(self.stream.read(rawvlr.VLR_HEADER_SIZE))
    waveform_header = rawvlr.RawVLRHeader.from_buffer(b)
    waveform_record = self.stream.read()
    logger.debug(
        "Read: {} MBytes of waveform_record".format(len(waveform_record) / 10 ** 6)
    )
    return waveform_header, waveform_record
Reads the EVLRs of the file; will fail if the file version does not support evlrs
def read_evlrs(self):
    """ Reads the EVLRs of the file, will fail if the file version
    does not support evlrs
    """
    self.stream.seek(self.start_pos + self.header.start_of_first_evlr)
    return evlrs.EVLRList.read_from(self.stream, self.header.number_of_evlr)
Helper function to warn about unknown bytes found in the file
def _warn_if_not_at_expected_pos(self, expected_pos, end_of, start_of):
    """ Helper function to warn about unknown bytes found in the file """
    diff = expected_pos - self.stream.tell()
    if diff != 0:
        logger.warning(
            "There are {} bytes between {} and {}".format(diff, end_of, start_of)
        )
Given a raw_vlr, tries to find its corresponding KnownVLR class that can parse its data. If no KnownVLR implementation is found, returns a VLR (record_data will still be bytes)
def vlr_factory(raw_vlr):
    """ Given a raw_vlr tries to find its corresponding KnownVLR class
    that can parse its data.
    If no KnownVLR implementation is found, returns a VLR
    (record_data will still be bytes)
    """
    user_id = raw_vlr.header.user_id.rstrip(NULL_BYTE).decode()
    known_vlrs = BaseKnownVLR.__subclasses__()
    for known_vlr in known_vlrs:
        if (
            known_vlr.official_user_id() == user_id
            and raw_vlr.header.record_id in known_vlr.official_record_ids()
        ):
            return known_vlr.from_raw(raw_vlr)
    else:
        return VLR.from_raw(raw_vlr)
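Since the lookup goes through BaseKnownVLR.__subclasses__(), adding support for a new VLR type is a matter of subclassing. A hedged sketch: the user id, record id, and parsing below are invented for illustration, and the exact abstract methods BaseKnownVLR requires may differ:
class AcmeScannerVlr(BaseKnownVLR):
    # Hypothetical vendor VLR carrying raw scanner metadata.
    @staticmethod
    def official_user_id():
        return "AcmeScanner"

    @staticmethod
    def official_record_ids():
        return (7000,)

    @classmethod
    def from_raw(cls, raw_vlr):
        vlr = cls()
        vlr.metadata = bytes(raw_vlr.record_data)  # keep bytes; parse as needed
        return vlr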
Opens and reads the header of the las content in the source
def open_las(source, closefd=True):
    """ Opens and reads the header of the las content in the source

    >>> with open_las('pylastests/simple.las') as f:
    ...     print(f.header.point_format_id)
    3

    >>> f = open('pylastests/simple.las', mode='rb')
    >>> with open_las(f, closefd=False) as flas:
    ...     print(flas.header)
    <LasHeader(1.2)>
    >>> f.closed
    False

    >>> f = open('pylastests/simple.las', mode='rb')
    >>> with open_las(f) as flas:
    ...     las = flas.read()
    >>> f.closed
    True

    Parameters
    ----------
    source : str or io.BytesIO
        if source is a str it must be a filename;
        if source is a file object it must implement the read, seek and tell methods
    closefd: bool
        Whether the stream/file object shall be closed; this only works
        when using open_las in a with statement. An exception is raised if
        closefd is set to False and the source is a filename

    Returns
    -------
    pylas.lasreader.LasReader
    """
    if isinstance(source, str):
        stream = open(source, mode="rb")
        if not closefd:
            raise ValueError("Cannot use closefd with filename")
    elif isinstance(source, bytes):
        stream = io.BytesIO(source)
    else:
        stream = source
    return LasReader(stream, closefd=closefd)
Entry point for reading las data in pylas
def read_las(source, closefd=True):
    """ Entry point for reading las data in pylas
    Reads the whole file into memory.

    >>> las = read_las("pylastests/simple.las")
    >>> las.classification
    array([1, 1, 1, ..., 1, 1, 1], dtype=uint8)

    Parameters
    ----------
    source : str or io.BytesIO
        The source to read data from
    closefd: bool
        if True and the source is a stream, the function will close it
        after it is done reading

    Returns
    -------
    pylas.lasdatas.base.LasBase
        The object you can interact with to get access to the LAS points & VLRs
    """
    with open_las(source, closefd=closefd) as reader:
        return reader.read()
Creates a File from an existing header, allocating the array of points according to the provided header. The input header is copied.
def create_from_header(header):
    """ Creates a File from an existing header, allocating the array of points
    according to the provided header. The input header is copied.

    Parameters
    ----------
    header : existing header to be used to create the file

    Returns
    -------
    pylas.lasdatas.base.LasBase
    """
    header = copy.copy(header)
    header.point_count = 0
    points = record.PackedPointRecord.empty(PointFormat(header.point_format_id))
    if header.version >= "1.4":
        return las14.LasData(header=header, points=points)
    return las12.LasData(header=header, points=points)
Function to create a new empty las data object
def create_las(*, point_format_id=0, file_version=None):
    """ Function to create a new empty las data object

    .. note::

        If you provide both point_format and file_version
        an exception will be raised if they are not compatible

    >>> las = create_las(point_format_id=6,file_version="1.2")
    Traceback (most recent call last):
     ...
    pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2

    If you provide only the point_format the file_version will be
    automatically selected for you.

    >>> las = create_las(point_format_id=0)
    >>> las.header.version == '1.2'
    True

    >>> las = create_las(point_format_id=6)
    >>> las.header.version == '1.4'
    True

    Parameters
    ----------
    point_format_id: int
        The point format you want the created file to have
    file_version: str, optional, default=None
        The las version you want the created las to have

    Returns
    -------
    pylas.lasdatas.base.LasBase
        A new las data object
    """
    if file_version is not None:
        dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)
    else:
        file_version = dims.min_file_version_for_point_format(point_format_id)

    header = headers.HeaderFactory.new(file_version)
    header.point_format_id = point_format_id

    if file_version >= "1.4":
        return las14.LasData(header=header)
    return las12.LasData(header=header)
Converts a Las from one point format to another. Automatically upgrades the file version if the source file version is not compatible with the new point_format_id
def convert(source_las, *, point_format_id=None, file_version=None):
    """ Converts a Las from one point format to another
    Automatically upgrades the file version if source file version is not
    compatible with the new point_format_id

    convert to point format 0

    >>> las = read_las('pylastests/simple.las')
    >>> las.header.version
    '1.2'
    >>> las = convert(las, point_format_id=0)
    >>> las.header.point_format_id
    0
    >>> las.header.version
    '1.2'

    convert to point format 6, which needs version >= 1.4,
    then convert back to point format 0; version is not downgraded

    >>> las = read_las('pylastests/simple.las')
    >>> las.header.version
    '1.2'
    >>> las = convert(las, point_format_id=6)
    >>> las.header.point_format_id
    6
    >>> las.header.version
    '1.4'
    >>> las = convert(las, point_format_id=0)
    >>> las.header.version
    '1.4'

    an exception is raised if the requested point format is not compatible
    with the file version

    >>> las = read_las('pylastests/simple.las')
    >>> convert(las, point_format_id=6, file_version='1.2')
    Traceback (most recent call last):
     ...
    pylas.errors.PylasError: Point format 6 is not compatible with file version 1.2

    Parameters
    ----------
    source_las : pylas.lasdatas.base.LasBase
        The source data to be converted
    point_format_id : int, optional
        The new point format id (the default is None, which won't change the source format id)
    file_version : str, optional
        The new file version. None by default which means that the file_version
        may be upgraded for compatibility with the new point_format.
        The file version will not be downgraded.

    Returns
    -------
    pylas.lasdatas.base.LasBase
    """
    if point_format_id is None:
        point_format_id = source_las.points_data.point_format.id
    if file_version is None:
        file_version = max(
            source_las.header.version,
            dims.min_file_version_for_point_format(point_format_id),
        )
    else:
        file_version = str(file_version)
        dims.raise_if_version_not_compatible_with_fmt(point_format_id, file_version)

    header = headers.HeaderFactory.convert_header(source_las.header, file_version)
    header.point_format_id = point_format_id

    point_format = PointFormat(
        point_format_id, source_las.points_data.point_format.extra_dims
    )
    points = record.PackedPointRecord.from_point_record(
        source_las.points_data, point_format
    )

    try:
        evlrs = source_las.evlrs
    except ValueError:
        evlrs = []

    if file_version >= "1.4":
        las = las14.LasData(
            header=header, vlrs=source_las.vlrs, points=points, evlrs=evlrs
        )
    else:
        if evlrs:
            logger.warning(
                "The source contained {} EVLRs,"
                " they will be lost as version {} does not support them".format(
                    len(evlrs), file_version
                )
            )
        las = las12.LasData(header=header, vlrs=source_las.vlrs, points=points)
    return las
Merges multiple las files into one
def merge_las(*las_files):
    """ Merges multiple las files into one

    merged = merge_las(las_1, las_2)
    merged = merge_las([las_1, las_2, las_3])

    Parameters
    ----------
    las_files: Iterable of LasData or LasData

    Returns
    -------
    pylas.lasdatas.base.LasBase
        The result of the merging
    """
    if len(las_files) == 1:
        las_files = las_files[0]

    if not las_files:
        raise ValueError("No files to merge")

    if not utils.files_have_same_dtype(las_files):
        raise ValueError("All files must have the same point format")

    header = las_files[0].header
    num_pts_merged = sum(len(las.points) for las in las_files)

    # scaled x, y, z have to be set manually
    # to be sure to have a good offset in the header
    merged = create_from_header(header)
    # TODO: extra dimensions should be managed better here
    for dim_name, dim_type in las_files[0].points_data.point_format.extra_dims:
        merged.add_extra_dim(dim_name, dim_type)

    merged.points = np.zeros(num_pts_merged, merged.points.dtype)

    merged_x = np.zeros(num_pts_merged, np.float64)
    merged_y = np.zeros(num_pts_merged, np.float64)
    merged_z = np.zeros(num_pts_merged, np.float64)

    offset = 0
    for i, las in enumerate(las_files, start=1):
        slc = slice(offset, offset + len(las.points))
        merged.points[slc] = las.points
        merged_x[slc] = las.x
        merged_y[slc] = las.y
        merged_z[slc] = las.z
        merged['point_source_id'][slc] = i
        offset += len(las.points)

    merged.x = merged_x
    merged.y = merged_y
    merged.z = merged_z

    return merged
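A usage sketch; the input file names are hypothetical:
import pylas

las_1 = pylas.read("tile_1.las")  # hypothetical inputs
las_2 = pylas.read("tile_2.las")
merged = merge_las(las_1, las_2)
# point_source_id now records which input each point came from (1, 2, ...)
merged.write("merged.las")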
writes the given las into memory using BytesIO and reads it again, returning the newly read file.
def write_then_read_again(las, do_compress=False):
    """ writes the given las into memory using BytesIO and
    reads it again, returning the newly read file.

    Mostly used for testing purposes, without having to write to disk
    """
    out = io.BytesIO()
    las.write(out, do_compress=do_compress)
    out.seek(0)
    return read_las(out)
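Typical test usage, as a sketch:
import numpy as np
import pylas

las = pylas.read("pylastests/simple.las")
las_again = write_then_read_again(las, do_compress=False)
# The in-memory round trip should preserve the scaled coordinates.
assert np.allclose(las.x, las_again.x)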
Returns the creation date stored in the las file
def date(self):
    """ Returns the creation date stored in the las file

    Returns
    -------
    datetime.date
    """
    try:
        return datetime.date(self.creation_year, 1, 1) + datetime.timedelta(
            self.creation_day_of_year - 1
        )
    except ValueError:
        return None
Sets the date of file creation from a python date object
def date(self, date):
    """ Sets the date of file creation from a python date object """
    self.creation_year = date.year
    self.creation_day_of_year = date.timetuple().tm_yday
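The two properties round-trip through creation_year and creation_day_of_year. A sketch, assuming these properties live on the header classes that HeaderFactory (documented below) produces:
import datetime

header = HeaderFactory.new("1.2")
header.date = datetime.date(2018, 3, 1)
assert header.creation_year == 2018
assert header.date == datetime.date(2018, 3, 1)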
Returns the minimum values of x, y, z as a numpy array
def mins(self):
    """ Returns the minimum values of x, y, z as a numpy array """
    return np.array([self.x_min, self.y_min, self.z_min])
Sets the minimum values of x, y, z from a numpy array
def mins(self, value):
    """ Sets the minimum values of x, y, z from a numpy array """
    self.x_min, self.y_min, self.z_min = value
Returns the maximum values of x, y, z as a numpy array
def maxs(self):
    """ Returns the maximum values of x, y, z as a numpy array """
    return np.array([self.x_max, self.y_max, self.z_max])
Sets the maximum values of x, y, z from a numpy array
def maxs(self, value):
    """ Sets the maximum values of x, y, z from a numpy array """
    self.x_max, self.y_max, self.z_max = value
Returns the scaling values of x, y, z as a numpy array
def scales(self):
    """ Returns the scaling values of x, y, z as a numpy array """
    return np.array([self.x_scale, self.y_scale, self.z_scale])
Returns the offset values of x, y, z as a numpy array
def offsets(self):
    """ Returns the offset values of x, y, z as a numpy array """
    return np.array([self.x_offset, self.y_offset, self.z_offset])
Returns the header class that corresponds to the given las file version
def header_class_for_version(cls, version):
    """ Returns the header class that corresponds to the given las file version

    >>> HeaderFactory.header_class_for_version(2.0)
    Traceback (most recent call last):
     ...
    pylas.errors.FileVersionNotSupported: 2.0

    >>> HeaderFactory.header_class_for_version(1.2)
    <class 'pylas.headers.rawheader.RawHeader1_2'>

    >>> header_class = HeaderFactory.header_class_for_version(1.4)
    >>> header_class()
    <LasHeader(1.4)>
    """
    try:
        return cls._version_to_header[str(version)]
    except KeyError:
        raise errors.FileVersionNotSupported(version)
seeks to the position of the las version header fields in the stream and returns the file version as a str
def peek_file_version(cls, stream):
    """ seeks to the position of the las version header fields
    in the stream and returns the file version as a str

    Parameters
    ----------
    stream : io.BytesIO

    Returns
    -------
    str
        file version read from the stream
    """
    old_pos = stream.tell()
    stream.seek(cls._offset_to_major_version)
    major = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
    minor = int.from_bytes(stream.read(ctypes.sizeof(ctypes.c_uint8)), "little")
    stream.seek(old_pos)
    return "{}.{}".format(major, minor)
Converts a header to another version
def convert_header(cls, old_header, new_version):
    """ Converts a header to another version

    Parameters
    ----------
    old_header: the old header instance
    new_version: float or str

    Returns
    -------
    The converted header

    >>> old_header = HeaderFactory.new(1.2)
    >>> HeaderFactory.convert_header(old_header, 1.4)
    <LasHeader(1.4)>

    >>> old_header = HeaderFactory.new('1.4')
    >>> HeaderFactory.convert_header(old_header, '1.2')
    <LasHeader(1.2)>
    """
    new_header_class = cls.header_class_for_version(new_version)

    b = bytearray(old_header)
    b += b"\x00" * (ctypes.sizeof(new_header_class) - len(b))
    new_header = new_header_class.from_buffer(b)
    new_header.version = str(new_version)

    return new_header
Unpack sub field using its mask
def unpack(source_array, mask, dtype=np.uint8):
    """ Unpack sub field using its mask

    Parameters
    ----------
    source_array : numpy.ndarray
        The source array
    mask : mask (e.g. 0b00001111)
        Mask of the sub field to be extracted from the source array
    dtype : numpy dtype, optional
        dtype of the returned array (the default is np.uint8)

    Returns
    -------
    numpy.ndarray
        The sub field array
    """
    lsb = least_significant_bit(mask)
    return ((source_array & mask) >> lsb).astype(dtype)
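For example, in the packed bits byte of point formats 0-5 the return number occupies the three low bits and the number of returns the next three; a sketch using unpack as defined above, with a made-up packed array:
import numpy as np

packed = np.array([0b00001001, 0b00010010], dtype=np.uint8)
return_number = unpack(packed, 0b00000111)       # -> array([1, 2], dtype=uint8)
number_of_returns = unpack(packed, 0b00111000)   # -> array([1, 2], dtype=uint8)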