Columns: INSTRUCTION (string, lengths 1 to 8.43k) and RESPONSE (string, lengths 75 to 104k).
Convert a multi-value header to a case-insensitive dict.
def getdict(self, key): """Convert a multi values header to a case-insensitive dict: .. code-block:: python >>> resp = Message({ ... 'Response': 'Success', ... 'ChanVariable': [ ... 'FROM_DID=', 'SIPURI=sip:42@10.10.10.1:4242'], ... }) >>> print(resp.chanvariable) ['FROM_DID=', 'SIPURI=sip:42@10.10.10.1:4242'] >>> value = resp.getdict('chanvariable') >>> print(value['sipuri']) sip:42@10.10.10.1:4242 """ values = self.get(key, None) if not isinstance(values, list): raise TypeError("{0} must be a list. got {1}".format(key, values)) result = utils.CaseInsensitiveDict() for item in values: k, v = item.split('=', 1) result[k] = v return result
Run a setup script in a somewhat controlled environment and return the Distribution instance that drives things. This is useful if you need to find out the distribution meta-data (passed as keyword args from 'script' to 'setup()'), or the contents of the config files or command line.
def run_setup(script_name, script_args=None, stop_after="run"): """Run a setup script in a somewhat controlled environment, and return the Distribution instance that drives things. This is useful if you need to find out the distribution meta-data (passed as keyword args from 'script' to 'setup()', or the contents of the config files or command-line. 'script_name' is a file that will be run with 'execfile()'; 'sys.argv[0]' will be replaced with 'script' for the duration of the call. 'script_args' is a list of strings; if supplied, 'sys.argv[1:]' will be replaced by 'script_args' for the duration of the call. 'stop_after' tells 'setup()' when to stop processing; possible values: init stop after the Distribution instance has been created and populated with the keyword arguments to 'setup()' config stop after config files have been parsed (and their data stored in the Distribution instance) commandline stop after the command-line ('sys.argv[1:]' or 'script_args') have been parsed (and the data stored in the Distribution) run [default] stop after all commands have been run (the same as if 'setup()' had been called in the usual way Returns the Distribution instance, which provides all information used to drive the Distutils. """ if stop_after not in ('init', 'config', 'commandline', 'run'): raise ValueError("invalid value for 'stop_after': %r" % stop_after) core._setup_stop_after = stop_after save_argv = sys.argv glocals = copy(globals()) glocals['__file__'] = script_name glocals['__name__'] = "__main__" try: try: sys.argv[0] = script_name if script_args is not None: sys.argv[1:] = script_args f = open(script_name) try: exec(f.read(), glocals, glocals) finally: f.close() finally: sys.argv = save_argv core._setup_stop_after = None except Exception: logging.warn("Exception when running setup.", exc_info=True) if core._setup_distribution is None: raise RuntimeError( "'distutils.core.setup()' was never called -- " "perhaps '%s' is not a Distutils setup script?" % script_name) # I wonder if the setup script's namespace -- g and l -- would be of # any interest to callers? return core._setup_distribution
Returns data from a package directory. 'path' should be an absolute path.
def get_data(path): """ Returns data from a package directory. 'path' should be an absolute path. """ # Run the imported setup to get the metadata. with FakeContext(path): with SetupMonkey() as sm: try: distro = run_setup('setup.py', stop_after='config') metadata = {'_setuptools': sm.used_setuptools} for k, v in distro.metadata.__dict__.items(): if k[0] == '_' or not v: continue if all(not x for x in v): continue metadata[k] = v if sm.used_setuptools: for extras in ['cmdclass', 'zip_safe', 'test_suite']: v = getattr(distro, extras, None) if v is not None and v not in ([], {}): metadata[extras] = v except ImportError as e: # Either there is no setup py, or it's broken. logging.exception(e) metadata = {} return metadata
Get primary key properties for a SQLAlchemy model.
def get_primary_keys(model): """Get primary key properties for a SQLAlchemy model. :param model: SQLAlchemy model class """ mapper = model.__mapper__ return [mapper.get_property_by_column(column) for column in mapper.primary_key]
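As a quick illustration, a minimal sketch assuming a hypothetical declarative User model (not part of the original source):

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base  # older SQLAlchemy: from sqlalchemy.ext.declarative

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

# get_primary_keys returns the mapper properties backing the primary-key columns.
print([prop.key for prop in get_primary_keys(User)])  # ['id']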
Deserialize a serialized value to a model instance.
def _deserialize(self, value, *args, **kwargs): """Deserialize a serialized value to a model instance. If the parent schema is transient, create a new (transient) instance. Otherwise, attempt to find an existing instance in the database. :param value: The value to deserialize. """ if not isinstance(value, dict): if len(self.related_keys) != 1: self.fail( "invalid", value=value, keys=[prop.key for prop in self.related_keys], ) value = {self.related_keys[0].key: value} if self.transient: return self.related_model(**value) try: result = self._get_existing_instance( self.session.query(self.related_model), value ) except NoResultFound: # The related-object DNE in the DB, but we still want to deserialize it # ...perhaps we want to add it to the DB later return self.related_model(**value) return result
Retrieve the related object from an existing instance in the DB.
def _get_existing_instance(self, query, value): """Retrieve the related object from an existing instance in the DB. :param query: A SQLAlchemy `Query <sqlalchemy.orm.query.Query>` object. :param value: The serialized value to mapto an existing instance. :raises NoResultFound: if there is no matching record. """ if self.columns: result = query.filter_by( **{prop.key: value.get(prop.key) for prop in self.related_keys} ).one() else: # Use a faster path if the related key is the primary key. result = query.get([value.get(prop.key) for prop in self.related_keys]) if result is None: raise NoResultFound return result
Add keyword arguments to kwargs (in-place) based on the passed-in Column <sqlalchemy.schema.Column>.
def _add_column_kwargs(self, kwargs, column): """Add keyword arguments to kwargs (in-place) based on the passed in `Column <sqlalchemy.schema.Column>`. """ if column.nullable: kwargs["allow_none"] = True kwargs["required"] = not column.nullable and not _has_default(column) if hasattr(column.type, "enums"): kwargs["validate"].append(validate.OneOf(choices=column.type.enums)) # Add a length validator if a max length is set on the column # Skip UUID columns # (see https://github.com/marshmallow-code/marshmallow-sqlalchemy/issues/54) if hasattr(column.type, "length"): try: python_type = column.type.python_type except (AttributeError, NotImplementedError): python_type = None if not python_type or not issubclass(python_type, uuid.UUID): kwargs["validate"].append(validate.Length(max=column.type.length)) if hasattr(column.type, "scale"): kwargs["places"] = getattr(column.type, "scale", None)
Add keyword arguments to kwargs (in-place) based on the passed-in relationship Property.
def _add_relationship_kwargs(self, kwargs, prop): """Add keyword arguments to kwargs (in-place) based on the passed in relationship `Property`. """ nullable = True for pair in prop.local_remote_pairs: if not pair[0].nullable: if prop.uselist is True: nullable = False break kwargs.update({"allow_none": nullable, "required": not nullable})
Updates declared fields with fields converted from the SQLAlchemy model passed as the model class Meta option.
def get_declared_fields(mcs, klass, cls_fields, inherited_fields, dict_cls): """Updates declared fields with fields converted from the SQLAlchemy model passed as the `model` class Meta option. """ opts = klass.opts Converter = opts.model_converter converter = Converter(schema_cls=klass) declared_fields = super(SchemaMeta, mcs).get_declared_fields( klass, cls_fields, inherited_fields, dict_cls ) fields = mcs.get_fields(converter, opts, declared_fields, dict_cls) fields.update(declared_fields) return fields
Retrieve an existing record by primary key(s). If the schema instance is transient, return None.
def get_instance(self, data): """Retrieve an existing record by primary key(s). If the schema instance is transient, return None. :param data: Serialized data to inform lookup. """ if self.transient: return None props = get_primary_keys(self.opts.model) filters = {prop.key: data.get(prop.key) for prop in props} if None not in filters.values(): return self.session.query(self.opts.model).filter_by(**filters).first() return None
Deserialize data to an instance of the model. Update an existing row if specified in self.instance or loaded by primary key(s) in the data; else create a new row.
def make_instance(self, data): """Deserialize data to an instance of the model. Update an existing row if specified in `self.instance` or loaded by primary key(s) in the data; else create a new row. :param data: Data to deserialize. """ instance = self.instance or self.get_instance(data) if instance is not None: for key, value in iteritems(data): setattr(instance, key, value) return instance kwargs, association_attrs = self._split_model_kwargs_association(data) instance = self.opts.model(**kwargs) for attr, value in iteritems(association_attrs): setattr(instance, attr, value) return instance
Deserialize data to internal representation.
def load(self, data, session=None, instance=None, transient=False, *args, **kwargs): """Deserialize data to internal representation. :param session: Optional SQLAlchemy session. :param instance: Optional existing instance to modify. :param transient: Optional switch to allow transient instantiation. """ self._session = session or self._session self._transient = transient or self._transient if not (self.transient or self.session): raise ValueError("Deserialization requires a session") self.instance = instance or self.instance try: return super(ModelSchema, self).load(data, *args, **kwargs) finally: self.instance = None
Split serialized attrs to ensure association proxies are passed separately.
def _split_model_kwargs_association(self, data): """Split serialized attrs to ensure association proxies are passed separately. This is necessary for Python < 3.6.0, as the order in which kwargs are passed is non-deterministic, and associations must be parsed by sqlalchemy after their intermediate relationship, unless their `creator` has been set. Ignore invalid keys at this point - behaviour for unknowns should be handled elsewhere. :param data: serialized dictionary of attrs to split on association_proxy. """ association_attrs = { key: value for key, value in iteritems(data) # association proxy if hasattr(getattr(self.opts.model, key, None), "remote_attr") } kwargs = { key: value for key, value in iteritems(data) if (hasattr(self.opts.model, key) and key not in association_attrs) } return kwargs, association_attrs
Deletes old stellar tables that are not used anymore
def gc(): """Deletes old stellar tables that are not used anymore""" def after_delete(database): click.echo("Deleted table %s" % database) app = get_app() upgrade_from_old_version(app) app.delete_orphan_snapshots(after_delete)
Takes a snapshot of the database
def snapshot(name): """Takes a snapshot of the database""" app = get_app() upgrade_from_old_version(app) name = name or app.default_snapshot_name if app.get_snapshot(name): click.echo("Snapshot with name %s already exists" % name) sys.exit(1) else: def before_copy(table_name): click.echo("Snapshotting database %s" % table_name) app.create_snapshot(name, before_copy=before_copy)
Returns a list of snapshots
def list(): """Returns a list of snapshots""" snapshots = get_app().get_snapshots() click.echo('\n'.join( '%s: %s' % ( s.snapshot_name, humanize.naturaltime(datetime.utcnow() - s.created_at) ) for s in snapshots ))
Restores the database from a snapshot
def restore(name): """Restores the database from a snapshot""" app = get_app() if not name: snapshot = app.get_latest_snapshot() if not snapshot: click.echo( "Couldn't find any snapshots for project %s" % load_config()['project_name'] ) sys.exit(1) else: snapshot = app.get_snapshot(name) if not snapshot: click.echo( "Couldn't find snapshot with name %s.\n" "You can list snapshots with 'stellar list'" % name ) sys.exit(1) # Check if slaves are ready if not snapshot.slaves_ready: if app.is_copy_process_running(snapshot): sys.stdout.write( 'Waiting for background process(%s) to finish' % snapshot.worker_pid ) sys.stdout.flush() while not snapshot.slaves_ready: sys.stdout.write('.') sys.stdout.flush() sleep(1) app.db.session.refresh(snapshot) click.echo('') else: click.echo('Background process missing, doing slow restore.') app.inline_slave_copy(snapshot) app.restore(snapshot) click.echo('Restore complete.')
Removes a snapshot
def remove(name): """Removes a snapshot""" app = get_app() snapshot = app.get_snapshot(name) if not snapshot: click.echo("Couldn't find snapshot %s" % name) sys.exit(1) click.echo("Deleting snapshot %s" % name) app.remove_snapshot(snapshot) click.echo("Deleted")
Renames a snapshot
def rename(old_name, new_name): """Renames a snapshot""" app = get_app() snapshot = app.get_snapshot(old_name) if not snapshot: click.echo("Couldn't find snapshot %s" % old_name) sys.exit(1) new_snapshot = app.get_snapshot(new_name) if new_snapshot: click.echo("Snapshot with name %s already exists" % new_name) sys.exit(1) app.rename_snapshot(snapshot, new_name) click.echo("Renamed snapshot %s to %s" % (old_name, new_name))
Replaces a snapshot
def replace(name): """Replaces a snapshot""" app = get_app() snapshot = app.get_snapshot(name) if not snapshot: click.echo("Couldn't find snapshot %s" % name) sys.exit(1) app.remove_snapshot(snapshot) app.create_snapshot(name) click.echo("Replaced snapshot %s" % name)
Initializes Stellar configuration.
def init(): """Initializes Stellar configuration.""" while True: url = click.prompt( "Please enter the url for your database.\n\n" "For example:\n" "PostgreSQL: postgresql://localhost:5432/\n" "MySQL: mysql+pymysql://root@localhost/" ) if url.count('/') == 2 and not url.endswith('/'): url = url + '/' if ( url.count('/') == 3 and url.endswith('/') and url.startswith('postgresql://') ): connection_url = url + 'template1' else: connection_url = url engine = create_engine(connection_url, echo=False) try: conn = engine.connect() except OperationalError as err: click.echo("Could not connect to database: %s" % url) click.echo("Error message: %s" % err.message) click.echo('') else: break if engine.dialect.name not in SUPPORTED_DIALECTS: click.echo("Your engine dialect %s is not supported." % ( engine.dialect.name )) click.echo("Supported dialects: %s" % ( ', '.join(SUPPORTED_DIALECTS) )) if url.count('/') == 3 and url.endswith('/'): while True: click.echo("You have the following databases: %s" % ', '.join([ db for db in list_of_databases(conn) if not db.startswith('stellar_') ])) db_name = click.prompt( "Please enter the name of the database (eg. projectdb)" ) if database_exists(conn, db_name): break else: click.echo("Could not find database %s" % db_name) click.echo('') else: db_name = url.rsplit('/', 1)[-1] url = url.rsplit('/', 1)[0] + '/' name = click.prompt( 'Please enter your project name (used internally, eg. %s)' % db_name, default=db_name ) raw_url = url if engine.dialect.name == 'postgresql': raw_url = raw_url + 'template1' with open('stellar.yaml', 'w') as project_file: project_file.write( """ project_name: '%(name)s' tracked_databases: ['%(db_name)s'] url: '%(raw_url)s' stellar_url: '%(url)sstellar_data' """.strip() % { 'name': name, 'raw_url': raw_url, 'url': url, 'db_name': db_name } ) click.echo("Wrote stellar.yaml") click.echo('') if engine.dialect.name == 'mysql': click.echo("Warning: MySQL support is still in beta.") click.echo("Tip: You probably want to take a snapshot: stellar snapshot")
Updates indexes after each epoch for shuffling
def on_epoch_end(self) -> None: 'Updates indexes after each epoch for shuffling' self.indexes = np.arange(self.nrows) if self.shuffle: np.random.shuffle(self.indexes)
Defines the default function for cleaning text.
def textacy_cleaner(text: str) -> str: """ Defines the default function for cleaning text. This function operates over a list. """ return preprocess_text(text, fix_unicode=True, lowercase=True, transliterate=True, no_urls=True, no_emails=True, no_phone_numbers=True, no_numbers=True, no_currency_symbols=True, no_punct=True, no_contractions=False, no_accents=True)
Apply function to list of elements.
def apply_parallel(func: Callable, data: List[Any], cpu_cores: int = None) -> List[Any]: """ Apply function to list of elements. Automatically determines the chunk size. """ if not cpu_cores: cpu_cores = cpu_count() try: chunk_size = ceil(len(data) / cpu_cores) pool = Pool(cpu_cores) transformed_data = pool.map(func, chunked(data, chunk_size), chunksize=1) finally: pool.close() pool.join() return transformed_data
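A usage sketch with a made-up helper; note that the mapped function receives a whole chunk (a list), not a single element, so results usually need flattening:

# Illustrative only; upper_chunk is a stand-in for a real per-chunk function.
def upper_chunk(chunk):
    return [s.upper() for s in chunk]

if __name__ == "__main__":  # multiprocessing needs an importable entry point on some platforms
    docs = ["the quick brown fox", "jumps over the lazy dog"]
    per_chunk = apply_parallel(upper_chunk, docs, cpu_cores=2)
    flat = [item for chunk in per_chunk for item in chunk]
    print(flat)  # ['THE QUICK BROWN FOX', 'JUMPS OVER THE LAZY DOG']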
Generate a function that will clean and tokenize text.
def process_text_constructor(cleaner: Callable, tokenizer: Callable, append_indicators: bool, start_tok: str, end_tok: str): """Generate a function that will clean and tokenize text.""" def process_text(text): if append_indicators: return [[start_tok] + tokenizer(cleaner(doc)) + [end_tok] for doc in text] return [tokenizer(cleaner(doc)) for doc in text] return process_text
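For example, a minimal sketch wiring the constructor up with stand-in callables (str.lower and str.split are placeholders for a real cleaner and tokenizer):

clean_and_tokenize = process_text_constructor(
    cleaner=str.lower,
    tokenizer=str.split,
    append_indicators=True,
    start_tok="_start_",
    end_tok="_end_",
)
print(clean_and_tokenize(["Hello World"]))
# [['_start_', 'hello', 'world', '_end_']]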
Combine the cleaner and tokenizer.
def process_text(self, text: List[str]) -> List[List[str]]: """Combine the cleaner and tokenizer.""" process_text = process_text_constructor(cleaner=self.cleaner, tokenizer=self.tokenizer, append_indicators=self.append_indicators, start_tok=self.start_tok, end_tok=self.end_tok) return process_text(text)
Apply cleaner -> tokenizer.
def parallel_process_text(self, data: List[str]) -> List[List[str]]: """Apply cleaner -> tokenizer.""" process_text = process_text_constructor(cleaner=self.cleaner, tokenizer=self.tokenizer, append_indicators=self.append_indicators, start_tok=self.start_tok, end_tok=self.end_tok) n_cores = self.num_cores return flattenlist(apply_parallel(process_text, data, n_cores))
Analyze document length statistics for padding strategy
def generate_doc_length_stats(self): """Analyze document length statistics for padding strategy""" heuristic = self.heuristic_pct histdf = (pd.DataFrame([(a, b) for a, b in self.document_length_histogram.items()], columns=['bin', 'doc_count']) .sort_values(by='bin')) histdf['cumsum_pct'] = histdf.doc_count.cumsum() / histdf.doc_count.sum() self.document_length_stats = histdf self.doc_length_huerestic = histdf.query(f'cumsum_pct >= {heuristic}').bin.head(1).values[0] logging.warning(' '.join(["Setting maximum document length to", f'{self.doc_length_huerestic} based upon', f'heuristic of {heuristic} percentile.\n', 'See full histogram by insepecting the', "`document_length_stats` attribute."])) self.padding_maxlen = self.doc_length_huerestic
TODO: update docs
def fit(self, data: List[str], return_tokenized_data: bool = False) -> Union[None, List[List[str]]]: """ TODO: update docs Apply cleaner and tokenzier to raw data and build vocabulary. Parameters ---------- data : List[str] These are raw documents, which are a list of strings. ex: [["The quick brown fox"], ["jumps over the lazy dog"]] return_tokenized_data : bool Return the tokenized strings. This is primarly used for debugging purposes. Returns ------- None or List[List[str]] if return_tokenized_data=True then will return tokenized documents, otherwise will not return anything. """ self.__clear_data() now = get_time() logging.warning(f'....tokenizing data') tokenized_data = self.parallel_process_text(data) if not self.padding_maxlen: # its not worth the overhead to parallelize document length counts length_counts = map(count_len, tokenized_data) self.document_length_histogram = Counter(length_counts) self.generate_doc_length_stats() # Learn corpus on single thread logging.warning(f'(1/2) done. {time_diff(now)} sec') logging.warning(f'....building corpus') now = get_time() self.indexer = custom_Indexer(num_words=self.keep_n) self.indexer.fit_on_tokenized_texts(tokenized_data) # Build Dictionary accounting For 0 padding, and reserve 1 for unknown and rare Words self.token2id = self.indexer.word_index self.id2token = {v: k for k, v in self.token2id.items()} self.n_tokens = max(self.indexer.word_index.values()) # logging logging.warning(f'(2/2) done. {time_diff(now)} sec') logging.warning(f'Finished parsing {self.indexer.document_count:,} documents.') if return_tokenized_data: return tokenized_data
See token counts as pandas dataframe
def token_count_pandas(self): """ See token counts as pandas dataframe""" freq_df = pd.DataFrame.from_dict(self.indexer.word_counts, orient='index') freq_df.columns = ['count'] return freq_df.sort_values('count', ascending=False)
Apply cleaner and tokenizer to raw data, build the vocabulary, and return the transformed dataset as a List[List[int]]. This will use process-based threading on all available cores.
def fit_transform(self, data: List[str]) -> List[List[int]]: """ Apply cleaner and tokenzier to raw data, build vocabulary and return transfomred dataset that is a List[List[int]]. This will use process-based-threading on all available cores. ex: >>> data = [["The quick brown fox"], ["jumps over the lazy dog"]] >>> pp = preprocess(maxlen=5, no_below=0) >>> pp.fit_transform(data) # 0 padding is applied [[0, 2, 3, 4, 5], [6, 7, 2, 8, 9]] Parameters ---------- data : List[str] These are raw documents, which are a list of strings. ex: [["The quick brown fox"], ["jumps over the lazy dog"]] Returns ------- numpy.array with shape (number of documents, max_len) """ tokenized_data = self.fit(data, return_tokenized_data=True) logging.warning(f'...fit is finished, beginning transform') now = get_time() indexed_data = self.indexer.tokenized_texts_to_sequences(tokenized_data) logging.warning(f'...padding data') final_data = self.pad(indexed_data) logging.warning(f'done. {time_diff(now)} sec') return final_data
Transform a list of documents into a List[List[int]]. If transforming a large number of documents, consider using the method transform_parallel instead.
def transform(self, data: List[str]) -> List[List[int]]: """ Transform List of documents into List[List[int]] If transforming a large number of documents consider using the method `transform_parallel` instead. ex: >> pp = processor() >> pp.fit(docs) >> new_docs = [["The quick brown fox"], ["jumps over the lazy dog"]] >> pp.transform(new_docs) [[1, 2, 3, 4], [5, 6, 1, 7, 8]] """ tokenized_data = self.process_text(data) indexed_data = self.indexer.tokenized_texts_to_sequences(tokenized_data) return self.pad(indexed_data)
Transform a list of documents into a List[List[int]]. Uses process-based threading on all available cores. If only processing a small number of documents (< 10k), consider using the method transform instead.
def transform_parallel(self, data: List[str]) -> List[List[int]]: """ Transform List of documents into List[List[int]]. Uses process based threading on all available cores. If only processing a small number of documents ( < 10k ) then consider using the method `transform` instead. ex: >> pp = processor() >> pp.fit(docs) >> new_docs = [["The quick brown fox"], ["jumps over the lazy dog"]] >> pp.transform_parallel(new_docs) [[1, 2, 3, 4], [5, 6, 1, 7, 8]] """ logging.warning(f'...tokenizing data') tokenized_data = self.parallel_process_text(data) logging.warning(f'...indexing data') indexed_data = self.indexer.tokenized_texts_to_sequences(tokenized_data) logging.warning(f'...padding data') return self.pad(indexed_data)
Vectorize and apply padding to a set of tokenized documents, e.g. [['hello', 'world'], ['goodbye', 'now']].
def pad(self, docs: List[List[int]]) -> List[List[int]]: """ Vectorize and apply padding on a set of tokenized doucments ex: [['hello, 'world'], ['goodbye', 'now']] """ # First apply indexing on all the rows then pad_sequnces (i found this # faster than trying to do these steps on each row return pad_sequences(docs, maxlen=self.padding_maxlen, dtype=self.padding_dtype, padding=self.padding, truncating=self.truncating, value=self.padding_value)
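The padding itself is delegated to Keras' pad_sequences; a hedged sketch of what pre-padding and pre-truncating do, assuming the Keras import path used by this codebase:

from keras.preprocessing.sequence import pad_sequences  # or tensorflow.keras.preprocessing.sequence

print(pad_sequences([[2, 3, 4], [6, 7, 2, 8, 9, 10]],
                    maxlen=5, padding='pre', truncating='pre'))
# [[ 0  0  2  3  4]
#  [ 7  2  8  9 10]]   <- short rows are left-padded with 0, long rows lose their leading tokens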
Transforms tokenized text to sequences of integers. Only the top num_words most frequent words are taken into account; only words known by the tokenizer are taken into account. Arguments: tokenized_texts (List[List[str]]). Returns: a list of integer sequences.
def tokenized_texts_to_sequences(self, tok_texts): """Transforms tokenized text to a sequence of integers. Only top "num_words" most frequent words will be taken into account. Only words known by the tokenizer will be taken into account. # Arguments tokenized texts: List[List[str]] # Returns A list of integers. """ res = [] for vect in self.tokenized_texts_to_sequences_generator(tok_texts): res.append(vect) return res
Transforms tokenized text to sequences of integers. Only the top num_words most frequent words are taken into account; only words known by the tokenizer are taken into account. Arguments: tokenized_texts (List[List[str]]). Yields: individual integer sequences.
def tokenized_texts_to_sequences_generator(self, tok_texts): """Transforms tokenized text to a sequence of integers. Only top "num_words" most frequent words will be taken into account. Only words known by the tokenizer will be taken into account. # Arguments tokenized texts: List[List[str]] # Yields Yields individual sequences. """ for seq in tok_texts: vect = [] for w in seq: # if the word is missing you get oov_index i = self.word_index.get(w, 1) vect.append(i) yield vect
Perform param type mapping. This requires a bit of logic since this isn't standardized. If a type doesn't map, assume str.
def map_param_type(param_type): """ Perform param type mapping This requires a bit of logic since this isn't standardized. If a type doesn't map, assume str """ main_type, sub_type = TYPE_INFO_RE.match(param_type).groups() if main_type in ('list', 'array'): # Handle no sub-type: "required list" if sub_type is not None: sub_type = sub_type.strip() if not sub_type: sub_type = 'str' # Handle list of pairs: "optional list<pair<callsign, path>>" sub_match = TYPE_INFO_RE.match(sub_type) if sub_match: sub_type = sub_match.group(1).lower() return [PARAM_TYPE_MAP.setdefault(sub_type, string_types)] return PARAM_TYPE_MAP.setdefault(main_type, string_types)
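TYPE_INFO_RE and PARAM_TYPE_MAP are defined elsewhere in this module, so the following is only a hedged sketch of the intended behaviour; the exact return values depend on PARAM_TYPE_MAP:

# Hedged sketch; exact mapped types depend on PARAM_TYPE_MAP defined elsewhere in the module.
map_param_type('string')                      # -> the mapped string type
map_param_type('list<string>')                # -> a one-element list, marking "list of <type>"
map_param_type('list<pair<callsign, path>>')  # -> a one-element list; 'pair' is unmapped, so it falls back
map_param_type('totally-unknown-type')        # -> falls back to six's string_types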
Parse the conduit.query JSON dict response. This performs the logic of parsing the non-standard params dict and then returning a dict Resource can understand.
def parse_interfaces(interfaces): """ Parse the conduit.query json dict response This performs the logic of parsing the non-standard params dict and then returning a dict Resource can understand """ parsed_interfaces = collections.defaultdict(dict) for m, d in iteritems(interfaces): app, func = m.split('.', 1) method = parsed_interfaces[app][func] = {} # Make default assumptions since these aren't provided by Phab method['formats'] = ['json', 'human'] method['method'] = 'POST' method['optional'] = {} method['required'] = {} for name, type_info in iteritems(dict(d['params'])): # Set the defaults optionality = 'required' param_type = 'string' # Usually in the format: <optionality> <param_type> type_info = TYPE_INFO_COMMENT_RE.sub('', type_info) info_pieces = TYPE_INFO_SPLITTER_RE.findall(type_info) for info_piece in info_pieces: if info_piece in ('optional', 'required'): optionality = info_piece elif info_piece == 'ignored': optionality = 'optional' param_type = 'string' elif info_piece == 'nonempty': optionality = 'required' elif info_piece == 'deprecated': optionality = 'optional' else: param_type = info_piece method[optionality][name] = map_param_type(param_type) return dict(parsed_interfaces)
The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped.
def _inv_cls(cls): """The inverse of this bidict type, i.e. one with *_fwdm_cls* and *_invm_cls* swapped.""" if cls._fwdm_cls is cls._invm_cls: return cls if not getattr(cls, '_inv_cls_', None): class _Inv(cls): _fwdm_cls = cls._invm_cls _invm_cls = cls._fwdm_cls _inv_cls_ = cls _Inv.__name__ = cls.__name__ + 'Inv' cls._inv_cls_ = _Inv return cls._inv_cls_
The inverse of this bidict.
def inverse(self): """The inverse of this bidict. *See also* :attr:`inv` """ # Resolve and return a strong reference to the inverse bidict. # One may be stored in self._inv already. if self._inv is not None: return self._inv # Otherwise a weakref is stored in self._invweak. Try to get a strong ref from it. inv = self._invweak() if inv is not None: return inv # Refcount of referent must have dropped to zero, as in `bidict().inv.inv`. Init a new one. self._init_inv() # Now this bidict will retain a strong ref to its inverse. return self._inv
Check *key* and *val* for any duplication in self.
def _dedup_item(self, key, val, on_dup): """ Check *key* and *val* for any duplication in self. Handle any duplication as per the duplication policies given in *on_dup*. (key, val) already present is construed as a no-op, not a duplication. If duplication is found and the corresponding duplication policy is :attr:`~bidict.RAISE`, raise the appropriate error. If duplication is found and the corresponding duplication policy is :attr:`~bidict.IGNORE`, return *None*. If duplication is found and the corresponding duplication policy is :attr:`~bidict.OVERWRITE`, or if no duplication is found, return the _DedupResult *(isdupkey, isdupval, oldkey, oldval)*. """ fwdm = self._fwdm invm = self._invm oldval = fwdm.get(key, _MISS) oldkey = invm.get(val, _MISS) isdupkey = oldval is not _MISS isdupval = oldkey is not _MISS dedup_result = _DedupResult(isdupkey, isdupval, oldkey, oldval) if isdupkey and isdupval: if self._isdupitem(key, val, dedup_result): # (key, val) duplicates an existing item -> no-op. return _NOOP # key and val each duplicate a different existing item. if on_dup.kv is RAISE: raise KeyAndValueDuplicationError(key, val) elif on_dup.kv is IGNORE: return _NOOP assert on_dup.kv is OVERWRITE, 'invalid on_dup_kv: %r' % on_dup.kv # Fall through to the return statement on the last line. elif isdupkey: if on_dup.key is RAISE: raise KeyDuplicationError(key) elif on_dup.key is IGNORE: return _NOOP assert on_dup.key is OVERWRITE, 'invalid on_dup.key: %r' % on_dup.key # Fall through to the return statement on the last line. elif isdupval: if on_dup.val is RAISE: raise ValueDuplicationError(val) elif on_dup.val is IGNORE: return _NOOP assert on_dup.val is OVERWRITE, 'invalid on_dup.val: %r' % on_dup.val # Fall through to the return statement on the last line. # else neither isdupkey nor isdupval. return dedup_result
Update, rolling back on failure.
def _update_with_rollback(self, on_dup, *args, **kw): """Update, rolling back on failure.""" writelog = [] appendlog = writelog.append dedup_item = self._dedup_item write_item = self._write_item for (key, val) in _iteritems_args_kw(*args, **kw): try: dedup_result = dedup_item(key, val, on_dup) except DuplicationError: undo_write = self._undo_write for dedup_result, write_result in reversed(writelog): undo_write(dedup_result, write_result) raise if dedup_result is not _NOOP: write_result = write_item(key, val, dedup_result) appendlog((dedup_result, write_result))
A shallow copy.
def copy(self): """A shallow copy.""" # Could just ``return self.__class__(self)`` here instead, but the below is faster. It uses # __new__ to create a copy instance while bypassing its __init__, which would result # in copying this bidict's items into the copy instance one at a time. Instead, make whole # copies of each of the backing mappings, and make them the backing mappings of the copy, # avoiding copying items one at a time. copy = self.__class__.__new__(self.__class__) copy._fwdm = self._fwdm.copy() # pylint: disable=protected-access copy._invm = self._invm.copy() # pylint: disable=protected-access copy._init_inv() # pylint: disable=protected-access return copy
A shallow copy of this ordered bidict.
def copy(self): """A shallow copy of this ordered bidict.""" # Fast copy implementation bypassing __init__. See comments in :meth:`BidictBase.copy`. copy = self.__class__.__new__(self.__class__) sntl = _Sentinel() fwdm = self._fwdm.copy() invm = self._invm.copy() cur = sntl nxt = sntl.nxt for (key, val) in iteritems(self): nxt = _Node(cur, sntl) cur.nxt = fwdm[key] = invm[val] = nxt cur = nxt sntl.prv = nxt copy._sntl = sntl # pylint: disable=protected-access copy._fwdm = fwdm # pylint: disable=protected-access copy._invm = invm # pylint: disable=protected-access copy._init_inv() # pylint: disable=protected-access return copy
Return whether (key, val) duplicates an existing item.
def _isdupitem(self, key, val, dedup_result): """Return whether (key, val) duplicates an existing item.""" isdupkey, isdupval, nodeinv, nodefwd = dedup_result isdupitem = nodeinv is nodefwd if isdupitem: assert isdupkey assert isdupval return isdupitem
Order - sensitive equality check.
def equals_order_sensitive(self, other): """Order-sensitive equality check. *See also* :ref:`eq-order-insensitive` """ # Same short-circuit as BidictBase.__eq__. Factoring out not worth function call overhead. if not isinstance(other, Mapping) or len(self) != len(other): return False return all(i == j for (i, j) in izip(iteritems(self), iteritems(other)))
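A short usage sketch, assuming the published bidict package behaves like this vendored code (ordinary equality is order-insensitive, this method is not):

from bidict import OrderedBidict

a = OrderedBidict([('one', 1), ('two', 2)])
b = OrderedBidict([('two', 2), ('one', 1)])
print(a == b)                        # True: default equality ignores order
print(a.equals_order_sensitive(b))   # False: same items, different order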
Create a new subclass of *base_type* with custom accessors.
def namedbidict(typename, keyname, valname, base_type=bidict): r"""Create a new subclass of *base_type* with custom accessors. Analagous to :func:`collections.namedtuple`. The new class's ``__name__`` will be set to *typename*. Instances of it will provide access to their :attr:`inverse <BidirectionalMapping.inverse>`\s via the custom *keyname*\_for property, and access to themselves via the custom *valname*\_for property. *See also* the :ref:`namedbidict usage documentation <other-bidict-types:\:func\:\`~bidict.namedbidict\`>` :raises ValueError: if any of the *typename*, *keyname*, or *valname* strings does not match ``%s``, or if *keyname == valname*. :raises TypeError: if *base_type* is not a subclass of :class:`BidirectionalMapping`. (This function requires slightly more of *base_type*, e.g. the availability of an ``_isinv`` attribute, but all the :ref:`concrete bidict types <other-bidict-types:Bidict Types Diagram>` that the :mod:`bidict` module provides can be passed in. Check out the code if you actually need to pass in something else.) """ # Re the `base_type` docs above: # The additional requirements (providing _isinv and __getstate__) do not belong in the # BidirectionalMapping interface, and it's overkill to create additional interface(s) for this. # On the other hand, it's overkill to require that base_type be a subclass of BidictBase, since # that's too specific. The BidirectionalMapping check along with the docs above should suffice. if not issubclass(base_type, BidirectionalMapping): raise TypeError(base_type) names = (typename, keyname, valname) if not all(map(_VALID_NAME.match, names)) or keyname == valname: raise ValueError(names) class _Named(base_type): # pylint: disable=too-many-ancestors __slots__ = () def _getfwd(self): return self.inverse if self._isinv else self def _getinv(self): return self if self._isinv else self.inverse @property def _keyname(self): return valname if self._isinv else keyname @property def _valname(self): return keyname if self._isinv else valname def __reduce__(self): return (_make_empty, (typename, keyname, valname, base_type), self.__getstate__()) bname = base_type.__name__ fname = valname + '_for' iname = keyname + '_for' names = dict(typename=typename, bname=bname, keyname=keyname, valname=valname) fdoc = u'{typename} forward {bname}: {keyname} → {valname}'.format(**names) idoc = u'{typename} inverse {bname}: {valname} → {keyname}'.format(**names) setattr(_Named, fname, property(_Named._getfwd, doc=fdoc)) # pylint: disable=protected-access setattr(_Named, iname, property(_Named._getinv, doc=idoc)) # pylint: disable=protected-access _Named.__name__ = typename return _Named
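A usage sketch along the lines of the bidict documentation (the names are illustrative):

ElementMap = namedbidict('ElementMap', 'name', 'symbol')
noble_gases = ElementMap(helium='He')
print(noble_gases.symbol_for['helium'])  # 'He'     (keyname -> valname view)
print(noble_gases.name_for['He'])        # 'helium' (valname -> keyname view)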
Create a named bidict with the indicated arguments and return an empty instance. Used to make :func:`bidict.namedbidict` instances picklable.
def _make_empty(typename, keyname, valname, base_type): """Create a named bidict with the indicated arguments and return an empty instance. Used to make :func:`bidict.namedbidict` instances picklable. """ cls = namedbidict(typename, keyname, valname, base_type=base_type) return cls()
Yield the items from the positional argument (if given) and then any from *kw*.
def _iteritems_args_kw(*args, **kw): """Yield the items from the positional argument (if given) and then any from *kw*. :raises TypeError: if more than one positional argument is given. """ args_len = len(args) if args_len > 1: raise TypeError('Expected at most 1 positional argument, got %d' % args_len) itemchain = None if args: arg = args[0] if arg: itemchain = _iteritems_mapping_or_iterable(arg) if kw: iterkw = iteritems(kw) itemchain = chain(itemchain, iterkw) if itemchain else iterkw return itemchain or _NULL_IT
Yield the inverse items of the provided object.
def inverted(arg): """Yield the inverse items of the provided object. If *arg* has a :func:`callable` ``__inverted__`` attribute, return the result of calling it. Otherwise, return an iterator over the items in `arg`, inverting each item on the fly. *See also* :attr:`bidict.BidirectionalMapping.__inverted__` """ inv = getattr(arg, '__inverted__', None) if callable(inv): return inv() return ((val, key) for (key, val) in _iteritems_mapping_or_iterable(arg))
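For example (a small sketch; it works on both mappings and iterables of pairs):

print(list(inverted({'one': 1})))                   # [(1, 'one')]
print(list(inverted([('two', 2), ('three', 3)])))   # [(2, 'two'), (3, 'three')]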
Remove all items.
def clear(self): """Remove all items.""" self._fwdm.clear() self._invm.clear() self._sntl.nxt = self._sntl.prv = self._sntl
*x.popitem() → (k, v)*
def popitem(self, last=True): # pylint: disable=arguments-differ u"""*x.popitem() → (k, v)* Remove and return the most recently added item as a (key, value) pair if *last* is True, else the least recently added item. :raises KeyError: if *x* is empty. """ if not self: raise KeyError('mapping is empty') key = next((reversed if last else iter)(self)) val = self._pop(key) return key, val
Move an existing key to the beginning or end of this ordered bidict.
def move_to_end(self, key, last=True): """Move an existing key to the beginning or end of this ordered bidict. The item is moved to the end if *last* is True, else to the beginning. :raises KeyError: if the key does not exist """ node = self._fwdm[key] node.prv.nxt = node.nxt node.nxt.prv = node.prv sntl = self._sntl if last: last = sntl.prv node.prv = last node.nxt = sntl sntl.prv = last.nxt = node else: first = sntl.nxt node.prv = sntl node.nxt = first sntl.nxt = first.prv = node
Associate *key* with *val* with the specified duplication policies.
def put(self, key, val, on_dup_key=RAISE, on_dup_val=RAISE, on_dup_kv=None): """ Associate *key* with *val* with the specified duplication policies. If *on_dup_kv* is ``None``, the *on_dup_val* policy will be used for it. For example, if all given duplication policies are :attr:`~bidict.RAISE`, then *key* will be associated with *val* if and only if *key* is not already associated with an existing value and *val* is not already associated with an existing key, otherwise an exception will be raised. If *key* is already associated with *val*, this is a no-op. :raises bidict.KeyDuplicationError: if attempting to insert an item whose key only duplicates an existing item's, and *on_dup_key* is :attr:`~bidict.RAISE`. :raises bidict.ValueDuplicationError: if attempting to insert an item whose value only duplicates an existing item's, and *on_dup_val* is :attr:`~bidict.RAISE`. :raises bidict.KeyAndValueDuplicationError: if attempting to insert an item whose key duplicates one existing item's, and whose value duplicates another existing item's, and *on_dup_kv* is :attr:`~bidict.RAISE`. """ on_dup = self._get_on_dup((on_dup_key, on_dup_val, on_dup_kv)) self._put(key, val, on_dup)
Associate *key* with *val* unconditionally.
def forceput(self, key, val): """ Associate *key* with *val* unconditionally. Replace any existing mappings containing key *key* or value *val* as necessary to preserve uniqueness. """ self._put(key, val, self._ON_DUP_OVERWRITE)
*x.pop(k[, d]) → v*
def pop(self, key, default=_MISS): u"""*x.pop(k[, d]) → v* Remove specified key and return the corresponding value. :raises KeyError: if *key* is not found and no *default* is provided. """ try: return self._pop(key) except KeyError: if default is _MISS: raise return default
*x.popitem() → (k, v)*
def popitem(self): u"""*x.popitem() → (k, v)* Remove and return some item as a (key, value) pair. :raises KeyError: if *x* is empty. """ if not self: raise KeyError('mapping is empty') key, val = self._fwdm.popitem() del self._invm[val] return key, val
Like :meth:`putall` with default duplication policies.
def update(self, *args, **kw): """Like :meth:`putall` with default duplication policies.""" if args or kw: self._update(False, None, *args, **kw)
Like a bulk :meth:`forceput`.
def forceupdate(self, *args, **kw): """Like a bulk :meth:`forceput`.""" self._update(False, self._ON_DUP_OVERWRITE, *args, **kw)
Like a bulk :meth:`put`.
def putall(self, items, on_dup_key=RAISE, on_dup_val=RAISE, on_dup_kv=None): """ Like a bulk :meth:`put`. If one of the given items causes an exception to be raised, none of the items is inserted. """ if items: on_dup = self._get_on_dup((on_dup_key, on_dup_val, on_dup_kv)) self._update(False, on_dup, items)
Create a new temporary file and write some initial text to it.
def write_temp_file(text=""): """Create a new temporary file and write some initial text to it. :param text: the text to write to the temp file :type text: str :returns: the file name of the newly created temp file :rtype: str """ with NamedTemporaryFile(mode='w+t', suffix='.yml', delete=False) \ as tempfile: tempfile.write(text) return tempfile.name
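A short usage sketch (the caller is responsible for deleting the file, since delete=False is used):

path = write_temp_file("editor: vim\n")
with open(path) as f:
    print(f.read())   # editor: vim

import os
os.remove(path)       # clean up; the NamedTemporaryFile was created with delete=False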
Returns a list of CarddavObject objects, filtered by a search term over the selected address books (optionally restricted to the full-name field via strict_search).
def get_contact_list_by_user_selection(address_books, search, strict_search): """returns a list of CarddavObject objects :param address_books: list of selected address books :type address_books: list(address_book.AddressBook) :param search: filter contact list :type search: str :param strict_search: if True, search only in full name field :type strict_search: bool :returns: list of CarddavObject objects :rtype: list(CarddavObject) """ return get_contacts( address_books, search, "name" if strict_search else "all", config.reverse(), config.group_by_addressbook(), config.sort)
Get a list of contacts from one or more address books.
def get_contacts(address_books, query, method="all", reverse=False, group=False, sort="first_name"): """Get a list of contacts from one or more address books. :param address_books: the address books to search :type address_books: list(address_book.AddressBook) :param query: a search query to select contacts :type quer: str :param method: the search method, one of "all", "name" or "uid" :type method: str :param reverse: reverse the order of the returned contacts :type reverse: bool :param group: group results by address book :type group: bool :param sort: the field to use for sorting, one of "first_name", "last_name" :type sort: str :returns: contacts from the address_books that match the query :rtype: list(CarddavObject) """ # Search for the contacts in all address books. contacts = [] for address_book in address_books: contacts.extend(address_book.search(query, method=method)) # Sort the contacts. if group: if sort == "first_name": return sorted(contacts, reverse=reverse, key=lambda x: ( unidecode(x.address_book.name).lower(), unidecode(x.get_first_name_last_name()).lower())) elif sort == "last_name": return sorted(contacts, reverse=reverse, key=lambda x: ( unidecode(x.address_book.name).lower(), unidecode(x.get_last_name_first_name()).lower())) else: raise ValueError('sort must be "first_name" or "last_name" not ' '{}.'.format(sort)) else: if sort == "first_name": return sorted(contacts, reverse=reverse, key=lambda x: unidecode(x.get_first_name_last_name()).lower()) elif sort == "last_name": return sorted(contacts, reverse=reverse, key=lambda x: unidecode(x.get_last_name_first_name()).lower()) else: raise ValueError('sort must be "first_name" or "last_name" not ' '{}.'.format(sort))
Merge the parsed arguments from argparse into the config object.
def merge_args_into_config(args, config): """Merge the parsed arguments from argparse into the config object. :param args: the parsed command line arguments :type args: argparse.Namespace :param config: the parsed config file :type config: config.Config :returns: the merged config object :rtype: config.Config """ # display by name: first or last name if "display" in args and args.display: config.set_display_by_name(args.display) # group by address book if "group_by_addressbook" in args and args.group_by_addressbook: config.set_group_by_addressbook(True) # reverse contact list if "reverse" in args and args.reverse: config.set_reverse(True) # sort criteria: first or last name if "sort" in args and args.sort: config.sort = args.sort # preferred vcard version if "vcard_version" in args and args.vcard_version: config.set_preferred_vcard_version(args.vcard_version) # search in source files if "search_in_source_files" in args and args.search_in_source_files: config.set_search_in_source_files(True) # skip unparsable vcards if "skip_unparsable" in args and args.skip_unparsable: config.set_skip_unparsable(True) # If the user could but did not specify address books on the command line # it means they want to use all address books in that place. if "addressbook" in args and not args.addressbook: args.addressbook = [abook.name for abook in config.abooks] if "target_addressbook" in args and not args.target_addressbook: args.target_addressbook = [abook.name for abook in config.abooks]
Load all address books with the given names from the config.
def load_address_books(names, config, search_queries): """Load all address books with the given names from the config. :param names: the address books to load :type names: list(str) :param config: the config instance to use when looking up address books :type config: config.Config :param search_queries: a mapping of address book names to search queries :type search_queries: dict :yields: the loaded address books :ytype: addressbook.AddressBook """ all_names = {str(book) for book in config.abooks} if not names: names = all_names elif not all_names.issuperset(names): sys.exit('Error: The entered address books "{}" do not exist.\n' 'Possible values are: {}'.format( '", "'.join(set(names) - all_names), ', '.join(all_names))) # load address books which are defined in the configuration file for name in names: address_book = config.abook.get_abook(name) address_book.load(search_queries[address_book.name], search_in_source_files=config.search_in_source_files()) yield address_book
Prepare the search query string from the given command line args.
def prepare_search_queries(args): """Prepare the search query string from the given command line args. Each address book can get a search query string to filter vcards befor loading them. Depending on the question if the address book is used for source or target searches different regexes have to be combined into one search string. :param args: the parsed command line :type args: argparse.Namespace :returns: a dict mapping abook names to their loading queries, if the query is None it means that all cards should be loaded :rtype: dict(str:str or None) """ # get all possible search queries for address book parsing source_queries = [] target_queries = [] if "source_search_terms" in args and args.source_search_terms: escaped_term = ".*".join(re.escape(x) for x in args.source_search_terms) source_queries.append(escaped_term) args.source_search_terms = escaped_term if "search_terms" in args and args.search_terms: escaped_term = ".*".join(re.escape(x) for x in args.search_terms) source_queries.append(escaped_term) args.search_terms = escaped_term if "target_contact" in args and args.target_contact: escaped_term = re.escape(args.target_contact) target_queries.append(escaped_term) args.target_contact = escaped_term if "uid" in args and args.uid: source_queries.append(args.uid) if "target_uid" in args and args.target_uid: target_queries.append(args.target_uid) # create and return regexp, None means that no query is given and hence all # contacts should be searched. source_queries = "^.*(%s).*$" % ')|('.join(source_queries) \ if source_queries else None target_queries = "^.*(%s).*$" % ')|('.join(target_queries) \ if target_queries else None logging.debug('Created source query regex: %s', source_queries) logging.debug('Created target query regex: %s', target_queries) # Get all possible search queries for address book parsing, always # depending on the fact if the address book is used to find source or # target contacts or both. queries = {abook.name: [] for abook in config.abook._abooks} for name in queries: if "addressbook" in args and name in args.addressbook: queries[name].append(source_queries) if "target_addressbook" in args and name in args.target_addressbook: queries[name].append(target_queries) # If None is included in the search queries of an address book it means # that either no source or target query was given and this address book # is affected by this. All contacts should be loaded from that address # book. if None in queries[name]: queries[name] = None else: queries[name] = "({})".format(')|('.join(queries[name])) logging.debug('Created query regex: %s', queries) return queries
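As a small illustration of the regex assembly done above (the search terms are made up):

import re

source_queries = [re.escape("foo"), re.escape("bar")]
combined = "^.*(%s).*$" % ')|('.join(source_queries)
print(combined)   # ^.*(foo)|(bar).*$  -- with re.search this matches cards containing either term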
TODO: Docstring for generate_contact_list.
def generate_contact_list(config, args): """TODO: Docstring for generate_contact_list. :param config: the config object to use :type config: config.Config :param args: the command line arguments :type args: argparse.Namespace :returns: the contacts for further processing (TODO) :rtype: list(TODO) """ # fill contact list vcard_list = [] if "uid" in args and args.uid: # If an uid was given we use it to find the contact. logging.debug("args.uid=%s", args.uid) # set search terms to the empty query to prevent errors in # phone and email actions args.search_terms = ".*" vcard_list = get_contacts(args.addressbook, args.uid, method="uid") # We require that the uid given can uniquely identify a contact. if not vcard_list: sys.exit("Found no contact for {}uid {}".format( "source " if args.action == "merge" else "", args.uid)) elif len(vcard_list) != 1: print("Found multiple contacts for {}uid {}".format( "source " if args.action == "merge" else "", args.uid)) for vcard in vcard_list: print(" {}: {}".format(vcard, vcard.get_uid())) sys.exit(1) else: # No uid was given so we try to use the search terms to select a # contact. if "source_search_terms" in args: # exception for merge command if args.source_search_terms: args.search_terms = args.source_search_terms else: args.search_terms = ".*" elif "search_terms" in args: if args.search_terms: args.search_terms = args.search_terms else: args.search_terms = ".*" else: # If no search terms where given on the command line we match # everything with the empty search pattern. args.search_terms = ".*" logging.debug("args.search_terms=%s", args.search_terms) vcard_list = get_contact_list_by_user_selection( args.addressbook, args.search_terms, args.strict_search if "strict_search" in args else False) return vcard_list
Create a new contact.
def new_subcommand(selected_address_books, input_from_stdin_or_file, open_editor): """Create a new contact. :param selected_address_books: a list of addressbooks that were selected on the command line :type selected_address_books: list of address_book.AddressBook :param input_from_stdin_or_file: the data for the new contact as a yaml formatted string :type input_from_stdin_or_file: str :param open_editor: whether to open the new contact in the edior after creation :type open_editor: bool :returns: None :rtype: None """ # ask for address book, in which to create the new contact selected_address_book = choose_address_book_from_list( "Select address book for new contact", selected_address_books) if selected_address_book is None: print("Error: address book list is empty") sys.exit(1) # if there is some data in stdin if input_from_stdin_or_file: # create new contact from stdin try: new_contact = CarddavObject.from_user_input( selected_address_book, input_from_stdin_or_file, config.get_supported_private_objects(), config.get_preferred_vcard_version(), config.localize_dates()) except ValueError as err: print(err) sys.exit(1) else: new_contact.write_to_file() if open_editor: modify_existing_contact(new_contact) else: print("Creation successful\n\n%s" % new_contact.print_vcard()) else: create_new_contact(selected_address_book)
Add a new email address to contacts, creating new contacts if necessary.
def add_email_subcommand(input_from_stdin_or_file, selected_address_books): """Add a new email address to contacts, creating new contacts if necessary. :param input_from_stdin_or_file: the input text to search for the new email :type input_from_stdin_or_file: str :param selected_address_books: the addressbooks that were selected on the command line :type selected_address_books: list of address_book.AddressBook :returns: None :rtype: None """ # get name and email address message = message_from_string(input_from_stdin_or_file, policy=SMTP_POLICY) print("Khard: Add email address to contact") if not message['From'] \ or not message['From'].addresses: print("Found no email address") sys.exit(1) email_address = message['From'].addresses[0].addr_spec name = message['From'].addresses[0].display_name print("Email address: %s" % email_address) if not name: name = input("Contact's name: ") # search for an existing contact selected_vcard = choose_vcard_from_list( "Select contact for the found e-mail address", get_contact_list_by_user_selection(selected_address_books, name, True)) if selected_vcard is None: # create new contact while True: input_string = input("Contact %s does not exist. Do you want " "to create it (y/n)? " % name) if input_string.lower() in ["", "n", "q"]: print("Canceled") sys.exit(0) if input_string.lower() == "y": break # ask for address book, in which to create the new contact selected_address_book = choose_address_book_from_list( "Select address book for new contact", config.abooks) if selected_address_book is None: print("Error: address book list is empty") sys.exit(1) # ask for name and organisation of new contact while True: first_name = input("First name: ") last_name = input("Last name: ") organisation = input("Organisation: ") if not first_name and not last_name and not organisation: print("Error: All fields are empty.") else: break selected_vcard = CarddavObject.from_user_input( selected_address_book, "First name : %s\nLast name : %s\nOrganisation : %s" % ( first_name, last_name, organisation), config.get_supported_private_objects(), config.get_preferred_vcard_version(), config.localize_dates()) # check if the contact already contains the email address for type, email_list in sorted( selected_vcard.get_email_addresses().items(), key=lambda k: k[0].lower()): for email in email_list: if email == email_address: print("The contact %s already contains the email address %s" % (selected_vcard, email_address)) sys.exit(0) # ask for confirmation again while True: input_string = input( "Do you want to add the email address %s to the contact %s (y/n)? " % (email_address, selected_vcard)) if input_string.lower() in ["", "n", "q"]: print("Canceled") sys.exit(0) if input_string.lower() == "y": break # ask for the email label print("\nAdding email address %s to contact %s\n" "Enter email label\n" " vcard 3.0: At least one of home, internet, pref, work, x400\n" " vcard 4.0: At least one of home, internet, pref, work\n" " Or a custom label (only letters" % (email_address, selected_vcard)) while True: label = input("email label [internet]: ") or "internet" try: selected_vcard.add_email_address(label, email_address) except ValueError as err: print(err) else: break # save to disk selected_vcard.write_to_file(overwrite=True) print("Done.\n\n%s" % selected_vcard.print_vcard())
Print birthday contact table.
def birthdays_subcommand(vcard_list, parsable): """Print birthday contact table. :param vcard_list: the vcards to search for matching entries which should be printed :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :returns: None :rtype: None """ # filter out contacts without a birthday date vcard_list = [ vcard for vcard in vcard_list if vcard.get_birthday() is not None] # sort by date (month and day) # The sort function should work for strings and datetime objects. All # strings will besorted before any datetime objects. vcard_list.sort( key=lambda x: (x.get_birthday().month, x.get_birthday().day) if isinstance(x.get_birthday(), datetime.datetime) else (0, 0, x.get_birthday())) # add to string list birthday_list = [] for vcard in vcard_list: date = vcard.get_birthday() if parsable: if config.display_by_name() == "first_name": birthday_list.append("%04d.%02d.%02d\t%s" % (date.year, date.month, date.day, vcard.get_first_name_last_name())) else: birthday_list.append("%04d.%02d.%02d\t%s" % (date.year, date.month, date.day, vcard.get_last_name_first_name())) else: if config.display_by_name() == "first_name": birthday_list.append("%s\t%s" % (vcard.get_first_name_last_name(), vcard.get_formatted_birthday())) else: birthday_list.append("%s\t%s" % (vcard.get_last_name_first_name(), vcard.get_formatted_birthday())) if birthday_list: if parsable: print('\n'.join(birthday_list)) else: list_birthdays(birthday_list) else: if not parsable: print("Found no birthdays") sys.exit(1)
Print a phone application friendly contact table.
def phone_subcommand(search_terms, vcard_list, parsable): """Print a phone application friendly contact table. :param search_terms: used as search term to filter the contacts before printing :type search_terms: str :param vcard_list: the vcards to search for matching entries which should be printed :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :returns: None :rtype: None """ all_phone_numbers_list = [] matching_phone_number_list = [] for vcard in vcard_list: for type, number_list in sorted(vcard.get_phone_numbers().items(), key=lambda k: k[0].lower()): for number in sorted(number_list): if config.display_by_name() == "first_name": name = vcard.get_first_name_last_name() else: name = vcard.get_last_name_first_name() # create output lines line_formatted = "\t".join([name, type, number]) line_parsable = "\t".join([number, name, type]) if parsable: # parsable option: start with phone number phone_number_line = line_parsable else: # else: start with name phone_number_line = line_formatted if re.search(search_terms, "%s\n%s" % (line_formatted, line_parsable), re.IGNORECASE | re.DOTALL): matching_phone_number_list.append(phone_number_line) elif len(re.sub("\D", "", search_terms)) >= 3: # The user likely searches for a phone number cause the # search string contains at least three digits. So we # remove all non-digit chars from the phone number field # and match against that. if re.search(re.sub("\D", "", search_terms), re.sub("\D", "", number), re.IGNORECASE): matching_phone_number_list.append(phone_number_line) # collect all phone numbers in a different list as fallback all_phone_numbers_list.append(phone_number_line) if matching_phone_number_list: if parsable: print('\n'.join(matching_phone_number_list)) else: list_phone_numbers(matching_phone_number_list) elif all_phone_numbers_list: if parsable: print('\n'.join(all_phone_numbers_list)) else: list_phone_numbers(all_phone_numbers_list) else: if not parsable: print("Found no phone numbers") sys.exit(1)
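The fallback branch above matches digit-only search terms against normalized phone numbers. An illustrative stand-alone sketch of that normalization (the function name and sample numbers are made up):

import re

def digits_match(search_term, number):
    # Strip everything that is not a digit from both sides and require at
    # least three digits in the search term before comparing.
    digits = re.sub(r"\D", "", search_term)
    return len(digits) >= 3 and digits in re.sub(r"\D", "", number)

print(digits_match("555 123", "+1 (555) 123-4567"))  # True
print(digits_match("ab", "+1 (555) 123-4567"))       # False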
Print a contact table with all postal / mailing addresses.
def post_address_subcommand(search_terms, vcard_list, parsable): """Print a contact table. with all postal / mailing addresses :param search_terms: used as search term to filter the contacts before printing :type search_terms: str :param vcard_list: the vcards to search for matching entries which should be printed :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :returns: None :rtype: None """ all_post_address_list = [] matching_post_address_list = [] for vcard in vcard_list: # vcard name if config.display_by_name() == "first_name": name = vcard.get_first_name_last_name() else: name = vcard.get_last_name_first_name() # create post address line list post_address_line_list = [] if parsable: for type, post_address_list in sorted(vcard.get_post_addresses().items(), key=lambda k: k[0].lower()): for post_address in post_address_list: post_address_line_list.append( "\t".join([str(post_address), name, type])) else: for type, post_address_list in sorted(vcard.get_formatted_post_addresses().items(), key=lambda k: k[0].lower()): for post_address in sorted(post_address_list): post_address_line_list.append( "\t".join([name, type, post_address])) # add to matching and all post address lists for post_address_line in post_address_line_list: if re.search(search_terms, "%s\n%s" % (post_address_line, post_address_line), re.IGNORECASE | re.DOTALL): matching_post_address_list.append(post_address_line) # collect all post addresses in a different list as fallback all_post_address_list.append(post_address_line) if matching_post_address_list: if parsable: print('\n'.join(matching_post_address_list)) else: list_post_addresses(matching_post_address_list) elif all_post_address_list: if parsable: print('\n'.join(all_post_address_list)) else: list_post_addresses(all_post_address_list) else: if not parsable: print("Found no post adresses") sys.exit(1)
Print a mail client friendly contacts table that is compatible with the default format used by mutt. Output format: a single line of text, followed by one email_address\tname\ttype line per address.
def email_subcommand(search_terms, vcard_list, parsable, remove_first_line): """Print a mail client friendly contacts table that is compatible with the default format used by mutt. Output format: single line of text email_address\tname\ttype email_address\tname\ttype [...] :param search_terms: used as search term to filter the contacts before printing :type search_terms: str :param vcard_list: the vcards to search for matching entries which should be printed :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :param remove_first_line: remove first line (searching for '' ...) :type remove_first_line: bool :returns: None :rtype: None """ matching_email_address_list = [] all_email_address_list = [] for vcard in vcard_list: for type, email_list in sorted(vcard.get_email_addresses().items(), key=lambda k: k[0].lower()): for email in sorted(email_list): if config.display_by_name() == "first_name": name = vcard.get_first_name_last_name() else: name = vcard.get_last_name_first_name() # create output lines line_formatted = "\t".join([name, type, email]) line_parsable = "\t".join([email, name, type]) if parsable: # parsable option: start with email address email_address_line = line_parsable else: # else: start with name email_address_line = line_formatted if re.search(search_terms, "%s\n%s" % (line_formatted, line_parsable), re.IGNORECASE | re.DOTALL): matching_email_address_list.append(email_address_line) # collect all email addresses in a different list as fallback all_email_address_list.append(email_address_line) if matching_email_address_list: if parsable: if not remove_first_line: # at least mutt requires that line print("searching for '%s' ..." % search_terms) print('\n'.join(matching_email_address_list)) else: list_email_addresses(matching_email_address_list) elif all_email_address_list: if parsable: if not remove_first_line: # at least mutt requires that line print("searching for '%s' ..." % search_terms) print('\n'.join(all_email_address_list)) else: list_email_addresses(all_email_address_list) else: if not parsable: print("Found no email addresses") elif not remove_first_line: print("searching for '%s' ..." % search_terms) sys.exit(1)
Print a user friendly contacts table.
def list_subcommand(vcard_list, parsable): """Print a user friendly contacts table. :param vcard_list: the vcards to print :type vcard_list: list of carddav_object.CarddavObject :param parsable: machine readable output: columns devided by tabulator (\t) :type parsable: bool :returns: None :rtype: None """ if not vcard_list: if not parsable: print("Found no contacts") sys.exit(1) elif parsable: contact_line_list = [] for vcard in vcard_list: if config.display_by_name() == "first_name": name = vcard.get_first_name_last_name() else: name = vcard.get_last_name_first_name() contact_line_list.append('\t'.join([vcard.get_uid(), name, vcard.address_book.name])) print('\n'.join(contact_line_list)) else: list_contacts(vcard_list)
Modify a contact in an external editor.
def modify_subcommand(selected_vcard, input_from_stdin_or_file, open_editor): """Modify a contact in an external editor. :param selected_vcard: the contact to modify :type selected_vcard: carddav_object.CarddavObject :param input_from_stdin_or_file: new data from stdin (or a file) that should be incorperated into the contact, this should be a yaml formatted string :type input_from_stdin_or_file: str :param open_editor: whether to open the new contact in the edior after creation :type open_editor: bool :returns: None :rtype: None """ # show warning, if vcard version of selected contact is not 3.0 or 4.0 if selected_vcard.get_version() not in config.supported_vcard_versions: print("Warning:\nThe selected contact is based on vcard version %s " "but khard only supports the creation and modification of vcards" " with version 3.0 and 4.0.\nIf you proceed, the contact will be" " converted to vcard version %s but beware: This could corrupt " "the contact file or cause data loss." % (selected_vcard.get_version(), config.get_preferred_vcard_version())) while True: input_string = input("Do you want to proceed anyway (y/n)? ") if input_string.lower() in ["", "n", "q"]: print("Canceled") sys.exit(0) if input_string.lower() == "y": break # if there is some data in stdin if input_from_stdin_or_file: # create new contact from stdin try: new_contact = \ CarddavObject.from_existing_contact_with_new_user_input( selected_vcard, input_from_stdin_or_file, config.localize_dates()) except ValueError as err: print(err) sys.exit(1) if selected_vcard == new_contact: print("Nothing changed\n\n%s" % new_contact.print_vcard()) else: print("Modification\n\n%s\n" % new_contact.print_vcard()) while True: input_string = input("Do you want to proceed (y/n)? ") if input_string.lower() in ["", "n", "q"]: print("Canceled") break if input_string.lower() == "y": new_contact.write_to_file(overwrite=True) if open_editor: modify_existing_contact(new_contact) else: print("Done") break else: modify_existing_contact(selected_vcard)
Remove a contact from the addressbook.
def remove_subcommand(selected_vcard, force): """Remove a contact from the addressbook. :param selected_vcard: the contact to delete :type selected_vcard: carddav_object.CarddavObject :param force: delete without confirmation :type force: bool :returns: None :rtype: None """ if not force: while True: input_string = input( "Deleting contact %s from address book %s. Are you sure? " "(y/n): " % (selected_vcard, selected_vcard.address_book)) if input_string.lower() in ["", "n", "q"]: print("Canceled") sys.exit(0) if input_string.lower() == "y": break selected_vcard.delete_vcard_file() print("Contact %s deleted successfully" % selected_vcard.get_full_name())
Open the vcard file for a contact in an external editor.
def source_subcommand(selected_vcard, editor):
    """Open the vcard file for a contact in an external editor.

    :param selected_vcard: the contact to edit
    :type selected_vcard: carddav_object.CarddavObject
    :param editor: the editor command to use
    :type editor: str
    :returns: None
    :rtype: None
    """
    child = subprocess.Popen([editor, selected_vcard.filename])
    child.communicate()
Merge two contacts into one.
def merge_subcommand(vcard_list, selected_address_books, search_terms, target_uid): """Merge two contacts into one. :param vcard_list: the vcards from which to choose contacts for mergeing :type vcard_list: list of carddav_object.CarddavObject :param selected_address_books: the addressbooks to use to find the target contact :type selected_address_books: list(addressbook.AddressBook) :param search_terms: the search terms to find the target contact :type search_terms: str :param target_uid: the uid of the target contact or empty :type target_uid: str :returns: None :rtype: None """ # Check arguments. if target_uid != "" and search_terms != "": print("You can not specify a target uid and target search terms for a " "merge.") sys.exit(1) # Find possible target contacts. if target_uid != "": target_vcards = get_contacts(selected_address_books, target_uid, method="uid") # We require that the uid given can uniquely identify a contact. if len(target_vcards) != 1: if not target_vcards: print("Found no contact for target uid %s" % target_uid) else: print("Found multiple contacts for target uid %s" % target_uid) for vcard in target_vcards: print(" %s: %s" % (vcard, vcard.get_uid())) sys.exit(1) else: target_vcards = get_contact_list_by_user_selection( selected_address_books, search_terms, False) # get the source vcard, from which to merge source_vcard = choose_vcard_from_list("Select contact from which to merge", vcard_list) if source_vcard is None: print("Found no source contact for merging") sys.exit(1) else: print("Merge from %s from address book %s\n\n" % (source_vcard, source_vcard.address_book)) # get the target vcard, into which to merge target_vcard = choose_vcard_from_list("Select contact into which to merge", target_vcards) if target_vcard is None: print("Found no target contact for merging") sys.exit(1) else: print("Merge into %s from address book %s\n\n" % (target_vcard, target_vcard.address_book)) # merging if source_vcard == target_vcard: print("The selected contacts are already identical") else: merge_existing_contacts(source_vcard, target_vcard, True)
Copy or move a contact to a different address book.
def copy_or_move_subcommand(action, vcard_list, target_address_book_list): """Copy or move a contact to a different address book. :action: the string "copy" or "move" to indicate what to do :type action: str :param vcard_list: the contact list from which to select one for the action :type vcard_list: list of carddav_object.CarddavObject :param target_address_book_list: the list of target address books :type target_address_book_list: list(addressbook.AddressBook) :returns: None :rtype: None """ # get the source vcard, which to copy or move source_vcard = choose_vcard_from_list( "Select contact to %s" % action.title(), vcard_list) if source_vcard is None: print("Found no contact") sys.exit(1) else: print("%s contact %s from address book %s" % (action.title(), source_vcard, source_vcard.address_book)) # get target address book if len(target_address_book_list) == 1 \ and target_address_book_list[0] == source_vcard.address_book: print("The address book %s already contains the contact %s" % (target_address_book_list[0], source_vcard)) sys.exit(1) else: available_address_books = [abook for abook in target_address_book_list if abook != source_vcard.address_book] selected_target_address_book = choose_address_book_from_list( "Select target address book", available_address_books) if selected_target_address_book is None: print("Error: address book list is empty") sys.exit(1) # check if a contact already exists in the target address book target_vcard = choose_vcard_from_list( "Select target contact which to overwrite", get_contact_list_by_user_selection([selected_target_address_book], source_vcard.get_full_name(), True)) # If the target contact doesn't exist, move or copy the source contact into # the target address book without further questions. if target_vcard is None: copy_contact(source_vcard, selected_target_address_book, action == "move") else: if source_vcard == target_vcard: # source and target contact are identical print("Target contact: %s" % target_vcard) if action == "move": copy_contact(source_vcard, selected_target_address_book, True) else: print("The selected contacts are already identical") else: # source and target contacts are different # either overwrite the target one or merge into target contact print("The address book %s already contains the contact %s\n\n" "Source\n\n%s\n\nTarget\n\n%s\n\n" "Possible actions:\n" " a: %s anyway\n" " m: Merge from source into target contact\n" " o: Overwrite target contact\n" " q: Quit" % ( target_vcard.address_book, source_vcard, source_vcard.print_vcard(), target_vcard.print_vcard(), "Move" if action == "move" else "Copy")) while True: input_string = input("Your choice: ") if input_string.lower() == "a": copy_contact(source_vcard, selected_target_address_book, action == "move") break if input_string.lower() == "o": copy_contact(source_vcard, selected_target_address_book, action == "move") target_vcard.delete_vcard_file() break if input_string.lower() == "m": merge_existing_contacts(source_vcard, target_vcard, action == "move") break if input_string.lower() in ["", "q"]: print("Canceled") break
Parse the command line arguments and return the namespace that was created by argparse.ArgumentParser.parse_args().
def parse_args(argv): """Parse the command line arguments and return the namespace that was creates by argparse.ArgumentParser.parse_args(). :returns: the namespace parsed from the command line :rtype: argparse.Namespace """ # Create the base argument parser. It will be reused for the first and # second round of argument parsing. base = argparse.ArgumentParser( description="Khard is a carddav address book for the console", formatter_class=argparse.RawTextHelpFormatter, add_help=False) base.add_argument("-c", "--config", default="", help="config file to use") base.add_argument("--debug", action="store_true", help="enable debug output") base.add_argument("--skip-unparsable", action="store_true", help="skip unparsable vcard files") base.add_argument("-v", "--version", action="version", version="Khard version %s" % khard_version) # Create the first argument parser. Its main job is to set the correct # config file. The config file is needed to get the default command if no # subcommand is given on the command line. This parser will ignore most # arguments, as they will be parsed by the second parser. first_parser = argparse.ArgumentParser(parents=[base]) first_parser.add_argument('remainder', nargs=argparse.REMAINDER) # Create the main argument parser. It will handle the complete command # line only ignoring the config and debug options as these have already # been set. parser = argparse.ArgumentParser(parents=[base]) # create address book subparsers with different help texts default_addressbook_parser = argparse.ArgumentParser(add_help=False) default_addressbook_parser.add_argument( "-a", "--addressbook", default=[], type=lambda x: [y.strip() for y in x.split(",")], help="Specify one or several comma separated address book names to " "narrow the list of contacts") new_addressbook_parser = argparse.ArgumentParser(add_help=False) new_addressbook_parser.add_argument( "-a", "--addressbook", default=[], type=lambda x: [y.strip() for y in x.split(",")], help="Specify address book in which to create the new contact") copy_move_addressbook_parser = argparse.ArgumentParser(add_help=False) copy_move_addressbook_parser.add_argument( "-a", "--addressbook", default=[], type=lambda x: [y.strip() for y in x.split(",")], help="Specify one or several comma separated address book names to " "narrow the list of contacts") copy_move_addressbook_parser.add_argument( "-A", "--target-addressbook", default=[], type=lambda x: [y.strip() for y in x.split(",")], help="Specify target address book in which to copy / move the " "selected contact") merge_addressbook_parser = argparse.ArgumentParser(add_help=False) merge_addressbook_parser.add_argument( "-a", "--addressbook", default=[], type=lambda x: [y.strip() for y in x.split(",")], help="Specify one or several comma separated address book names to " "narrow the list of source contacts") merge_addressbook_parser.add_argument( "-A", "--target-addressbook", default=[], type=lambda x: [y.strip() for y in x.split(",")], help="Specify one or several comma separated address book names to " "narrow the list of target contacts") # create input file subparsers with different help texts email_header_input_file_parser = argparse.ArgumentParser(add_help=False) email_header_input_file_parser.add_argument( "-i", "--input-file", default="-", help="Specify input email header file name or use stdin by default") template_input_file_parser = argparse.ArgumentParser(add_help=False) template_input_file_parser.add_argument( "-i", "--input-file", default="-", help="Specify input template file 
name or use stdin by default") template_input_file_parser.add_argument( "--open-editor", action="store_true", help="Open the default text " "editor after successful creation of new contact") # create sort subparser sort_parser = argparse.ArgumentParser(add_help=False) sort_parser.add_argument( "-d", "--display", choices=("first_name", "last_name"), help="Display names in contact table by first or last name") sort_parser.add_argument( "-g", "--group-by-addressbook", action="store_true", help="Group contact table by address book") sort_parser.add_argument( "-r", "--reverse", action="store_true", help="Reverse order of contact table") sort_parser.add_argument( "-s", "--sort", choices=("first_name", "last_name"), help="Sort contact table by first or last name") # create search subparsers default_search_parser = argparse.ArgumentParser(add_help=False) default_search_parser.add_argument( "-f", "--search-in-source-files", action="store_true", help="Look into source vcf files to speed up search queries in " "large address books. Beware that this option could lead " "to incomplete results.") default_search_parser.add_argument( "-e", "--strict-search", action="store_true", help="narrow contact search to name field") default_search_parser.add_argument( "-u", "--uid", default="", help="select contact by uid") default_search_parser.add_argument( "search_terms", nargs="*", metavar="search terms", help="search in all fields to find matching contact") merge_search_parser = argparse.ArgumentParser(add_help=False) merge_search_parser.add_argument( "-f", "--search-in-source-files", action="store_true", help="Look into source vcf files to speed up search queries in " "large address books. Beware that this option could lead " "to incomplete results.") merge_search_parser.add_argument( "-e", "--strict-search", action="store_true", help="narrow contact search to name fields") merge_search_parser.add_argument( "-t", "--target-contact", "--target", default="", help="search in all fields to find matching target contact") merge_search_parser.add_argument( "-u", "--uid", default="", help="select source contact by uid") merge_search_parser.add_argument( "-U", "--target-uid", default="", help="select target contact by uid") merge_search_parser.add_argument( "source_search_terms", nargs="*", metavar="source", help="search in all fields to find matching source contact") # create subparsers for actions subparsers = parser.add_subparsers(dest="action") list_parser = subparsers.add_parser( "list", aliases=Actions.get_aliases("list"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="list all (selected) contacts", help="list all (selected) contacts") list_parser.add_argument( "-p", "--parsable", action="store_true", help="Machine readable format: uid\\tcontact_name\\taddress_book_name") subparsers.add_parser( "details", aliases=Actions.get_aliases("details"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="display detailed information about one contact", help="display detailed information about one contact") export_parser = subparsers.add_parser( "export", aliases=Actions.get_aliases("export"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="export a contact to the custom yaml format that is " "also used for editing and creating contacts", help="export a contact to the custom yaml format that is also " "used for editing and creating contacts") export_parser.add_argument( "--empty-contact-template", 
action="store_true", help="Export an empty contact template") export_parser.add_argument( "-o", "--output-file", default=sys.stdout, type=argparse.FileType("w"), help="Specify output template file name or use stdout by default") birthdays_parser = subparsers.add_parser( "birthdays", aliases=Actions.get_aliases("birthdays"), parents=[default_addressbook_parser, default_search_parser], description="list birthdays (sorted by month and day)", help="list birthdays (sorted by month and day)") birthdays_parser.add_argument( "-d", "--display", choices=("first_name", "last_name"), help="Display names in birthdays table by first or last name") birthdays_parser.add_argument( "-p", "--parsable", action="store_true", help="Machine readable format: name\\tdate") email_parser = subparsers.add_parser( "email", aliases=Actions.get_aliases("email"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="list email addresses", help="list email addresses") email_parser.add_argument( "-p", "--parsable", action="store_true", help="Machine readable format: address\\tname\\ttype") email_parser.add_argument( "--remove-first-line", action="store_true", help="remove \"searching for '' ...\" line from parsable output " "(that line is required by mutt)") phone_parser = subparsers.add_parser( "phone", aliases=Actions.get_aliases("phone"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="list phone numbers", help="list phone numbers") phone_parser.add_argument( "-p", "--parsable", action="store_true", help="Machine readable format: number\\tname\\ttype") post_address_parser = subparsers.add_parser( "postaddress", aliases=Actions.get_aliases("postaddress"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="list postal addresses", help="list postal addresses") post_address_parser.add_argument( "-p", "--parsable", action="store_true", help="Machine readable format: address\\tname\\ttype") subparsers.add_parser( "source", aliases=Actions.get_aliases("source"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="edit the vcard file of a contact directly", help="edit the vcard file of a contact directly") new_parser = subparsers.add_parser( "new", aliases=Actions.get_aliases("new"), parents=[new_addressbook_parser, template_input_file_parser], description="create a new contact", help="create a new contact") new_parser.add_argument( "--vcard-version", choices=("3.0", "4.0"), help="Select preferred vcard version for new contact") add_email_parser = subparsers.add_parser( "add-email", aliases=Actions.get_aliases("add-email"), parents=[default_addressbook_parser, email_header_input_file_parser, default_search_parser, sort_parser], description="Extract email address from the \"From:\" field of an " "email header and add to an existing contact or create a new one", help="Extract email address from the \"From:\" field of an email " "header and add to an existing contact or create a new one") add_email_parser.add_argument( "--vcard-version", choices=("3.0", "4.0"), help="Select preferred vcard version for new contact") subparsers.add_parser( "merge", aliases=Actions.get_aliases("merge"), parents=[merge_addressbook_parser, merge_search_parser, sort_parser], description="merge two contacts", help="merge two contacts") subparsers.add_parser( "modify", aliases=Actions.get_aliases("modify"), parents=[default_addressbook_parser, template_input_file_parser, default_search_parser, sort_parser], 
description="edit the data of a contact", help="edit the data of a contact") subparsers.add_parser( "copy", aliases=Actions.get_aliases("copy"), parents=[copy_move_addressbook_parser, default_search_parser, sort_parser], description="copy a contact to a different addressbook", help="copy a contact to a different addressbook") subparsers.add_parser( "move", aliases=Actions.get_aliases("move"), parents=[copy_move_addressbook_parser, default_search_parser, sort_parser], description="move a contact to a different addressbook", help="move a contact to a different addressbook") remove_parser = subparsers.add_parser( "remove", aliases=Actions.get_aliases("remove"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="remove a contact", help="remove a contact") remove_parser.add_argument( "--force", action="store_true", help="Remove contact without confirmation") subparsers.add_parser( "addressbooks", aliases=Actions.get_aliases("addressbooks"), description="list addressbooks", help="list addressbooks") subparsers.add_parser( "filename", aliases=Actions.get_aliases("filename"), parents=[default_addressbook_parser, default_search_parser, sort_parser], description="list filenames of all matching contacts", help="list filenames of all matching contacts") # Replace the print_help method of the first parser with the print_help # method of the main parser. This makes it possible to have the first # parser handle the help option so that command line help can be printed # without parsing the config file first (which is a problem if there are # errors in the config file). The config file will still be parsed before # the full command line is parsed so errors in the config file might be # reported before command line syntax errors. first_parser.print_help = parser.print_help # Parese the command line with the first argument parser. It will handle # the config option (its main job) and also the help, version and debug # options as these do not depend on anything else. args = first_parser.parse_args(argv) remainder = args.remainder # Set the loglevel to debug if given on the command line. This is done # before parsing the config file to make it possible to debug the parsing # of the config file. if "debug" in args and args.debug: logging.basicConfig(level=logging.DEBUG) # Create the global config instance. global config config = Config(args.config) # Check the log level again and merge the value from the command line with # the config file. if ("debug" in args and args.debug) or config.debug: logging.basicConfig(level=logging.DEBUG) logging.debug("first args=%s", args) logging.debug("remainder=%s", remainder) # Set the default command from the config file if none was given on the # command line. if not remainder or remainder[0] not in Actions.get_all(): remainder.insert(0, config.default_action) logging.debug("updated remainder=%s", remainder) # Save the last option that needs to be carried from the first parser run # to the second. skip = args.skip_unparsable # Parse the remainder of the command line. All options from the previous # run have already been processed and are not needed any more. args = parser.parse_args(remainder) # Restore settings that are left from the first parser run. args.skip_unparsable = skip logging.debug("second args=%s", args) # An integrity check for some options. 
if "uid" in args and args.uid and ( ("search_terms" in args and args.search_terms) or ("source_search_terms" in args and args.source_search_terms)): # If an uid was given we require that no search terms where given. parser.error("You can not give arbitrary search terms and --uid at the" " same time.") return args
Find the name of the action for the supplied alias. If no action is associated with the given alias, None is returned.
def get_action(cls, alias):
    """Find the name of the action for the supplied alias. If no action is
    associated with the given alias, None is returned.

    :param alias: the alias to look up
    :type alias: str
    :returns: the name of the corresponding action or None
    :rtype: str or NoneType
    """
    for action, alias_list in cls.action_map.items():
        if alias in alias_list:
            return action
    return None
Convert the named field to bool.
def _convert_boolean_config_value(config, name, default=True): """Convert the named field to bool. The current value should be one of the strings "yes" or "no". It will be replaced with its boolean counterpart. If the field is not present in the config object, the default value is used. :param config: the config section where to set the option :type config: configobj.ConfigObj :param name: the name of the option to convert :type name: str :param default: the default value to use if the option was not previously set :type default: bool :returns: None """ if name not in config: config[name] = default elif config[name] == "yes": config[name] = True elif config[name] == "no": config[name] = False else: raise ValueError("Error in config file\nInvalid value for %s " "parameter\nPossible values: yes, no" % name)
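A hypothetical usage sketch, with a plain dict standing in for the configobj section (the option names are made up); it assumes _convert_boolean_config_value as defined above is in scope:

# "yes"/"no" strings are converted in place; missing keys get the default.
section = {"show_nicknames": "yes"}
_convert_boolean_config_value(section, "show_nicknames")
_convert_boolean_config_value(section, "show_uids", default=False)
print(section)  # {'show_nicknames': True, 'show_uids': False}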
Use this to create a new and empty contact.
def new_contact(cls, address_book, supported_private_objects, version, localize_dates): """Use this to create a new and empty contact.""" return cls(address_book, None, supported_private_objects, version, localize_dates)
Use this if you want to create a new contact from an existing .vcf file.
def from_file(cls, address_book, filename, supported_private_objects, localize_dates): """ Use this if you want to create a new contact from an existing .vcf file. """ return cls(address_book, filename, supported_private_objects, None, localize_dates)
Use this if you want to create a new contact from user input.
def from_user_input(cls, address_book, user_input, supported_private_objects, version, localize_dates): """Use this if you want to create a new contact from user input.""" contact = cls(address_book, None, supported_private_objects, version, localize_dates) contact._process_user_input(user_input) return contact
Use this if you want to clone an existing contact and replace its data with new user input in one step.
def from_existing_contact_with_new_user_input(cls, contact, user_input, localize_dates): """ Use this if you want to clone an existing contact and replace its data with new user input in one step. """ contact = cls(contact.address_book, contact.filename, contact.supported_private_objects, None, localize_dates) contact._process_user_input(user_input) return contact
Get some part of the "N" entry in the vCard as a list.
def _get_names_part(self, part): """Get some part of the "N" entry in the vCard as a list :param part: the name to get e.g. "prefix" or "given" :type part: str :returns: a list of entries for this name part :rtype: list(str) """ try: the_list = getattr(self.vcard.n.value, part) except AttributeError: return [] else: # check if list only contains empty strings if not ''.join(the_list): return [] return the_list if isinstance(the_list, list) else [the_list]
Return the contact's name as a string in first name, last name order.
def get_first_name_last_name(self): """ :rtype: str """ names = [] if self._get_first_names(): names += self._get_first_names() if self._get_additional_names(): names += self._get_additional_names() if self._get_last_names(): names += self._get_last_names() if names: return helpers.list_to_string(names, " ") else: return self.get_full_name()
Return the contact's name as a string in last name, first name order.
def get_last_name_first_name(self): """ :rtype: str """ last_names = [] if self._get_last_names(): last_names += self._get_last_names() first_and_additional_names = [] if self._get_first_names(): first_and_additional_names += self._get_first_names() if self._get_additional_names(): first_and_additional_names += self._get_additional_names() if last_names and first_and_additional_names: return "{}, {}".format( helpers.list_to_string(last_names, " "), helpers.list_to_string(first_and_additional_names, " ")) elif last_names: return helpers.list_to_string(last_names, " ") elif first_and_additional_names: return helpers.list_to_string(first_and_additional_names, " ") else: return self.get_full_name()
Return the list of organisations, sorted alphabetically.
def _get_organisations(self): """ :returns: list of organisations, sorted alphabetically :rtype: list(list(str)) """ organisations = [] for child in self.vcard.getChildren(): if child.name == "ORG": organisations.append(child.value) return sorted(organisations)
Return the list of titles, sorted alphabetically.
def _get_titles(self): """ :rtype: list(list(str)) """ titles = [] for child in self.vcard.getChildren(): if child.name == "TITLE": titles.append(child.value) return sorted(titles)
Return the list of roles, sorted alphabetically.
def _get_roles(self): """ :rtype: list(list(str)) """ roles = [] for child in self.vcard.getChildren(): if child.name == "ROLE": roles.append(child.value) return sorted(roles)
Return a dict mapping phone number types to sorted lists of phone numbers.
def get_phone_numbers(self): """ : returns: dict of type and phone number list :rtype: dict(str, list(str)) """ phone_dict = {} for child in self.vcard.getChildren(): if child.name == "TEL": # phone types type = helpers.list_to_string( self._get_types_for_vcard_object(child, "voice"), ", ") if type not in phone_dict: phone_dict[type] = [] # phone value # # vcard version 4.0 allows URI scheme "tel" in phone attribute value # Doc: https://tools.ietf.org/html/rfc6350#section-6.4.1 # example: TEL;VALUE=uri;PREF=1;TYPE="voice,home":tel:+1-555-555-5555;ext=5555 if child.value.lower().startswith("tel:"): # cut off the "tel:" uri prefix phone_dict[type].append(child.value[4:]) else: # free text field phone_dict[type].append(child.value) # sort phone number lists for number_list in phone_dict.values(): number_list.sort() return phone_dict
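The value handling above cuts off the "tel:" URI scheme that vCard 4.0 allows for TEL properties (RFC 6350, section 6.4.1) and passes free-text numbers through unchanged. An illustrative stand-alone sketch (the function name and sample numbers are made up):

def normalize_tel_value(value):
    # vCard 4.0 may store numbers as "tel:" URIs; strip the scheme prefix,
    # otherwise return the free-text value unchanged.
    if value.lower().startswith("tel:"):
        return value[4:]
    return value

print(normalize_tel_value("tel:+1-555-555-5555;ext=5555"))  # +1-555-555-5555;ext=5555
print(normalize_tel_value("+49 30 1234567"))                # +49 30 1234567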
Return a dict mapping email address types to sorted lists of email addresses.
def get_email_addresses(self): """ : returns: dict of type and email address list :rtype: dict(str, list(str)) """ email_dict = {} for child in self.vcard.getChildren(): if child.name == "EMAIL": type = helpers.list_to_string( self._get_types_for_vcard_object(child, "internet"), ", ") if type not in email_dict: email_dict[type] = [] email_dict[type].append(child.value) # sort email address lists for email_list in email_dict.values(): email_list.sort() return email_dict
Return a dict mapping postal address types to sorted lists of post addresses.
def get_post_addresses(self): """ : returns: dict of type and post address list :rtype: dict(str, list(dict(str,list|str))) """ post_adr_dict = {} for child in self.vcard.getChildren(): if child.name == "ADR": type = helpers.list_to_string( self._get_types_for_vcard_object(child, "home"), ", ") if type not in post_adr_dict: post_adr_dict[type] = [] post_adr_dict[type].append( { "box": child.value.box, "extended": child.value.extended, "street": child.value.street, "code": child.value.code, "city": child.value.city, "region": child.value.region, "country": child.value.country }) # sort post address lists for post_adr_list in post_adr_dict.values(): post_adr_list.sort(key=lambda x: ( helpers.list_to_string(x['city'], " ").lower(), helpers.list_to_string(x['street'], " ").lower())) return post_adr_dict
Return the contact's categories as a sorted list.
def _get_categories(self): """ :rtype: list(str) or list(list(str)) """ category_list = [] for child in self.vcard.getChildren(): if child.name == "CATEGORIES": value = child.value category_list.append( value if isinstance(value, list) else [value]) if len(category_list) == 1: return category_list[0] return sorted(category_list)
Add a categories entry to the vCard; the categories argument must be a list.
def _add_category(self, categories): """ categories variable must be a list """ categories_obj = self.vcard.add('categories') categories_obj.value = helpers.convert_to_vcard( "category", categories, ObjectType.list_with_strings)
Return the list of nicknames, sorted alphabetically.
def get_nicknames(self): """ :rtype: list(list(str)) """ nicknames = [] for child in self.vcard.getChildren(): if child.name == "NICKNAME": nicknames.append(child.value) return sorted(nicknames)
Return the list of notes, sorted alphabetically.
def _get_notes(self): """ :rtype: list(list(str)) """ notes = [] for child in self.vcard.getChildren(): if child.name == "NOTE": notes.append(child.value) return sorted(notes)
Return a dict mapping supported private object names to sorted lists of their values.
def _get_private_objects(self): """ :rtype: dict(str, list(str)) """ private_objects = {} for child in self.vcard.getChildren(): if child.name.lower().startswith("x-"): try: key_index = [ x.lower() for x in self.supported_private_objects ].index(child.name[2:].lower()) except ValueError: pass else: key = self.supported_private_objects[key_index] if key not in private_objects: private_objects[key] = [] private_objects[key].append(child.value) # sort private object lists for value in private_objects.values(): value.sort() return private_objects
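The lookup above matches custom "X-" extended vCard properties case-insensitively against the configured private object names. A small illustrative sketch of that matching (the property and object names are made up):

supported_private_objects = ["Jabber", "Twitter"]
property_name = "X-JABBER"

lowered = [name.lower() for name in supported_private_objects]
try:
    # Strip the "X-" prefix and look the rest up case-insensitively.
    key = supported_private_objects[lowered.index(property_name[2:].lower())]
except ValueError:
    key = None
print(key)  # Jabber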