|
|
|
|
|
import re |
|
|
|
|
|
from setuptools import setup, find_packages |
|
|
|
|
|
from codecs import open |
|
|
from os import path |
|
|
|
|
|
# Resolve all paths relative to this setup.py so builds work from any CWD.
here = path.abspath(path.dirname(__file__))

# Read the package version out of stanza/_version.py with a regex instead of
# importing the package (importing could pull in heavy dependencies at build
# time). Use a `with` block so the file handle is closed deterministically —
# the original `open(...).read()` leaked the handle.
with open(path.join(here, 'stanza/_version.py'), encoding='utf-8') as version_file:
    version_file_contents = version_file.read()
# Raises AttributeError at build time if __version__ is missing — fail fast.
VERSION = re.search(r'__version__ = "(.*)"', version_file_contents).group(1)

# Use the README as the long description shown on PyPI.
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
|
|
|
|
|
# Trove classifiers describing the project's maturity, audience, topics,
# and the Python versions it supports.
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: Education',
    'Intended Audience :: Science/Research',
    'Intended Audience :: Information Technology',
    'Topic :: Scientific/Engineering',
    'Topic :: Scientific/Engineering :: Artificial Intelligence',
    'Topic :: Scientific/Engineering :: Information Analysis',
    'Topic :: Text Processing',
    'Topic :: Text Processing :: Linguistic',
    'Topic :: Software Development',
    'Topic :: Software Development :: Libraries',
    'Programming Language :: Python :: 3.8',
    'Programming Language :: Python :: 3.9',
    'Programming Language :: Python :: 3.10',
    'Programming Language :: Python :: 3.11',
    'Programming Language :: Python :: 3.12',
]

# Core runtime dependencies required by every install.
INSTALL_REQUIRES = [
    'emoji',
    'numpy',
    'protobuf>=3.15.0',
    'requests',
    'networkx',
    'tomli;python_version<"3.11"',  # tomllib is stdlib from 3.11 onward
    'torch>=1.3.0',
    'tqdm',
]

# Optional dependency groups, installable as e.g. `pip install stanza[transformers]`.
EXTRAS_REQUIRE = {
    'dev': [
        'check-manifest',
    ],
    'test': [
        'coverage',
        'pytest',
    ],
    'transformers': [
        'transformers>=3.0.0',
        'peft>=0.6.1',
    ],
    'datasets': [
        'datasets',
    ],
    'tokenizers': [
        'jieba',
        'pythainlp',
        'python-crfsuite',
        'spacy',
        'sudachidict_core',
        'sudachipy',
    ],
    'visualization': [
        'spacy',
        'streamlit',
        'ipython',
    ],
}

# Package metadata and build configuration for the stanza distribution.
setup(
    name='stanza',
    version=VERSION,  # extracted above from stanza/_version.py
    description='A Python NLP Library for Many Human Languages, by the Stanford NLP Group',
    long_description=long_description,  # contents of README.md
    long_description_content_type="text/markdown",
    url='https://github.com/stanfordnlp/stanza',
    author='Stanford Natural Language Processing Group',
    author_email='jebolton@stanford.edu',
    license='Apache License 2.0',
    classifiers=CLASSIFIERS,
    keywords='natural-language-processing nlp natural-language-understanding stanford-nlp deep-learning',
    # Ship every package in the repo except data/model/doc directories.
    packages=find_packages(exclude=['data', 'docs', 'extern_data', 'figures', 'saved_models']),
    install_requires=INSTALL_REQUIRES,
    python_requires='>=3.8',
    extras_require=EXTRAS_REQUIRE,
    # Include any *.pl (Perl helper) files found in packages.
    package_data={
        "": ["*.pl"],
    },
    include_package_data=True,
    data_files=[],
    entry_points={
    },
)
|
|
|