forked from nltk/nltk
-
Notifications
You must be signed in to change notification settings - Fork 0
/
setup.py
102 lines (95 loc) · 3.22 KB
/
setup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
#!/usr/bin/env python
#
# Setup script for the Natural Language Toolkit
#
# Copyright (C) 2001-2018 NLTK Project
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# Ewan Klein <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
# Work around mbcs bug in distutils.
# http://bugs.python.org/issue10945
import codecs
try:
    codecs.lookup('mbcs')
except LookupError:
    # Non-Windows platforms lack the 'mbcs' codec; register a search
    # function that answers lookups for 'mbcs' with the ascii codec so
    # distutils does not crash (see the bug report above).
    _ascii_codec = codecs.lookup('ascii')

    def _mbcs_search(name, _codec=_ascii_codec):
        """Return the ascii codec for the name 'mbcs', and None otherwise."""
        if name == 'mbcs':
            return _codec
        return None

    codecs.register(_mbcs_search)
import os
# Use the VERSION file shipped inside the nltk package as the single
# source of truth for the NLTK version.
version_file = os.path.join(os.path.dirname(__file__), 'nltk', 'VERSION')
# codecs.open (codecs is imported above) pins an explicit encoding on
# both Python 2 and 3, so the read does not depend on the locale's
# default encoding.
with codecs.open(version_file, encoding='utf-8') as fh:
    nltk_version = fh.read().strip()
# setuptools
from setuptools import setup, find_packages
# Specify groups of optional dependencies.
extras_require = {
    'machine_learning': [
        'gensim',
        'numpy',
        'python-crfsuite',
        'scikit-learn',
        'scipy',
    ],
    'plot': [
        'matplotlib',
    ],
    'tgrep': [
        'pyparsing',
    ],
    'twitter': [
        'twython',
    ],
    'corenlp': [
        'requests',
    ],
}
# Add a group made up of all optional dependencies, de-duplicated and
# sorted so the generated package metadata is deterministic across
# builds (a bare set has arbitrary iteration order).
extras_require['all'] = sorted(
    {package for group in extras_require.values() for package in group}
)
# Package metadata; everything dynamic (version, extras) is computed above.
setup(
    name="nltk",
    description="Natural Language Toolkit",
    version=nltk_version,
    url="http://nltk.org/",
    long_description="""\
The Natural Language Toolkit (NLTK) is a Python package for
natural language processing. NLTK requires Python 2.7, 3.4, 3.5, or 3.6.""",
    license="Apache License, Version 2.0",
    keywords=[
        'NLP',
        'CL',
        'natural language processing',
        'computational linguistics',
        'parsing',
        'tagging',
        'tokenizing',
        'syntax',
        'linguistics',
        'language',
        'natural language',
        'text analytics',
    ],
    maintainer="Steven Bird",
    maintainer_email="[email protected]",
    author="Steven Bird",
    author_email="[email protected]",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: Education',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Human Machine Interfaces',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: Filters',
        'Topic :: Text Processing :: General',
        'Topic :: Text Processing :: Indexing',
        'Topic :: Text Processing :: Linguistic',
    ],
    package_data={'nltk': ['test/*.doctest', 'VERSION']},
    install_requires=['six'],
    extras_require=extras_require,
    packages=find_packages(),
    zip_safe=False,  # since normal files will be present too?
)