summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCoprDistGit <infra@openeuler.org>2023-06-20 09:56:44 +0000
committerCoprDistGit <infra@openeuler.org>2023-06-20 09:56:44 +0000
commit277feab6a7b8dcd90d6ce2466c250902ee58e495 (patch)
tree63758779fed98aec113820776032f0c6c6f44456
parent585ac5778d8b3b020153d448ac7b86aa3374cf23 (diff)
automatic import of python-Anuvaad-Tokenizer (branch: openeuler20.03)
-rw-r--r--.gitignore1
-rw-r--r--python-anuvaad-tokenizer.spec244
-rw-r--r--sources1
3 files changed, 246 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..f8baff3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/Anuvaad_Tokenizer-0.0.3.tar.gz
diff --git a/python-anuvaad-tokenizer.spec b/python-anuvaad-tokenizer.spec
new file mode 100644
index 0000000..1c7e179
--- /dev/null
+++ b/python-anuvaad-tokenizer.spec
@@ -0,0 +1,244 @@
+%global _empty_manifest_terminate_build 0
+Name: python-Anuvaad-Tokenizer
+Version: 0.0.3
+Release: 1
+Summary: Tokenizer by Anuvaad
+License: MIT
+URL: https://pypi.org/project/Anuvaad-Tokenizer/
+Source0: https://mirrors.aliyun.com/pypi/web/packages/af/03/daee4d302126faaa71f0dc0c058166441d4ddf3472db35bbd6757ff92c55/Anuvaad_Tokenizer-0.0.3.tar.gz
+BuildArch: noarch
+
+Requires: python3-nltk
+
+%description
+# Anuvaad Tokenizer
+
+Anuvaad Tokenizer is a Python package that can be used to tokenize paragraphs into sentences. It supports most Indian languages as well as English. This tokenizer is built using regular expressions.
+
+## Prerequisites
+
+- python >= 3.6
+
+## Installation
+``` pip install Anuvaad_Tokenizer==0.0.3 ```
+
+## Author
+
+Anuvaad (nlp-nmt@tarento.com)
+
+# Usage Example
+
+## For English
+```
+from Anuvaad_Tokenizer.AnuvaadEnTokenizer import AnuvaadEnTokenizer
+
+para=" "
+tokenized_text = AnuvaadEnTokenizer().tokenize(para)
+```
+## For Hindi
+```
+from Anuvaad_Tokenizer.AnuvaadHiTokenizer import AnuvaadHiTokenizer
+
+para=" "
+tokenized_text = AnuvaadHiTokenizer().tokenize(para)
+```
+## For Kannada
+```
+from Anuvaad_Tokenizer.AnuvaadKnTokenizer import AnuvaadKnTokenizer
+
+para=" "
+tokenized_text = AnuvaadKnTokenizer().tokenize(para)
+```
+## For Telugu
+```
+from Anuvaad_Tokenizer.AnuvaadTeTokenizer import AnuvaadTeTokenizer
+
+para=" "
+tokenized_text = AnuvaadTeTokenizer().tokenize(para)
+```
+## For Tamil
+```
+from Anuvaad_Tokenizer.AnuvaadTaTokenizer import AnuvaadTaTokenizer
+
+para=" "
+tokenized_text = AnuvaadTaTokenizer().tokenize(para)
+```
+## LICENSE
+
+MIT License 2021
+Developer - Anuvaad
+
+
+
+%package -n python3-Anuvaad-Tokenizer
+Summary: Tokenizer by Anuvaad
+Provides: python-Anuvaad-Tokenizer
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-Anuvaad-Tokenizer
+# Anuvaad Tokenizer
+
+Anuvaad Tokenizer is a Python package that can be used to tokenize paragraphs into sentences. It supports most Indian languages as well as English. This tokenizer is built using regular expressions.
+
+## Prerequisites
+
+- python >= 3.6
+
+## Installation
+``` pip install Anuvaad_Tokenizer==0.0.3 ```
+
+## Author
+
+Anuvaad (nlp-nmt@tarento.com)
+
+# Usage Example
+
+## For English
+```
+from Anuvaad_Tokenizer.AnuvaadEnTokenizer import AnuvaadEnTokenizer
+
+para=" "
+tokenized_text = AnuvaadEnTokenizer().tokenize(para)
+```
+## For Hindi
+```
+from Anuvaad_Tokenizer.AnuvaadHiTokenizer import AnuvaadHiTokenizer
+
+para=" "
+tokenized_text = AnuvaadHiTokenizer().tokenize(para)
+```
+## For Kannada
+```
+from Anuvaad_Tokenizer.AnuvaadKnTokenizer import AnuvaadKnTokenizer
+
+para=" "
+tokenized_text = AnuvaadKnTokenizer().tokenize(para)
+```
+## For Telugu
+```
+from Anuvaad_Tokenizer.AnuvaadTeTokenizer import AnuvaadTeTokenizer
+
+para=" "
+tokenized_text = AnuvaadTeTokenizer().tokenize(para)
+```
+## For Tamil
+```
+from Anuvaad_Tokenizer.AnuvaadTaTokenizer import AnuvaadTaTokenizer
+
+para=" "
+tokenized_text = AnuvaadTaTokenizer().tokenize(para)
+```
+## LICENSE
+
+MIT License 2021
+Developer - Anuvaad
+
+
+
+%package help
+Summary: Development documents and examples for Anuvaad-Tokenizer
+Provides: python3-Anuvaad-Tokenizer-doc
+%description help
+# Anuvaad Tokenizer
+
+Anuvaad Tokenizer is a Python package that can be used to tokenize paragraphs into sentences. It supports most Indian languages as well as English. This tokenizer is built using regular expressions.
+
+## Prerequisites
+
+- python >= 3.6
+
+## Installation
+``` pip install Anuvaad_Tokenizer==0.0.3 ```
+
+## Author
+
+Anuvaad (nlp-nmt@tarento.com)
+
+# Usage Example
+
+## For English
+```
+from Anuvaad_Tokenizer.AnuvaadEnTokenizer import AnuvaadEnTokenizer
+
+para=" "
+tokenized_text = AnuvaadEnTokenizer().tokenize(para)
+```
+## For Hindi
+```
+from Anuvaad_Tokenizer.AnuvaadHiTokenizer import AnuvaadHiTokenizer
+
+para=" "
+tokenized_text = AnuvaadHiTokenizer().tokenize(para)
+```
+## For Kannada
+```
+from Anuvaad_Tokenizer.AnuvaadKnTokenizer import AnuvaadKnTokenizer
+
+para=" "
+tokenized_text = AnuvaadKnTokenizer().tokenize(para)
+```
+## For Telugu
+```
+from Anuvaad_Tokenizer.AnuvaadTeTokenizer import AnuvaadTeTokenizer
+
+para=" "
+tokenized_text = AnuvaadTeTokenizer().tokenize(para)
+```
+## For Tamil
+```
+from Anuvaad_Tokenizer.AnuvaadTaTokenizer import AnuvaadTaTokenizer
+
+para=" "
+tokenized_text = AnuvaadTaTokenizer().tokenize(para)
+```
+## LICENSE
+
+MIT License 2021
+Developer - Anuvaad
+
+
+
+%prep
+%autosetup -n Anuvaad_Tokenizer-0.0.3
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "\"/%h/%f.gz\"\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-Anuvaad-Tokenizer -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Tue Jun 20 2023 Python_Bot <Python_Bot@openeuler.org> - 0.0.3-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..148ad00
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+c1448d6a79166e83a78f0b1896c19ab5 Anuvaad_Tokenizer-0.0.3.tar.gz