summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCoprDistGit <infra@openeuler.org>2023-05-31 05:00:18 +0000
committerCoprDistGit <infra@openeuler.org>2023-05-31 05:00:18 +0000
commitef723a4fa9f0e76ce653262735927a0f098bd79b (patch)
tree7a38a9632ad96ffbb2197951502a3958310b52ba
parent56801a9b7282929f9c194e079fd4671523327d05 (diff)
automatic import of python-vistec-ser
-rw-r--r--.gitignore1
-rw-r--r--python-vistec-ser.spec339
-rw-r--r--sources1
3 files changed, 341 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..cc1575f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/vistec-ser-0.4.6a3.tar.gz
diff --git a/python-vistec-ser.spec b/python-vistec-ser.spec
new file mode 100644
index 0000000..33e79a3
--- /dev/null
+++ b/python-vistec-ser.spec
@@ -0,0 +1,339 @@
+%global _empty_manifest_terminate_build 0
+Name: python-vistec-ser
+Version: 0.4.6a3
+Release: 1
+Summary: Speech Emotion Recognition models and training using PyTorch
+License: Apache Software License
+URL: https://github.com/tann9949/vistec-ser
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/b6/a9/598ed44abc5b587dd80e812d47e58f058683960470544deb391a48cdfa7e/vistec-ser-0.4.6a3.tar.gz
+BuildArch: noarch
+
+Requires: python3-chardet
+Requires: python3-torch
+Requires: python3-torchvision
+Requires: python3-torchtext
+Requires: python3-torchaudio
+Requires: python3-pytorch-lightning
+Requires: python3-pandas
+Requires: python3-numpy
+Requires: python3-soundfile
+Requires: python3-PyYAML
+Requires: python3-wget
+Requires: python3-fastapi
+Requires: python3-aiofiles
+Requires: python3-multipart
+Requires: python3-uvicorn
+
+%description
+# Vistec-AIS Speech Emotion Recognition
+![python-badge](https://img.shields.io/badge/python-%3E%3D3.6-blue?logo=python)
+![pytorch-badge](https://img.shields.io/badge/pytorch-%3E%3D1.8.0-red?logo=pytorch)
+![license]( https://img.shields.io/github/license/vistec-AI/vistec-ser)
+
+[comment]: <> (![Upload Python Package]&#40;https://github.com/tann9949/vistec-ser/workflows/Upload%20Python%20Package/badge.svg&#41;)
+
+[comment]: <> (![Training]&#40;https://github.com/tann9949/vistec-ser/workflows/Training/badge.svg&#41;)
+
+![Code Grade](https://www.code-inspector.com/project/17426/status/svg)
+![Code Quality Score](https://www.code-inspector.com/project/17426/score/svg)
+
+Speech Emotion Recognition Model and Inferencing using Pytorch
+
+## Installation
+### From Pypi
+```shell
+pip install vistec-ser
+```
+
+### From source
+```shell
+git clone https://github.com/tann9949/vistec-ser.git
+cd vistec-ser
+python setup.py install
+```
+
+## Usage
+### Training with THAI SER Dataset
+We provide Google Colaboratory example for training the [THAI SER dataset](https://github.com/vistec-AI/dataset-releases/releases/tag/v1) using our repository.
+
+[![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1kF5xBYe7d48JRaz3KfIK65A4N5dZMqWQ?usp=sharing)
+
+### Training using provided scripts
+Note that currently, this workflow only supports pre-loaded features, so it might consume an additional overhead of ~2 GB of RAM. To
+run the experiment, run the following command:
+
+Since there are 80 studio recordings and 20 zoom recordings, we split the dataset into 10 folds of 10 studios each, then evaluate using the
+k-fold cross validation method. We provide 2 k-fold experiments: including and excluding zoom recording. This can be configured
+in config file (see `examples/aisser.yaml`)
+
+```shell
+python examples/train_fold_aisser.py --config-path <path-to-config> --n-iter <number-of-iterations>
+```
+
+### Inferencing
+We also implement a FastAPI backend server as an example of deploying a SER model. To run the server, run
+```shell
+cd examples
+uvicorn server:app --reload
+```
+You can customize the server by modifying `example/thaiser.yaml` in `inference` field.
+
+Once the server spawns, you can make an HTTP POST request in `form-data` format, and JSON will be returned in the following format:
+```json
+[
+ {
+ "name": <request-file-name>,
+ "prob": {
+ "neutral": <p(neu)>,
+ "anger": <p(ang)>,
+ "happiness": <p(hap)>,
+ "sadness": <p(sad)>
+ }
+ }, ...
+]
+```
+See an example below:
+
+![server-demo](figures/server.gif)
+
+## Author & Sponsor
+<a href="https://airesearch.in.th/" style="margin-right:50px">
+<img src="https://airesearch.in.th/assets/img/logo/airesearch-logo.svg" alt="airesearch" width="200"/>
+</a>
+<a href="https://www.ais.co.th/">
+<img src="https://upload.wikimedia.org/wikipedia/en/thumb/3/3b/Advanced_Info_Service_logo.svg/1200px-Advanced_Info_Service_logo.svg.png" alt="ais" width="200"/>
+</a>
+
+Chompakorn Chaksangchaichot
+
+Email: [chompakornc_pro@vistec.ac.th](mailto:chompakornc_pro@vistec.ac.th)
+
+
+
+
+%package -n python3-vistec-ser
+Summary: Speech Emotion Recognition models and training using PyTorch
+Provides: python-vistec-ser
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-vistec-ser
+# Vistec-AIS Speech Emotion Recognition
+![python-badge](https://img.shields.io/badge/python-%3E%3D3.6-blue?logo=python)
+![pytorch-badge](https://img.shields.io/badge/pytorch-%3E%3D1.8.0-red?logo=pytorch)
+![license]( https://img.shields.io/github/license/vistec-AI/vistec-ser)
+
+[comment]: <> (![Upload Python Package]&#40;https://github.com/tann9949/vistec-ser/workflows/Upload%20Python%20Package/badge.svg&#41;)
+
+[comment]: <> (![Training]&#40;https://github.com/tann9949/vistec-ser/workflows/Training/badge.svg&#41;)
+
+![Code Grade](https://www.code-inspector.com/project/17426/status/svg)
+![Code Quality Score](https://www.code-inspector.com/project/17426/score/svg)
+
+Speech Emotion Recognition Model and Inferencing using Pytorch
+
+## Installation
+### From Pypi
+```shell
+pip install vistec-ser
+```
+
+### From source
+```shell
+git clone https://github.com/tann9949/vistec-ser.git
+cd vistec-ser
+python setup.py install
+```
+
+## Usage
+### Training with THAI SER Dataset
+We provide Google Colaboratory example for training the [THAI SER dataset](https://github.com/vistec-AI/dataset-releases/releases/tag/v1) using our repository.
+
+[![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1kF5xBYe7d48JRaz3KfIK65A4N5dZMqWQ?usp=sharing)
+
+### Training using provided scripts
+Note that currently, this workflow only supports pre-loaded features, so it might consume an additional overhead of ~2 GB of RAM. To
+run the experiment, run the following command:
+
+Since there are 80 studio recordings and 20 zoom recordings, we split the dataset into 10 folds of 10 studios each, then evaluate using the
+k-fold cross validation method. We provide 2 k-fold experiments: including and excluding zoom recording. This can be configured
+in config file (see `examples/aisser.yaml`)
+
+```shell
+python examples/train_fold_aisser.py --config-path <path-to-config> --n-iter <number-of-iterations>
+```
+
+### Inferencing
+We also implement a FastAPI backend server as an example of deploying a SER model. To run the server, run
+```shell
+cd examples
+uvicorn server:app --reload
+```
+You can customize the server by modifying `example/thaiser.yaml` in `inference` field.
+
+Once the server spawns, you can make an HTTP POST request in `form-data` format, and JSON will be returned in the following format:
+```json
+[
+ {
+ "name": <request-file-name>,
+ "prob": {
+ "neutral": <p(neu)>,
+ "anger": <p(ang)>,
+ "happiness": <p(hap)>,
+ "sadness": <p(sad)>
+ }
+ }, ...
+]
+```
+See an example below:
+
+![server-demo](figures/server.gif)
+
+## Author & Sponsor
+<a href="https://airesearch.in.th/" style="margin-right:50px">
+<img src="https://airesearch.in.th/assets/img/logo/airesearch-logo.svg" alt="airesearch" width="200"/>
+</a>
+<a href="https://www.ais.co.th/">
+<img src="https://upload.wikimedia.org/wikipedia/en/thumb/3/3b/Advanced_Info_Service_logo.svg/1200px-Advanced_Info_Service_logo.svg.png" alt="ais" width="200"/>
+</a>
+
+Chompakorn Chaksangchaichot
+
+Email: [chompakornc_pro@vistec.ac.th](mailto:chompakornc_pro@vistec.ac.th)
+
+
+
+
+%package help
+Summary: Development documents and examples for vistec-ser
+Provides: python3-vistec-ser-doc
+%description help
+# Vistec-AIS Speech Emotion Recognition
+![python-badge](https://img.shields.io/badge/python-%3E%3D3.6-blue?logo=python)
+![pytorch-badge](https://img.shields.io/badge/pytorch-%3E%3D1.8.0-red?logo=pytorch)
+![license]( https://img.shields.io/github/license/vistec-AI/vistec-ser)
+
+[comment]: <> (![Upload Python Package]&#40;https://github.com/tann9949/vistec-ser/workflows/Upload%20Python%20Package/badge.svg&#41;)
+
+[comment]: <> (![Training]&#40;https://github.com/tann9949/vistec-ser/workflows/Training/badge.svg&#41;)
+
+![Code Grade](https://www.code-inspector.com/project/17426/status/svg)
+![Code Quality Score](https://www.code-inspector.com/project/17426/score/svg)
+
+Speech Emotion Recognition Model and Inferencing using Pytorch
+
+## Installation
+### From Pypi
+```shell
+pip install vistec-ser
+```
+
+### From source
+```shell
+git clone https://github.com/tann9949/vistec-ser.git
+cd vistec-ser
+python setup.py install
+```
+
+## Usage
+### Training with THAI SER Dataset
+We provide Google Colaboratory example for training the [THAI SER dataset](https://github.com/vistec-AI/dataset-releases/releases/tag/v1) using our repository.
+
+[![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1kF5xBYe7d48JRaz3KfIK65A4N5dZMqWQ?usp=sharing)
+
+### Training using provided scripts
+Note that currently, this workflow only supports pre-loaded features, so it might consume an additional overhead of ~2 GB of RAM. To
+run the experiment, run the following command:
+
+Since there are 80 studio recordings and 20 zoom recordings, we split the dataset into 10 folds of 10 studios each, then evaluate using the
+k-fold cross validation method. We provide 2 k-fold experiments: including and excluding zoom recording. This can be configured
+in config file (see `examples/aisser.yaml`)
+
+```shell
+python examples/train_fold_aisser.py --config-path <path-to-config> --n-iter <number-of-iterations>
+```
+
+### Inferencing
+We also implement a FastAPI backend server as an example of deploying a SER model. To run the server, run
+```shell
+cd examples
+uvicorn server:app --reload
+```
+You can customize the server by modifying `example/thaiser.yaml` in `inference` field.
+
+Once the server spawns, you can make an HTTP POST request in `form-data` format, and JSON will be returned in the following format:
+```json
+[
+ {
+ "name": <request-file-name>,
+ "prob": {
+ "neutral": <p(neu)>,
+ "anger": <p(ang)>,
+ "happiness": <p(hap)>,
+ "sadness": <p(sad)>
+ }
+ }, ...
+]
+```
+See an example below:
+
+![server-demo](figures/server.gif)
+
+## Author & Sponsor
+<a href="https://airesearch.in.th/" style="margin-right:50px">
+<img src="https://airesearch.in.th/assets/img/logo/airesearch-logo.svg" alt="airesearch" width="200"/>
+</a>
+<a href="https://www.ais.co.th/">
+<img src="https://upload.wikimedia.org/wikipedia/en/thumb/3/3b/Advanced_Info_Service_logo.svg/1200px-Advanced_Info_Service_logo.svg.png" alt="ais" width="200"/>
+</a>
+
+Chompakorn Chaksangchaichot
+
+Email: [chompakornc_pro@vistec.ac.th](mailto:chompakornc_pro@vistec.ac.th)
+
+
+
+
+%prep
+%autosetup -n vistec-ser-0.4.6a3
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-vistec-ser -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Wed May 31 2023 Python_Bot <Python_Bot@openeuler.org> - 0.4.6a3-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..adb0da9
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+4fbf2e447124c948c2ea5905f32f1ff9 vistec-ser-0.4.6a3.tar.gz