From e797c68b8b21faea8a9f1dc673b244dc68840b04 Mon Sep 17 00:00:00 2001
From: CoprDistGit
Date: Fri, 5 May 2023 13:46:10 +0000
Subject: automatic import of python-metriculous

---
 python-metriculous.spec | 537 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 537 insertions(+)
 create mode 100644 python-metriculous.spec

diff --git a/python-metriculous.spec b/python-metriculous.spec
new file mode 100644
index 0000000..3090a65
--- /dev/null
+++ b/python-metriculous.spec
@@ -0,0 +1,537 @@
+%global _empty_manifest_terminate_build 0
+Name: python-metriculous
+Version: 0.3.0
+Release: 1
+Summary: Very unstable library containing utilities to measure and visualize statistical properties of machine learning models.
+License: MIT
+URL: https://github.com/metriculous-ml/metriculous
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/64/5d/dae8ff38946fa2a97463b36e3b01fb908f650437c20d2f0c3ceaa6c52b94/metriculous-0.3.0.tar.gz
+BuildArch: noarch
+
+Requires: python3-numpy
+Requires: python3-scikit-learn
+Requires: python3-assertpy
+Requires: python3-pandas
+Requires: python3-bokeh
+Requires: python3-jupyter
+
+%description

+*(Badges: Launch Binder · GitHub Actions build status · Checked with mypy · PyPI version · PyPI - Python Version · License MIT · Friends with Luminovo.AI)*

+
+# __`metriculous`__
+
+Measure, visualize, and compare machine learning model performance without the usual boilerplate.
+Breaking API changes are to be expected.
+
+
+# Installation
+```console
+$ pip install metriculous
+```
+
+Or, for the latest unreleased version:
+```console
+$ pip install git+https://github.com/metriculous-ml/metriculous.git
+```
+
+
+# Comparing Regression Models [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/metriculous-ml/metriculous/master?filepath=notebooks%2Fquickstart_regression.py)

+
+```python
+import numpy as np
+
+# Mock the ground truth, a one-dimensional array of floats
+ground_truth = np.random.random(300)
+
+# Mock the output of a few models
+perfect_model = ground_truth
+noisy_model = ground_truth + 0.1 * np.random.randn(*ground_truth.shape)
+random_model = np.random.randn(*ground_truth.shape)
+zero_model = np.zeros_like(ground_truth)
+```

+
+
+```python
+import metriculous
+
+metriculous.compare_regressors(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model, zero_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model", "Zero Model"],
+).save_html("comparison.html").display()
+```
+
+This will save an HTML file with common regression metrics and charts and, if you are working in a [Jupyter notebook](https://github.com/jupyter/notebook), display the output right in front of you:
+
+
+![Screenshot of Metriculous Regression Metrics](./imgs/metriculous_regression_screen_shot_table.png)
+![Screenshot of Metriculous Regression Figures](./imgs/metriculous_regression_screen_shot_figures.png)
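+
+The chained call above works because `save_html` evidently returns the comparison object itself. A minimal unchained sketch, assuming that return behavior:
+
+```python
+comparison = metriculous.compare_regressors(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model, zero_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model", "Zero Model"],
+)
+comparison.save_html("comparison.html")  # writes the report to disk
+comparison.display()                     # renders it in a Jupyter notebook
+```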
+
+
+# Comparing Classification Models [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/metriculous-ml/metriculous/master?filepath=notebooks%2Fquickstart_classification.py)

+
+```python
+import numpy as np
+
+
+def normalize(array2d: np.ndarray) -> np.ndarray:
+    return array2d / array2d.sum(axis=1, keepdims=True)
+
+
+class_names = ["Cat", "Dog", "Pig"]
+num_classes = len(class_names)
+num_samples = 500
+
+# Mock ground truth
+ground_truth = np.random.choice(range(num_classes), size=num_samples, p=[0.5, 0.4, 0.1])
+
+# Mock model predictions
+perfect_model = np.eye(num_classes)[ground_truth]
+noisy_model = normalize(
+    perfect_model + 2 * np.random.random((num_samples, num_classes))
+)
+random_model = normalize(np.random.random((num_samples, num_classes)))
+```
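+
+Because `normalize` divides each row by its sum, every mock prediction matrix holds one probability distribution per sample. A quick sanity check (illustrative, not part of the original quickstart):
+
+```python
+# Each prediction matrix has shape (num_samples, num_classes),
+# and each row should sum to one.
+assert noisy_model.shape == (num_samples, num_classes)
+assert np.allclose(noisy_model.sum(axis=1), 1.0)
+assert np.allclose(random_model.sum(axis=1), 1.0)
+```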

+
+
+```python
+import metriculous
+
+metriculous.compare_classifiers(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model"],
+    class_names=class_names,
+    one_vs_all_figures=True,
+).display()
+```
+
+![Screenshot of Metriculous Classification Table](./imgs/metriculous_classification_table.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_1.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_2.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_3.png)
+
+
+# Development
+
+### Poetry
+This project uses [poetry](https://poetry.eustace.io/) to manage dependencies. Please make sure it is installed for the required Python version, then install the dependencies with `poetry install`.
+
+### Makefile
+A Makefile is used to automate common development workflows. Type `make` or `make help` to see a list of available commands. Before committing changes it is recommended to run `make format check test`.
+
+
+%package -n python3-metriculous
+Summary: Very unstable library containing utilities to measure and visualize statistical properties of machine learning models.
+Provides: python-metriculous
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-metriculous

+*(Badges: Launch Binder · GitHub Actions build status · Checked with mypy · PyPI version · PyPI - Python Version · License MIT · Friends with Luminovo.AI)*

+
+# __`metriculous`__
+
+Measure, visualize, and compare machine learning model performance without the usual boilerplate.
+Breaking API changes are to be expected.
+
+
+# Installation
+```console
+$ pip install metriculous
+```
+
+Or, for the latest unreleased version:
+```console
+$ pip install git+https://github.com/metriculous-ml/metriculous.git
+```
+
+
+# Comparing Regression Models [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/metriculous-ml/metriculous/master?filepath=notebooks%2Fquickstart_regression.py)

+
+```python
+import numpy as np
+
+# Mock the ground truth, a one-dimensional array of floats
+ground_truth = np.random.random(300)
+
+# Mock the output of a few models
+perfect_model = ground_truth
+noisy_model = ground_truth + 0.1 * np.random.randn(*ground_truth.shape)
+random_model = np.random.randn(*ground_truth.shape)
+zero_model = np.zeros_like(ground_truth)
+```

+
+
+```python
+import metriculous
+
+metriculous.compare_regressors(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model, zero_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model", "Zero Model"],
+).save_html("comparison.html").display()
+```
+
+This will save an HTML file with common regression metrics and charts and, if you are working in a [Jupyter notebook](https://github.com/jupyter/notebook), display the output right in front of you:
+
+
+![Screenshot of Metriculous Regression Metrics](./imgs/metriculous_regression_screen_shot_table.png)
+![Screenshot of Metriculous Regression Figures](./imgs/metriculous_regression_screen_shot_figures.png)
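+
+The chained call above works because `save_html` evidently returns the comparison object itself. A minimal unchained sketch, assuming that return behavior:
+
+```python
+comparison = metriculous.compare_regressors(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model, zero_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model", "Zero Model"],
+)
+comparison.save_html("comparison.html")  # writes the report to disk
+comparison.display()                     # renders it in a Jupyter notebook
+```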
+
+
+# Comparing Classification Models [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/metriculous-ml/metriculous/master?filepath=notebooks%2Fquickstart_classification.py)

+
+```python
+import numpy as np
+
+
+def normalize(array2d: np.ndarray) -> np.ndarray:
+    return array2d / array2d.sum(axis=1, keepdims=True)
+
+
+class_names = ["Cat", "Dog", "Pig"]
+num_classes = len(class_names)
+num_samples = 500
+
+# Mock ground truth
+ground_truth = np.random.choice(range(num_classes), size=num_samples, p=[0.5, 0.4, 0.1])
+
+# Mock model predictions
+perfect_model = np.eye(num_classes)[ground_truth]
+noisy_model = normalize(
+    perfect_model + 2 * np.random.random((num_samples, num_classes))
+)
+random_model = normalize(np.random.random((num_samples, num_classes)))
+```
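+
+Because `normalize` divides each row by its sum, every mock prediction matrix holds one probability distribution per sample. A quick sanity check (illustrative, not part of the original quickstart):
+
+```python
+# Each prediction matrix has shape (num_samples, num_classes),
+# and each row should sum to one.
+assert noisy_model.shape == (num_samples, num_classes)
+assert np.allclose(noisy_model.sum(axis=1), 1.0)
+assert np.allclose(random_model.sum(axis=1), 1.0)
+```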

+
+
+```python
+import metriculous
+
+metriculous.compare_classifiers(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model"],
+    class_names=class_names,
+    one_vs_all_figures=True,
+).display()
+```
+
+![Screenshot of Metriculous Classification Table](./imgs/metriculous_classification_table.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_1.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_2.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_3.png)
+
+
+# Development
+
+### Poetry
+This project uses [poetry](https://poetry.eustace.io/) to manage dependencies. Please make sure it is installed for the required Python version, then install the dependencies with `poetry install`.
+
+### Makefile
+A Makefile is used to automate common development workflows. Type `make` or `make help` to see a list of available commands. Before committing changes it is recommended to run `make format check test`.
+
+
+%package help
+Summary: Development documents and examples for metriculous
+Provides: python3-metriculous-doc
+%description help

+*(Badges: Launch Binder · GitHub Actions build status · Checked with mypy · PyPI version · PyPI - Python Version · License MIT · Friends with Luminovo.AI)*

+
+# __`metriculous`__
+
+Measure, visualize, and compare machine learning model performance without the usual boilerplate.
+Breaking API changes are to be expected.
+
+
+# Installation
+```console
+$ pip install metriculous
+```
+
+Or, for the latest unreleased version:
+```console
+$ pip install git+https://github.com/metriculous-ml/metriculous.git
+```
+
+
+# Comparing Regression Models [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/metriculous-ml/metriculous/master?filepath=notebooks%2Fquickstart_regression.py)

+
+```python
+import numpy as np
+
+# Mock the ground truth, a one-dimensional array of floats
+ground_truth = np.random.random(300)
+
+# Mock the output of a few models
+perfect_model = ground_truth
+noisy_model = ground_truth + 0.1 * np.random.randn(*ground_truth.shape)
+random_model = np.random.randn(*ground_truth.shape)
+zero_model = np.zeros_like(ground_truth)
+```

+
+
+```python
+import metriculous
+
+metriculous.compare_regressors(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model, zero_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model", "Zero Model"],
+).save_html("comparison.html").display()
+```
+
+This will save an HTML file with common regression metrics and charts and, if you are working in a [Jupyter notebook](https://github.com/jupyter/notebook), display the output right in front of you:
+
+
+![Screenshot of Metriculous Regression Metrics](./imgs/metriculous_regression_screen_shot_table.png)
+![Screenshot of Metriculous Regression Figures](./imgs/metriculous_regression_screen_shot_figures.png)
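+
+The chained call above works because `save_html` evidently returns the comparison object itself. A minimal unchained sketch, assuming that return behavior:
+
+```python
+comparison = metriculous.compare_regressors(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model, zero_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model", "Zero Model"],
+)
+comparison.save_html("comparison.html")  # writes the report to disk
+comparison.display()                     # renders it in a Jupyter notebook
+```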
+
+
+# Comparing Classification Models [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/metriculous-ml/metriculous/master?filepath=notebooks%2Fquickstart_classification.py)

+
+```python
+import numpy as np
+
+
+def normalize(array2d: np.ndarray) -> np.ndarray:
+    return array2d / array2d.sum(axis=1, keepdims=True)
+
+
+class_names = ["Cat", "Dog", "Pig"]
+num_classes = len(class_names)
+num_samples = 500
+
+# Mock ground truth
+ground_truth = np.random.choice(range(num_classes), size=num_samples, p=[0.5, 0.4, 0.1])
+
+# Mock model predictions
+perfect_model = np.eye(num_classes)[ground_truth]
+noisy_model = normalize(
+    perfect_model + 2 * np.random.random((num_samples, num_classes))
+)
+random_model = normalize(np.random.random((num_samples, num_classes)))
+```
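+
+Because `normalize` divides each row by its sum, every mock prediction matrix holds one probability distribution per sample. A quick sanity check (illustrative, not part of the original quickstart):
+
+```python
+# Each prediction matrix has shape (num_samples, num_classes),
+# and each row should sum to one.
+assert noisy_model.shape == (num_samples, num_classes)
+assert np.allclose(noisy_model.sum(axis=1), 1.0)
+assert np.allclose(random_model.sum(axis=1), 1.0)
+```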

+
+
+```python
+import metriculous
+
+metriculous.compare_classifiers(
+    ground_truth=ground_truth,
+    model_predictions=[perfect_model, noisy_model, random_model],
+    model_names=["Perfect Model", "Noisy Model", "Random Model"],
+    class_names=class_names,
+    one_vs_all_figures=True,
+).display()
+```
+
+![Screenshot of Metriculous Classification Table](./imgs/metriculous_classification_table.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_1.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_2.png)
+
+![Screenshot of Metriculous Classification Figures](./imgs/metriculous_classification_figures_3.png)
+
+
+# Development
+
+### Poetry
+This project uses [poetry](https://poetry.eustace.io/) to manage dependencies. Please make sure it is installed for the required Python version, then install the dependencies with `poetry install`.
+
+### Makefile
+A Makefile is used to automate common development workflows. Type `make` or `make help` to see a list of available commands. Before committing changes it is recommended to run `make format check test`.
+
+
+%prep
+%autosetup -n metriculous-0.3.0
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+    find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+    find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+    find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+    find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+    find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-metriculous -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Fri May 05 2023 Python_Bot - 0.3.0-1
+- Package Spec generated