author    CoprDistGit <infra@openeuler.org>    2023-05-05 05:58:07 +0000
committer CoprDistGit <infra@openeuler.org>    2023-05-05 05:58:07 +0000
commit    b6661c63af3a2342272bcd0546a0838ee614eb08 (patch)
tree      ff4914b9d554b1685717509b1f6d045b547b7a0b
parent    629662b79d359cd2da631b4849ebfeb10a3f7312 (diff)

automatic import of python-mean-average-precision (branch: openeuler20.03)
-rw-r--r--  .gitignore                            1
-rw-r--r--  python-mean-average-precision.spec  284
-rw-r--r--  sources                               1
3 files changed, 286 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..e131c50 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/mean_average_precision-2021.4.26.0.tar.gz
diff --git a/python-mean-average-precision.spec b/python-mean-average-precision.spec
new file mode 100644
index 0000000..589dcb1
--- /dev/null
+++ b/python-mean-average-precision.spec
@@ -0,0 +1,284 @@
+%global _empty_manifest_terminate_build 0
+Name: python-mean-average-precision
+Version: 2021.4.26.0
+Release: 1
+Summary: Mean Average Precision evaluator for object detection.
+License: MIT
+URL: https://github.com/bes-dev/mean_average_precision
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/d0/c8/e0fa7f81b32e5e698d13ff19f2899a854728a60d8eae40b73b6d0dde7568/mean_average_precision-2021.4.26.0.tar.gz
+BuildArch: noarch
+
+Requires: python3-numpy
+Requires: python3-pandas
+
+%description
+# mAP: Mean Average Precision for Object Detection
+
+A simple library for the evaluation of object detectors.
+
+<p align="center">
+ <img src="resources/img0.jpeg"/>
+</p>
+
+In practice, a **higher mAP** value indicates **better performance** of your detector on your ground truth and set of classes.
+
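+As a rough sketch of what the metric computes (illustrative only, not this library's internals): average precision (AP) for one class is the area under the precision-recall curve built from score-sorted detections, and mAP averages AP over all classes. The detection flags below are made up for the example.
+
+```python
+import numpy as np
+
+# Hypothetical TP/FP flags of score-sorted detections for one class.
+tp = np.array([1, 1, 0, 1, 0, 1])  # 1 = detection matched a ground-truth box
+n_gt = 6                           # total ground-truth boxes for this class
+
+precision = np.cumsum(tp) / np.arange(1, len(tp) + 1)
+recall = np.cumsum(tp) / n_gt
+
+# 11-point VOC interpolation: mean of the max precision at recall >= r.
+ap = np.mean([precision[recall >= r].max() if (recall >= r).any() else 0.0
+              for r in np.arange(0.0, 1.1, 0.1)])
+print(f"AP (11-point): {ap:.4f}")
+```
+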
+## Install package
+
+```bash
+pip install mean_average_precision
+```
+
+## Install the latest version
+
+```bash
+pip install --upgrade git+https://github.com/bes-dev/mean_average_precision.git
+```
+
+## Example
+```python
+import numpy as np
+from mean_average_precision import MetricBuilder
+
+# [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
+gt = np.array([
+ [439, 157, 556, 241, 0, 0, 0],
+ [437, 246, 518, 351, 0, 0, 0],
+ [515, 306, 595, 375, 0, 0, 0],
+ [407, 386, 531, 476, 0, 0, 0],
+ [544, 419, 621, 476, 0, 0, 0],
+ [609, 297, 636, 392, 0, 0, 0]
+])
+
+# [xmin, ymin, xmax, ymax, class_id, confidence]
+preds = np.array([
+ [429, 219, 528, 247, 0, 0.460851],
+ [433, 260, 506, 336, 0, 0.269833],
+ [518, 314, 603, 369, 0, 0.462608],
+ [592, 310, 634, 388, 0, 0.298196],
+ [403, 384, 517, 461, 0, 0.382881],
+ [405, 429, 519, 470, 0, 0.369369],
+ [433, 272, 499, 341, 0, 0.272826],
+ [413, 390, 515, 459, 0, 0.619459]
+])
+
+# print list of available metrics
+print(MetricBuilder.get_metrics_list())
+
+# create metric_fn
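+# (async_mode=True is assumed, from the parameter name, to accumulate added
+# samples asynchronously; the upstream README does not explain it here)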
+metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=1)
+
+# add some samples to evaluation
+for i in range(10):
+ metric_fn.add(preds, gt)
+
+# compute PASCAL VOC metric
+print(f"VOC PASCAL mAP: {metric_fn.value(iou_thresholds=0.5, recall_thresholds=np.arange(0., 1.1, 0.1))['mAP']}")
+
+# compute PASCAL VOC metric at all points
+print(f"VOC PASCAL mAP at all points: {metric_fn.value(iou_thresholds=0.5)['mAP']}")
+
+# compute COCO metric
+print(f"COCO mAP: {metric_fn.value(iou_thresholds=np.arange(0.5, 1.0, 0.05), recall_thresholds=np.arange(0., 1.01, 0.01), mpolicy='soft')['mAP']}")
+```
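+
+Note the threshold grids: `np.arange(0., 1.1, 0.1)` is the classic 11-point PASCAL VOC recall grid (0.0, 0.1, ..., 1.0), while the COCO-style call averages over 101 recall points (`np.arange(0., 1.01, 0.01)`) and ten IoU thresholds from 0.5 to 0.95 in steps of 0.05. Each `value()` call returns a dict whose `'mAP'` key holds the final score, as used above.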
+
+
+
+
+%package -n python3-mean-average-precision
+Summary: Mean Average Precision evaluator for object detection.
+Provides: python-mean-average-precision
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-mean-average-precision
+# mAP: Mean Average Precision for Object Detection
+
+A simple library for the evaluation of object detectors.
+
+<p align="center">
+ <img src="resources/img0.jpeg"/>
+</p>
+
+In practice, a **higher mAP** value indicates **better performance** of your detector on your ground truth and set of classes.
+
+## Install package
+
+```bash
+pip install mean_average_precision
+```
+
+## Install the latest version
+
+```bash
+pip install --upgrade git+https://github.com/bes-dev/mean_average_precision.git
+```
+
+## Example
+```python
+import numpy as np
+from mean_average_precision import MetricBuilder
+
+# [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
+gt = np.array([
+ [439, 157, 556, 241, 0, 0, 0],
+ [437, 246, 518, 351, 0, 0, 0],
+ [515, 306, 595, 375, 0, 0, 0],
+ [407, 386, 531, 476, 0, 0, 0],
+ [544, 419, 621, 476, 0, 0, 0],
+ [609, 297, 636, 392, 0, 0, 0]
+])
+
+# [xmin, ymin, xmax, ymax, class_id, confidence]
+preds = np.array([
+ [429, 219, 528, 247, 0, 0.460851],
+ [433, 260, 506, 336, 0, 0.269833],
+ [518, 314, 603, 369, 0, 0.462608],
+ [592, 310, 634, 388, 0, 0.298196],
+ [403, 384, 517, 461, 0, 0.382881],
+ [405, 429, 519, 470, 0, 0.369369],
+ [433, 272, 499, 341, 0, 0.272826],
+ [413, 390, 515, 459, 0, 0.619459]
+])
+
+# print list of available metrics
+print(MetricBuilder.get_metrics_list())
+
+# create metric_fn
+metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=1)
+
+# add some samples to evaluation
+for i in range(10):
+ metric_fn.add(preds, gt)
+
+# compute PASCAL VOC metric
+print(f"VOC PASCAL mAP: {metric_fn.value(iou_thresholds=0.5, recall_thresholds=np.arange(0., 1.1, 0.1))['mAP']}")
+
+# compute PASCAL VOC metric at all points
+print(f"VOC PASCAL mAP at all points: {metric_fn.value(iou_thresholds=0.5)['mAP']}")
+
+# compute COCO metric
+print(f"COCO mAP: {metric_fn.value(iou_thresholds=np.arange(0.5, 1.0, 0.05), recall_thresholds=np.arange(0., 1.01, 0.01), mpolicy='soft')['mAP']}")
+```
+
+
+
+
+%package help
+Summary: Development documents and examples for mean-average-precision
+Provides: python3-mean-average-precision-doc
+%description help
+# mAP: Mean Average Precision for Object Detection
+
+A simple library for the evaluation of object detectors.
+
+<p align="center">
+ <img src="resources/img0.jpeg"/>
+</p>
+
+In practice, a **higher mAP** value indicates **better performance** of your detector on your ground truth and set of classes.
+
+## Install package
+
+```bash
+pip install mean_average_precision
+```
+
+## Install the latest version
+
+```bash
+pip install --upgrade git+https://github.com/bes-dev/mean_average_precision.git
+```
+
+## Example
+```python
+import numpy as np
+from mean_average_precision import MetricBuilder
+
+# [xmin, ymin, xmax, ymax, class_id, difficult, crowd]
+gt = np.array([
+ [439, 157, 556, 241, 0, 0, 0],
+ [437, 246, 518, 351, 0, 0, 0],
+ [515, 306, 595, 375, 0, 0, 0],
+ [407, 386, 531, 476, 0, 0, 0],
+ [544, 419, 621, 476, 0, 0, 0],
+ [609, 297, 636, 392, 0, 0, 0]
+])
+
+# [xmin, ymin, xmax, ymax, class_id, confidence]
+preds = np.array([
+ [429, 219, 528, 247, 0, 0.460851],
+ [433, 260, 506, 336, 0, 0.269833],
+ [518, 314, 603, 369, 0, 0.462608],
+ [592, 310, 634, 388, 0, 0.298196],
+ [403, 384, 517, 461, 0, 0.382881],
+ [405, 429, 519, 470, 0, 0.369369],
+ [433, 272, 499, 341, 0, 0.272826],
+ [413, 390, 515, 459, 0, 0.619459]
+])
+
+# print list of available metrics
+print(MetricBuilder.get_metrics_list())
+
+# create metric_fn
+metric_fn = MetricBuilder.build_evaluation_metric("map_2d", async_mode=True, num_classes=1)
+
+# add some samples to evaluation
+for i in range(10):
+ metric_fn.add(preds, gt)
+
+# compute PASCAL VOC metric
+print(f"VOC PASCAL mAP: {metric_fn.value(iou_thresholds=0.5, recall_thresholds=np.arange(0., 1.1, 0.1))['mAP']}")
+
+# compute PASCAL VOC metric at all points
+print(f"VOC PASCAL mAP at all points: {metric_fn.value(iou_thresholds=0.5)['mAP']}")
+
+# compute COCO metric
+print(f"COCO mAP: {metric_fn.value(iou_thresholds=np.arange(0.5, 1.0, 0.05), recall_thresholds=np.arange(0., 1.01, 0.01), mpolicy='soft')['mAP']}")
+```
+
+
+
+
+%prep
+%autosetup -n mean_average_precision-2021.4.26.0
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
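+# Collect the installed payload into filelist.lst (and man pages into
+# doclist.lst) so the %files sections below can consume the generated lists.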
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-mean-average-precision -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Fri May 05 2023 Python_Bot <Python_Bot@openeuler.org> - 2021.4.26.0-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..fe66c16
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+e9340c64fc11c38979a49da1441257d6 mean_average_precision-2021.4.26.0.tar.gz