author    CoprDistGit <infra@openeuler.org>  2023-04-12 07:03:03 +0000
committer CoprDistGit <infra@openeuler.org>  2023-04-12 07:03:03 +0000
commit    7586fb58acf6fb6db77261712b736d2fd799813c (patch)
tree      d295e6e0b097dc70fb0066a23835afb65cd6a3fa
parent    2af6900f15fa96692d6cb406aea5af14c4a3db56 (diff)

automatic import of python-adversarial-robustness-toolbox (branch openeuler20.03)
 .gitignore                                 |   1 +
 python-adversarial-robustness-toolbox.spec | 387 ++++++++++
 sources                                    |   1 +
 3 files changed, 389 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
index e69de29..e62ffcc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/adversarial-robustness-toolbox-1.14.0.tar.gz
diff --git a/python-adversarial-robustness-toolbox.spec b/python-adversarial-robustness-toolbox.spec
new file mode 100644
index 0000000..5db7e83
--- /dev/null
+++ b/python-adversarial-robustness-toolbox.spec
@@ -0,0 +1,387 @@
+%global _empty_manifest_terminate_build 0
+Name: python-adversarial-robustness-toolbox
+Version: 1.14.0
+Release: 1
+Summary: Toolbox for adversarial machine learning
+License: MIT
+URL: https://github.com/Trusted-AI/adversarial-robustness-toolbox
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/32/52/98469e81703162447154cdd9f2270e4f8ecc39ad6159e917c0767fad4937/adversarial-robustness-toolbox-1.14.0.tar.gz
+BuildArch: noarch
+
+Requires: python3-numpy
+Requires: python3-scipy
+Requires: python3-scikit-learn
+Requires: python3-six
+Requires: python3-setuptools
+Requires: python3-tqdm
+Requires: python3-mxnet
+Requires: python3-catboost
+Requires: python3-lightgbm
+Requires: python3-tensorflow
+Requires: python3-tensorflow-addons
+Requires: python3-h5py
+Requires: python3-torch
+Requires: python3-torchvision
+Requires: python3-torchaudio
+Requires: python3-xgboost
+Requires: python3-pandas
+Requires: python3-kornia
+Requires: python3-matplotlib
+Requires: python3-Pillow
+Requires: python3-statsmodels
+Requires: python3-pydub
+Requires: python3-resampy
+Requires: python3-ffmpeg-python
+Requires: python3-cma
+Requires: python3-librosa
+Requires: python3-opencv-python
+Requires: python3-numba
+Requires: python3-sphinx
+Requires: python3-sphinx-rtd-theme
+Requires: python3-sphinx-autodoc-annotation
+Requires: python3-sphinx-autodoc-typehints
+Requires: python3-GPy
+Requires: python3-keras
+Requires: python3-tensorflow-gpu
+Requires: python3-lingvo
+Requires: python3-pytest
+Requires: python3-pytest-flake8
+Requires: python3-pytest-mock
+Requires: python3-pytest-cov
+Requires: python3-codecov
+Requires: python3-requests
+Requires: python3-sortedcontainers
+
+%description
+# Adversarial Robustness Toolbox (ART) v1.14
+<p align="center">
+ <img src="docs/images/art_lfai.png?raw=true" width="467" title="ART logo">
+</p>
+<br />
+
+![Continuous Integration](https://github.com/Trusted-AI/adversarial-robustness-toolbox/workflows/Continuous%20Integration/badge.svg)
+![CodeQL](https://github.com/Trusted-AI/adversarial-robustness-toolbox/workflows/CodeQL/badge.svg)
+[![Documentation Status](https://readthedocs.org/projects/adversarial-robustness-toolbox/badge/?version=latest)](http://adversarial-robustness-toolbox.readthedocs.io/en/latest/?badge=latest)
+[![PyPI](https://badge.fury.io/py/adversarial-robustness-toolbox.svg)](https://badge.fury.io/py/adversarial-robustness-toolbox)
+[![codecov](https://codecov.io/gh/Trusted-AI/adversarial-robustness-toolbox/branch/main/graph/badge.svg)](https://codecov.io/gh/Trusted-AI/adversarial-robustness-toolbox)
+[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
+[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/adversarial-robustness-toolbox)](https://pypi.org/project/adversarial-robustness-toolbox/)
+[![slack-img](https://img.shields.io/badge/chat-on%20slack-yellow.svg)](https://ibm-art.slack.com/)
+[![Downloads](https://pepy.tech/badge/adversarial-robustness-toolbox)](https://pepy.tech/project/adversarial-robustness-toolbox)
+[![Downloads](https://pepy.tech/badge/adversarial-robustness-toolbox/month)](https://pepy.tech/project/adversarial-robustness-toolbox)
+[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/5090/badge)](https://bestpractices.coreinfrastructure.org/projects/5090)
+
+[Chinese README available here](README-cn.md)
+
+<p align="center">
+ <img src="https://raw.githubusercontent.com/lfai/artwork/master/lfaidata-assets/lfaidata-project-badge/graduate/color/lfaidata-project-badge-graduate-color.png" alt="LF AI & Data" width="300"/>
+</p>
+
+Adversarial Robustness Toolbox (ART) is a Python library for Machine Learning Security. ART is hosted by the
+[Linux Foundation AI & Data Foundation](https://lfaidata.foundation) (LF AI & Data). ART provides tools that enable
+developers and researchers to defend and evaluate Machine Learning models and applications against the
+adversarial threats of Evasion, Poisoning, Extraction, and Inference. ART supports all popular machine learning frameworks
+(TensorFlow, Keras, PyTorch, MXNet, scikit-learn, XGBoost, LightGBM, CatBoost, GPy, etc.), all data types
+(images, tables, audio, video, etc.) and machine learning tasks (classification, object detection, speech recognition,
+generation, certification, etc.).
+
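+As a quick illustration, here is a minimal sketch of an Evasion attack using
+ART's FastGradientMethod (the toy data, model, and eps value below are
+placeholders; assumes ART and scikit-learn are installed):
+
+```python
+import numpy as np
+from sklearn.linear_model import LogisticRegression
+from art.estimators.classification import SklearnClassifier
+from art.attacks.evasion import FastGradientMethod
+
+# Toy data standing in for a real dataset.
+rng = np.random.default_rng(0)
+X = rng.random((100, 4)).astype(np.float32)
+y = (X.sum(axis=1) > 2.0).astype(int)
+
+# Wrap a fitted scikit-learn model in an ART estimator.
+model = LogisticRegression().fit(X, y)
+classifier = SklearnClassifier(model=model, clip_values=(0.0, 1.0))
+
+# Generate adversarial examples and compare accuracy before and after.
+attack = FastGradientMethod(estimator=classifier, eps=0.2)
+X_adv = attack.generate(x=X)
+clean_acc = (classifier.predict(X).argmax(axis=1) == y).mean()
+adv_acc = (classifier.predict(X_adv).argmax(axis=1) == y).mean()
+print(f"accuracy clean={clean_acc:.2f} adversarial={adv_acc:.2f}")
+```
+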
+## Adversarial Threats
+
+<p align="center">
+ <img src="docs/images/adversarial_threats_attacker.png?raw=true" width="400" title="ART logo">
+ <img src="docs/images/adversarial_threats_art.png?raw=true" width="400" title="ART logo">
+</p>
+<br />
+
+## ART for Red and Blue Teams (selection)
+
+<p align="center">
+ <img src="docs/images/white_hat_blue_red.png?raw=true" width="800" title="ART Red and Blue Teams">
+</p>
+<br />
+
+## Learn more
+
+| **[Get Started][get-started]** | **[Documentation][documentation]** | **[Contributing][contributing]** |
+|-------------------------------------|-------------------------------|-----------------------------------|
+| - [Installation][installation]<br>- [Examples](examples/README.md)<br>- [Notebooks](notebooks/README.md) | - [Attacks][attacks]<br>- [Defences][defences]<br>- [Estimators][estimators]<br>- [Metrics][metrics]<br>- [Technical Documentation](https://adversarial-robustness-toolbox.readthedocs.io) | - [Slack](https://ibm-art.slack.com), [Invitation](https://join.slack.com/t/ibm-art/shared_invite/enQtMzkyOTkyODE4NzM4LTA4NGQ1OTMxMzFmY2Q1MzE1NWI2MmEzN2FjNGNjOGVlODVkZDE0MjA1NTA4OGVkMjVkNmQ4MTY1NmMyOGM5YTg)<br>- [Contributing](CONTRIBUTING.md)<br>- [Roadmap][roadmap]<br>- [Citing][citing] |
+
+[get-started]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/Get-Started
+[attacks]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/ART-Attacks
+[defences]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/ART-Defences
+[estimators]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/ART-Estimators
+[metrics]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/ART-Metrics
+[contributing]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/Contributing
+[documentation]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/Documentation
+[installation]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/Get-Started#setup
+[roadmap]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/Roadmap
+[citing]: https://github.com/Trusted-AI/adversarial-robustness-toolbox/wiki/Contributing#citing-art
+
+The library is under continuous development. Feedback, bug reports and contributions are very welcome!
+
+# Acknowledgment
+This material is partially based upon work supported by the Defense Advanced Research Projects Agency (DARPA) under
+Contract No. HR001120C0013. Any opinions, findings and conclusions or recommendations expressed in this material are
+those of the author(s) and do not necessarily reflect the views of the Defense Advanced Research Projects Agency (DARPA).
+
+
+
+
+%package -n python3-adversarial-robustness-toolbox
+Summary: Toolbox for adversarial machine learning
+Provides: python-adversarial-robustness-toolbox
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-adversarial-robustness-toolbox
+Adversarial Robustness Toolbox (ART) is a Python library for Machine Learning
+Security, hosted by the Linux Foundation AI & Data Foundation (LF AI & Data).
+It provides tools for developers and researchers to defend and evaluate
+machine learning models against the adversarial threats of Evasion,
+Poisoning, Extraction, and Inference, across all popular frameworks, data
+types, and tasks.
+
+
+
+
+%package help
+Summary: Development documents and examples for adversarial-robustness-toolbox
+Provides: python3-adversarial-robustness-toolbox-doc
+%description help
+Development documents and examples for the Adversarial Robustness Toolbox
+(ART), a Python library for Machine Learning Security hosted by the Linux
+Foundation AI & Data Foundation (LF AI & Data).
+
+
+
+
+%prep
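+# Unpack Source0 (the PyPI sdist) and change into the extracted source tree.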
+%autosetup -n adversarial-robustness-toolbox-1.14.0
+
+%build
+%py3_build
+
+%install
+%py3_install
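+# Stage any doc/example directories shipped in the sdist into the package docdir.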
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
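+# Record every installed file so the %files sections can consume generated lists.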
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
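+# Man pages are gzip-compressed by RPM's brp scripts, hence the appended .gz.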
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
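+# Both file lists below were generated during %install.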
+%files -n python3-adversarial-robustness-toolbox -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Wed Apr 12 2023 Python_Bot <Python_Bot@openeuler.org> - 1.14.0-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..c5cc888
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+6d042387dabff68a649703955ca01e71 adversarial-robustness-toolbox-1.14.0.tar.gz
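Each line in sources pairs an MD5 digest with a source tarball name. A minimal
sketch for verifying a downloaded tarball against that entry (assumes the
tarball sits in the current directory):

```python
# Check the tarball against the digest recorded in 'sources' (illustrative).
import hashlib

expected = "6d042387dabff68a649703955ca01e71"
with open("adversarial-robustness-toolbox-1.14.0.tar.gz", "rb") as f:
    digest = hashlib.md5(f.read()).hexdigest()
print("OK" if digest == expected else f"mismatch: {digest}")
```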