author     CoprDistGit <infra@openeuler.org>    2023-07-10 02:44:17 +0000
committer  CoprDistGit <infra@openeuler.org>    2023-07-10 02:44:17 +0000
commit     30fc57a5dc8b247af245a4ac94ca554336ceaf5d (patch)
tree       280e9e31afa5d04f02fc20843155981c4eea2677
parent     be7302c9661d0282e2de731e47453de693297706 (diff)
automatic import of python-nn (openeuler23.03)
-rw-r--r--  .gitignore        1
-rw-r--r--  python-NN.spec  429
-rw-r--r--  sources           1
3 files changed, 431 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
index e69de29..df5cc5d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/nn-0.1.1.tar.gz
diff --git a/python-NN.spec b/python-NN.spec
new file mode 100644
index 0000000..ef1850e
--- /dev/null
+++ b/python-NN.spec
@@ -0,0 +1,429 @@
+%global _empty_manifest_terminate_build 0
+Name: python-nn
+Version: 0.1.1
+Release: 1
+Summary: A neural network library built on top of TensorFlow for quickly building deep learning models.
+License: MIT
+URL: https://github.com/marella/nn
+Source0: https://mirrors.aliyun.com/pypi/web/packages/b3/2a/b00995cba3fda79210c0002355925b45a3abf882c2b3c42b5275dc6708df/nn-0.1.1.tar.gz
+BuildArch: noarch
+
+
+%description
+A neural network library built on top of TensorFlow for quickly building deep learning models.
+
+[![Build Status](https://travis-ci.org/marella/nn.svg?branch=master)](https://travis-ci.org/marella/nn)
+
+## Usage
+
+`nn.Tensor` is the core data structure: a wrapper around `tf.Tensor` that provides additional functionality. It can be created using the `nn.tensor()` function:
+
+```py
+import nn
+
+a = nn.tensor([1, 2, 3])
+assert isinstance(a, nn.Tensor)
+assert a.shape == (3, )
+```
+
+It supports method chaining:
+
+```py
+c = a.square().sum()
+assert c.numpy() == 14
+```
+
+and can be used with `tf.Tensor` objects:
+
+```py
+import tensorflow as tf
+
+b = tf.constant(2)
+c = (a - b).square().sum()
+assert c.numpy() == 2
+```
+
+It can also be used with high-level APIs such as `tf.keras`:
+
+```py
+model = nn.Sequential([
+ nn.Dense(128, activation='relu'),
+ nn.Dropout(0.2),
+ nn.Dense(10)
+])
+
+y = model(x)
+assert isinstance(y, nn.Tensor)
+```
+
+and to perform automatic differentiation and optimization:
+
+```py
+optimizer = nn.Adam()
+with nn.GradientTape() as tape:
+ outputs = model(inputs)
+ loss = (targets - outputs).square().mean()
+grads = tape.gradient(loss, model.trainable_variables)
+optimizer.apply_gradients(zip(grads, model.trainable_variables))
+```
+
+To use it with ops that expect `tf.Tensor` objects as inputs, wrap the ops using `nn.op()`:
+
+```py
+mean = nn.op(tf.reduce_mean)
+c = mean(a)
+assert isinstance(c, nn.Tensor)
+
+maximum = nn.op(tf.maximum, binary=True)
+c = maximum(a, b)
+assert isinstance(c, nn.Tensor)
+```
+
+or convert it to a `tf.Tensor` object using the `tf()` method or `nn.tf()` function:
+
+```py
+b = a.tf()
+assert isinstance(b, tf.Tensor)
+
+b = nn.tf(a)
+assert isinstance(b, tf.Tensor)
+```
+
+See more examples [here][examples].
+
+## Installation
+
+Requirements:
+
+- TensorFlow >= 2.0
+- Python >= 3.6
+
+Install from PyPI (recommended):
+
+```sh
+pip install nn
+```
+
+Alternatively, install from source:
+
+```sh
+git clone https://github.com/marella/nn.git
+cd nn
+pip install -e .
+```
+
+[TensorFlow] should be installed separately.
+
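+For example, TensorFlow can typically be installed from PyPI (the exact package or version to use may depend on your platform and whether you want GPU support):
+
+```sh
+pip install tensorflow
+```
+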
+## Testing
+
+To run tests, install dependencies:
+
+```sh
+pip install -e .[tests]
+```
+
+and run:
+
+```sh
+pytest tests
+```
+
+[tensorflow]: https://www.tensorflow.org/install
+[examples]: https://github.com/marella/train/tree/master/examples
+
+%package -n python3-nn
+Summary: A neural network library built on top of TensorFlow for quickly building deep learning models.
+Provides: python-nn
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-nn
+A neural network library built on top of TensorFlow for quickly building deep learning models.
+
+[![Build Status](https://travis-ci.org/marella/nn.svg?branch=master)](https://travis-ci.org/marella/nn)
+
+## Usage
+
+`nn.Tensor` is the core data structure: a wrapper around `tf.Tensor` that provides additional functionality. It can be created using the `nn.tensor()` function:
+
+```py
+import nn
+
+a = nn.tensor([1, 2, 3])
+assert isinstance(a, nn.Tensor)
+assert a.shape == (3, )
+```
+
+It supports method chaining:
+
+```py
+c = a.square().sum()
+assert c.numpy() == 14
+```
+
+and can be used with `tf.Tensor` objects:
+
+```py
+import tensorflow as tf
+
+b = tf.constant(2)
+c = (a - b).square().sum()
+assert c.numpy() == 2
+```
+
+It can also be used with high-level APIs such as `tf.keras`:
+
+```py
+model = nn.Sequential([
+ nn.Dense(128, activation='relu'),
+ nn.Dropout(0.2),
+ nn.Dense(10)
+])
+
+y = model(x)
+assert isinstance(y, nn.Tensor)
+```
+
+and to perform automatic differentiation and optimization:
+
+```py
+optimizer = nn.Adam()
+with nn.GradientTape() as tape:
+ outputs = model(inputs)
+ loss = (targets - outputs).square().mean()
+grads = tape.gradient(loss, model.trainable_variables)
+optimizer.apply_gradients(zip(grads, model.trainable_variables))
+```
+
+To use it with ops that expect `tf.Tensor` objects as inputs, wrap the ops using `nn.op()`:
+
+```py
+mean = nn.op(tf.reduce_mean)
+c = mean(a)
+assert isinstance(c, nn.Tensor)
+
+maximum = nn.op(tf.maximum, binary=True)
+c = maximum(a, b)
+assert isinstance(c, nn.Tensor)
+```
+
+or convert it to a `tf.Tensor` object using the `tf()` method or `nn.tf()` function:
+
+```py
+b = a.tf()
+assert isinstance(b, tf.Tensor)
+
+b = nn.tf(a)
+assert isinstance(b, tf.Tensor)
+```
+
+See more examples [here][examples].
+
+## Installation
+
+Requirements:
+
+- TensorFlow >= 2.0
+- Python >= 3.6
+
+Install from PyPI (recommended):
+
+```sh
+pip install nn
+```
+
+Alternatively, install from source:
+
+```sh
+git clone https://github.com/marella/nn.git
+cd nn
+pip install -e .
+```
+
+[TensorFlow] should be installed separately.
+
+## Testing
+
+To run tests, install dependencies:
+
+```sh
+pip install -e .[tests]
+```
+
+and run:
+
+```sh
+pytest tests
+```
+
+[tensorflow]: https://www.tensorflow.org/install
+[examples]: https://github.com/marella/train/tree/master/examples
+
+%package help
+Summary: Development documents and examples for nn
+Provides: python3-nn-doc
+%description help
+A neural network library built on top of TensorFlow for quickly building deep learning models.
+
+[![Build Status](https://travis-ci.org/marella/nn.svg?branch=master)](https://travis-ci.org/marella/nn)
+
+## Usage
+
+`nn.Tensor` is the core data structure: a wrapper around `tf.Tensor` that provides additional functionality. It can be created using the `nn.tensor()` function:
+
+```py
+import nn
+
+a = nn.tensor([1, 2, 3])
+assert isinstance(a, nn.Tensor)
+assert a.shape == (3, )
+```
+
+It supports method chaining:
+
+```py
+c = a.square().sum()
+assert c.numpy() == 14
+```
+
+and can be used with `tf.Tensor` objects:
+
+```py
+import tensorflow as tf
+
+b = tf.constant(2)
+c = (a - b).square().sum()
+assert c.numpy() == 2
+```
+
+It can also be used with high-level APIs such as `tf.keras`:
+
+```py
+model = nn.Sequential([
+ nn.Dense(128, activation='relu'),
+ nn.Dropout(0.2),
+ nn.Dense(10)
+])
+
+y = model(x)
+assert isinstance(y, nn.Tensor)
+```
+
+and to perform automatic differentiation and optimization:
+
+```py
+optimizer = nn.Adam()
+with nn.GradientTape() as tape:
+ outputs = model(inputs)
+ loss = (targets - outputs).square().mean()
+grads = tape.gradient(loss, model.trainable_variables)
+optimizer.apply_gradients(zip(grads, model.trainable_variables))
+```
+
+To use it with ops that expect `tf.Tensor` objects as inputs, wrap the ops using `nn.op()`:
+
+```py
+mean = nn.op(tf.reduce_mean)
+c = mean(a)
+assert isinstance(c, nn.Tensor)
+
+maximum = nn.op(tf.maximum, binary=True)
+c = maximum(a, b)
+assert isinstance(c, nn.Tensor)
+```
+
+or convert it to a `tf.Tensor` object using the `tf()` method or `nn.tf()` function:
+
+```py
+b = a.tf()
+assert isinstance(b, tf.Tensor)
+
+b = nn.tf(a)
+assert isinstance(b, tf.Tensor)
+```
+
+See more examples [here][examples].
+
+## Installation
+
+Requirements:
+
+- TensorFlow >= 2.0
+- Python >= 3.6
+
+Install from PyPI (recommended):
+
+```sh
+pip install nn
+```
+
+Alternatively, install from source:
+
+```sh
+git clone https://github.com/marella/nn.git
+cd nn
+pip install -e .
+```
+
+[TensorFlow] should be installed separately.
+
+## Testing
+
+To run tests, install dependencies:
+
+```sh
+pip install -e .[tests]
+```
+
+and run:
+
+```sh
+pytest tests
+```
+
+[tensorflow]: https://www.tensorflow.org/install
+[examples]: https://github.com/marella/train/tree/master/examples
+
+%prep
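+# Unpack the Source0 tarball; the extracted top-level directory is nn-0.1.1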
+%autosetup -n nn-0.1.1
+
+%build
+%py3_build
+
+%install
+%py3_install
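+# Create the package doc dir and copy in any doc/docs/example/examples directories shipped in the sdist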
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
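+# Walk the buildroot to build the file list consumed by %files; man pages (gzip-compressed during the build) are recorded separately in doclist.lst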
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "\"/%h/%f.gz\"\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-nn -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Mon Jul 10 2023 Python_Bot <Python_Bot@openeuler.org> - 0.1.1-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..30df387
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+ca18363db75bb603c2f17641b0ff27ca nn-0.1.1.tar.gz