author    CoprDistGit <infra@openeuler.org>  2023-04-11 05:32:05 +0000
committer CoprDistGit <infra@openeuler.org>  2023-04-11 05:32:05 +0000
commit    4f11e51a2b9680b1982bd0a214ffbd84eb62d9f5 (patch)
tree      f0e1b35c1ecfb5da12a875803f70d89ac0d944fc
parent    0f17c1051043cbf5eb9ad5baa3bd289207390fd4 (diff)
automatic import of python-keras-multi-head
-rw-r--r--  .gitignore                      1
-rw-r--r--  python-keras-multi-head.spec  507
-rw-r--r--  sources                         1
3 files changed, 509 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
index e69de29..ddffee3 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/keras-multi-head-0.29.0.tar.gz
diff --git a/python-keras-multi-head.spec b/python-keras-multi-head.spec
new file mode 100644
index 0000000..3a8418d
--- /dev/null
+++ b/python-keras-multi-head.spec
@@ -0,0 +1,507 @@
+%global _empty_manifest_terminate_build 0
+Name: python-keras-multi-head
+Version: 0.29.0
+Release: 1
+Summary: A wrapper layer for stacking layers horizontally
+License: MIT
+URL: https://github.com/CyberZHG/keras-multi-head
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/2c/21/5e1699e9d63a8e3c0d5fd0716b9a8be7d8c2c07fc8de34902e55de5ba58e/keras-multi-head-0.29.0.tar.gz
+BuildArch: noarch
+
+
+%description
+# Keras Multi-Head
+
+[![Version](https://img.shields.io/pypi/v/keras-multi-head.svg)](https://pypi.org/project/keras-multi-head/)
+![License](https://img.shields.io/pypi/l/keras-multi-head.svg)
+
+A wrapper layer for stacking layers horizontally.
+
+![](https://user-images.githubusercontent.com/853842/45797517-867b8580-bcd8-11e8-9ec6-39d6508cf438.png)
+
+## Install
+
+```bash
+pip install keras-multi-head
+```
+
+## Usage
+
+### Duplicate Layers
+
+If only a single layer is provided, it will be duplicated. The `layer_num` argument controls how many copies are created.
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+model.add(MultiHead(keras.layers.LSTM(units=32), layer_num=5, name='Multi-LSTMs'))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=4, activation='softmax', name='Dense'))
+model.build()
+model.summary()
+```
+
+### Use Multiple Layers
+
+The first argument can also be a list of layers with different configurations; however, they must all have the same output shape.
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+model.add(MultiHead([
+ keras.layers.Conv1D(filters=32, kernel_size=3, padding='same'),
+ keras.layers.Conv1D(filters=32, kernel_size=5, padding='same'),
+ keras.layers.Conv1D(filters=32, kernel_size=7, padding='same'),
+], name='Multi-CNNs'))
+model.build()
+model.summary()
+```
+
+### Linear Transformation
+
+When `hidden_dim` is given, the input data is mapped to a different value of the same shape for each parallel layer.
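+
+A minimal sketch of enabling this projection (not from the original README; `hidden_dim=20` is an illustrative choice matching the embedding output size):
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+# With hidden_dim set, each of the 5 LSTMs receives its own learned
+# linear projection of the embedding output instead of the raw input.
+model.add(MultiHead(keras.layers.LSTM(units=32), layer_num=5, hidden_dim=20, name='Multi-LSTMs'))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=4, activation='softmax', name='Dense'))
+model.build()
+model.summary()
+```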
+
+### Regularization
+
+![](https://user-images.githubusercontent.com/853842/45857922-8b4e4100-bd8d-11e8-905a-4eb07da31418.png)
+
+Regularization is useful when you expect the parallel layers to extract different features. You can customize the indices of the weights within each layer, the slices that select the relevant parts of those weights, and the factor of the regularization.
+
+For example, the bidirectional LSTM layer has 6 weights by default, and the first 3 belong to the forward layer. The 2nd weight (the recurrent kernel) in the forward layer controls the computation of the gates for the recurrent connections. The kernel for computing cell states lies in columns `units * 2` to `units * 3` of the recurrent kernel. We can apply the regularization to these kernels:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=5, output_dim=3, name='Embed'))
+model.add(MultiHead(
+ layer=keras.layers.Bidirectional(keras.layers.LSTM(units=16), name='LSTM'),
+ layer_num=5,
+ reg_index=[1, 4],
+ reg_slice=(slice(None, None), slice(32, 48)),
+ reg_factor=0.1,
+ name='Multi-Head-Attention',
+))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=2, activation='softmax', name='Dense'))
+model.build()
+```
+
+* `reg_index`: The indices of `layer.get_weights()`, a single integer or a list of integers.
+* `reg_slice`: a `slice`, a tuple of `slice`s, or a list of either. If multiple indices are provided in `reg_index` and `reg_slice` is not a list, the same slice is applied to all the indices. The whole weight array is used if this argument is left as `None`.
+* `reg_factor`: The factor of the regularization, a float or a list of floats.
+
+### Multi-Head Attention
+
+A more specific multi-head layer is also provided, since the general one is harder to use. It uses scaled dot-product attention as its sub-layers, and only `head_num` is required:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHeadAttention
+
+input_layer = keras.layers.Input(
+ shape=(2, 3),
+ name='Input',
+)
+att_layer = MultiHeadAttention(
+ head_num=3,
+ name='Multi-Head',
+)(input_layer)
+model = keras.models.Model(inputs=input_layer, outputs=att_layer)
+model.compile(
+ optimizer='adam',
+ loss='mse',
+ metrics={},
+)
+model.summary()
+```
+
+If a single layer is provided as input, the output tensor has the same shape as the input. When a list is given, the input layers are treated as the query, key, and value respectively:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHeadAttention
+
+input_query = keras.layers.Input(
+ shape=(2, 3),
+ name='Input-Q',
+)
+input_key = keras.layers.Input(
+ shape=(4, 5),
+ name='Input-K',
+)
+input_value = keras.layers.Input(
+ shape=(4, 6),
+ name='Input-V',
+)
+att_layer = MultiHeadAttention(
+ head_num=3,
+ name='Multi-Head',
+)([input_query, input_key, input_value])
+model = keras.models.Model(inputs=[input_query, input_key, input_value], outputs=att_layer)
+model.compile(
+ optimizer='adam',
+ loss='mse',
+ metrics={},
+)
+model.summary()
+```
+
+%package -n python3-keras-multi-head
+Summary: A wrapper layer for stacking layers horizontally
+Provides: python-keras-multi-head
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-keras-multi-head
+# Keras Multi-Head
+
+[![Version](https://img.shields.io/pypi/v/keras-multi-head.svg)](https://pypi.org/project/keras-multi-head/)
+![License](https://img.shields.io/pypi/l/keras-multi-head.svg)
+
+A wrapper layer for stacking layers horizontally.
+
+![](https://user-images.githubusercontent.com/853842/45797517-867b8580-bcd8-11e8-9ec6-39d6508cf438.png)
+
+## Install
+
+```bash
+pip install keras-multi-head
+```
+
+## Usage
+
+### Duplicate Layers
+
+If only a single layer is provided, it will be duplicated. The `layer_num` argument controls how many copies are created.
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+model.add(MultiHead(keras.layers.LSTM(units=32), layer_num=5, name='Multi-LSTMs'))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=4, activation='softmax', name='Dense'))
+model.build()
+model.summary()
+```
+
+### Use Multiple Layers
+
+The first argument can also be a list of layers with different configurations; however, they must all have the same output shape.
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+model.add(MultiHead([
+ keras.layers.Conv1D(filters=32, kernel_size=3, padding='same'),
+ keras.layers.Conv1D(filters=32, kernel_size=5, padding='same'),
+ keras.layers.Conv1D(filters=32, kernel_size=7, padding='same'),
+], name='Multi-CNNs'))
+model.build()
+model.summary()
+```
+
+### Linear Transformation
+
+When `hidden_dim` is given, the input data is mapped to a different value of the same shape for each parallel layer.
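+
+A minimal sketch of enabling this projection (not from the original README; `hidden_dim=20` is an illustrative choice matching the embedding output size):
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+# With hidden_dim set, each of the 5 LSTMs receives its own learned
+# linear projection of the embedding output instead of the raw input.
+model.add(MultiHead(keras.layers.LSTM(units=32), layer_num=5, hidden_dim=20, name='Multi-LSTMs'))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=4, activation='softmax', name='Dense'))
+model.build()
+model.summary()
+```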
+
+### Regularization
+
+![](https://user-images.githubusercontent.com/853842/45857922-8b4e4100-bd8d-11e8-905a-4eb07da31418.png)
+
+Regularization is useful when you expect the parallel layers to extract different features. You can customize the indices of the weights within each layer, the slices that select the relevant parts of those weights, and the factor of the regularization.
+
+For example, the bidirectional LSTM layer has 6 weights by default, and the first 3 belong to the forward layer. The 2nd weight (the recurrent kernel) in the forward layer controls the computation of the gates for the recurrent connections. The kernel for computing cell states lies in columns `units * 2` to `units * 3` of the recurrent kernel. We can apply the regularization to these kernels:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=5, output_dim=3, name='Embed'))
+model.add(MultiHead(
+ layer=keras.layers.Bidirectional(keras.layers.LSTM(units=16), name='LSTM'),
+ layer_num=5,
+ reg_index=[1, 4],
+ reg_slice=(slice(None, None), slice(32, 48)),
+ reg_factor=0.1,
+ name='Multi-Head-Attention',
+))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=2, activation='softmax', name='Dense'))
+model.build()
+```
+
+* `reg_index`: The indices of `layer.get_weights()`, a single integer or a list of integers.
+* `reg_slice`: a `slice`, a tuple of `slice`s, or a list of either. If multiple indices are provided in `reg_index` and `reg_slice` is not a list, the same slice is applied to all the indices. The whole weight array is used if this argument is left as `None`.
+* `reg_factor`: The factor of the regularization, a float or a list of floats.
+
+### Multi-Head Attention
+
+A more specific multi-head layer is also provided, since the general one is harder to use. It uses scaled dot-product attention as its sub-layers, and only `head_num` is required:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHeadAttention
+
+input_layer = keras.layers.Input(
+ shape=(2, 3),
+ name='Input',
+)
+att_layer = MultiHeadAttention(
+ head_num=3,
+ name='Multi-Head',
+)(input_layer)
+model = keras.models.Model(inputs=input_layer, outputs=att_layer)
+model.compile(
+ optimizer='adam',
+ loss='mse',
+ metrics={},
+)
+model.summary()
+```
+
+If a single layer is provided as input, the output tensor has the same shape as the input. When a list is given, the input layers are treated as the query, key, and value respectively:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHeadAttention
+
+input_query = keras.layers.Input(
+ shape=(2, 3),
+ name='Input-Q',
+)
+input_key = keras.layers.Input(
+ shape=(4, 5),
+ name='Input-K',
+)
+input_value = keras.layers.Input(
+ shape=(4, 6),
+ name='Input-V',
+)
+att_layer = MultiHeadAttention(
+ head_num=3,
+ name='Multi-Head',
+)([input_query, input_key, input_value])
+model = keras.models.Model(inputs=[input_query, input_key, input_value], outputs=att_layer)
+model.compile(
+ optimizer='adam',
+ loss='mse',
+ metrics={},
+)
+model.summary()
+```
+
+%package help
+Summary: Development documents and examples for keras-multi-head
+Provides: python3-keras-multi-head-doc
+%description help
+# Keras Multi-Head
+
+[![Version](https://img.shields.io/pypi/v/keras-multi-head.svg)](https://pypi.org/project/keras-multi-head/)
+![License](https://img.shields.io/pypi/l/keras-multi-head.svg)
+
+A wrapper layer for stacking layers horizontally.
+
+![](https://user-images.githubusercontent.com/853842/45797517-867b8580-bcd8-11e8-9ec6-39d6508cf438.png)
+
+## Install
+
+```bash
+pip install keras-multi-head
+```
+
+## Usage
+
+### Duplicate Layers
+
+If only a single layer is provided, it will be duplicated. The `layer_num` argument controls how many copies are created.
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+model.add(MultiHead(keras.layers.LSTM(units=32), layer_num=5, name='Multi-LSTMs'))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=4, activation='softmax', name='Dense'))
+model.build()
+model.summary()
+```
+
+### Use Multiple Layers
+
+The first argument can also be a list of layers with different configurations; however, they must all have the same output shape.
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+model.add(MultiHead([
+ keras.layers.Conv1D(filters=32, kernel_size=3, padding='same'),
+ keras.layers.Conv1D(filters=32, kernel_size=5, padding='same'),
+ keras.layers.Conv1D(filters=32, kernel_size=7, padding='same'),
+], name='Multi-CNNs'))
+model.build()
+model.summary()
+```
+
+### Linear Transformation
+
+When `hidden_dim` is given, the input data is mapped to a different value of the same shape for each parallel layer.
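+
+A minimal sketch of enabling this projection (not from the original README; `hidden_dim=20` is an illustrative choice matching the embedding output size):
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=100, output_dim=20, name='Embedding'))
+# With hidden_dim set, each of the 5 LSTMs receives its own learned
+# linear projection of the embedding output instead of the raw input.
+model.add(MultiHead(keras.layers.LSTM(units=32), layer_num=5, hidden_dim=20, name='Multi-LSTMs'))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=4, activation='softmax', name='Dense'))
+model.build()
+model.summary()
+```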
+
+### Regularization
+
+![](https://user-images.githubusercontent.com/853842/45857922-8b4e4100-bd8d-11e8-905a-4eb07da31418.png)
+
+Regularization is useful when you expect the parallel layers to extract different features. You can customize the indices of the weights within each layer, the slices that select the relevant parts of those weights, and the factor of the regularization.
+
+For example, the bidirectional LSTM layer has 6 weights by default, and the first 3 belong to the forward layer. The 2nd weight (the recurrent kernel) in the forward layer controls the computation of the gates for the recurrent connections. The kernel for computing cell states lies in columns `units * 2` to `units * 3` of the recurrent kernel. We can apply the regularization to these kernels:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHead
+
+
+model = keras.models.Sequential()
+model.add(keras.layers.Embedding(input_dim=5, output_dim=3, name='Embed'))
+model.add(MultiHead(
+ layer=keras.layers.Bidirectional(keras.layers.LSTM(units=16), name='LSTM'),
+ layer_num=5,
+ reg_index=[1, 4],
+ reg_slice=(slice(None, None), slice(32, 48)),
+ reg_factor=0.1,
+ name='Multi-Head-Attention',
+))
+model.add(keras.layers.Flatten(name='Flatten'))
+model.add(keras.layers.Dense(units=2, activation='softmax', name='Dense'))
+model.build()
+```
+
+* `reg_index`: The indices of `layer.get_weights()`, a single integer or a list of integers.
+* `reg_slice`: a `slice`, a tuple of `slice`s, or a list of either. If multiple indices are provided in `reg_index` and `reg_slice` is not a list, the same slice is applied to all the indices. The whole weight array is used if this argument is left as `None`.
+* `reg_factor`: The factor of the regularization, a float or a list of floats.
+
+### Multi-Head Attention
+
+A more specific multi-head layer is also provided, since the general one is harder to use. It uses scaled dot-product attention as its sub-layers, and only `head_num` is required:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHeadAttention
+
+input_layer = keras.layers.Input(
+ shape=(2, 3),
+ name='Input',
+)
+att_layer = MultiHeadAttention(
+ head_num=3,
+ name='Multi-Head',
+)(input_layer)
+model = keras.models.Model(inputs=input_layer, outputs=att_layer)
+model.compile(
+ optimizer='adam',
+ loss='mse',
+ metrics={},
+)
+model.summary()
+```
+
+If a single layer is provided as input, the output tensor has the same shape as the input. When a list is given, the input layers are treated as the query, key, and value respectively:
+
+```python
+from tensorflow import keras
+from keras_multi_head import MultiHeadAttention
+
+input_query = keras.layers.Input(
+ shape=(2, 3),
+ name='Input-Q',
+)
+input_key = keras.layers.Input(
+ shape=(4, 5),
+ name='Input-K',
+)
+input_value = keras.layers.Input(
+ shape=(4, 6),
+ name='Input-V',
+)
+att_layer = MultiHeadAttention(
+ head_num=3,
+ name='Multi-Head',
+)([input_query, input_key, input_value])
+model = keras.models.Model(inputs=[input_query, input_key, input_value], outputs=att_layer)
+model.compile(
+ optimizer='adam',
+ loss='mse',
+ metrics={},
+)
+model.summary()
+```
+
+%prep
+%autosetup -n keras-multi-head-0.29.0
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-keras-multi-head -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Tue Apr 11 2023 Python_Bot <Python_Bot@openeuler.org> - 0.29.0-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..ef0b297
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+fad0c0532a7c37b34708f3023e3707c0 keras-multi-head-0.29.0.tar.gz