author    CoprDistGit <infra@openeuler.org>    2023-04-11 03:37:00 +0000
committer CoprDistGit <infra@openeuler.org>    2023-04-11 03:37:00 +0000
commit    3825905df838a1b6ff41113e063e57a962f2521e (patch)
tree      4f8972f21975aa6b781cd50bc3f1d8fcea4640c4
parent    de918104ea05ede1e1cdb47b784ca6bf16d67fae (diff)
automatic import of python-keras-transformer
-rw-r--r--   .gitignore                        1
-rw-r--r--   python-keras-transformer.spec     651
-rw-r--r--   sources                           1
3 files changed, 653 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
index e69de29..d81ee58 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/keras-transformer-0.40.0.tar.gz
diff --git a/python-keras-transformer.spec b/python-keras-transformer.spec
new file mode 100644
index 0000000..b8c9de2
--- /dev/null
+++ b/python-keras-transformer.spec
@@ -0,0 +1,651 @@
+%global _empty_manifest_terminate_build 0
+Name: python-keras-transformer
+Version: 0.40.0
+Release: 1
+Summary: Transformer implemented in Keras
+License: MIT
+URL: https://github.com/CyberZHG/keras-transformer
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/82/71/de82872c32a803274f9200c63aae9086969143cd569e7bb080a1f291ed0e/keras-transformer-0.40.0.tar.gz
+BuildArch: noarch
+
+
+%description
+# Keras Transformer
+
+[![Version](https://img.shields.io/pypi/v/keras-transformer.svg)](https://pypi.org/project/keras-transformer/)
+![License](https://img.shields.io/pypi/l/keras-transformer.svg)
+
+ \[[中文](https://github.com/CyberZHG/keras-transformer/blob/master/README.zh-CN.md)|[English](https://github.com/CyberZHG/keras-transformer/blob/master/README.md)\]
+
+Implementation of the [Transformer](https://arxiv.org/pdf/1706.03762.pdf) architecture for seq2seq tasks.
+
+## Install
+
+```bash
+pip install keras-transformer
+```
+
+## Usage
+
+### Train
+
+```python
+import numpy as np
+from keras_transformer import get_model
+
+# Build a small toy token dictionary
+tokens = 'all work and no play makes jack a dull boy'.split(' ')
+token_dict = {
+ '<PAD>': 0,
+ '<START>': 1,
+ '<END>': 2,
+}
+for token in tokens:
+ if token not in token_dict:
+ token_dict[token] = len(token_dict)
+
+# Generate toy data
+encoder_inputs_no_padding = []
+encoder_inputs, decoder_inputs, decoder_outputs = [], [], []
+for i in range(1, len(tokens) - 1):
+ encode_tokens, decode_tokens = tokens[:i], tokens[i:]
+ encode_tokens = ['<START>'] + encode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(encode_tokens))
+ output_tokens = decode_tokens + ['<END>', '<PAD>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
+ decode_tokens = ['<START>'] + decode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
+ encode_tokens = list(map(lambda x: token_dict[x], encode_tokens))
+ decode_tokens = list(map(lambda x: token_dict[x], decode_tokens))
+ output_tokens = list(map(lambda x: [token_dict[x]], output_tokens))
+ encoder_inputs_no_padding.append(encode_tokens[:i + 2])
+ encoder_inputs.append(encode_tokens)
+ decoder_inputs.append(decode_tokens)
+ decoder_outputs.append(output_tokens)
+
+# Build the model
+model = get_model(
+ token_num=len(token_dict),
+ embed_dim=30,
+ encoder_num=3,
+ decoder_num=2,
+ head_num=3,
+ hidden_dim=120,
+ attention_activation='relu',
+ feed_forward_activation='relu',
+ dropout_rate=0.05,
+ embed_weights=np.random.random((13, 30)),
+)
+model.compile(
+ optimizer='adam',
+ loss='sparse_categorical_crossentropy',
+)
+model.summary()
+
+# Train the model
+model.fit(
+ x=[np.asarray(encoder_inputs * 1000), np.asarray(decoder_inputs * 1000)],
+ y=np.asarray(decoder_outputs * 1000),
+ epochs=5,
+)
+```
+
+### Predict
+
+```python
+from keras_transformer import decode
+
+decoded = decode(
+ model,
+ encoder_inputs_no_padding,
+ start_token=token_dict['<START>'],
+ end_token=token_dict['<END>'],
+ pad_token=token_dict['<PAD>'],
+ max_len=100,
+)
+token_dict_rev = {v: k for k, v in token_dict.items()}
+for i in range(len(decoded)):
+ print(' '.join(map(lambda x: token_dict_rev[x], decoded[i][1:-1])))
+```
+
+### Translation
+
+```python
+import numpy as np
+from keras_transformer import get_model, decode
+
+source_tokens = [
+ 'i need more power'.split(' '),
+ 'eat jujube and pill'.split(' '),
+]
+target_tokens = [
+ list('我要更多的抛瓦'),
+ list('吃枣💊'),
+]
+
+# Generate dictionaries
+def build_token_dict(token_list):
+ token_dict = {
+ '<PAD>': 0,
+ '<START>': 1,
+ '<END>': 2,
+ }
+ for tokens in token_list:
+ for token in tokens:
+ if token not in token_dict:
+ token_dict[token] = len(token_dict)
+ return token_dict
+
+source_token_dict = build_token_dict(source_tokens)
+target_token_dict = build_token_dict(target_tokens)
+target_token_dict_inv = {v: k for k, v in target_token_dict.items()}
+
+# Add special tokens
+encode_tokens = [['<START>'] + tokens + ['<END>'] for tokens in source_tokens]
+decode_tokens = [['<START>'] + tokens + ['<END>'] for tokens in target_tokens]
+output_tokens = [tokens + ['<END>', '<PAD>'] for tokens in target_tokens]
+
+# Padding
+source_max_len = max(map(len, encode_tokens))
+target_max_len = max(map(len, decode_tokens))
+
+encode_tokens = [tokens + ['<PAD>'] * (source_max_len - len(tokens)) for tokens in encode_tokens]
+decode_tokens = [tokens + ['<PAD>'] * (target_max_len - len(tokens)) for tokens in decode_tokens]
+output_tokens = [tokens + ['<PAD>'] * (target_max_len - len(tokens)) for tokens in output_tokens]
+
+encode_input = [list(map(lambda x: source_token_dict[x], tokens)) for tokens in encode_tokens]
+decode_input = [list(map(lambda x: target_token_dict[x], tokens)) for tokens in decode_tokens]
+decode_output = [list(map(lambda x: [target_token_dict[x]], tokens)) for tokens in output_tokens]
+
+# Build & fit model
+model = get_model(
+ token_num=max(len(source_token_dict), len(target_token_dict)),
+ embed_dim=32,
+ encoder_num=2,
+ decoder_num=2,
+ head_num=4,
+ hidden_dim=128,
+ dropout_rate=0.05,
+ use_same_embed=False, # Use different embeddings for different languages
+)
+model.compile('adam', 'sparse_categorical_crossentropy')
+model.summary()
+
+model.fit(
+ x=[np.array(encode_input * 1024), np.array(decode_input * 1024)],
+ y=np.array(decode_output * 1024),
+ epochs=10,
+ batch_size=32,
+)
+
+# Predict
+decoded = decode(
+ model,
+ encode_input,
+ start_token=target_token_dict['<START>'],
+ end_token=target_token_dict['<END>'],
+ pad_token=target_token_dict['<PAD>'],
+)
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[0][1:-1])))
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[1][1:-1])))
+```
+
+### Decode
+
+By default, `decode` greedily selects the token with the highest probability at each step. You can add randomness by setting `top_k` and `temperature`:
+
+```python
+decoded = decode(
+ model,
+ encode_input,
+ start_token=target_token_dict['<START>'],
+ end_token=target_token_dict['<END>'],
+ pad_token=target_token_dict['<PAD>'],
+ top_k=10,
+ temperature=1.0,
+)
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[0][1:-1])))
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[1][1:-1])))
+```
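+
+For intuition, the snippet below is a minimal, self-contained sketch of how top-k sampling with a temperature is commonly implemented. It is illustrative only: the helper `sample_top_k` is a hypothetical function written for this example, not part of keras-transformer's API.
+
+```python
+import numpy as np
+
+def sample_top_k(scores, top_k=10, temperature=1.0):
+    # Hypothetical helper, not from keras-transformer.
+    # Keep only the top_k highest-scoring candidate token ids.
+    candidates = np.argsort(scores)[-top_k:]
+    # Scale scores by the temperature; lower values sharpen the distribution.
+    scaled = scores[candidates] / temperature
+    # Softmax over the remaining candidates.
+    probs = np.exp(scaled - scaled.max())
+    probs /= probs.sum()
+    # Draw one token id from the truncated distribution.
+    return int(np.random.choice(candidates, p=probs))
+
+# Example with a fake score vector over a 13-token vocabulary.
+print(sample_top_k(np.random.random(13), top_k=3, temperature=0.5))
+```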
+
+%package -n python3-keras-transformer
+Summary: Transformer implemented in Keras
+Provides: python-keras-transformer
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-keras-transformer
+# Keras Transformer
+
+[![Version](https://img.shields.io/pypi/v/keras-transformer.svg)](https://pypi.org/project/keras-transformer/)
+![License](https://img.shields.io/pypi/l/keras-transformer.svg)
+
+ \[[中文](https://github.com/CyberZHG/keras-transformer/blob/master/README.zh-CN.md)|[English](https://github.com/CyberZHG/keras-transformer/blob/master/README.md)\]
+
+Implementation of the [Transformer](https://arxiv.org/pdf/1706.03762.pdf) architecture for seq2seq tasks.
+
+## Install
+
+```bash
+pip install keras-transformer
+```
+
+## Usage
+
+### Train
+
+```python
+import numpy as np
+from keras_transformer import get_model
+
+# Build a small toy token dictionary
+tokens = 'all work and no play makes jack a dull boy'.split(' ')
+token_dict = {
+ '<PAD>': 0,
+ '<START>': 1,
+ '<END>': 2,
+}
+for token in tokens:
+ if token not in token_dict:
+ token_dict[token] = len(token_dict)
+
+# Generate toy data
+encoder_inputs_no_padding = []
+encoder_inputs, decoder_inputs, decoder_outputs = [], [], []
+for i in range(1, len(tokens) - 1):
+ encode_tokens, decode_tokens = tokens[:i], tokens[i:]
+ encode_tokens = ['<START>'] + encode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(encode_tokens))
+ output_tokens = decode_tokens + ['<END>', '<PAD>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
+ decode_tokens = ['<START>'] + decode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
+ encode_tokens = list(map(lambda x: token_dict[x], encode_tokens))
+ decode_tokens = list(map(lambda x: token_dict[x], decode_tokens))
+ output_tokens = list(map(lambda x: [token_dict[x]], output_tokens))
+ encoder_inputs_no_padding.append(encode_tokens[:i + 2])
+ encoder_inputs.append(encode_tokens)
+ decoder_inputs.append(decode_tokens)
+ decoder_outputs.append(output_tokens)
+
+# Build the model
+model = get_model(
+ token_num=len(token_dict),
+ embed_dim=30,
+ encoder_num=3,
+ decoder_num=2,
+ head_num=3,
+ hidden_dim=120,
+ attention_activation='relu',
+ feed_forward_activation='relu',
+ dropout_rate=0.05,
+ embed_weights=np.random.random((13, 30)),
+)
+model.compile(
+ optimizer='adam',
+ loss='sparse_categorical_crossentropy',
+)
+model.summary()
+
+# Train the model
+model.fit(
+ x=[np.asarray(encoder_inputs * 1000), np.asarray(decoder_inputs * 1000)],
+ y=np.asarray(decoder_outputs * 1000),
+ epochs=5,
+)
+```
+
+### Predict
+
+```python
+from keras_transformer import decode
+
+decoded = decode(
+ model,
+ encoder_inputs_no_padding,
+ start_token=token_dict['<START>'],
+ end_token=token_dict['<END>'],
+ pad_token=token_dict['<PAD>'],
+ max_len=100,
+)
+token_dict_rev = {v: k for k, v in token_dict.items()}
+for i in range(len(decoded)):
+ print(' '.join(map(lambda x: token_dict_rev[x], decoded[i][1:-1])))
+```
+
+### Translation
+
+```python
+import numpy as np
+from keras_transformer import get_model, decode
+
+source_tokens = [
+ 'i need more power'.split(' '),
+ 'eat jujube and pill'.split(' '),
+]
+target_tokens = [
+ list('我要更多的抛瓦'),
+ list('吃枣💊'),
+]
+
+# Generate dictionaries
+def build_token_dict(token_list):
+ token_dict = {
+ '<PAD>': 0,
+ '<START>': 1,
+ '<END>': 2,
+ }
+ for tokens in token_list:
+ for token in tokens:
+ if token not in token_dict:
+ token_dict[token] = len(token_dict)
+ return token_dict
+
+source_token_dict = build_token_dict(source_tokens)
+target_token_dict = build_token_dict(target_tokens)
+target_token_dict_inv = {v: k for k, v in target_token_dict.items()}
+
+# Add special tokens
+encode_tokens = [['<START>'] + tokens + ['<END>'] for tokens in source_tokens]
+decode_tokens = [['<START>'] + tokens + ['<END>'] for tokens in target_tokens]
+output_tokens = [tokens + ['<END>', '<PAD>'] for tokens in target_tokens]
+
+# Padding
+source_max_len = max(map(len, encode_tokens))
+target_max_len = max(map(len, decode_tokens))
+
+encode_tokens = [tokens + ['<PAD>'] * (source_max_len - len(tokens)) for tokens in encode_tokens]
+decode_tokens = [tokens + ['<PAD>'] * (target_max_len - len(tokens)) for tokens in decode_tokens]
+output_tokens = [tokens + ['<PAD>'] * (target_max_len - len(tokens)) for tokens in output_tokens]
+
+encode_input = [list(map(lambda x: source_token_dict[x], tokens)) for tokens in encode_tokens]
+decode_input = [list(map(lambda x: target_token_dict[x], tokens)) for tokens in decode_tokens]
+decode_output = [list(map(lambda x: [target_token_dict[x]], tokens)) for tokens in output_tokens]
+
+# Build & fit model
+model = get_model(
+ token_num=max(len(source_token_dict), len(target_token_dict)),
+ embed_dim=32,
+ encoder_num=2,
+ decoder_num=2,
+ head_num=4,
+ hidden_dim=128,
+ dropout_rate=0.05,
+ use_same_embed=False, # Use different embeddings for different languages
+)
+model.compile('adam', 'sparse_categorical_crossentropy')
+model.summary()
+
+model.fit(
+ x=[np.array(encode_input * 1024), np.array(decode_input * 1024)],
+ y=np.array(decode_output * 1024),
+ epochs=10,
+ batch_size=32,
+)
+
+# Predict
+decoded = decode(
+ model,
+ encode_input,
+ start_token=target_token_dict['<START>'],
+ end_token=target_token_dict['<END>'],
+ pad_token=target_token_dict['<PAD>'],
+)
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[0][1:-1])))
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[1][1:-1])))
+```
+
+### Decode
+
+By default, `decode` greedily selects the token with the highest probability at each step. You can add randomness by setting `top_k` and `temperature`:
+
+```python
+decoded = decode(
+ model,
+ encode_input,
+ start_token=target_token_dict['<START>'],
+ end_token=target_token_dict['<END>'],
+ pad_token=target_token_dict['<PAD>'],
+ top_k=10,
+ temperature=1.0,
+)
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[0][1:-1])))
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[1][1:-1])))
+```
+
+%package help
+Summary: Development documents and examples for keras-transformer
+Provides: python3-keras-transformer-doc
+%description help
+# Keras Transformer
+
+[![Version](https://img.shields.io/pypi/v/keras-transformer.svg)](https://pypi.org/project/keras-transformer/)
+![License](https://img.shields.io/pypi/l/keras-transformer.svg)
+
+ \[[中文](https://github.com/CyberZHG/keras-transformer/blob/master/README.zh-CN.md)|[English](https://github.com/CyberZHG/keras-transformer/blob/master/README.md)\]
+
+Implementation of the [Transformer](https://arxiv.org/pdf/1706.03762.pdf) architecture for seq2seq tasks.
+
+## Install
+
+```bash
+pip install keras-transformer
+```
+
+## Usage
+
+### Train
+
+```python
+import numpy as np
+from keras_transformer import get_model
+
+# Build a small toy token dictionary
+tokens = 'all work and no play makes jack a dull boy'.split(' ')
+token_dict = {
+ '<PAD>': 0,
+ '<START>': 1,
+ '<END>': 2,
+}
+for token in tokens:
+ if token not in token_dict:
+ token_dict[token] = len(token_dict)
+
+# Generate toy data
+encoder_inputs_no_padding = []
+encoder_inputs, decoder_inputs, decoder_outputs = [], [], []
+for i in range(1, len(tokens) - 1):
+ encode_tokens, decode_tokens = tokens[:i], tokens[i:]
+ encode_tokens = ['<START>'] + encode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(encode_tokens))
+ output_tokens = decode_tokens + ['<END>', '<PAD>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
+ decode_tokens = ['<START>'] + decode_tokens + ['<END>'] + ['<PAD>'] * (len(tokens) - len(decode_tokens))
+ encode_tokens = list(map(lambda x: token_dict[x], encode_tokens))
+ decode_tokens = list(map(lambda x: token_dict[x], decode_tokens))
+ output_tokens = list(map(lambda x: [token_dict[x]], output_tokens))
+ encoder_inputs_no_padding.append(encode_tokens[:i + 2])
+ encoder_inputs.append(encode_tokens)
+ decoder_inputs.append(decode_tokens)
+ decoder_outputs.append(output_tokens)
+
+# Build the model
+model = get_model(
+ token_num=len(token_dict),
+ embed_dim=30,
+ encoder_num=3,
+ decoder_num=2,
+ head_num=3,
+ hidden_dim=120,
+ attention_activation='relu',
+ feed_forward_activation='relu',
+ dropout_rate=0.05,
+ embed_weights=np.random.random((13, 30)),
+)
+model.compile(
+ optimizer='adam',
+ loss='sparse_categorical_crossentropy',
+)
+model.summary()
+
+# Train the model
+model.fit(
+ x=[np.asarray(encoder_inputs * 1000), np.asarray(decoder_inputs * 1000)],
+ y=np.asarray(decoder_outputs * 1000),
+ epochs=5,
+)
+```
+
+### Predict
+
+```python
+from keras_transformer import decode
+
+decoded = decode(
+ model,
+ encoder_inputs_no_padding,
+ start_token=token_dict['<START>'],
+ end_token=token_dict['<END>'],
+ pad_token=token_dict['<PAD>'],
+ max_len=100,
+)
+token_dict_rev = {v: k for k, v in token_dict.items()}
+for i in range(len(decoded)):
+ print(' '.join(map(lambda x: token_dict_rev[x], decoded[i][1:-1])))
+```
+
+### Translation
+
+```python
+import numpy as np
+from keras_transformer import get_model, decode
+
+source_tokens = [
+ 'i need more power'.split(' '),
+ 'eat jujube and pill'.split(' '),
+]
+target_tokens = [
+ list('我要更多的抛瓦'),
+ list('吃枣💊'),
+]
+
+# Generate dictionaries
+def build_token_dict(token_list):
+ token_dict = {
+ '<PAD>': 0,
+ '<START>': 1,
+ '<END>': 2,
+ }
+ for tokens in token_list:
+ for token in tokens:
+ if token not in token_dict:
+ token_dict[token] = len(token_dict)
+ return token_dict
+
+source_token_dict = build_token_dict(source_tokens)
+target_token_dict = build_token_dict(target_tokens)
+target_token_dict_inv = {v: k for k, v in target_token_dict.items()}
+
+# Add special tokens
+encode_tokens = [['<START>'] + tokens + ['<END>'] for tokens in source_tokens]
+decode_tokens = [['<START>'] + tokens + ['<END>'] for tokens in target_tokens]
+output_tokens = [tokens + ['<END>', '<PAD>'] for tokens in target_tokens]
+
+# Padding
+source_max_len = max(map(len, encode_tokens))
+target_max_len = max(map(len, decode_tokens))
+
+encode_tokens = [tokens + ['<PAD>'] * (source_max_len - len(tokens)) for tokens in encode_tokens]
+decode_tokens = [tokens + ['<PAD>'] * (target_max_len - len(tokens)) for tokens in decode_tokens]
+output_tokens = [tokens + ['<PAD>'] * (target_max_len - len(tokens)) for tokens in output_tokens]
+
+encode_input = [list(map(lambda x: source_token_dict[x], tokens)) for tokens in encode_tokens]
+decode_input = [list(map(lambda x: target_token_dict[x], tokens)) for tokens in decode_tokens]
+decode_output = [list(map(lambda x: [target_token_dict[x]], tokens)) for tokens in output_tokens]
+
+# Build & fit model
+model = get_model(
+ token_num=max(len(source_token_dict), len(target_token_dict)),
+ embed_dim=32,
+ encoder_num=2,
+ decoder_num=2,
+ head_num=4,
+ hidden_dim=128,
+ dropout_rate=0.05,
+ use_same_embed=False, # Use different embeddings for different languages
+)
+model.compile('adam', 'sparse_categorical_crossentropy')
+model.summary()
+
+model.fit(
+ x=[np.array(encode_input * 1024), np.array(decode_input * 1024)],
+ y=np.array(decode_output * 1024),
+ epochs=10,
+ batch_size=32,
+)
+
+# Predict
+decoded = decode(
+ model,
+ encode_input,
+ start_token=target_token_dict['<START>'],
+ end_token=target_token_dict['<END>'],
+ pad_token=target_token_dict['<PAD>'],
+)
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[0][1:-1])))
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[1][1:-1])))
+```
+
+### Decode
+
+In `decode`, the word with top probability is selected as the predicted token by default. You can add randomness by setting `top_k` and `temperature`:
+
+```python
+decoded = decode(
+ model,
+ encode_input,
+ start_token=target_token_dict['<START>'],
+ end_token=target_token_dict['<END>'],
+ pad_token=target_token_dict['<PAD>'],
+ top_k=10,
+ temperature=1.0,
+)
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[0][1:-1])))
+print(''.join(map(lambda x: target_token_dict_inv[x], decoded[1][1:-1])))
+```
+
+%prep
+%autosetup -n keras-transformer-0.40.0
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-keras-transformer -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Tue Apr 11 2023 Python_Bot <Python_Bot@openeuler.org> - 0.40.0-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..c2538e0
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+9ac9becf5595b09b0bdbe39c704cf2d5 keras-transformer-0.40.0.tar.gz