author    CoprDistGit <infra@openeuler.org>  2023-05-29 10:59:17 +0000
committer CoprDistGit <infra@openeuler.org>  2023-05-29 10:59:17 +0000
commit    2b79901cc94f8b776bfb5b0270cc6fb72d452387 (patch)
tree      9e599ab04de0ac3f5ecde4dabefa5612304ac3ae
parent    aec93cd74fa766d947ad24973bbedc76f0c906ba (diff)
automatic import of python-hideandseek
-rw-r--r--  .gitignore              |   1
-rw-r--r--  python-hideandseek.spec | 263
-rw-r--r--  sources                 |   1
3 files changed, 265 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
index e69de29..87b56c0 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/hideandseek-0.1.5.tar.gz
diff --git a/python-hideandseek.spec b/python-hideandseek.spec
new file mode 100644
index 0000000..8621750
--- /dev/null
+++ b/python-hideandseek.spec
@@ -0,0 +1,263 @@
+%global _empty_manifest_terminate_build 0
+Name: python-hideandseek
+Version: 0.1.5
+Release: 1
+Summary:	Library for deep learning and privacy-preserving deep learning
+License:	MIT
+URL: https://github.com/jsyoo61/hideandseek
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/95/0a/554f1ae7e4f35482523a3494164c40048beb6476ec72fa7550a51df3f4a0/hideandseek-0.1.5.tar.gz
+BuildArch: noarch
+
+Requires: python3-numpy
+Requires: python3-pandas
+Requires: python3-matplotlib
+Requires: python3-hydra-core
+Requires: python3-tools-jsyoo61
+
+%description
+# hideandseek
+Highly modularized deep learning training library.
+
+Why use `hideandseek`?
+
+- Easily train and save deep learning models, along with the other modules (e.g. preprocessing modules) required at inference time
+- Run multiple deep learning experiments in parallel on multiple GPUs (powered by [hydra](https://hydra.cc/docs/intro/) and Python multiprocessing)
+- Design and analyze experiments scientifically by varying experiment variables (powered by [hydra](https://hydra.cc/docs/intro/))
+- A modularized machine learning pipeline lets the same script serve all types of experiments
+- The same training code can run in a privacy-preserving setting with minimal modifications
+
+The code is currently being cleaned up. (30.10.2022)
+
+A minimal end-to-end example:
+
+    import torch
+    import torch.nn as nn
+
+    import hideandseek as hs
+
+    # Generate toy data: y = 5x + 2
+    x = torch.rand(200, 1)
+    y = 5 * x + 2
+
+    model = nn.Linear(1, 1)
+    dataset = torch.utils.data.TensorDataset(x, y)
+    criterion = nn.MSELoss()
+    cfg = {
+        'lr': 1e-2,
+        'batch_size': 32,
+        'epoch': 10  # optional
+    }
+
+    # Training configuration: everything needed to train a neural network
+    kwargs = {
+        'model': model,
+        'dataset': dataset,
+        'cfg_train': cfg,
+        'criterion': criterion,
+        'name': 'Test'  # optional
+    }
+    trainer = hs.N.Node(**kwargs)
+
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    trainer.model.to(device)
+
+    trainer.train()          # Train for the number of epochs given in cfg
+    trainer.train(5)         # Train for the specified number of epochs
+    trainer.train(epoch=5)   # Same as trainer.train(5)
+    trainer.train(step=500)  # Train for the specified number of update steps
+
+    trainer.model.cpu()      # Move the model back to the CPU after training
+
+and run multiple batches of experiments with a single command, such as:
+
+    python train.py -m lr=1e-3,1e-2 batch_size=32,64 "random_seed=range(0,5)" \
+        hydra/launcher=joblib hydra.launcher.n_jobs=8
+    # Runs a total of 2*2*5 = 20 experiments, 8 processes at a time.
+    # Results are stored in hydra.sweep.dir, which can be overridden.
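+
+A minimal sketch of what such a `train.py` entry point could look like (the config file `conf/config.yaml` and the keys `lr`, `batch_size`, and `random_seed` are hypothetical names used for illustration):
+
+    import hydra
+    import torch
+    import torch.nn as nn
+    from omegaconf import DictConfig
+
+    import hideandseek as hs
+
+    @hydra.main(config_path="conf", config_name="config")
+    def main(cfg: DictConfig) -> None:
+        # Each multirun job receives one combination of the swept overrides
+        torch.manual_seed(cfg.random_seed)
+        x = torch.rand(200, 1)
+        y = 5 * x + 2
+        trainer = hs.N.Node(
+            model=nn.Linear(1, 1),
+            dataset=torch.utils.data.TensorDataset(x, y),
+            cfg_train={'lr': cfg.lr, 'batch_size': cfg.batch_size, 'epoch': 10},
+            criterion=nn.MSELoss(),
+        )
+        trainer.train()
+
+    if __name__ == "__main__":
+        main()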
+
+To do:
+- [ ] Draw figures to explain hideandseek
+- [ ] GUI for generating experiment scripts when conducting variable sweeps
+
+
+
+
+%package -n python3-hideandseek
+Summary:	Library for deep learning and privacy-preserving deep learning
+Provides: python-hideandseek
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-hideandseek
+# hideandseek
+Highly modularized deep learning training library.
+
+Why use `hideandseek`?
+
+- Easily train and save deep learning models, along with the other modules (e.g. preprocessing modules) required at inference time
+- Run multiple deep learning experiments in parallel on multiple GPUs (powered by [hydra](https://hydra.cc/docs/intro/) and Python multiprocessing)
+- Design and analyze experiments scientifically by varying experiment variables (powered by [hydra](https://hydra.cc/docs/intro/))
+- A modularized machine learning pipeline lets the same script serve all types of experiments
+- The same training code can run in a privacy-preserving setting with minimal modifications
+
+The code is currently being cleaned up. (30.10.2022)
+
+A minimal end-to-end example:
+
+    import torch
+    import torch.nn as nn
+
+    import hideandseek as hs
+
+    # Generate toy data: y = 5x + 2
+    x = torch.rand(200, 1)
+    y = 5 * x + 2
+
+    model = nn.Linear(1, 1)
+    dataset = torch.utils.data.TensorDataset(x, y)
+    criterion = nn.MSELoss()
+    cfg = {
+        'lr': 1e-2,
+        'batch_size': 32,
+        'epoch': 10  # optional
+    }
+
+    # Training configuration: everything needed to train a neural network
+    kwargs = {
+        'model': model,
+        'dataset': dataset,
+        'cfg_train': cfg,
+        'criterion': criterion,
+        'name': 'Test'  # optional
+    }
+    trainer = hs.N.Node(**kwargs)
+
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    trainer.model.to(device)
+
+    trainer.train()          # Train for the number of epochs given in cfg
+    trainer.train(5)         # Train for the specified number of epochs
+    trainer.train(epoch=5)   # Same as trainer.train(5)
+    trainer.train(step=500)  # Train for the specified number of update steps
+
+    trainer.model.cpu()      # Move the model back to the CPU after training
+
+and run multiple batches of experiments with a single command, such as:
+
+    python train.py -m lr=1e-3,1e-2 batch_size=32,64 "random_seed=range(0,5)" \
+        hydra/launcher=joblib hydra.launcher.n_jobs=8
+    # Runs a total of 2*2*5 = 20 experiments, 8 processes at a time.
+    # Results are stored in hydra.sweep.dir, which can be overridden.
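+
+A minimal sketch of what such a `train.py` entry point could look like (the config file `conf/config.yaml` and the keys `lr`, `batch_size`, and `random_seed` are hypothetical names used for illustration):
+
+    import hydra
+    import torch
+    import torch.nn as nn
+    from omegaconf import DictConfig
+
+    import hideandseek as hs
+
+    @hydra.main(config_path="conf", config_name="config")
+    def main(cfg: DictConfig) -> None:
+        # Each multirun job receives one combination of the swept overrides
+        torch.manual_seed(cfg.random_seed)
+        x = torch.rand(200, 1)
+        y = 5 * x + 2
+        trainer = hs.N.Node(
+            model=nn.Linear(1, 1),
+            dataset=torch.utils.data.TensorDataset(x, y),
+            cfg_train={'lr': cfg.lr, 'batch_size': cfg.batch_size, 'epoch': 10},
+            criterion=nn.MSELoss(),
+        )
+        trainer.train()
+
+    if __name__ == "__main__":
+        main()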
+
+To do:
+- [ ] Draw figures to explain hideandseek
+- [ ] GUI for generating experiment scripts when conducting variable sweeps
+
+
+
+
+%package help
+Summary: Development documents and examples for hideandseek
+Provides: python3-hideandseek-doc
+%description help
+# hideandseek
+Highly modularized deep learning training library.
+
+Why use `hideandseek`?
+
+- Easily train and save deep learning models, along with the other modules (e.g. preprocessing modules) required at inference time
+- Run multiple deep learning experiments in parallel on multiple GPUs (powered by [hydra](https://hydra.cc/docs/intro/) and Python multiprocessing)
+- Design and analyze experiments scientifically by varying experiment variables (powered by [hydra](https://hydra.cc/docs/intro/))
+- A modularized machine learning pipeline lets the same script serve all types of experiments
+- The same training code can run in a privacy-preserving setting with minimal modifications
+
+The code is currently being cleaned up. (30.10.2022)
+
+A minimal end-to-end example:
+
+    import torch
+    import torch.nn as nn
+
+    import hideandseek as hs
+
+    # Generate toy data: y = 5x + 2
+    x = torch.rand(200, 1)
+    y = 5 * x + 2
+
+    model = nn.Linear(1, 1)
+    dataset = torch.utils.data.TensorDataset(x, y)
+    criterion = nn.MSELoss()
+    cfg = {
+        'lr': 1e-2,
+        'batch_size': 32,
+        'epoch': 10  # optional
+    }
+
+    # Training configuration: everything needed to train a neural network
+    kwargs = {
+        'model': model,
+        'dataset': dataset,
+        'cfg_train': cfg,
+        'criterion': criterion,
+        'name': 'Test'  # optional
+    }
+    trainer = hs.N.Node(**kwargs)
+
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    trainer.model.to(device)
+
+    trainer.train()          # Train for the number of epochs given in cfg
+    trainer.train(5)         # Train for the specified number of epochs
+    trainer.train(epoch=5)   # Same as trainer.train(5)
+    trainer.train(step=500)  # Train for the specified number of update steps
+
+    trainer.model.cpu()      # Move the model back to the CPU after training
+
+and run multiple batches of experiments with a single command, such as:
+
+    python train.py -m lr=1e-3,1e-2 batch_size=32,64 "random_seed=range(0,5)" \
+        hydra/launcher=joblib hydra.launcher.n_jobs=8
+    # Runs a total of 2*2*5 = 20 experiments, 8 processes at a time.
+    # Results are stored in hydra.sweep.dir, which can be overridden.
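+
+A minimal sketch of what such a `train.py` entry point could look like (the config file `conf/config.yaml` and the keys `lr`, `batch_size`, and `random_seed` are hypothetical names used for illustration):
+
+    import hydra
+    import torch
+    import torch.nn as nn
+    from omegaconf import DictConfig
+
+    import hideandseek as hs
+
+    @hydra.main(config_path="conf", config_name="config")
+    def main(cfg: DictConfig) -> None:
+        # Each multirun job receives one combination of the swept overrides
+        torch.manual_seed(cfg.random_seed)
+        x = torch.rand(200, 1)
+        y = 5 * x + 2
+        trainer = hs.N.Node(
+            model=nn.Linear(1, 1),
+            dataset=torch.utils.data.TensorDataset(x, y),
+            cfg_train={'lr': cfg.lr, 'batch_size': cfg.batch_size, 'epoch': 10},
+            criterion=nn.MSELoss(),
+        )
+        trainer.train()
+
+    if __name__ == "__main__":
+        main()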
+
+To do:
+- [ ] Draw figures to explain hideandseek
+- [ ] GUI for generating experiment scripts when conducting variable sweeps
+
+
+
+
+%prep
+%autosetup -n hideandseek-0.1.5
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-hideandseek -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Mon May 29 2023 Python_Bot <Python_Bot@openeuler.org> - 0.1.5-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..76ba992
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+c13d19b452f8dcf79d4bdcb8e8904025 hideandseek-0.1.5.tar.gz