author    CoprDistGit <infra@openeuler.org>  2023-05-15 03:53:56 +0000
committer CoprDistGit <infra@openeuler.org>  2023-05-15 03:53:56 +0000
commit    12d4ddaa59c47e0d73b78affd60f6357b48c98f6 (patch)
tree      9fecd67ccc487f2962fb208409268ea262362ea1
parent    25afcb4e4262122ca00e8b1d8ddf4b55fcb2cf86 (diff)
automatic import of python-pytorch-segmentation-models-trainer
-rw-r--r--  .gitignore                                          1
-rw-r--r--  python-pytorch-segmentation-models-trainer.spec   531
-rw-r--r--  sources                                             1
3 files changed, 533 insertions(+), 0 deletions(-)
diff --git a/.gitignore b/.gitignore
index e69de29..dd2bd45 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/pytorch_segmentation_models_trainer-0.17.0.tar.gz
diff --git a/python-pytorch-segmentation-models-trainer.spec b/python-pytorch-segmentation-models-trainer.spec
new file mode 100644
index 0000000..1307401
--- /dev/null
+++ b/python-pytorch-segmentation-models-trainer.spec
@@ -0,0 +1,531 @@
+%global _empty_manifest_terminate_build 0
+Name: python-pytorch-segmentation-models-trainer
+Version: 0.17.0
+Release: 1
+Summary: Training of image segmentation models with popular architectures.
+License: GPL
+URL: https://github.com/phborba/pytorch_segmentation_models_trainer
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/23/77/6763d17050316adab53302082893346db749f2b350e85357562331c795ec/pytorch_segmentation_models_trainer-0.17.0.tar.gz
+BuildArch: noarch
+
+Requires: python3-torch
+Requires: python3-torchvision
+Requires: python3-pytorch-lightning
+Requires: python3-torchmetrics
+Requires: python3-segmentation-models-pytorch
+Requires: python3-hydra-core
+Requires: python3-kornia
+Requires: python3-albumentations
+Requires: python3-pandas
+Requires: python3-tensorboardX
+Requires: python3-pillow
+Requires: python3-matplotlib
+Requires: python3-scipy
+Requires: python3-numpy
+Requires: python3-pytorch-toolbelt
+Requires: python3-descartes
+Requires: python3-fiona
+Requires: python3-psycopg2
+Requires: python3-shapely
+Requires: python3-geopandas
+Requires: python3-geoalchemy2
+Requires: python3-rasterio
+Requires: python3-numba
+Requires: python3-sahi
+Requires: python3-skan
+Requires: python3-torch-scatter
+Requires: python3-tqdm
+Requires: python3-pygeos
+Requires: python3-rtree
+Requires: python3-bidict
+Requires: python3-Cython
+Requires: python3-ninja
+Requires: python3-pyyaml
+Requires: python3-pycocotools
+Requires: python3-multiprocess
+Requires: python3-wget
+Requires: python3-fastapi
+Requires: python3-uvicorn
+Requires: python3-similaritymeasures
+Requires: python3-colorama
+Requires: python3-swifter
+Requires: python3-multipart
+Requires: python3-pytest
+Requires: python3-scikit-image
+Requires: python3-parameterized
+
+%description
+
+# pytorch_segmentation_models_trainer
+
+
+[![Torch](https://img.shields.io/badge/-PyTorch-red?logo=pytorch&labelColor=gray)](https://pytorch.org/get-started/locally/)
+[![Pytorch Lightning](https://img.shields.io/badge/code-Lightning-blueviolet?logo=pytorchlightning&labelColor=gray)](https://pytorchlightning.ai/)
+[![Hydra](https://img.shields.io/badge/conf-hydra-blue)](https://hydra.cc/)
+[![Segmentation Models](https://img.shields.io/badge/models-segmentation_models_pytorch-yellow)](https://github.com/qubvel/segmentation_models.pytorch)
+[![Python application](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-app.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-app.yml)
+[![Upload Python Package](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-publish.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-publish.yml)
+[![Publish Docker image](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/docker-publish.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/docker-publish.yml)
+[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/phborba/pytorch_segmentation_models_trainer/main.svg)](https://results.pre-commit.ci/latest/github/phborba/pytorch_segmentation_models_trainer/main)
+[![PyPI package](https://img.shields.io/pypi/v/pytorch-segmentation-models-trainer?logo=pypi&color=green)](https://pypi.org/project/pytorch-segmentation-models-trainer/)
+[![codecov](https://codecov.io/gh/phborba/pytorch_segmentation_models_trainer/branch/main/graph/badge.svg?token=PRJL5GVOL2)](https://codecov.io/gh/phborba/pytorch_segmentation_models_trainer)
+[![CodeQL](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/codeql-analysis.yml)
+[![maintainer](https://img.shields.io/badge/maintainer-phborba-blue.svg)](https://github.com/phborba)
+[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4573996.svg)](https://doi.org/10.5281/zenodo.4573996)
+
+
+Framework based on PyTorch, PyTorch Lightning, segmentation_models.pytorch, and Hydra for training semantic segmentation models from YAML config files such as the following:
+
+```
+model:
+ _target_: segmentation_models_pytorch.Unet
+ encoder_name: resnet34
+ encoder_weights: imagenet
+ in_channels: 3
+ classes: 1
+
+loss:
+ _target_: segmentation_models_pytorch.utils.losses.DiceLoss
+
+optimizer:
+ _target_: torch.optim.AdamW
+ lr: 0.001
+ weight_decay: 1e-4
+
+hyperparameters:
+ batch_size: 1
+ epochs: 2
+ max_lr: 0.1
+
+pl_trainer:
+ max_epochs: ${hyperparameters.epochs}
+ gpus: 0
+
+train_dataset:
+ _target_: pytorch_segmentation_models_trainer.dataset_loader.dataset.SegmentationDataset
+ input_csv_path: /path/to/input.csv
+ data_loader:
+ shuffle: True
+ num_workers: 1
+ pin_memory: True
+ drop_last: True
+ prefetch_factor: 1
+ augmentation_list:
+ - _target_: albumentations.HueSaturationValue
+ always_apply: false
+ hue_shift_limit: 0.2
+ p: 0.5
+ - _target_: albumentations.RandomBrightnessContrast
+ brightness_limit: 0.2
+ contrast_limit: 0.2
+ p: 0.5
+ - _target_: albumentations.RandomCrop
+ always_apply: true
+ height: 256
+ width: 256
+ p: 1.0
+ - _target_: albumentations.Flip
+ always_apply: true
+ - _target_: albumentations.Normalize
+ p: 1.0
+ - _target_: albumentations.pytorch.transforms.ToTensorV2
+ always_apply: true
+
+val_dataset:
+ _target_: pytorch_segmentation_models_trainer.dataset_loader.dataset.SegmentationDataset
+ input_csv_path: /path/to/input.csv
+ data_loader:
+ shuffle: True
+ num_workers: 1
+ pin_memory: True
+ drop_last: True
+ prefetch_factor: 1
+ augmentation_list:
+ - _target_: albumentations.Resize
+ always_apply: true
+ height: 256
+ width: 256
+ p: 1.0
+ - _target_: albumentations.Normalize
+ p: 1.0
+ - _target_: albumentations.pytorch.transforms.ToTensorV2
+ always_apply: true
+```
+
+To train a model with the configuration directory ```/path/to/config/folder``` and the configuration name ```test.yaml```:
+
+```
+pytorch-smt --config-dir /path/to/config/folder --config-name test +mode=train
+```
+
+The mode can also be stored in the configuration YAML. In that case, do not pass the +mode= argument. If the mode is stored in the YAML and you want to override its value, drop the + prefix and pass just mode=.
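+
+For example (a minimal sketch; predict is used here only as an illustrative mode name, so check the project documentation for the modes available in your version):
+
+```
+# mode not stored in the YAML: append it with the + prefix
+pytorch-smt --config-dir /path/to/config/folder --config-name test +mode=train
+
+# mode stored in the YAML: override it by dropping the + prefix
+pytorch-smt --config-dir /path/to/config/folder --config-name test mode=predict
+```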
+
+This module supports Hydra features such as configuration composition. For further information, please visit https://hydra.cc/docs/intro
+
+# Install
+
+If you are not using Docker and want to enable GPU acceleration, install pytorch_scatter before installing this package, as instructed in https://github.com/rusty1s/pytorch_scatter
+
+After installing pytorch_scatter, install this package with pip:
+
+```
+pip install pytorch_segmentation_models_trainer
+```
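+
+A possible end-to-end install sequence (a sketch only; the GPU-enabled torch-scatter wheel depends on your torch and CUDA versions, so follow the pytorch_scatter README for the exact package index to use):
+
+```
+# 1. install torch-scatter first (pick the wheel matching your torch/CUDA setup,
+#    as described in the pytorch_scatter README)
+pip install torch-scatter
+# 2. then install this package
+pip install pytorch_segmentation_models_trainer
+```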
+
+We provide a Docker image with all dependencies installed and ready for GPU usage. You can pull it from Docker Hub:
+
+```
+docker pull phborba/pytorch_segmentation_models_trainer:latest
+```
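+
+To start a GPU-enabled container from that image (a minimal sketch, assuming the NVIDIA Container Toolkit is installed on the host):
+
+```
+# run the image interactively with all host GPUs exposed to the container
+docker run --gpus all -it phborba/pytorch_segmentation_models_trainer:latest
+```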
+
+# Citing:
+
+```
+
+@software{philipe_borba_2021_5115127,
+ author = {Philipe Borba},
+ title = {{phborba/pytorch\_segmentation\_models\_trainer:
+ Version 0.8.0}},
+ month = jul,
+ year = 2021,
+ publisher = {Zenodo},
+ version = {v0.8.0},
+ doi = {10.5281/zenodo.5115127},
+ url = {https://doi.org/10.5281/zenodo.5115127}
+}
+```
+
+%package -n python3-pytorch-segmentation-models-trainer
+Summary: Training of image segmentation models with popular architectures.
+Provides: python-pytorch-segmentation-models-trainer
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-pytorch-segmentation-models-trainer
+
+# pytorch_segmentation_models_trainer
+
+
+[![Torch](https://img.shields.io/badge/-PyTorch-red?logo=pytorch&labelColor=gray)](https://pytorch.org/get-started/locally/)
+[![Pytorch Lightning](https://img.shields.io/badge/code-Lightning-blueviolet?logo=pytorchlightning&labelColor=gray)](https://pytorchlightning.ai/)
+[![Hydra](https://img.shields.io/badge/conf-hydra-blue)](https://hydra.cc/)
+[![Segmentation Models](https://img.shields.io/badge/models-segmentation_models_pytorch-yellow)](https://github.com/qubvel/segmentation_models.pytorch)
+[![Python application](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-app.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-app.yml)
+[![Upload Python Package](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-publish.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-publish.yml)
+[![Publish Docker image](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/docker-publish.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/docker-publish.yml)
+[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/phborba/pytorch_segmentation_models_trainer/main.svg)](https://results.pre-commit.ci/latest/github/phborba/pytorch_segmentation_models_trainer/main)
+[![PyPI package](https://img.shields.io/pypi/v/pytorch-segmentation-models-trainer?logo=pypi&color=green)](https://pypi.org/project/pytorch-segmentation-models-trainer/)
+[![codecov](https://codecov.io/gh/phborba/pytorch_segmentation_models_trainer/branch/main/graph/badge.svg?token=PRJL5GVOL2)](https://codecov.io/gh/phborba/pytorch_segmentation_models_trainer)
+[![CodeQL](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/codeql-analysis.yml)
+[![maintainer](https://img.shields.io/badge/maintainer-phborba-blue.svg)](https://github.com/phborba)
+[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4573996.svg)](https://doi.org/10.5281/zenodo.4573996)
+
+
+Framework based on PyTorch, PyTorch Lightning, segmentation_models.pytorch, and Hydra for training semantic segmentation models from YAML config files such as the following:
+
+```
+model:
+ _target_: segmentation_models_pytorch.Unet
+ encoder_name: resnet34
+ encoder_weights: imagenet
+ in_channels: 3
+ classes: 1
+
+loss:
+ _target_: segmentation_models_pytorch.utils.losses.DiceLoss
+
+optimizer:
+ _target_: torch.optim.AdamW
+ lr: 0.001
+ weight_decay: 1e-4
+
+hyperparameters:
+ batch_size: 1
+ epochs: 2
+ max_lr: 0.1
+
+pl_trainer:
+ max_epochs: ${hyperparameters.epochs}
+ gpus: 0
+
+train_dataset:
+ _target_: pytorch_segmentation_models_trainer.dataset_loader.dataset.SegmentationDataset
+ input_csv_path: /path/to/input.csv
+ data_loader:
+ shuffle: True
+ num_workers: 1
+ pin_memory: True
+ drop_last: True
+ prefetch_factor: 1
+ augmentation_list:
+ - _target_: albumentations.HueSaturationValue
+ always_apply: false
+ hue_shift_limit: 0.2
+ p: 0.5
+ - _target_: albumentations.RandomBrightnessContrast
+ brightness_limit: 0.2
+ contrast_limit: 0.2
+ p: 0.5
+ - _target_: albumentations.RandomCrop
+ always_apply: true
+ height: 256
+ width: 256
+ p: 1.0
+ - _target_: albumentations.Flip
+ always_apply: true
+ - _target_: albumentations.Normalize
+ p: 1.0
+ - _target_: albumentations.pytorch.transforms.ToTensorV2
+ always_apply: true
+
+val_dataset:
+ _target_: pytorch_segmentation_models_trainer.dataset_loader.dataset.SegmentationDataset
+ input_csv_path: /path/to/input.csv
+ data_loader:
+ shuffle: True
+ num_workers: 1
+ pin_memory: True
+ drop_last: True
+ prefetch_factor: 1
+ augmentation_list:
+ - _target_: albumentations.Resize
+ always_apply: true
+ height: 256
+ width: 256
+ p: 1.0
+ - _target_: albumentations.Normalize
+ p: 1.0
+ - _target_: albumentations.pytorch.transforms.ToTensorV2
+ always_apply: true
+```
+
+To train a model with the configuration directory ```/path/to/config/folder``` and the configuration name ```test.yaml```:
+
+```
+pytorch-smt --config-dir /path/to/config/folder --config-name test +mode=train
+```
+
+The mode can also be stored in the configuration YAML. In that case, do not pass the +mode= argument. If the mode is stored in the YAML and you want to override its value, drop the + prefix and pass just mode=.
+
+This module supports Hydra features such as configuration composition. For further information, please visit https://hydra.cc/docs/intro
+
+# Install
+
+If you are not using Docker and want to enable GPU acceleration, install pytorch_scatter before installing this package, as instructed in https://github.com/rusty1s/pytorch_scatter
+
+After installing pytorch_scatter, install this package with pip:
+
+```
+pip install pytorch_segmentation_models_trainer
+```
+
+We provide a Docker image with all dependencies installed and ready for GPU usage. You can pull it from Docker Hub:
+
+```
+docker pull phborba/pytorch_segmentation_models_trainer:latest
+```
+
+# Citing:
+
+```
+
+@software{philipe_borba_2021_5115127,
+ author = {Philipe Borba},
+ title = {{phborba/pytorch\_segmentation\_models\_trainer:
+ Version 0.8.0}},
+ month = jul,
+ year = 2021,
+ publisher = {Zenodo},
+ version = {v0.8.0},
+ doi = {10.5281/zenodo.5115127},
+ url = {https://doi.org/10.5281/zenodo.5115127}
+}
+```
+
+%package help
+Summary: Development documents and examples for pytorch-segmentation-models-trainer
+Provides: python3-pytorch-segmentation-models-trainer-doc
+%description help
+
+# pytorch_segmentation_models_trainer
+
+
+[![Torch](https://img.shields.io/badge/-PyTorch-red?logo=pytorch&labelColor=gray)](https://pytorch.org/get-started/locally/)
+[![Pytorch Lightning](https://img.shields.io/badge/code-Lightning-blueviolet?logo=pytorchlightning&labelColor=gray)](https://pytorchlightning.ai/)
+[![Hydra](https://img.shields.io/badge/conf-hydra-blue)](https://hydra.cc/)
+[![Segmentation Models](https://img.shields.io/badge/models-segmentation_models_pytorch-yellow)](https://github.com/qubvel/segmentation_models.pytorch)
+[![Python application](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-app.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-app.yml)
+[![Upload Python Package](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-publish.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/python-publish.yml)
+[![Publish Docker image](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/docker-publish.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/docker-publish.yml)
+[![pre-commit.ci status](https://results.pre-commit.ci/badge/github/phborba/pytorch_segmentation_models_trainer/main.svg)](https://results.pre-commit.ci/latest/github/phborba/pytorch_segmentation_models_trainer/main)
+[![PyPI package](https://img.shields.io/pypi/v/pytorch-segmentation-models-trainer?logo=pypi&color=green)](https://pypi.org/project/pytorch-segmentation-models-trainer/)
+[![codecov](https://codecov.io/gh/phborba/pytorch_segmentation_models_trainer/branch/main/graph/badge.svg?token=PRJL5GVOL2)](https://codecov.io/gh/phborba/pytorch_segmentation_models_trainer)
+[![CodeQL](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/phborba/pytorch_segmentation_models_trainer/actions/workflows/codeql-analysis.yml)
+[![maintainer](https://img.shields.io/badge/maintainer-phborba-blue.svg)](https://github.com/phborba)
+[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.4573996.svg)](https://doi.org/10.5281/zenodo.4573996)
+
+
+Framework based on PyTorch, PyTorch Lightning, segmentation_models.pytorch, and Hydra for training semantic segmentation models from YAML config files such as the following:
+
+```
+model:
+ _target_: segmentation_models_pytorch.Unet
+ encoder_name: resnet34
+ encoder_weights: imagenet
+ in_channels: 3
+ classes: 1
+
+loss:
+ _target_: segmentation_models_pytorch.utils.losses.DiceLoss
+
+optimizer:
+ _target_: torch.optim.AdamW
+ lr: 0.001
+ weight_decay: 1e-4
+
+hyperparameters:
+ batch_size: 1
+ epochs: 2
+ max_lr: 0.1
+
+pl_trainer:
+ max_epochs: ${hyperparameters.epochs}
+ gpus: 0
+
+train_dataset:
+ _target_: pytorch_segmentation_models_trainer.dataset_loader.dataset.SegmentationDataset
+ input_csv_path: /path/to/input.csv
+ data_loader:
+ shuffle: True
+ num_workers: 1
+ pin_memory: True
+ drop_last: True
+ prefetch_factor: 1
+ augmentation_list:
+ - _target_: albumentations.HueSaturationValue
+ always_apply: false
+ hue_shift_limit: 0.2
+ p: 0.5
+ - _target_: albumentations.RandomBrightnessContrast
+ brightness_limit: 0.2
+ contrast_limit: 0.2
+ p: 0.5
+ - _target_: albumentations.RandomCrop
+ always_apply: true
+ height: 256
+ width: 256
+ p: 1.0
+ - _target_: albumentations.Flip
+ always_apply: true
+ - _target_: albumentations.Normalize
+ p: 1.0
+ - _target_: albumentations.pytorch.transforms.ToTensorV2
+ always_apply: true
+
+val_dataset:
+ _target_: pytorch_segmentation_models_trainer.dataset_loader.dataset.SegmentationDataset
+ input_csv_path: /path/to/input.csv
+ data_loader:
+ shuffle: True
+ num_workers: 1
+ pin_memory: True
+ drop_last: True
+ prefetch_factor: 1
+ augmentation_list:
+ - _target_: albumentations.Resize
+ always_apply: true
+ height: 256
+ width: 256
+ p: 1.0
+ - _target_: albumentations.Normalize
+ p: 1.0
+ - _target_: albumentations.pytorch.transforms.ToTensorV2
+ always_apply: true
+```
+
+To train a model with the configuration directory ```/path/to/config/folder``` and the configuration name ```test.yaml```:
+
+```
+pytorch-smt --config-dir /path/to/config/folder --config-name test +mode=train
+```
+
+The mode can also be stored in the configuration YAML. In that case, do not pass the +mode= argument. If the mode is stored in the YAML and you want to override its value, drop the + prefix and pass just mode=.
+
+This module supports Hydra features such as configuration composition. For further information, please visit https://hydra.cc/docs/intro
+
+# Install
+
+If you are not using Docker and want to enable GPU acceleration, install pytorch_scatter before installing this package, as instructed in https://github.com/rusty1s/pytorch_scatter
+
+After installing pytorch_scatter, install this package with pip:
+
+```
+pip install pytorch_segmentation_models_trainer
+```
+
+We provide a Docker image with all dependencies installed and ready for GPU usage. You can pull it from Docker Hub:
+
+```
+docker pull phborba/pytorch_segmentation_models_trainer:latest
+```
+
+# Citing:
+
+```
+
+@software{philipe_borba_2021_5115127,
+ author = {Philipe Borba},
+ title = {{phborba/pytorch\_segmentation\_models\_trainer:
+ Version 0.8.0}},
+ month = jul,
+ year = 2021,
+ publisher = {Zenodo},
+ version = {v0.8.0},
+ doi = {10.5281/zenodo.5115127},
+ url = {https://doi.org/10.5281/zenodo.5115127}
+}
+```
+
+%prep
+%autosetup -n pytorch-segmentation-models-trainer-0.17.0
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-pytorch-segmentation-models-trainer -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Mon May 15 2023 Python_Bot <Python_Bot@openeuler.org> - 0.17.0-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..5d7fb93
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+1c08dbd207e7be8762d1f82ea09466f7 pytorch_segmentation_models_trainer-0.17.0.tar.gz