From 877513770200f27c14f7c91c3a240b66be93b7c0 Mon Sep 17 00:00:00 2001
From: CoprDistGit
Date: Tue, 25 Apr 2023 13:43:24 +0000
Subject: automatic import of python-autoviml

---
 .gitignore           |  1 +
 python-autoviml.spec | 14 +++++++-------
 sources              |  2 +-
 3 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/.gitignore b/.gitignore
index a15ed5e..657957f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 /autoviml-0.1.713.tar.gz
+/autoviml-0.1.714.tar.gz
diff --git a/python-autoviml.spec b/python-autoviml.spec
index bc81298..1ecd01f 100644
--- a/python-autoviml.spec
+++ b/python-autoviml.spec
@@ -1,11 +1,11 @@
 %global _empty_manifest_terminate_build 0
 Name:           python-autoviml
-Version:        0.1.713
+Version:        0.1.714
 Release:        1
 Summary:        Automatically Build Variant Interpretable ML models fast - now with CatBoost!
 License:        Apache License 2.0
 URL:            https://github.com/AutoViML/Auto_ViML
-Source0:        https://mirrors.nju.edu.cn/pypi/web/packages/32/e9/2cf9f258d78aee8cdf3d809dc7198c71da454ecaa7d94aa8dda68d993db9/autoviml-0.1.713.tar.gz
+Source0:        https://mirrors.nju.edu.cn/pypi/web/packages/01/90/635a82bbe04d17c70d972e7f82e155e079c4ff56447a0c8208ff15b6db87/autoviml-0.1.714.tar.gz
 BuildArch:      noarch

 Requires:       python3-beautifulsoup4
@@ -44,7 +44,7 @@ Automatically Build Various Interpretable ML models fast!
 Auto_ViML is pronounced as "auto vimal" (autovimal logo created by Sanket Ghanmare).

-## Update (May 2022)
+## Update (March 2023)

   1. Auto_ViML has a new flag to speed up processing using GPU's. Just set the `GPU_flag`=`True` on Colab and other environments. But don't forget to set the runtime type to be "GPU" while running on Colab. Otherwise you will get an error.

@@ -255,7 +255,7 @@ Automatically Build Various Interpretable ML models fast!
 Auto_ViML is pronounced as "auto vimal" (autovimal logo created by Sanket Ghanmare).

-## Update (May 2022)
+## Update (March 2023)

   1. Auto_ViML has a new flag to speed up processing using GPU's. Just set the `GPU_flag`=`True` on Colab and other environments. But don't forget to set the runtime type to be "GPU" while running on Colab. Otherwise you will get an error.

@@ -463,7 +463,7 @@ Automatically Build Various Interpretable ML models fast!
 Auto_ViML is pronounced as "auto vimal" (autovimal logo created by Sanket Ghanmare).

-## Update (May 2022)
+## Update (March 2023)

   1. Auto_ViML has a new flag to speed up processing using GPU's. Just set the `GPU_flag`=`True` on Colab and other environments. But don't forget to set the runtime type to be "GPU" while running on Colab. Otherwise you will get an error.

@@ -654,7 +654,7 @@ This project is not an official Google project. It is not supported by Google an

 %prep
-%autosetup -n autoviml-0.1.713
+%autosetup -n autoviml-0.1.714

 %build
 %py3_build
@@ -694,5 +694,5 @@ mv %{buildroot}/doclist.lst .
 %{_docdir}/*

 %changelog
-* Wed Apr 12 2023 Python_Bot - 0.1.713-1
+* Tue Apr 25 2023 Python_Bot - 0.1.714-1
 - Package Spec generated
diff --git a/sources b/sources
index 5bbdd82..e7711ec 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-e1a74156ddf143b70b57deec7ccab2c1 autoviml-0.1.713.tar.gz
+6b502cfa63925d2b598bd3b98e94a68f autoviml-0.1.714.tar.gz
--
cgit v1.2.3