From 49930bbfa628f4b8f28b57520501eb2ca9a101e9 Mon Sep 17 00:00:00 2001
From: CoprDistGit
Date: Wed, 10 May 2023 04:57:35 +0000
Subject: automatic import of python-fast-to-sql

---
 .gitignore              |   1 +
 python-fast-to-sql.spec | 344 ++++++++++++++++++++++++++++++++++++++++++++++++
 sources                 |   1 +
 3 files changed, 346 insertions(+)
 create mode 100644 python-fast-to-sql.spec
 create mode 100644 sources

diff --git a/.gitignore b/.gitignore
index e69de29..c1a44f4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/fast-to-sql-2.1.15.tar.gz

diff --git a/python-fast-to-sql.spec b/python-fast-to-sql.spec
new file mode 100644
index 0000000..65e9a6d
--- /dev/null
+++ b/python-fast-to-sql.spec
@@ -0,0 +1,344 @@
+%global _empty_manifest_terminate_build 0
Name:           python-fast-to-sql
Version:        2.1.15
Release:        1
Summary:        An improved way to upload pandas dataframes to Microsoft SQL Server.
License:        MIT License
URL:            https://github.com/jdglaser/fast-to-sql
Source0:        https://mirrors.nju.edu.cn/pypi/web/packages/14/4d/536937b91080ddd34fc7ccf6278e267312a41596a1aa2083b733acb22c54/fast-to-sql-2.1.15.tar.gz
BuildArch:      noarch

Requires:       python3-pandas
Requires:       python3-pyodbc

%description
# fast_to_sql

## Introduction

`fast_to_sql` is an improved way to upload pandas dataframes to Microsoft SQL Server.

`fast_to_sql` takes advantage of pyodbc rather than SQLAlchemy, which allows for a much lighter-weight import when writing pandas dataframes to SQL Server. It uses pyodbc's `executemany` method with `fast_executemany` set to `True`, resulting in far superior run times when inserting data.

## Installation

```sh
pip install fast_to_sql
```

## Requirements

* Written for Python 3.8+
* Requires pandas and pyodbc

## Example

```py
from datetime import datetime

import pandas as pd

import pyodbc
from fast_to_sql import fast_to_sql as fts

# Test DataFrame for insertion
df = pd.DataFrame({
    "Col1": [1, 2, 3],
    "Col2": ["A", "B", "C"],
    "Col3": [True, False, True],
    "Col4": [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)]
})

# Create a pyodbc connection
conn = pyodbc.connect(
    """
    Driver={ODBC Driver 17 for SQL Server};
    Server=localhost;
    Database=my_database;
    UID=my_user;
    PWD=my_pass;
    """
)

# If a table is created, the generated SQL is returned
create_statement = fts.fast_to_sql(df, "my_great_table", conn, if_exists="replace", custom={"Col1": "INT PRIMARY KEY"}, temp=False)

# Commit upload actions and close connection
conn.commit()
conn.close()
```

## Usage

### Main function

```python
fts.fast_to_sql(df, name, conn, if_exists="append", custom=None, temp=False, copy=False)
```

* ```df```: pandas DataFrame to upload
* ```name```: Desired name for the table in SQL Server
* ```conn```: A valid pyodbc connection object
* ```if_exists```: What to do if the specified table name already exists in the database. If the table does not exist, a new one will be created. Defaults to `'append'`.
    * __'append'__: Appends the dataframe to the table if it already exists in SQL Server.
    * __'fail'__: Raises a `FailError` if the table already exists in SQL Server.
    * __'replace'__: Drops the old table with the specified name and creates a new one. **Be careful with this option**: it will completely delete an existing table with the specified name in SQL Server.
* ```custom```: A dictionary with one or more of the column names being uploaded as keys and a valid SQL column definition as each value. The value must contain a type (`INT`, `FLOAT`, `VARCHAR(500)`, etc.), and can optionally also include constraints (`NOT NULL`, `PRIMARY KEY`, etc.)
    * Examples:
    `{'ColumnName': 'varchar(1000)'}`
    `{'ColumnName2': 'int primary key'}`
* ```temp```: Set to `True` to create a local SQL Server temporary table for the connection; defaults to `False`.
* ```copy```: Defaults to `False`. If set to `True`, a copy of the dataframe is made so that the column names of the original dataframe are not altered. Use this if you plan to keep using the dataframe in your script after running `fast_to_sql`. A short sketch combining these options follows this list.
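To make the `custom`, `temp`, and `copy` options concrete, here is a minimal, illustrative sketch. It follows the signature documented above; the connection string, table name, sample data, and column definitions are placeholders for illustration, not values required by the package:

```py
import pandas as pd
import pyodbc
from fast_to_sql import fast_to_sql as fts

# Placeholder connection details; adjust the driver, server, and credentials
# for your environment.
conn = pyodbc.connect(
    "Driver={ODBC Driver 17 for SQL Server};"
    "Server=localhost;Database=my_database;UID=my_user;PWD=my_pass;"
)

df = pd.DataFrame({"Id": [1, 2, 3], "Name": ["a", "b", "c"]})

# Upload into a temporary table scoped to this connection (temp=True),
# overriding the generated column definitions (custom=...), and working on a
# copy so the original dataframe's column names are left untouched (copy=True).
fts.fast_to_sql(
    df,
    "staging_table",
    conn,
    if_exists="replace",
    custom={"Id": "INT NOT NULL", "Name": "VARCHAR(100)"},
    temp=True,
    copy=True,
)

conn.commit()
conn.close()
```

Because `copy=True`, `df` keeps its original column names and can safely be reused later in the same script.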

%package -n python3-fast-to-sql
Summary:        An improved way to upload pandas dataframes to Microsoft SQL Server.
Provides:       python-fast-to-sql
BuildRequires:  python3-devel
BuildRequires:  python3-setuptools
BuildRequires:  python3-pip
%description -n python3-fast-to-sql
# fast_to_sql

## Introduction

`fast_to_sql` is an improved way to upload pandas dataframes to Microsoft SQL Server.

`fast_to_sql` takes advantage of pyodbc rather than SQLAlchemy, which allows for a much lighter-weight import when writing pandas dataframes to SQL Server. It uses pyodbc's `executemany` method with `fast_executemany` set to `True`, resulting in far superior run times when inserting data.

## Installation

```sh
pip install fast_to_sql
```

## Requirements

* Written for Python 3.8+
* Requires pandas and pyodbc

## Example

```py
from datetime import datetime

import pandas as pd

import pyodbc
from fast_to_sql import fast_to_sql as fts

# Test DataFrame for insertion
df = pd.DataFrame({
    "Col1": [1, 2, 3],
    "Col2": ["A", "B", "C"],
    "Col3": [True, False, True],
    "Col4": [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)]
})

# Create a pyodbc connection
conn = pyodbc.connect(
    """
    Driver={ODBC Driver 17 for SQL Server};
    Server=localhost;
    Database=my_database;
    UID=my_user;
    PWD=my_pass;
    """
)

# If a table is created, the generated SQL is returned
create_statement = fts.fast_to_sql(df, "my_great_table", conn, if_exists="replace", custom={"Col1": "INT PRIMARY KEY"}, temp=False)

# Commit upload actions and close connection
conn.commit()
conn.close()
```

## Usage

### Main function

```python
fts.fast_to_sql(df, name, conn, if_exists="append", custom=None, temp=False, copy=False)
```

* ```df```: pandas DataFrame to upload
* ```name```: Desired name for the table in SQL Server
* ```conn```: A valid pyodbc connection object
* ```if_exists```: What to do if the specified table name already exists in the database. If the table does not exist, a new one will be created. Defaults to `'append'`.
    * __'append'__: Appends the dataframe to the table if it already exists in SQL Server.
    * __'fail'__: Raises a `FailError` if the table already exists in SQL Server.
    * __'replace'__: Drops the old table with the specified name and creates a new one. **Be careful with this option**: it will completely delete an existing table with the specified name in SQL Server.
* ```custom```: A dictionary with one or more of the column names being uploaded as keys and a valid SQL column definition as each value. The value must contain a type (`INT`, `FLOAT`, `VARCHAR(500)`, etc.), and can optionally also include constraints (`NOT NULL`, `PRIMARY KEY`, etc.)
    * Examples:
    `{'ColumnName': 'varchar(1000)'}`
    `{'ColumnName2': 'int primary key'}`
* ```temp```: Set to `True` to create a local SQL Server temporary table for the connection; defaults to `False`.
* ```copy```: Defaults to `False`. If set to `True`, a copy of the dataframe is made so that the column names of the original dataframe are not altered. Use this if you plan to keep using the dataframe in your script after running `fast_to_sql`.

%package help
Summary:        Development documents and examples for fast-to-sql
Provides:       python3-fast-to-sql-doc
%description help
# fast_to_sql

## Introduction

`fast_to_sql` is an improved way to upload pandas dataframes to Microsoft SQL Server.

`fast_to_sql` takes advantage of pyodbc rather than SQLAlchemy, which allows for a much lighter-weight import when writing pandas dataframes to SQL Server. It uses pyodbc's `executemany` method with `fast_executemany` set to `True`, resulting in far superior run times when inserting data.

## Installation

```sh
pip install fast_to_sql
```

## Requirements

* Written for Python 3.8+
* Requires pandas and pyodbc

## Example

```py
from datetime import datetime

import pandas as pd

import pyodbc
from fast_to_sql import fast_to_sql as fts

# Test DataFrame for insertion
df = pd.DataFrame({
    "Col1": [1, 2, 3],
    "Col2": ["A", "B", "C"],
    "Col3": [True, False, True],
    "Col4": [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)]
})

# Create a pyodbc connection
conn = pyodbc.connect(
    """
    Driver={ODBC Driver 17 for SQL Server};
    Server=localhost;
    Database=my_database;
    UID=my_user;
    PWD=my_pass;
    """
)

# If a table is created, the generated SQL is returned
create_statement = fts.fast_to_sql(df, "my_great_table", conn, if_exists="replace", custom={"Col1": "INT PRIMARY KEY"}, temp=False)

# Commit upload actions and close connection
conn.commit()
conn.close()
```

## Usage

### Main function

```python
fts.fast_to_sql(df, name, conn, if_exists="append", custom=None, temp=False, copy=False)
```

* ```df```: pandas DataFrame to upload
* ```name```: Desired name for the table in SQL Server
* ```conn```: A valid pyodbc connection object
* ```if_exists```: What to do if the specified table name already exists in the database. If the table does not exist, a new one will be created. Defaults to `'append'`.
    * __'append'__: Appends the dataframe to the table if it already exists in SQL Server.
    * __'fail'__: Raises a `FailError` if the table already exists in SQL Server.
    * __'replace'__: Drops the old table with the specified name and creates a new one. **Be careful with this option**: it will completely delete an existing table with the specified name in SQL Server.
* ```custom```: A dictionary with one or more of the column names being uploaded as keys and a valid SQL column definition as each value. The value must contain a type (`INT`, `FLOAT`, `VARCHAR(500)`, etc.), and can optionally also include constraints (`NOT NULL`, `PRIMARY KEY`, etc.)
    * Examples:
    `{'ColumnName': 'varchar(1000)'}`
    `{'ColumnName2': 'int primary key'}`
* ```temp```: Set to `True` to create a local SQL Server temporary table for the connection; defaults to `False`.
* ```copy```: Defaults to `False`. If set to `True`, a copy of the dataframe is made so that the column names of the original dataframe are not altered. Use this if you plan to keep using the dataframe in your script after running `fast_to_sql`.

%prep
%autosetup -n fast-to-sql-2.1.15

%build
%py3_build

%install
%py3_install
install -d -m755 %{buildroot}/%{_pkgdocdir}
if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
pushd %{buildroot}
if [ -d usr/lib ]; then
	find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
fi
if [ -d usr/lib64 ]; then
	find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
fi
if [ -d usr/bin ]; then
	find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
fi
if [ -d usr/sbin ]; then
	find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
fi
touch doclist.lst
if [ -d usr/share/man ]; then
	find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
fi
popd
mv %{buildroot}/filelist.lst .
mv %{buildroot}/doclist.lst .

%files -n python3-fast-to-sql -f filelist.lst
%dir %{python3_sitelib}/*

%files help -f doclist.lst
%{_docdir}/*

%changelog
* Wed May 10 2023 Python_Bot - 2.1.15-1
- Package Spec generated

diff --git a/sources b/sources
new file mode 100644
index 0000000..103af88
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+b359c5e708e3b88c3d89f3d6d35a0723 fast-to-sql-2.1.15.tar.gz
--
cgit v1.2.3