summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCoprDistGit <infra@openeuler.org>2023-04-11 05:25:21 +0000
committerCoprDistGit <infra@openeuler.org>2023-04-11 05:25:21 +0000
commit4a9b398e15629fd083011993350888a8549db6a9 (patch)
tree332a55453a5704cbb43b399e72ab12c5cc93cc92
parent82bd133077a33f0f89abcc3d6ec30840d2c6adb4 (diff)
automatic import of python-markdown-frames
-rw-r--r--.gitignore1
-rw-r--r--python-markdown-frames.spec314
-rw-r--r--sources1
3 files changed, 316 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..0c398f9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/markdown_frames-1.0.6.tar.gz
diff --git a/python-markdown-frames.spec b/python-markdown-frames.spec
new file mode 100644
index 0000000..dc6716d
--- /dev/null
+++ b/python-markdown-frames.spec
@@ -0,0 +1,314 @@
+%global _empty_manifest_terminate_build 0
+Name: python-markdown-frames
+Version: 1.0.6
+Release: 1
+Summary: Markdown tables parsing to pyspark / pandas DataFrames
+License: MIT License
+URL: https://github.com/exacaster/markdown_frames
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/b5/1c/e1bf523d26db16a99d1b3c024834f8f5cdb3c52afb6f381fcc6ae6463c5e/markdown_frames-1.0.6.tar.gz
+BuildArch: noarch
+
+Requires: python3-pandas
+Requires: python3-pyspark
+
+%description
+# Markdown Frames
+
+Helper package for testing Apache Spark and Pandas DataFrames.
+It makes your data-related unit tests more readable.
+
+## History
+
+While working at [Exacaster](https://exacaster.com/) [Vaidas Armonas](https://github.com/Va1da2) came up with the idea to make testing data more representable. And with the help of his team, he implemented the initial version of this package.
+
+Before that, we had to define our testing data as follows:
+```python
+schema = ["user_id", "even_type", "item_id", "event_time", "country", "dt"]
+input_df = spark.createDataFrame([
+ (123456, 'page_view', None, datetime(2017,12,31,23,50,50), "uk", "2017-12-31"),
+ (123456, 'item_view', 68471513, datetime(2017,12,31,23,50,55), "uk", "2017-12-31")],
+ schema)
+```
+
+And with this library you can define same data like this:
+```python
+input_data = """
+ | user_id | even_type | item_id | event_time | country | dt |
+ | bigint | string | bigint | timestamp | string | string |
+ | ---------- | ----------- | -------- | ------------------- | -------- | ----------- |
+ | 123456 | page_view | None | 2017-12-31 23:50:50 | uk | 2017-12-31 |
+ | 123456 | item_view | 68471513 | 2017-12-31 23:50:55 | uk | 2017-12-31 |
+"""
+input_df = spark_df(input_data, spark)
+```
+
+## Installation
+To install this package, run this command on your python environment:
+```bash
+pip install markdown_frames[pyspark]
+```
+
+## Usage
+
+When you have this package installed, you can use it in your unit tests as follows (assuming you are using `pytest-spark` and have Spark Session available):
+
+```python
+from pyspark.sql import SparkSession
+from markdown_frames.spark_dataframe import spark_df
+
+def test_your_use_case(spark: SparkSession) -> None:
+ expected_data = """
+ | column1 | column2 | column3 | column4 |
+ | int | string | float | bigint |
+ | ------- | ------- | ------- | ------- |
+ | 1 | user1 | 3.14 | 111111 |
+ | 2 | None | 1.618 | 222222 |
+ | 3 | '' | 2.718 | 333333 |
+ """
+ expected_df = spark_df(expected_data, spark)
+
+    actual_df = your_use_case(spark)
+
+    assert expected_df.collect() == actual_df.collect()
+```
+
+## Supported data types
+
+This package supports all major datatypes, use these type names in your table definitions:
+- `int`
+- `bigint`
+- `float`
+- `double`
+- `string`
+- `boolean`
+- `date`
+- `timestamp`
+- `decimal(precision,scale)` (scale and precision must be integers)
+- `array<int>` (int can be replaced by any of mentioned types)
+- `map<string,int>` (string and int can be replaced by any of mentioned types)
+
+For `null` values use `None` keyword.
+
+## License
+
+This project is [MIT](./LICENSE) licensed.
+
+
+%package -n python3-markdown-frames
+Summary: Markdown tables parsing to pyspark / pandas DataFrames
+Provides: python-markdown-frames
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-markdown-frames
+# Markdown Frames
+
+Helper package for testing Apache Spark and Pandas DataFrames.
+It makes your data-related unit tests more readable.
+
+## History
+
+While working at [Exacaster](https://exacaster.com/) [Vaidas Armonas](https://github.com/Va1da2) came up with the idea to make testing data more representable. And with the help of his team, he implemented the initial version of this package.
+
+Before that, we had to define our testing data as follows:
+```python
+schema = ["user_id", "even_type", "item_id", "event_time", "country", "dt"]
+input_df = spark.createDataFrame([
+ (123456, 'page_view', None, datetime(2017,12,31,23,50,50), "uk", "2017-12-31"),
+ (123456, 'item_view', 68471513, datetime(2017,12,31,23,50,55), "uk", "2017-12-31")],
+ schema)
+```
+
+And with this library you can define same data like this:
+```python
+input_data = """
+ | user_id | even_type | item_id | event_time | country | dt |
+ | bigint | string | bigint | timestamp | string | string |
+ | ---------- | ----------- | -------- | ------------------- | -------- | ----------- |
+ | 123456 | page_view | None | 2017-12-31 23:50:50 | uk | 2017-12-31 |
+ | 123456 | item_view | 68471513 | 2017-12-31 23:50:55 | uk | 2017-12-31 |
+"""
+input_df = spark_df(input_data, spark)
+```
+
+## Installation
+To install this package, run this command on your python environment:
+```bash
+pip install markdown_frames[pyspark]
+```
+
+## Usage
+
+When you have this package installed, you can use it in your unit tests as follows (assuming you are using `pytest-spark` and have Spark Session available):
+
+```python
+from pyspark.sql import SparkSession
+from markdown_frames.spark_dataframe import spark_df
+
+def test_your_use_case(spark: SparkSession) -> None:
+ expected_data = """
+ | column1 | column2 | column3 | column4 |
+ | int | string | float | bigint |
+ | ------- | ------- | ------- | ------- |
+ | 1 | user1 | 3.14 | 111111 |
+ | 2 | None | 1.618 | 222222 |
+ | 3 | '' | 2.718 | 333333 |
+ """
+ expected_df = spark_df(expected_data, spark)
+
+    actual_df = your_use_case(spark)
+
+    assert expected_df.collect() == actual_df.collect()
+```
+
+## Supported data types
+
+This package supports all major datatypes, use these type names in your table definitions:
+- `int`
+- `bigint`
+- `float`
+- `double`
+- `string`
+- `boolean`
+- `date`
+- `timestamp`
+- `decimal(precision,scale)` (scale and precision must be integers)
+- `array<int>` (int can be replaced by any of mentioned types)
+- `map<string,int>` (string and int can be replaced by any of mentioned types)
+
+For `null` values use `None` keyword.
+
+## License
+
+This project is [MIT](./LICENSE) licensed.
+
+
+%package help
+Summary: Development documents and examples for markdown-frames
+Provides: python3-markdown-frames-doc
+%description help
+# Markdown Frames
+
+Helper package for testing Apache Spark and Pandas DataFrames.
+It makes your data-related unit tests more readable.
+
+## History
+
+While working at [Exacaster](https://exacaster.com/) [Vaidas Armonas](https://github.com/Va1da2) came up with the idea to make testing data more representable. And with the help of his team, he implemented the initial version of this package.
+
+Before that, we had to define our testing data as follows:
+```python
+schema = ["user_id", "even_type", "item_id", "event_time", "country", "dt"]
+input_df = spark.createDataFrame([
+ (123456, 'page_view', None, datetime(2017,12,31,23,50,50), "uk", "2017-12-31"),
+ (123456, 'item_view', 68471513, datetime(2017,12,31,23,50,55), "uk", "2017-12-31")],
+ schema)
+```
+
+And with this library you can define same data like this:
+```python
+input_data = """
+ | user_id | even_type | item_id | event_time | country | dt |
+ | bigint | string | bigint | timestamp | string | string |
+ | ---------- | ----------- | -------- | ------------------- | -------- | ----------- |
+ | 123456 | page_view | None | 2017-12-31 23:50:50 | uk | 2017-12-31 |
+ | 123456 | item_view | 68471513 | 2017-12-31 23:50:55 | uk | 2017-12-31 |
+"""
+input_df = spark_df(input_data, spark)
+```
+
+## Installation
+To install this package, run this command on your python environment:
+```bash
+pip install markdown_frames[pyspark]
+```
+
+## Usage
+
+When you have this package installed, you can use it in your unit tests as follows (assuming you are using `pytest-spark` and have Spark Session available):
+
+```python
+from pyspark.sql import SparkSession
+from markdown_frames.spark_dataframe import spark_df
+
+def test_your_use_case(spark: SparkSession) -> None:
+ expected_data = """
+ | column1 | column2 | column3 | column4 |
+ | int | string | float | bigint |
+ | ------- | ------- | ------- | ------- |
+ | 1 | user1 | 3.14 | 111111 |
+ | 2 | None | 1.618 | 222222 |
+ | 3 | '' | 2.718 | 333333 |
+ """
+ expected_df = spark_df(expected_data, spark)
+
+    actual_df = your_use_case(spark)
+
+    assert expected_df.collect() == actual_df.collect()
+```
+
+## Supported data types
+
+This package supports all major datatypes, use these type names in your table definitions:
+- `int`
+- `bigint`
+- `float`
+- `double`
+- `string`
+- `boolean`
+- `date`
+- `timestamp`
+- `decimal(precision,scale)` (scale and precision must be integers)
+- `array<int>` (int can be replaced by any of mentioned types)
+- `map<string,int>` (string and int can be replaced by any of mentioned types)
+
+For `null` values use `None` keyword.
+
+## License
+
+This project is [MIT](./LICENSE) licensed.
+
+
+%prep
+%autosetup -n markdown-frames-1.0.6
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-markdown-frames -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Tue Apr 11 2023 Python_Bot <Python_Bot@openeuler.org> - 1.0.6-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..7e03a63
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+93dde8202f47498b16e5433385538d50 markdown_frames-1.0.6.tar.gz