author     CoprDistGit <infra@openeuler.org>  2023-05-05 07:41:58 +0000
committer  CoprDistGit <infra@openeuler.org>  2023-05-05 07:41:58 +0000
commit     b4babf97ee4e99bd7623df17907cc0beb52dde12 (patch)
tree       6bca259e706a9167f7a074fee73d2eae82b5e89a /python-zlib-state.spec
parent     76702883eabc72b9e0c7d894c96533593f6867eb (diff)
automatic import of python-zlib-state (openeuler20.03)
Diffstat (limited to 'python-zlib-state.spec')
-rw-r--r--  python-zlib-state.spec  335
1 file changed, 335 insertions(+), 0 deletions(-)
diff --git a/python-zlib-state.spec b/python-zlib-state.spec
new file mode 100644
index 0000000..16982cb
--- /dev/null
+++ b/python-zlib-state.spec
@@ -0,0 +1,335 @@
+%global _empty_manifest_terminate_build 0
+Name: python-zlib-state
+Version: 0.1.5
+Release: 1
+Summary: Low-level interface to the zlib library that enables capturing the decoding state
+License: MIT
+URL: https://github.com/seanmacavaney/zlib-state
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/d6/36/aef7d61c7b0a7f5b928cb31bf12baab3a96830f9c9e421a9f728e2a81b9d/zlib-state-0.1.5.tar.gz
+
+
+%description
+# zlib-state
+
+Low-level interface to the zlib library that enables capturing the decoding state.
+
+## Install
+
+From PyPI:
+
+```
+pip install zlib-state
+```
+
+From source:
+
+```
+python setup.py install
+```
+
+Tested on Ubuntu/macOS/Windows with Python 3.6-3.10.
+
+## GzipStateFile
+
+Wraps Decompressor as a buffered reader.
+
+Based on my benchmarking, this is somewhat slower than Python's built-in gzip module.
+
+A typical usage pattern looks like:
+
+```python
+import zlib_state
+
+TARGET_LINE = 5000 # pick back up after around the 5,000th line
+# Specify keep_last_state=True to tell the object to grab and keep the state and pos after each block
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz', keep_last_state=True) as f:
+    for i, line in enumerate(f):
+        if i == TARGET_LINE:
+            state, pos = f.last_state, f.last_state_pos
+
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz') as f:
+    f.zseek(pos, state)
+    remainder = f.read()
+```
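+
+The captured state and pos are what make later random access possible, so it can be handy to persist them between runs. The sketch below is not part of the upstream README: it assumes the object in last_state holds only plain bytes/ints (as the get_state() comments further down suggest) and is therefore picklable, and the path frankenstein.state.pkl is purely illustrative.
+
+```python
+import pickle
+
+import zlib_state
+
+STATE_FILE = 'frankenstein.state.pkl'  # hypothetical path used only for this sketch
+
+# First pass: remember a re-entry point near the 5,000th line.
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz', keep_last_state=True) as f:
+    for i, line in enumerate(f):
+        if i == 5000:
+            with open(STATE_FILE, 'wb') as out:
+                pickle.dump((f.last_state, f.last_state_pos), out)
+
+# Later, possibly in a different process: jump straight back to that point.
+with open(STATE_FILE, 'rb') as inp:
+    state, pos = pickle.load(inp)
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz') as f:
+    f.zseek(pos, state)
+    remainder = f.read()
+```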
+
+## Decompressor
+
+Very basic decompression object that's picky and unforgiving.
+
+Based on my benchmarking, this can iterate over gzip files faster than Python's built-in gzip module.
+
+A typical usage pattern looks like:
+
+```python
+import zlib_state
+
+decomp = zlib_state.Decompressor(32 + 15) # from zlib; 32 indicates gzip header, 15 window size
+block_count = 0
+with open('testdata/frankenstein.txt.gz', 'rb') as f:
+    while not decomp.eof():
+        needed_input = decomp.needs_input()
+        if needed_input > 0:
+            # decomp needs more input, and it tells you how much.
+            decomp.feed_input(f.read(needed_input))
+        # next_chunk may be empty (e.g., if finished with gzip headers) or may contain data.
+        # It sends as much as it has left in its output buffer, or asks zlib to continue.
+        next_chunk = decomp.read() # you can also pass a maximum size to take and/or a buffer to write to
+        if decomp.block_boundary():
+            block_count += 1
+            # When it reaches the end of a deflate block, it always stops. At these times, you can grab the state
+            # if you wish.
+            if block_count == 4: # resume after the 4th block
+                state = decomp.get_state() # includes zdict, bits, byte -- everything it needs to resume from pos
+                pos = decomp.total_in() # the current position in the binary file to resume from
+    print(f'{block_count} blocks processed')
+    # resume from somewhere in the file. Only possible spots are the block boundaries, given the state
+    f.seek(pos)
+    decomp = zlib_state.Decompressor(-15) # from zlib; 15 window size, negative means no headers
+    decomp.set_state(*state)
+    while not decomp.eof():
+        needed_input = decomp.needs_input()
+        if needed_input > 0:
+            # decomp needs more input, and it tells you how much.
+            decomp.feed_input(f.read(needed_input))
+        next_chunk = decomp.read()
+```
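+
+The feed/read loop above is the entire driving protocol, so it can be factored into a small helper. This is a minimal sketch built only from the calls shown above; the helper name iter_chunks is illustrative and not part of the zlib-state API.
+
+```python
+import zlib_state
+
+def iter_chunks(fileobj, decomp):
+    # Drive the Decompressor to end-of-stream, yielding each non-empty chunk.
+    while not decomp.eof():
+        needed = decomp.needs_input()
+        if needed > 0:
+            decomp.feed_input(fileobj.read(needed))
+        chunk = decomp.read()
+        if chunk:
+            yield chunk
+
+with open('testdata/frankenstein.txt.gz', 'rb') as f:
+    data = b''.join(iter_chunks(f, zlib_state.Decompressor(32 + 15)))
+```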
+
+
+
+
+
+%package -n python3-zlib-state
+Summary: Low-level interface to the zlib library that enables capturing the decoding state
+Provides: python-zlib-state
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+BuildRequires: python3-cffi
+BuildRequires: gcc
+BuildRequires: gdb
+%description -n python3-zlib-state
+# zlib-state
+
+Low-level interface to the zlib library that enables capturing the decoding state.
+
+## Install
+
+From PyPI:
+
+```
+pip install zlib-state
+```
+
+From source:
+
+```
+python setup.py install
+```
+
+Tested on Ubuntu/macOS/Windows with Python 3.6-3.10.
+
+## GzipStateFile
+
+Wraps Decompressor as a buffered reader.
+
+Based on my benchmarking, this is somewhat slower than Python's built-in gzip module.
+
+A typical usage pattern looks like:
+
+```python
+import zlib_state
+
+TARGET_LINE = 5000 # pick back up after around the 5,000th line
+# Specify keep_last_state=True to tell the object to grab and keep the state and pos after each block
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz', keep_last_state=True) as f:
+    for i, line in enumerate(f):
+        if i == TARGET_LINE:
+            state, pos = f.last_state, f.last_state_pos
+
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz') as f:
+    f.zseek(pos, state)
+    remainder = f.read()
+```
+
+## Decompressor
+
+Very basic decompression object that's picky and unforgiving.
+
+Based on my benchmarking, this can iterate over gzip files faster than Python's built-in gzip module.
+
+A typical usage pattern looks like:
+
+```python
+import zlib_state
+
+decomp = zlib_state.Decompressor(32 + 15) # from zlib; 32 indicates gzip header, 15 window size
+block_count = 0
+with open('testdata/frankenstein.txt.gz', 'rb') as f:
+    while not decomp.eof():
+        needed_input = decomp.needs_input()
+        if needed_input > 0:
+            # decomp needs more input, and it tells you how much.
+            decomp.feed_input(f.read(needed_input))
+        # next_chunk may be empty (e.g., if finished with gzip headers) or may contain data.
+        # It sends as much as it has left in its output buffer, or asks zlib to continue.
+        next_chunk = decomp.read() # you can also pass a maximum size to take and/or a buffer to write to
+        if decomp.block_boundary():
+            block_count += 1
+            # When it reaches the end of a deflate block, it always stops. At these times, you can grab the state
+            # if you wish.
+            if block_count == 4: # resume after the 4th block
+                state = decomp.get_state() # includes zdict, bits, byte -- everything it needs to resume from pos
+                pos = decomp.total_in() # the current position in the binary file to resume from
+    print(f'{block_count} blocks processed')
+    # resume from somewhere in the file. Only possible spots are the block boundaries, given the state
+    f.seek(pos)
+    decomp = zlib_state.Decompressor(-15) # from zlib; 15 window size, negative means no headers
+    decomp.set_state(*state)
+    while not decomp.eof():
+        needed_input = decomp.needs_input()
+        if needed_input > 0:
+            # decomp needs more input, and it tells you how much.
+            decomp.feed_input(f.read(needed_input))
+        next_chunk = decomp.read()
+```
+
+
+
+
+
+%package help
+Summary: Development documents and examples for zlib-state
+Provides: python3-zlib-state-doc
+%description help
+# zlib-state
+
+Low-level interface to the zlib library that enables capturing the decoding state.
+
+## Install
+
+From PyPI:
+
+```
+pip install zlib-state
+```
+
+From source:
+
+```
+python setup.py install
+```
+
+Tested on Ubuntu/macOS/Windows with Python 3.6-3.10.
+
+## GzipStateFile
+
+Wraps Decompressor as a buffered reader.
+
+Based on my benchmarking, this is somewhat slower than Python's built-in gzip module.
+
+A typical usage pattern looks like:
+
+```python
+import zlib_state
+
+TARGET_LINE = 5000 # pick back up after around the 5,000th line
+# Specify keep_last_state=True to tell the object to grab and keep the state and pos after each block
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz', keep_last_state=True) as f:
+    for i, line in enumerate(f):
+        if i == TARGET_LINE:
+            state, pos = f.last_state, f.last_state_pos
+
+with zlib_state.GzipStateFile('testdata/frankenstein.txt.gz') as f:
+    f.zseek(pos, state)
+    remainder = f.read()
+```
+
+## Decompressor
+
+Very basic decompression object that's picky and unforgiving.
+
+Based on my benchmarking, this can iterate over gzip files faster than Python's built-in gzip module.
+
+A typical usage pattern looks like:
+
+```python
+import zlib_state
+
+decomp = zlib_state.Decompressor(32 + 15) # from zlib; 32 indicates gzip header, 15 window size
+block_count = 0
+with open('testdata/frankenstein.txt.gz', 'rb') as f:
+    while not decomp.eof():
+        needed_input = decomp.needs_input()
+        if needed_input > 0:
+            # decomp needs more input, and it tells you how much.
+            decomp.feed_input(f.read(needed_input))
+        # next_chunk may be empty (e.g., if finished with gzip headers) or may contain data.
+        # It sends as much as it has left in its output buffer, or asks zlib to continue.
+        next_chunk = decomp.read() # you can also pass a maximum size to take and/or a buffer to write to
+        if decomp.block_boundary():
+            block_count += 1
+            # When it reaches the end of a deflate block, it always stops. At these times, you can grab the state
+            # if you wish.
+            if block_count == 4: # resume after the 4th block
+                state = decomp.get_state() # includes zdict, bits, byte -- everything it needs to resume from pos
+                pos = decomp.total_in() # the current position in the binary file to resume from
+    print(f'{block_count} blocks processed')
+    # resume from somewhere in the file. Only possible spots are the block boundaries, given the state
+    f.seek(pos)
+    decomp = zlib_state.Decompressor(-15) # from zlib; 15 window size, negative means no headers
+    decomp.set_state(*state)
+    while not decomp.eof():
+        needed_input = decomp.needs_input()
+        if needed_input > 0:
+            # decomp needs more input, and it tells you how much.
+            decomp.feed_input(f.read(needed_input))
+        next_chunk = decomp.read()
+```
+
+
+
+
+
+%prep
+%autosetup -n zlib-state-0.1.5
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-zlib-state -f filelist.lst
+%dir %{python3_sitearch}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Fri May 05 2023 Python_Bot <Python_Bot@openeuler.org> - 0.1.5-1
+- Package Spec generated