author    CoprDistGit <infra@openeuler.org>  2023-04-10 08:28:31 +0000
committer CoprDistGit <infra@openeuler.org>  2023-04-10 08:28:31 +0000
commit    2f4b8d6cb8c617145380395e86a3904c24c254e2 (patch)
tree      8913f3cd00bd32652df68726a05cbcdc1e81b4e5
parent    46353c38ac345a0b58d41550569e1649ac55c462 (diff)
automatic import of python-kafka-python
-rw-r--r--  .gitignore                   1
-rw-r--r--  python-kafka-python.spec   394
-rw-r--r--  sources                      1
3 files changed, 396 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..5d92fdf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/kafka-python-2.0.2.tar.gz
diff --git a/python-kafka-python.spec b/python-kafka-python.spec
new file mode 100644
index 0000000..16f3e21
--- /dev/null
+++ b/python-kafka-python.spec
@@ -0,0 +1,394 @@
+%global _empty_manifest_terminate_build 0
+Name: python-kafka-python
+Version: 2.0.2
+Release: 1
+Summary: Pure Python client for Apache Kafka
+License: Apache License 2.0
+URL: https://github.com/dpkp/kafka-python
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/07/4c/2595fb5733c3ac01aef3dacce17ff07f7f3336d9f96548bcf723b9073e5c/kafka-python-2.0.2.tar.gz
+BuildArch: noarch
+
+Requires: python3-crc32c
+
+%description
+Python client for the Apache Kafka distributed stream processing system.
+kafka-python is designed to function much like the official java client, with a
+sprinkling of pythonic interfaces (e.g., consumer iterators).
+kafka-python is best used with newer brokers (0.9+), but is backwards-compatible with
+older versions (to 0.8.0). Some features will only be enabled on newer brokers.
+For example, fully coordinated consumer groups -- i.e., dynamic partition
+assignment to multiple consumers in the same group -- requires use of 0.9+ kafka
+brokers. Supporting this feature for earlier broker releases would require
+writing and maintaining custom leadership election and membership / health
+check code (perhaps using zookeeper or consul). For older brokers, you can
+achieve something similar by manually assigning different partitions to each
+consumer instance with config management tools like chef, ansible, etc. This
+approach will work fine, though it does not support rebalancing on failures.
+See <https://kafka-python.readthedocs.io/en/master/compatibility.html>
+for more details.
+Please note that the master branch may contain unreleased features. For release
+documentation, please see readthedocs and/or python's inline help.
+>>> pip install kafka-python
+KafkaConsumer
+*************
+KafkaConsumer is a high-level message consumer, intended to operate as similarly
+as possible to the official java client. Full support for coordinated
+consumer groups requires use of kafka brokers that support the Group APIs: kafka v0.9+.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html>
+for API and configuration details.
+The consumer iterator returns ConsumerRecords, which are simple namedtuples
+that expose basic message attributes: topic, partition, offset, key, and value:
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic')
+>>> for msg in consumer:
+...     print(msg)
+>>> # join a consumer group for dynamic partition assignment and offset commits
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic', group_id='my_favorite_group')
+>>> for msg in consumer:
+...     print(msg)
+>>> # manually assign the partition list for the consumer
+>>> from kafka import TopicPartition
+>>> consumer = KafkaConsumer(bootstrap_servers='localhost:1234')
+>>> consumer.assign([TopicPartition('foobar', 2)])
+>>> msg = next(consumer)
+>>> # Deserialize msgpack-encoded values
+>>> import msgpack
+>>> consumer = KafkaConsumer(value_deserializer=msgpack.loads)
+>>> consumer.subscribe(['msgpackfoo'])
+>>> for msg in consumer:
+...     assert isinstance(msg.value, dict)
+>>> # Access record headers. The returned value is a list of tuples
+>>> # with str, bytes for key and value
+>>> for msg in consumer:
+...     print(msg.headers)
+>>> # Get consumer metrics
+>>> metrics = consumer.metrics()
+KafkaProducer
+*************
+KafkaProducer is a high-level, asynchronous message producer. The class is
+intended to operate as similarly as possible to the official java client.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html>
+for more details.
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(bootstrap_servers='localhost:1234')
+>>> for _ in range(100):
+...     producer.send('foobar', b'some_message_bytes')
+>>> # Block until a single message is sent (or timeout)
+>>> future = producer.send('foobar', b'another_message')
+>>> result = future.get(timeout=60)
+>>> # Block until all pending messages are at least put on the network
+>>> # NOTE: This does not guarantee delivery or success! It is really
+>>> # only useful if you configure internal batching using linger_ms
+>>> producer.flush()
+>>> # Use a key for hashed-partitioning
+>>> producer.send('foobar', key=b'foo', value=b'bar')
+>>> # Serialize json messages
+>>> import json
+>>> producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'))
+>>> producer.send('fizzbuzz', {'foo': 'bar'})
+>>> # Serialize string keys
+>>> producer = KafkaProducer(key_serializer=str.encode)
+>>> producer.send('flipflap', key='ping', value=b'1234')
+>>> # Compress messages
+>>> producer = KafkaProducer(compression_type='gzip')
+>>> for i in range(1000):
+...     producer.send('foobar', b'msg %d' % i)
+>>> # Include record headers. The format is list of tuples with string key
+>>> # and bytes value.
+>>> producer.send('foobar', value=b'c29tZSB2YWx1ZQ==', headers=[('content-encoding', b'base64')])
+>>> # Get producer performance metrics
+>>> metrics = producer.metrics()
+Thread safety
+*************
+The KafkaProducer can be used across threads without issue, unlike the
+KafkaConsumer which cannot.
+While it is possible to use the KafkaConsumer in a thread-local manner,
+multiprocessing is recommended.
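+A minimal sketch of the recommended one-consumer-per-process pattern (the topic,
+group id, and worker count below are illustrative placeholders):
+>>> from multiprocessing import Process
+>>> from kafka import KafkaConsumer
+>>> def consume():
+...     # each process owns its own consumer; the group coordinator splits partitions among them
+...     consumer = KafkaConsumer('my_favorite_topic', group_id='my_favorite_group')
+...     for msg in consumer:
+...         print(msg)
+>>> workers = [Process(target=consume) for _ in range(4)]
+>>> for w in workers:
+...     w.start()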
+Compression
+***********
+kafka-python supports gzip compression/decompression natively. To produce or consume lz4
+compressed messages, you should install python-lz4 (pip install lz4).
+To enable snappy compression/decompression install python-snappy (also requires snappy library).
+See <https://kafka-python.readthedocs.io/en/master/install.html#optional-snappy-install>
+for more information.
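+Once the optional packages are installed, compressed producers are configured the
+same way as the gzip example above (topic and payload are illustrative):
+>>> producer = KafkaProducer(compression_type='lz4')
+>>> producer.send('foobar', b'lz4-compressed message')
+>>> producer = KafkaProducer(compression_type='snappy')
+>>> producer.send('foobar', b'snappy-compressed message')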
+Optimized CRC32 Validation
+**************************
+Kafka uses CRC32 checksums to validate messages. kafka-python includes a pure
+python implementation for compatibility. To improve performance for high-throughput
+applications, kafka-python will use `crc32c` for optimized native code if installed.
+See https://pypi.org/project/crc32c/
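+No code changes are required; installing the optional package is enough for
+kafka-python to detect and use it automatically:
+>>> pip install crc32c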
+Protocol
+********
+A secondary goal of kafka-python is to provide an easy-to-use protocol layer
+for interacting with kafka brokers via the python repl. This is useful for
+testing, probing, and general experimentation. The protocol support is
+leveraged to enable a KafkaClient.check_version() method that
+probes a kafka broker and attempts to identify which version it is running
+(0.8.0 to 2.4+).
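+As an illustrative sketch (the broker address is a placeholder), a version probe
+looks like this:
+>>> from kafka import KafkaClient
+>>> client = KafkaClient(bootstrap_servers='localhost:9092')
+>>> client.check_version()  # returns a version tuple, e.g. (2, 4, 0)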
+
+%package -n python3-kafka-python
+Summary: Pure Python client for Apache Kafka
+Provides: python-kafka-python
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-kafka-python
+Python client for the Apache Kafka distributed stream processing system.
+kafka-python is designed to function much like the official java client, with a
+sprinkling of pythonic interfaces (e.g., consumer iterators).
+kafka-python is best used with newer brokers (0.9+), but is backwards-compatible with
+older versions (to 0.8.0). Some features will only be enabled on newer brokers.
+For example, fully coordinated consumer groups -- i.e., dynamic partition
+assignment to multiple consumers in the same group -- requires use of 0.9+ kafka
+brokers. Supporting this feature for earlier broker releases would require
+writing and maintaining custom leadership election and membership / health
+check code (perhaps using zookeeper or consul). For older brokers, you can
+achieve something similar by manually assigning different partitions to each
+consumer instance with config management tools like chef, ansible, etc. This
+approach will work fine, though it does not support rebalancing on failures.
+See <https://kafka-python.readthedocs.io/en/master/compatibility.html>
+for more details.
+Please note that the master branch may contain unreleased features. For release
+documentation, please see readthedocs and/or python's inline help.
+>>> pip install kafka-python
+KafkaConsumer
+*************
+KafkaConsumer is a high-level message consumer, intended to operate as similarly
+as possible to the official java client. Full support for coordinated
+consumer groups requires use of kafka brokers that support the Group APIs: kafka v0.9+.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html>
+for API and configuration details.
+The consumer iterator returns ConsumerRecords, which are simple namedtuples
+that expose basic message attributes: topic, partition, offset, key, and value:
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic')
+>>> for msg in consumer:
+...     print(msg)
+>>> # join a consumer group for dynamic partition assignment and offset commits
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic', group_id='my_favorite_group')
+>>> for msg in consumer:
+...     print(msg)
+>>> # manually assign the partition list for the consumer
+>>> from kafka import TopicPartition
+>>> consumer = KafkaConsumer(bootstrap_servers='localhost:1234')
+>>> consumer.assign([TopicPartition('foobar', 2)])
+>>> msg = next(consumer)
+>>> # Deserialize msgpack-encoded values
+>>> import msgpack
+>>> consumer = KafkaConsumer(value_deserializer=msgpack.loads)
+>>> consumer.subscribe(['msgpackfoo'])
+>>> for msg in consumer:
+...     assert isinstance(msg.value, dict)
+>>> # Access record headers. The returned value is a list of tuples
+>>> # with str, bytes for key and value
+>>> for msg in consumer:
+...     print(msg.headers)
+>>> # Get consumer metrics
+>>> metrics = consumer.metrics()
+KafkaProducer
+*************
+KafkaProducer is a high-level, asynchronous message producer. The class is
+intended to operate as similarly as possible to the official java client.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html>
+for more details.
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(bootstrap_servers='localhost:1234')
+>>> for _ in range(100):
+...     producer.send('foobar', b'some_message_bytes')
+>>> # Block until a single message is sent (or timeout)
+>>> future = producer.send('foobar', b'another_message')
+>>> result = future.get(timeout=60)
+>>> # Block until all pending messages are at least put on the network
+>>> # NOTE: This does not guarantee delivery or success! It is really
+>>> # only useful if you configure internal batching using linger_ms
+>>> producer.flush()
+>>> # Use a key for hashed-partitioning
+>>> producer.send('foobar', key=b'foo', value=b'bar')
+>>> # Serialize json messages
+>>> import json
+>>> producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'))
+>>> producer.send('fizzbuzz', {'foo': 'bar'})
+>>> # Serialize string keys
+>>> producer = KafkaProducer(key_serializer=str.encode)
+>>> producer.send('flipflap', key='ping', value=b'1234')
+>>> # Compress messages
+>>> producer = KafkaProducer(compression_type='gzip')
+>>> for i in range(1000):
+...     producer.send('foobar', b'msg %d' % i)
+>>> # Include record headers. The format is list of tuples with string key
+>>> # and bytes value.
+>>> producer.send('foobar', value=b'c29tZSB2YWx1ZQ==', headers=[('content-encoding', b'base64')])
+>>> # Get producer performance metrics
+>>> metrics = producer.metrics()
+Thread safety
+*************
+The KafkaProducer can be used across threads without issue, unlike the
+KafkaConsumer which cannot.
+While it is possible to use the KafkaConsumer in a thread-local manner,
+multiprocessing is recommended.
+Compression
+***********
+kafka-python supports gzip compression/decompression natively. To produce or consume lz4
+compressed messages, you should install python-lz4 (pip install lz4).
+To enable snappy compression/decompression install python-snappy (also requires snappy library).
+See <https://kafka-python.readthedocs.io/en/master/install.html#optional-snappy-install>
+for more information.
+Optimized CRC32 Validation
+**************************
+Kafka uses CRC32 checksums to validate messages. kafka-python includes a pure
+python implementation for compatibility. To improve performance for high-throughput
+applications, kafka-python will use `crc32c` for optimized native code if installed.
+See https://pypi.org/project/crc32c/
+Protocol
+********
+A secondary goal of kafka-python is to provide an easy-to-use protocol layer
+for interacting with kafka brokers via the python repl. This is useful for
+testing, probing, and general experimentation. The protocol support is
+leveraged to enable a KafkaClient.check_version() method that
+probes a kafka broker and attempts to identify which version it is running
+(0.8.0 to 2.4+).
+
+%package help
+Summary: Development documents and examples for kafka-python
+Provides: python3-kafka-python-doc
+%description help
+Python client for the Apache Kafka distributed stream processing system.
+kafka-python is designed to function much like the official java client, with a
+sprinkling of pythonic interfaces (e.g., consumer iterators).
+kafka-python is best used with newer brokers (0.9+), but is backwards-compatible with
+older versions (to 0.8.0). Some features will only be enabled on newer brokers.
+For example, fully coordinated consumer groups -- i.e., dynamic partition
+assignment to multiple consumers in the same group -- requires use of 0.9+ kafka
+brokers. Supporting this feature for earlier broker releases would require
+writing and maintaining custom leadership election and membership / health
+check code (perhaps using zookeeper or consul). For older brokers, you can
+achieve something similar by manually assigning different partitions to each
+consumer instance with config management tools like chef, ansible, etc. This
+approach will work fine, though it does not support rebalancing on failures.
+See <https://kafka-python.readthedocs.io/en/master/compatibility.html>
+for more details.
+Please note that the master branch may contain unreleased features. For release
+documentation, please see readthedocs and/or python's inline help.
+>>> pip install kafka-python
+KafkaConsumer
+*************
+KafkaConsumer is a high-level message consumer, intended to operate as similarly
+as possible to the official java client. Full support for coordinated
+consumer groups requires use of kafka brokers that support the Group APIs: kafka v0.9+.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html>
+for API and configuration details.
+The consumer iterator returns ConsumerRecords, which are simple namedtuples
+that expose basic message attributes: topic, partition, offset, key, and value:
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic')
+>>> for msg in consumer:
+...     print(msg)
+>>> # join a consumer group for dynamic partition assignment and offset commits
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic', group_id='my_favorite_group')
+>>> for msg in consumer:
+...     print(msg)
+>>> # manually assign the partition list for the consumer
+>>> from kafka import TopicPartition
+>>> consumer = KafkaConsumer(bootstrap_servers='localhost:1234')
+>>> consumer.assign([TopicPartition('foobar', 2)])
+>>> msg = next(consumer)
+>>> # Deserialize msgpack-encoded values
+>>> import msgpack
+>>> consumer = KafkaConsumer(value_deserializer=msgpack.loads)
+>>> consumer.subscribe(['msgpackfoo'])
+>>> for msg in consumer:
+...     assert isinstance(msg.value, dict)
+>>> # Access record headers. The returned value is a list of tuples
+>>> # with str, bytes for key and value
+>>> for msg in consumer:
+...     print(msg.headers)
+>>> # Get consumer metrics
+>>> metrics = consumer.metrics()
+KafkaProducer
+*************
+KafkaProducer is a high-level, asynchronous message producer. The class is
+intended to operate as similarly as possible to the official java client.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html>
+for more details.
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(bootstrap_servers='localhost:1234')
+>>> for _ in range(100):
+...     producer.send('foobar', b'some_message_bytes')
+>>> # Block until a single message is sent (or timeout)
+>>> future = producer.send('foobar', b'another_message')
+>>> result = future.get(timeout=60)
+>>> # Block until all pending messages are at least put on the network
+>>> # NOTE: This does not guarantee delivery or success! It is really
+>>> # only useful if you configure internal batching using linger_ms
+>>> producer.flush()
+>>> # Use a key for hashed-partitioning
+>>> producer.send('foobar', key=b'foo', value=b'bar')
+>>> # Serialize json messages
+>>> import json
+>>> producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'))
+>>> producer.send('fizzbuzz', {'foo': 'bar'})
+>>> # Serialize string keys
+>>> producer = KafkaProducer(key_serializer=str.encode)
+>>> producer.send('flipflap', key='ping', value=b'1234')
+>>> # Compress messages
+>>> producer = KafkaProducer(compression_type='gzip')
+>>> for i in range(1000):
+...     producer.send('foobar', b'msg %d' % i)
+>>> # Include record headers. The format is list of tuples with string key
+>>> # and bytes value.
+>>> producer.send('foobar', value=b'c29tZSB2YWx1ZQ==', headers=[('content-encoding', b'base64')])
+>>> # Get producer performance metrics
+>>> metrics = producer.metrics()
+Thread safety
+*************
+The KafkaProducer can be used across threads without issue, unlike the
+KafkaConsumer which cannot.
+While it is possible to use the KafkaConsumer in a thread-local manner,
+multiprocessing is recommended.
+Compression
+***********
+kafka-python supports gzip compression/decompression natively. To produce or consume lz4
+compressed messages, you should install python-lz4 (pip install lz4).
+To enable snappy compression/decompression install python-snappy (also requires snappy library).
+See <https://kafka-python.readthedocs.io/en/master/install.html#optional-snappy-install>
+for more information.
+Optimized CRC32 Validation
+**************************
+Kafka uses CRC32 checksums to validate messages. kafka-python includes a pure
+python implementation for compatibility. To improve performance for high-throughput
+applications, kafka-python will use `crc32c` for optimized native code if installed.
+See https://pypi.org/project/crc32c/
+Protocol
+********
+A secondary goal of kafka-python is to provide an easy-to-use protocol layer
+for interacting with kafka brokers via the python repl. This is useful for
+testing, probing, and general experimentation. The protocol support is
+leveraged to enable a KafkaClient.check_version() method that
+probes a kafka broker and attempts to identify which version it is running
+(0.8.0 to 2.4+).
+
+%prep
+%autosetup -n kafka-python-2.0.2
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
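+# collect everything installed under usr/ into filelist.lst, and man pages into
+# doclist.lst, for the %files sections below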
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-kafka-python -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Mon Apr 10 2023 Python_Bot <Python_Bot@openeuler.org> - 2.0.2-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..c85bd1f
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+7e34fc134934d05b43437ea404076537 kafka-python-2.0.2.tar.gz