author    CoprDistGit <infra@openeuler.org>  2023-04-10 14:26:18 +0000
committer CoprDistGit <infra@openeuler.org>  2023-04-10 14:26:18 +0000
commit    159f4a259a51027180043d87272863f81dbb9d19 (patch)
tree      82dc2d0ff404856816bdf102d4ee7e1e3fb9aaff
parent    63b982274e9714c9ea474b9d977205f6b3c8e621 (diff)
automatic import of python-kafka
-rw-r--r--  .gitignore           1
-rw-r--r--  python-kafka.spec  360
-rw-r--r--  sources              1
3 files changed, 362 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..0e30db1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/kafka-1.3.5.tar.gz
diff --git a/python-kafka.spec b/python-kafka.spec
new file mode 100644
index 0000000..9a47520
--- /dev/null
+++ b/python-kafka.spec
@@ -0,0 +1,360 @@
+%global _empty_manifest_terminate_build 0
+Name: python-kafka
+Version: 1.3.5
+Release: 1
+Summary: Pure Python client for Apache Kafka
+License: Apache License 2.0
+URL: https://github.com/dpkp/kafka-python
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/3b/1b/44605e699e0970a2be3d7135d185f95e8605399aa0f2a9d64de342eae4b7/kafka-1.3.5.tar.gz
+BuildArch: noarch
+
+
+%description
+Python client for the Apache Kafka distributed stream processing system.
+kafka-python is designed to function much like the official java client, with a
+sprinkling of pythonic interfaces (e.g., consumer iterators).
+kafka-python is best used with newer brokers (0.9+), but is backwards-compatible with
+older versions (to 0.8.0). Some features will only be enabled on newer brokers.
+For example, fully coordinated consumer groups -- i.e., dynamic partition
+assignment to multiple consumers in the same group -- requires use of 0.9+ kafka
+brokers. Supporting this feature for earlier broker releases would require
+writing and maintaining custom leadership election and membership / health
+check code (perhaps using zookeeper or consul). For older brokers, you can
+achieve something similar by manually assigning different partitions to each
+consumer instance with config management tools like chef, ansible, etc. This
+approach will work fine, though it does not support rebalancing on failures.
+See <https://kafka-python.readthedocs.io/en/master/compatibility.html>
+for more details.
+Please note that the master branch may contain unreleased features. For release
+documentation, please see readthedocs and/or python's inline help.
+>>> pip install kafka
+KafkaConsumer
+*************
+KafkaConsumer is a high-level message consumer, intended to operate as similarly
+as possible to the official java client. Full support for coordinated
+consumer groups requires use of kafka brokers that support the Group APIs: kafka v0.9+.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html>
+for API and configuration details.
+The consumer iterator returns ConsumerRecords, which are simple namedtuples
+that expose basic message attributes: topic, partition, offset, key, and value:
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic')
+>>> for msg in consumer:
+...     print(msg)
+>>> # join a consumer group for dynamic partition assignment and offset commits
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic', group_id='my_favorite_group')
+>>> for msg in consumer:
+...     print(msg)
+>>> # manually assign the partition list for the consumer
+>>> from kafka import TopicPartition
+>>> consumer = KafkaConsumer(bootstrap_servers='localhost:1234')
+>>> consumer.assign([TopicPartition('foobar', 2)])
+>>> msg = next(consumer)
+>>> # Deserialize msgpack-encoded values
+>>> import msgpack
+>>> consumer = KafkaConsumer(value_deserializer=msgpack.loads)
+>>> consumer.subscribe(['msgpackfoo'])
+>>> for msg in consumer:
+...     assert isinstance(msg.value, dict)
+KafkaProducer
+*************
+KafkaProducer is a high-level, asynchronous message producer. The class is
+intended to operate as similarly as possible to the official java client.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html>
+for more details.
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(bootstrap_servers='localhost:1234')
+>>> for _ in range(100):
+...     producer.send('foobar', b'some_message_bytes')
+>>> # Block until a single message is sent (or timeout)
+>>> future = producer.send('foobar', b'another_message')
+>>> result = future.get(timeout=60)
+>>> # Block until all pending messages are at least put on the network
+>>> # NOTE: This does not guarantee delivery or success! It is really
+>>> # only useful if you configure internal batching using linger_ms
+>>> producer.flush()
+>>> # Use a key for hashed-partitioning
+>>> producer.send('foobar', key=b'foo', value=b'bar')
+>>> # Serialize json messages
+>>> import json
+>>> producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'))
+>>> producer.send('fizzbuzz', {'foo': 'bar'})
+>>> # Serialize string keys
+>>> producer = KafkaProducer(key_serializer=str.encode)
+>>> producer.send('flipflap', key='ping', value=b'1234')
+>>> # Compress messages
+>>> producer = KafkaProducer(compression_type='gzip')
+>>> for i in range(1000):
+...     producer.send('foobar', b'msg %d' % i)
+Thread safety
+*************
+The KafkaProducer can be used across threads without issue, unlike the
+KafkaConsumer which cannot.
+While it is possible to use the KafkaConsumer in a thread-local manner,
+multiprocessing is recommended.
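+As a rough illustration (not taken from the upstream docs; broker address and
+topic name are placeholders), a single producer can be shared by several
+threads, while consumers are kept one per thread or one per process:
+>>> # Hedged sketch: one KafkaProducer instance shared across worker threads
+>>> import threading
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(bootstrap_servers='localhost:9092')
+>>> def publish(n):
+...     producer.send('shared_topic', ('message %d' % n).encode('utf-8'))
+>>> workers = [threading.Thread(target=publish, args=(i,)) for i in range(4)]
+>>> for w in workers:
+...     w.start()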
+Compression
+***********
+kafka-python supports gzip compression/decompression natively. To produce or consume lz4
+compressed messages, you should install python-lz4 (pip install lz4).
+To enable snappy compression/decompression install python-snappy (also requires snappy library).
+See <https://kafka-python.readthedocs.io/en/master/install.html#optional-snappy-install>
+for more information.
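+For illustration (assuming the optional packages above are installed; the
+topic name is a placeholder), the codec is selected with a single constructor
+argument:
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(compression_type='lz4')     # needs python-lz4
+>>> producer = KafkaProducer(compression_type='snappy')  # needs python-snappy
+>>> producer.send('compressed_topic', b'compressed payload')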
+Protocol
+********
+A secondary goal of kafka-python is to provide an easy-to-use protocol layer
+for interacting with kafka brokers via the python repl. This is useful for
+testing, probing, and general experimentation. The protocol support is
+leveraged to enable a KafkaClient.check_version() method that
+probes a kafka broker and attempts to identify which version it is running
+(0.8.0 to 0.11).
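+A minimal sketch of version probing (the import path of the asynchronous
+client may differ between releases, and the broker address is a placeholder):
+>>> from kafka.client_async import KafkaClient
+>>> client = KafkaClient(bootstrap_servers='localhost:9092')
+>>> client.check_version()  # returns a version tuple, e.g. (0, 10)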
+Low-level
+*********
+Legacy support is maintained for low-level consumer and producer classes,
+SimpleConsumer and SimpleProducer. See
+<https://kafka-python.readthedocs.io/en/master/simple.html?highlight=SimpleProducer> for API details.
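+A minimal sketch of the legacy interface (broker address and topic name are
+placeholders):
+>>> from kafka import SimpleClient, SimpleProducer, SimpleConsumer
+>>> client = SimpleClient('localhost:9092')
+>>> producer = SimpleProducer(client)
+>>> producer.send_messages('legacy_topic', b'legacy message')
+>>> consumer = SimpleConsumer(client, 'my_group', 'legacy_topic')
+>>> for msg in consumer:
+...     print(msg)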
+
+%package -n python3-kafka
+Summary: Pure Python client for Apache Kafka
+Provides: python-kafka
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-kafka
+Python client for the Apache Kafka distributed stream processing system.
+kafka-python is designed to function much like the official java client, with a
+sprinkling of pythonic interfaces (e.g., consumer iterators).
+kafka-python is best used with newer brokers (0.9+), but is backwards-compatible with
+older versions (to 0.8.0). Some features will only be enabled on newer brokers.
+For example, fully coordinated consumer groups -- i.e., dynamic partition
+assignment to multiple consumers in the same group -- requires use of 0.9+ kafka
+brokers. Supporting this feature for earlier broker releases would require
+writing and maintaining custom leadership election and membership / health
+check code (perhaps using zookeeper or consul). For older brokers, you can
+achieve something similar by manually assigning different partitions to each
+consumer instance with config management tools like chef, ansible, etc. This
+approach will work fine, though it does not support rebalancing on failures.
+See <https://kafka-python.readthedocs.io/en/master/compatibility.html>
+for more details.
+Please note that the master branch may contain unreleased features. For release
+documentation, please see readthedocs and/or python's inline help.
+>>> pip install kafka
+KafkaConsumer
+*************
+KafkaConsumer is a high-level message consumer, intended to operate as similarly
+as possible to the official java client. Full support for coordinated
+consumer groups requires use of kafka brokers that support the Group APIs: kafka v0.9+.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html>
+for API and configuration details.
+The consumer iterator returns ConsumerRecords, which are simple namedtuples
+that expose basic message attributes: topic, partition, offset, key, and value:
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic')
+>>> for msg in consumer:
+...     print(msg)
+>>> # join a consumer group for dynamic partition assignment and offset commits
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic', group_id='my_favorite_group')
+>>> for msg in consumer:
+...     print(msg)
+>>> # manually assign the partition list for the consumer
+>>> from kafka import TopicPartition
+>>> consumer = KafkaConsumer(bootstrap_servers='localhost:1234')
+>>> consumer.assign([TopicPartition('foobar', 2)])
+>>> msg = next(consumer)
+>>> # Deserialize msgpack-encoded values
+>>> import msgpack
+>>> consumer = KafkaConsumer(value_deserializer=msgpack.loads)
+>>> consumer.subscribe(['msgpackfoo'])
+>>> for msg in consumer:
+...     assert isinstance(msg.value, dict)
+KafkaProducer
+*************
+KafkaProducer is a high-level, asynchronous message producer. The class is
+intended to operate as similarly as possible to the official java client.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html>
+for more details.
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(bootstrap_servers='localhost:1234')
+>>> for _ in range(100):
+...     producer.send('foobar', b'some_message_bytes')
+>>> # Block until a single message is sent (or timeout)
+>>> future = producer.send('foobar', b'another_message')
+>>> result = future.get(timeout=60)
+>>> # Block until all pending messages are at least put on the network
+>>> # NOTE: This does not guarantee delivery or success! It is really
+>>> # only useful if you configure internal batching using linger_ms
+>>> producer.flush()
+>>> # Use a key for hashed-partitioning
+>>> producer.send('foobar', key=b'foo', value=b'bar')
+>>> # Serialize json messages
+>>> import json
+>>> producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'))
+>>> producer.send('fizzbuzz', {'foo': 'bar'})
+>>> # Serialize string keys
+>>> producer = KafkaProducer(key_serializer=str.encode)
+>>> producer.send('flipflap', key='ping', value=b'1234')
+>>> # Compress messages
+>>> producer = KafkaProducer(compression_type='gzip')
+>>> for i in range(1000):
+...     producer.send('foobar', b'msg %d' % i)
+Thread safety
+*************
+The KafkaProducer can be used across threads without issue, unlike the
+KafkaConsumer which cannot.
+While it is possible to use the KafkaConsumer in a thread-local manner,
+multiprocessing is recommended.
+Compression
+***********
+kafka-python supports gzip compression/decompression natively. To produce or consume lz4
+compressed messages, you should install python-lz4 (pip install lz4).
+To enable snappy compression/decompression install python-snappy (also requires snappy library).
+See <https://kafka-python.readthedocs.io/en/master/install.html#optional-snappy-install>
+for more information.
+Protocol
+********
+A secondary goal of kafka-python is to provide an easy-to-use protocol layer
+for interacting with kafka brokers via the python repl. This is useful for
+testing, probing, and general experimentation. The protocol support is
+leveraged to enable a KafkaClient.check_version() method that
+probes a kafka broker and attempts to identify which version it is running
+(0.8.0 to 0.11).
+Low-level
+*********
+Legacy support is maintained for low-level consumer and producer classes,
+SimpleConsumer and SimpleProducer. See
+<https://kafka-python.readthedocs.io/en/master/simple.html?highlight=SimpleProducer> for API details.
+
+%package help
+Summary: Development documents and examples for kafka
+Provides: python3-kafka-doc
+%description help
+Python client for the Apache Kafka distributed stream processing system.
+kafka-python is designed to function much like the official java client, with a
+sprinkling of pythonic interfaces (e.g., consumer iterators).
+kafka-python is best used with newer brokers (0.9+), but is backwards-compatible with
+older versions (to 0.8.0). Some features will only be enabled on newer brokers.
+For example, fully coordinated consumer groups -- i.e., dynamic partition
+assignment to multiple consumers in the same group -- requires use of 0.9+ kafka
+brokers. Supporting this feature for earlier broker releases would require
+writing and maintaining custom leadership election and membership / health
+check code (perhaps using zookeeper or consul). For older brokers, you can
+achieve something similar by manually assigning different partitions to each
+consumer instance with config management tools like chef, ansible, etc. This
+approach will work fine, though it does not support rebalancing on failures.
+See <https://kafka-python.readthedocs.io/en/master/compatibility.html>
+for more details.
+Please note that the master branch may contain unreleased features. For release
+documentation, please see readthedocs and/or python's inline help.
+>>> pip install kafka
+KafkaConsumer
+*************
+KafkaConsumer is a high-level message consumer, intended to operate as similarly
+as possible to the official java client. Full support for coordinated
+consumer groups requires use of kafka brokers that support the Group APIs: kafka v0.9+.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html>
+for API and configuration details.
+The consumer iterator returns ConsumerRecords, which are simple namedtuples
+that expose basic message attributes: topic, partition, offset, key, and value:
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic')
+>>> for msg in consumer:
+...     print(msg)
+>>> # join a consumer group for dynamic partition assignment and offset commits
+>>> from kafka import KafkaConsumer
+>>> consumer = KafkaConsumer('my_favorite_topic', group_id='my_favorite_group')
+>>> for msg in consumer:
+...     print(msg)
+>>> # manually assign the partition list for the consumer
+>>> from kafka import TopicPartition
+>>> consumer = KafkaConsumer(bootstrap_servers='localhost:1234')
+>>> consumer.assign([TopicPartition('foobar', 2)])
+>>> msg = next(consumer)
+>>> # Deserialize msgpack-encoded values
+>>> import msgpack
+>>> consumer = KafkaConsumer(value_deserializer=msgpack.loads)
+>>> consumer.subscribe(['msgpackfoo'])
+>>> for msg in consumer:
+...     assert isinstance(msg.value, dict)
+KafkaProducer
+*************
+KafkaProducer is a high-level, asynchronous message producer. The class is
+intended to operate as similarly as possible to the official java client.
+See <https://kafka-python.readthedocs.io/en/master/apidoc/KafkaProducer.html>
+for more details.
+>>> from kafka import KafkaProducer
+>>> producer = KafkaProducer(bootstrap_servers='localhost:1234')
+>>> for _ in range(100):
+...     producer.send('foobar', b'some_message_bytes')
+>>> # Block until a single message is sent (or timeout)
+>>> future = producer.send('foobar', b'another_message')
+>>> result = future.get(timeout=60)
+>>> # Block until all pending messages are at least put on the network
+>>> # NOTE: This does not guarantee delivery or success! It is really
+>>> # only useful if you configure internal batching using linger_ms
+>>> producer.flush()
+>>> # Use a key for hashed-partitioning
+>>> producer.send('foobar', key=b'foo', value=b'bar')
+>>> # Serialize json messages
+>>> import json
+>>> producer = KafkaProducer(value_serializer=lambda v: json.dumps(v).encode('utf-8'))
+>>> producer.send('fizzbuzz', {'foo': 'bar'})
+>>> # Serialize string keys
+>>> producer = KafkaProducer(key_serializer=str.encode)
+>>> producer.send('flipflap', key='ping', value=b'1234')
+>>> # Compress messages
+>>> producer = KafkaProducer(compression_type='gzip')
+>>> for i in range(1000):
+...     producer.send('foobar', b'msg %d' % i)
+Thread safety
+*************
+The KafkaProducer can be used across threads without issue, unlike the
+KafkaConsumer which cannot.
+While it is possible to use the KafkaConsumer in a thread-local manner,
+multiprocessing is recommended.
+Compression
+***********
+kafka-python supports gzip compression/decompression natively. To produce or consume lz4
+compressed messages, you should install python-lz4 (pip install lz4).
+To enable snappy compression/decompression install python-snappy (also requires snappy library).
+See <https://kafka-python.readthedocs.io/en/master/install.html#optional-snappy-install>
+for more information.
+Protocol
+********
+A secondary goal of kafka-python is to provide an easy-to-use protocol layer
+for interacting with kafka brokers via the python repl. This is useful for
+testing, probing, and general experimentation. The protocol support is
+leveraged to enable a KafkaClient.check_version() method that
+probes a kafka broker and attempts to identify which version it is running
+(0.8.0 to 0.11).
+Low-level
+*********
+Legacy support is maintained for low-level consumer and producer classes,
+SimpleConsumer and SimpleProducer. See
+<https://kafka-python.readthedocs.io/en/master/simple.html?highlight=SimpleProducer> for API details.
+
+%prep
+%autosetup -n kafka-1.3.5
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
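+# Record every installed file into filelist.lst, which the files sections below consume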
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-kafka -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Mon Apr 10 2023 Python_Bot <Python_Bot@openeuler.org> - 1.3.5-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..a955c35
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+ccea518f48ee50a301ac4611ac8f73ee kafka-1.3.5.tar.gz