author    CoprDistGit <infra@openeuler.org>    2023-06-20 08:19:11 +0000
committer CoprDistGit <infra@openeuler.org>    2023-06-20 08:19:11 +0000
commit    8a44372c1f71c3c2f6a918727f225cdbd772dbf0 (patch)
tree      c565f663bda6ff964b3af085f0a91ca648e6c823
parent    a9bce8194bf724283befd2807e3a402ed467ec33 (diff)
automatic import of python-actspotter (openeuler20.03)
-rw-r--r--    .gitignore                 1
-rw-r--r--    python-actspotter.spec     340
-rw-r--r--    sources                    1
3 files changed, 342 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..f6d48bf 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/actspotter-0.1.3.tar.gz
diff --git a/python-actspotter.spec b/python-actspotter.spec
new file mode 100644
index 0000000..3d74c35
--- /dev/null
+++ b/python-actspotter.spec
@@ -0,0 +1,340 @@
+%global _empty_manifest_terminate_build 0
+Name: python-actspotter
+Version: 0.1.3
+Release: 1
+Summary: Actspotter library for detecting activities
+License: GPL 3.0
+URL: https://github.com/aaad/actspotter
+Source0: https://mirrors.aliyun.com/pypi/web/packages/55/23/8a70229047d4c4d3b386656a97c54e9da03c1d43fe38ce708090fd36ec78/actspotter-0.1.3.tar.gz
+BuildArch: noarch
+
+Requires: python3-tensorflow
+
+%description
+`actspotter` is a library and TensorFlow model for detecting activities. It classifies body activities in images or videos. By design, the package is limited to images and videos that contain only one person.
+The following classes are available:
+- none
+- pull_up_up
+- pull_up_down
+- pull_up_none
+- push_up_up
+- push_up_down
+- push_up_none
+- sit_up_up
+- sit_up_down
+- sit_up_none
+The package is currently in early development.
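+The class names follow an ``<exercise>_<phase>`` pattern. As a minimal illustration (the helper below is hypothetical and not part of the upstream API), a class name can be split back into its exercise and phase:
+    # Hypothetical helper, assuming the "<exercise>_<phase>" naming convention above.
+    def split_class(name):
+        # "none" is the only class without an exercise prefix.
+        if name == "none":
+            return ("none", "none")
+        exercise, phase = name.rsplit("_", 1)
+        return (exercise, phase)
+
+    print(split_class("pull_up_down"))  # ('pull_up', 'down')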
+Future plans
+~~~~~~~~~~~~~
+Deployment of the TensorFlow model will be integrated soon. Currently the package can classify push-ups, sit-ups and pull-ups; kicks and other body activities will follow in future versions.
+A signal-processing layer is also planned, to make it easy to detect connected activities and count them.
+Another planned application is integration with keyboard drivers, so that activities (e.g. kicks) can be used to control video games.
+Installation
+~~~~~~~~~~~~
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.6
+Mac/Linux
+^^^^^^^^^
+ pip install virtualenv
+ virtualenv <your-env>
+ source <your-env>/bin/activate
+ <your-env>/bin/pip install actspotter
+Windows
+^^^^^^^
+ pip install virtualenv
+ virtualenv <your-env>
+ <your-env>\Scripts\activate
+ <your-env>\Scripts\pip.exe install actspotter
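+As an alternative (not part of the upstream instructions), the interpreter's built-in ``venv`` module provides the same isolation on Mac/Linux:
+    python3 -m venv <your-env>
+    source <your-env>/bin/activate
+    <your-env>/bin/pip install actspotter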
+Example Usage
+~~~~~~~~~~~~~
+Requirement: the cv2 module (OpenCV) must be installed.
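+If the cv2 module is not already available, the OpenCV Python bindings can usually be installed from PyPI (this step is not in the upstream README):
+    pip install opencv-python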
+Classification of images:
+    import cv2
+    import tensorflow as tf
+
+    from actspotter import ImageClassifier, classify_image_input_dimension, class_names
+
+    def _resize(frame, dim=classify_image_input_dimension):
+        # Scale the frame to the input dimension expected by the model.
+        return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
+
+    def _to_tf_array(frame, dim=classify_image_input_dimension):
+        # Resize, convert BGR (the OpenCV default) to RGB and wrap as a float32 tensor.
+        frame = _resize(frame, dim)
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = tf.convert_to_tensor(frame, dtype=tf.float32)
+        return frame
+
+    # Classifier instance (constructed with defaults; the original snippet assumes it exists).
+    image_classifier = ImageClassifier()
+
+    images = [
+        _to_tf_array(cv2.imread("test.jpg")),
+    ]
+
+    print(class_names)
+    print(image_classifier.classify_images(images))
+Classification of a video:
+    import cv2
+    import tensorflow as tf
+
+    from actspotter import VideoClassifier, classify_image_input_dimension
+
+    def _resize(frame, dim=classify_image_input_dimension):
+        # Scale the frame to the requested dimension.
+        return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
+
+    def _to_tf_array(frame, dim=classify_image_input_dimension):
+        # Resize, convert BGR (the OpenCV default) to RGB and wrap as a float32 tensor.
+        frame = _resize(frame, dim)
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = tf.convert_to_tensor(frame, dtype=tf.float32)
+        return frame
+
+    cap = cv2.VideoCapture(0)  # first connected camera
+
+    video_classifier = VideoClassifier(buffer_size=4)
+    video_classifier.start()
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        video_classifier.add_image(_to_tf_array(frame))
+        state = video_classifier.get_last_classification()
+        print(state)
+
+        # Show the current frame with the classification state overlaid.
+        frame = _resize(frame, dim=(600, 600))
+        cv2.putText(frame, f"{state}", (10, 40), 0, 2, 255)
+        cv2.imshow("Frame", frame)
+        if cv2.waitKey(25) & 0xFF == ord("q"):
+            break
+
+    video_classifier.exit()
+    cap.release()
+    cv2.destroyAllWindows()
+
+%package -n python3-actspotter
+Summary: Actspotter library for detecting activities
+Provides: python-actspotter
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-actspotter
+`actspotter` is a library and TensorFlow model for detecting activities. It classifies body activities in images or videos. By design, the package is limited to images and videos that contain only one person.
+The following classes are available:
+- none
+- pull_up_up
+- pull_up_down
+- pull_up_none
+- push_up_up
+- push_up_down
+- push_up_none
+- sit_up_up
+- sit_up_down
+- sit_up_none
+The package is currently in early development.
+Future plans
+~~~~~~~~~~~~~
+Deployment of the TensorFlow model will be integrated soon. Currently the package can classify push-ups, sit-ups and pull-ups; kicks and other body activities will follow in future versions.
+A signal-processing layer is also planned, to make it easy to detect connected activities and count them.
+Another planned application is integration with keyboard drivers, so that activities (e.g. kicks) can be used to control video games.
+Installation
+~~~~~~~~~~~~
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.6
+Mac/Linux
+^^^^^^^^^
+ pip install virtualenv
+ virtualenv <your-env>
+ source <your-env>/bin/activate
+ <your-env>/bin/pip install actspotter
+Windows
+^^^^^^^
+ pip install virtualenv
+ virtualenv <your-env>
+ <your-env>\Scripts\activate
+ <your-env>\Scripts\pip.exe install actspotter
+Example Usage
+~~~~~~~~~~~~~
+Requirement: the cv2 module (OpenCV) must be installed.
+Classification of images:
+    import cv2
+    import tensorflow as tf
+
+    from actspotter import ImageClassifier, classify_image_input_dimension, class_names
+
+    def _resize(frame, dim=classify_image_input_dimension):
+        # Scale the frame to the input dimension expected by the model.
+        return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
+
+    def _to_tf_array(frame, dim=classify_image_input_dimension):
+        # Resize, convert BGR (the OpenCV default) to RGB and wrap as a float32 tensor.
+        frame = _resize(frame, dim)
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = tf.convert_to_tensor(frame, dtype=tf.float32)
+        return frame
+
+    # Classifier instance (constructed with defaults; the original snippet assumes it exists).
+    image_classifier = ImageClassifier()
+
+    images = [
+        _to_tf_array(cv2.imread("test.jpg")),
+    ]
+
+    print(class_names)
+    print(image_classifier.classify_images(images))
+Classification of a video:
+    import cv2
+    import tensorflow as tf
+
+    from actspotter import VideoClassifier, classify_image_input_dimension
+
+    def _resize(frame, dim=classify_image_input_dimension):
+        # Scale the frame to the requested dimension.
+        return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
+
+    def _to_tf_array(frame, dim=classify_image_input_dimension):
+        # Resize, convert BGR (the OpenCV default) to RGB and wrap as a float32 tensor.
+        frame = _resize(frame, dim)
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = tf.convert_to_tensor(frame, dtype=tf.float32)
+        return frame
+
+    cap = cv2.VideoCapture(0)  # first connected camera
+
+    video_classifier = VideoClassifier(buffer_size=4)
+    video_classifier.start()
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        video_classifier.add_image(_to_tf_array(frame))
+        state = video_classifier.get_last_classification()
+        print(state)
+
+        # Show the current frame with the classification state overlaid.
+        frame = _resize(frame, dim=(600, 600))
+        cv2.putText(frame, f"{state}", (10, 40), 0, 2, 255)
+        cv2.imshow("Frame", frame)
+        if cv2.waitKey(25) & 0xFF == ord("q"):
+            break
+
+    video_classifier.exit()
+    cap.release()
+    cv2.destroyAllWindows()
+
+%package help
+Summary: Development documents and examples for actspotter
+Provides: python3-actspotter-doc
+%description help
+`actspotter` is a library and TensorFlow model for detecting activities. It classifies body activities in images or videos. By design, the package is limited to images and videos that contain only one person.
+The following classes are available:
+- none
+- pull_up_up
+- pull_up_down
+- pull_up_none
+- push_up_up
+- push_up_down
+- push_up_none
+- sit_up_up
+- sit_up_down
+- sit_up_none
+The package is currently in early development.
+Future plans
+~~~~~~~~~~~~~
+Deployment of the TensorFlow model will be integrated soon. Currently the package can classify push-ups, sit-ups and pull-ups; kicks and other body activities will follow in future versions.
+A signal-processing layer is also planned, to make it easy to detect connected activities and count them.
+Another planned application is integration with keyboard drivers, so that activities (e.g. kicks) can be used to control video games.
+Installation
+~~~~~~~~~~~~
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+Supported Python Versions
+^^^^^^^^^^^^^^^^^^^^^^^^^
+Python >= 3.6
+Mac/Linux
+^^^^^^^^^
+ pip install virtualenv
+ virtualenv <your-env>
+ source <your-env>/bin/activate
+ <your-env>/bin/pip install actspotter
+Windows
+^^^^^^^
+ pip install virtualenv
+ virtualenv <your-env>
+ <your-env>\Scripts\activate
+ <your-env>\Scripts\pip.exe install actspotter
+Example Usage
+~~~~~~~~~~~~~
+Requirement: the cv2 module (OpenCV) must be installed.
+Classification of images:
+    import cv2
+    import tensorflow as tf
+
+    from actspotter import ImageClassifier, classify_image_input_dimension, class_names
+
+    def _resize(frame, dim=classify_image_input_dimension):
+        # Scale the frame to the input dimension expected by the model.
+        return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
+
+    def _to_tf_array(frame, dim=classify_image_input_dimension):
+        # Resize, convert BGR (the OpenCV default) to RGB and wrap as a float32 tensor.
+        frame = _resize(frame, dim)
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = tf.convert_to_tensor(frame, dtype=tf.float32)
+        return frame
+
+    # Classifier instance (constructed with defaults; the original snippet assumes it exists).
+    image_classifier = ImageClassifier()
+
+    images = [
+        _to_tf_array(cv2.imread("test.jpg")),
+    ]
+
+    print(class_names)
+    print(image_classifier.classify_images(images))
+Classification of a video:
+    import cv2
+    import tensorflow as tf
+
+    from actspotter import VideoClassifier, classify_image_input_dimension
+
+    def _resize(frame, dim=classify_image_input_dimension):
+        # Scale the frame to the requested dimension.
+        return cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
+
+    def _to_tf_array(frame, dim=classify_image_input_dimension):
+        # Resize, convert BGR (the OpenCV default) to RGB and wrap as a float32 tensor.
+        frame = _resize(frame, dim)
+        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame = tf.convert_to_tensor(frame, dtype=tf.float32)
+        return frame
+
+    cap = cv2.VideoCapture(0)  # first connected camera
+
+    video_classifier = VideoClassifier(buffer_size=4)
+    video_classifier.start()
+
+    while cap.isOpened():
+        ret, frame = cap.read()
+        if not ret:
+            break
+        video_classifier.add_image(_to_tf_array(frame))
+        state = video_classifier.get_last_classification()
+        print(state)
+
+        # Show the current frame with the classification state overlaid.
+        frame = _resize(frame, dim=(600, 600))
+        cv2.putText(frame, f"{state}", (10, 40), 0, 2, 255)
+        cv2.imshow("Frame", frame)
+        if cv2.waitKey(25) & 0xFF == ord("q"):
+            break
+
+    video_classifier.exit()
+    cap.release()
+    cv2.destroyAllWindows()
+
+%prep
+%autosetup -n actspotter-0.1.3
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "\"/%h/%f\"\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "\"/%h/%f.gz\"\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-actspotter -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Tue Jun 20 2023 Python_Bot <Python_Bot@openeuler.org> - 0.1.3-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..5082f48
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+6da89d8e99c622c0e6338cf8a64e133d actspotter-0.1.3.tar.gz