summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCoprDistGit <infra@openeuler.org>2023-05-05 13:22:59 +0000
committerCoprDistGit <infra@openeuler.org>2023-05-05 13:22:59 +0000
commit6c55c48a0afa7d5054c8965fe91df15e01a17498 (patch)
tree58fe77193682c1b9ad9983a237e519dd60cca0cb
parenta155bab3feef28fc945fb5d1f61ec866b7a3b59c (diff)
automatic import of python-anarcuteopeneuler20.03
-rw-r--r--.gitignore1
-rw-r--r--python-anarcute.spec504
-rw-r--r--sources1
3 files changed, 506 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
index e69de29..0bffe00 100644
--- a/.gitignore
+++ b/.gitignore
@@ -0,0 +1 @@
+/anarcute-0.1.996.4.tar.gz
diff --git a/python-anarcute.spec b/python-anarcute.spec
new file mode 100644
index 0000000..bfe7823
--- /dev/null
+++ b/python-anarcute.spec
@@ -0,0 +1,504 @@
+%global _empty_manifest_terminate_build 0
+Name: python-anarcute
+Version: 0.1.996.4
+Release: 1
+Summary: My hy toolbox
+License: MIT
+URL: https://github.com/user/reponame
+Source0: https://mirrors.nju.edu.cn/pypi/web/packages/47/4f/849560fd90465b0adeb65d490ffe7e18ad06ffa0e3f6e85306628080f7fb/anarcute-0.1.996.4.tar.gz
+BuildArch: noarch
+
+
+%description
+My toolbox for dynamic programming
+
+#to be documented
+#Chapter: tf-idf
+
+from anarcute import *
+
+import requests, json
+
+sentence="Eat more of those french fries and drink cola"
+
+alice=requests.get("https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt").text
+
+print(tf_idf(sentence,alice))
+
+>> {'eat': 168.7962962962963, 'more': 62.006802721088434, 'of': 5.9111543450064845, 'those': 303.8333333333333, 'french': 759.5833333333333, 'and': 3.4843272171253816, 'drink': 434.047619047619}
+
+#If the text is too big, its frequencies can be pre-cached.
+
+filename="alice.json"
+
+vector=vectorize(alice)
+
+open(filename,"w+").write(json.dumps(vector))
+
+vector=json.load(open(filename,"r+"))
+
+print(tf_idf(sentence,vector))
+
+>>{'eat': 168.7962962962902, 'more': 62.00680272108618, 'of': 5.91115434500627, 'those': 303.8333333333223, 'french': 759.5833333333056, 'and': 3.484327217125255, 'drink': 434.0476190476033}
+
+
+
+#we can sort by value
+
+print(sort_by_value(tf_idf(sentence,vector)))
+
+>>{'french': 759.5833333332979, 'drink': 434.04761904759886, 'those': 303.8333333333192, 'eat': 168.7962962962885, 'more': 62.006802721085556, 'of': 5.911154345006209, 'and': 3.4843272171252204}
+
+
+#Chapter: Google
+
+#We have Google Translate and Google Custom Search Engine now
+
+key="MY_GOOGLE_KEY"
+
+gt=GT(key)
+
+gt.translate("pl","en","Jeszcze Polska nie zginęła, Kiedy my żyjemy. Co nam obca przemoc wzięła, Szablą odbierzemy.")
+
+>> {'data': {'translations': [{'translatedText': 'Poland is not dead yet, When we live. What foreign violence has taken from us, we will take away the Saber.'}]}}
+
+cx="MY_CUSTOM_SEARCH_ENGINE_KEY"
+
+gs=GS(cx,key)
+
+gs.search("krakauer sausage recipe")
+
+>> dict with search result, up to 10 items
+
+gs.items("krakauer sausage recipe")
+
+>> array of results, up to 100 items
+
+#Chapter: Multithreading
+
+#based on multithreading_on_dill library
+
+#let's reverse every string of Alice in Wonderland
+
+url="https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt"
+
+alice=requests.get(url).text
+
+alice_reversed=mapp(lambda s: str(s[::-1]),alice.split('\n'))
+
+#as you see we have no problem with lambda
+
+#by default the number of processes equals the number of CPUs, but you can make it bigger for highly async tasks or smaller to prevent overload
+
+alice_reversed=mapp(lambda s: str(s[::-1]),alice.split('\n'),processes=2)
+
+#decorator @timeit also included in the library
+
+@timeit
+
+def test(p=None):
+
+ r=mapp(lambda s: math.factorial(150*len(s)),alice.split('\n'),processes=p)
+
+ return None
+
+
+test()
+
+>> 'test' 2563.11 ms
+
+test(1)
+
+>> 'test' 5287.27 ms
+
+
+#multithreading filter
+
+alice_special=filterp(lambda s: "alice" in s.lower(),alice.split('\n'))
+
+#run one async function
+
+run(print,["A B C"])
+
+#you can wait for its result when you need to catch up
+
+p=run(lambda x: requests.get(x).text,url)
+
+some_other_stuff()
+
+p.join()
+
+#apply - function that executes functions. Used to run few different functions in one multithreading process
+
+r=mapp(apply,[lambda:requests.get("https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt").text,lambda: math.factorial(9000)])
+
+#Chapter predicates
+
+#in_or(a,b) - returns True if at least one element of array a is in array/string b
+
+a=["Some","important","array"]
+
+b=["Another","array"]
+
+in_or(a,b)
+
+>> True
+
+c=["Something", "Else"]
+
+in_or(a,c)
+
+>> False
+
+d="Some string"
+
+in_or(a,d)
+
+>> True
+
+%package -n python3-anarcute
+Summary: My hy toolbox
+Provides: python-anarcute
+BuildRequires: python3-devel
+BuildRequires: python3-setuptools
+BuildRequires: python3-pip
+%description -n python3-anarcute
+My toolbox for dynamic programming
+
+#to be documented
+#Chapter: tf-idf
+
+from anarcute import *
+
+import requests, json
+
+sentence="Eat more of those french fries and drink cola"
+
+alice=requests.get("https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt").text
+
+print(tf_idf(sentence,alice))
+
+>> {'eat': 168.7962962962963, 'more': 62.006802721088434, 'of': 5.9111543450064845, 'those': 303.8333333333333, 'french': 759.5833333333333, 'and': 3.4843272171253816, 'drink': 434.047619047619}
+
+#If the text is too big, its frequencies can be pre-cached.
+
+filename="alice.json"
+
+vector=vectorize(alice)
+
+open(filename,"w+").write(json.dumps(vector))
+
+vector=json.load(open(filename,"r+"))
+
+print(tf_idf(sentence,vector))
+
+>>{'eat': 168.7962962962902, 'more': 62.00680272108618, 'of': 5.91115434500627, 'those': 303.8333333333223, 'french': 759.5833333333056, 'and': 3.484327217125255, 'drink': 434.0476190476033}
+
+
+
+#we can sort by value
+
+print(sort_by_value(tf_idf(sentence,vector)))
+
+>>{'french': 759.5833333332979, 'drink': 434.04761904759886, 'those': 303.8333333333192, 'eat': 168.7962962962885, 'more': 62.006802721085556, 'of': 5.911154345006209, 'and': 3.4843272171252204}
+
+
+#Chapter: Google
+
+#We have Google Translate and Google Custom Search Engine now
+
+key="MY_GOOGLE_KEY"
+
+gt=GT(key)
+
+gt.translate("pl","en","Jeszcze Polska nie zginęła, Kiedy my żyjemy. Co nam obca przemoc wzięła, Szablą odbierzemy.")
+
+>> {'data': {'translations': [{'translatedText': 'Poland is not dead yet, When we live. What foreign violence has taken from us, we will take away the Saber.'}]}}
+
+cx="MY_CUSTOM_SEARCH_ENGINE_KEY"
+
+gs=GS(cx,key)
+
+gs.search("krakauer sausage recipe")
+
+>> dict with search result, up to 10 items
+
+gs.items("krakauer sausage recipe")
+
+>> array of results, up to 100 items
+
+#Chapter: Multithreading
+
+#based on multithreading_on_dill library
+
+#let's reverse every string of Alice in Wonderland
+
+url="https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt"
+
+alice=requests.get(url).text
+
+alice_reversed=mapp(lambda s: str(s[::-1]),alice.split('\n'))
+
+#as you see we have no problem with lambda
+
+#by default the number of processes equals the number of CPUs, but you can make it bigger for highly async tasks or smaller to prevent overload
+
+alice_reversed=mapp(lambda s: str(s[::-1]),alice.split('\n'),processes=2)
+
+#decorator @timeit also included in the library
+
+@timeit
+
+def test(p=None):
+
+ r=mapp(lambda s: math.factorial(150*len(s)),alice.split('\n'),processes=p)
+
+ return None
+
+
+test()
+
+>> 'test' 2563.11 ms
+
+test(1)
+
+>> 'test' 5287.27 ms
+
+
+#multithreading filter
+
+alice_special=filterp(lambda s: "alice" in s.lower(),alice.split('\n'))
+
+#run one async function
+
+run(print,["A B C"])
+
+#you can wait for its result when you need to catch up
+
+p=run(lambda x: requests.get(x).text,url)
+
+some_other_stuff()
+
+p.join()
+
+#apply - function that executes functions. Used to run few different functions in one multithreading process
+
+r=mapp(apply,[lambda:requests.get("https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt").text,lambda: math.factorial(9000)])
+
+#Chapter predicates
+
+#in_or(a,b) - returns True if at least one element of array a is in array/string b
+
+a=["Some","important","array"]
+
+b=["Another","array"]
+
+in_or(a,b)
+
+>> True
+
+c=["Something", "Else"]
+
+in_or(a,c)
+
+>> False
+
+d="Some string"
+
+in_or(a,d)
+
+>> True
+
+%package help
+Summary: Development documents and examples for anarcute
+Provides: python3-anarcute-doc
+%description help
+My toolbox for dynamic programming
+
+#to be documented
+#Chapter: tf-idf
+
+from anarcute import *
+
+import requests, json
+
+sentence="Eat more of those french fries and drink cola"
+
+alice=requests.get("https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt").text
+
+print(tf_idf(sentence,alice))
+
+>> {'eat': 168.7962962962963, 'more': 62.006802721088434, 'of': 5.9111543450064845, 'those': 303.8333333333333, 'french': 759.5833333333333, 'and': 3.4843272171253816, 'drink': 434.047619047619}
+
+#If the text is too big, its frequencies can be pre-cached.
+
+filename="alice.json"
+
+vector=vectorize(alice)
+
+open(filename,"w+").write(json.dumps(vector))
+
+vector=json.load(open(filename,"r+"))
+
+print(tf_idf(sentence,vector))
+
+>>{'eat': 168.7962962962902, 'more': 62.00680272108618, 'of': 5.91115434500627, 'those': 303.8333333333223, 'french': 759.5833333333056, 'and': 3.484327217125255, 'drink': 434.0476190476033}
+
+
+
+#we can sort by value
+
+print(sort_by_value(tf_idf(sentence,vector)))
+
+>>{'french': 759.5833333332979, 'drink': 434.04761904759886, 'those': 303.8333333333192, 'eat': 168.7962962962885, 'more': 62.006802721085556, 'of': 5.911154345006209, 'and': 3.4843272171252204}
+
+
+#Chapter: Google
+
+#We have Google Translate and Google Custom Search Engine now
+
+key="MY_GOOGLE_KEY"
+
+gt=GT(key)
+
+gt.translate("pl","en","Jeszcze Polska nie zginęła, Kiedy my żyjemy. Co nam obca przemoc wzięła, Szablą odbierzemy.")
+
+>> {'data': {'translations': [{'translatedText': 'Poland is not dead yet, When we live. What foreign violence has taken from us, we will take away the Saber.'}]}}
+
+cx="MY_CUSTOM_SEARCH_ENGINE_KEY"
+
+gs=GS(cx,key)
+
+gs.search("krakauer sausage recipe")
+
+>> dict with search result, up to 10 items
+
+gs.items("krakauer sausage recipe")
+
+>> array of results, up to 100 items
+
+#Chapter: Multithreading
+
+#based on multithreading_on_dill library
+
+#let's reverse every string of Alice in Wonderland
+
+url="https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt"
+
+alice=requests.get(url).text
+
+alice_reversed=mapp(lambda s: str(s[::-1]),alice.split('\n'))
+
+#as you see we have no problem with lambda
+
+#by default the number of processes equals the number of CPUs, but you can make it bigger for highly async tasks or smaller to prevent overload
+
+alice_reversed=mapp(lambda s: str(s[::-1]),alice.split('\n'),processes=2)
+
+#decorator @timeit also included in the library
+
+@timeit
+
+def test(p=None):
+
+ r=mapp(lambda s: math.factorial(150*len(s)),alice.split('\n'),processes=p)
+
+ return None
+
+
+test()
+
+>> 'test' 2563.11 ms
+
+test(1)
+
+>> 'test' 5287.27 ms
+
+
+#multithreading filter
+
+alice_special=filterp(lambda s: "alice" in s.lower(),alice.split('\n'))
+
+#run one async function
+
+run(print,["A B C"])
+
+#you can wait for its result when you need to catch up
+
+p=run(lambda x: requests.get(x).text,url)
+
+some_other_stuff()
+
+p.join()
+
+#apply - function that executes functions. Used to run few different functions in one multithreading process
+
+r=mapp(apply,[lambda:requests.get("https://gist.githubusercontent.com/phillipj/4944029/raw/75ba2243dd5ec2875f629bf5d79f6c1e4b5a8b46/alice_in_wonderland.txt").text,lambda: math.factorial(9000)])
+
+#Chapter predicates
+
+#in_or(a,b) - returns True if at least one element of array a is in array/string b
+
+a=["Some","important","array"]
+
+b=["Another","array"]
+
+in_or(a,b)
+
+>> True
+
+c=["Something", "Else"]
+
+in_or(a,c)
+
+>> False
+
+d="Some string"
+
+in_or(a,d)
+
+>> True
+
+%prep
+%autosetup -n anarcute-0.1.996.4
+
+%build
+%py3_build
+
+%install
+%py3_install
+install -d -m755 %{buildroot}/%{_pkgdocdir}
+if [ -d doc ]; then cp -arf doc %{buildroot}/%{_pkgdocdir}; fi
+if [ -d docs ]; then cp -arf docs %{buildroot}/%{_pkgdocdir}; fi
+if [ -d example ]; then cp -arf example %{buildroot}/%{_pkgdocdir}; fi
+if [ -d examples ]; then cp -arf examples %{buildroot}/%{_pkgdocdir}; fi
+pushd %{buildroot}
+if [ -d usr/lib ]; then
+ find usr/lib -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/lib64 ]; then
+ find usr/lib64 -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/bin ]; then
+ find usr/bin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+if [ -d usr/sbin ]; then
+ find usr/sbin -type f -printf "/%h/%f\n" >> filelist.lst
+fi
+touch doclist.lst
+if [ -d usr/share/man ]; then
+ find usr/share/man -type f -printf "/%h/%f.gz\n" >> doclist.lst
+fi
+popd
+mv %{buildroot}/filelist.lst .
+mv %{buildroot}/doclist.lst .
+
+%files -n python3-anarcute -f filelist.lst
+%dir %{python3_sitelib}/*
+
+%files help -f doclist.lst
+%{_docdir}/*
+
+%changelog
+* Fri May 05 2023 Python_Bot <Python_Bot@openeuler.org> - 0.1.996.4-1
+- Package Spec generated
diff --git a/sources b/sources
new file mode 100644
index 0000000..6f6bc17
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+f5de501bb6e775f020949d83d656becb anarcute-0.1.996.4.tar.gz