Dataset schema:

| column | type |
|---|---|
| commit | stringlengths 40–40 |
| old_file | stringlengths 4–118 |
| new_file | stringlengths 4–118 |
| old_contents | stringlengths 0–2.94k |
| new_contents | stringlengths 1–4.43k |
| subject | stringlengths 15–444 |
| message | stringlengths 16–3.45k |
| lang | stringclasses 1 value |
| license | stringclasses 13 values |
| repos | stringlengths 5–43.2k |
| prompt | stringlengths 17–4.58k |
| response | stringlengths 1–4.43k |
| prompt_tagged | stringlengths 58–4.62k |
| response_tagged | stringlengths 1–4.43k |
| text | stringlengths 132–7.29k |
| text_tagged | stringlengths 173–7.33k |

commit: edef5ae25b7228cf8b769a171305e9f9c32eeabb
file: app/test_ghmarkup.py (new file)
# encoding: utf-8

import unittest

import ghmarkup


class test_ghmarkup(unittest.TestCase):

    def test_unadorned_text(self):
        self.assertEqual("<p>\nfoo\n</p>", ghmarkup.ghmarkup("foo"))

    def test_bold(self):
        self.assertEqual("<p>\nthe <strong>quick</strong> brown fox\n</p>", ghmarkup.ghmarkup(
            "the *quick* brown fox"))

    def test_bullet(self):
        self.assertEqual("""<ul>
<li>
<div>
one
</div>
</li>
</ul>""", ghmarkup.ghmarkup("* one"))

    def test_two_bullets(self):
        self.assertEqual("""<ul>
<li>
<div>
one
</div>
</li>
<li>
<div>
two
</div>
</li>
</ul>""", ghmarkup.ghmarkup("* one\n* two\n"))

    def test_math(self):
        self.assertEqual(u'<p>\n<span class="sexp">(β p q)</span>\n</p>', ghmarkup.ghmarkup("#(β p q)#"))

    def test_literal(self):
        self.assertEqual(u'<p>\n<tt>#(β p q)#</tt>\n</p>', ghmarkup.ghmarkup("{{{#(β p q)#}}}"))

subject: Add some tests for the wiki markup.
lang: Python
license: apache-2.0
repos: jkingdon/ghilbert, kryptine/ghilbert, raphlinus/ghilbert
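
The module has no __main__ guard; a minimal sketch of running the suite programmatically (assuming the file is saved as test_ghmarkup.py next to ghmarkup.py):

# Hedged sketch: load and run the suite with stdlib unittest.
import unittest

import test_ghmarkup  # the module above, saved as test_ghmarkup.py

suite = unittest.defaultTestLoader.loadTestsFromModule(test_ghmarkup)
unittest.TextTestRunner(verbosity=2).run(suite)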

commit: 056de4daf1a00f677d3f9bccebae849dc49e7c48
file: checks/check_with_hue_and_saturation.py (new file)
from __future__ import print_function, division

import imgaug as ia
import imgaug.augmenters as iaa


def main():
    image = ia.quokka_square(size=(128, 128))

    images = []
    for i in range(15):
        # channel 0 = hue
        aug = iaa.WithHueAndSaturation(iaa.WithChannels(0, iaa.Add(i*20)))
        images.append(aug.augment_image(image))
    for i in range(15):
        # channel 1 = saturation
        aug = iaa.WithHueAndSaturation(iaa.WithChannels(1, iaa.Add(i*20)))
        images.append(aug.augment_image(image))

    ia.imshow(ia.draw_grid(images, rows=2))


if __name__ == "__main__":
    main()

subject: Add check script for WithHueAndSaturation
lang: Python
license: mit
repos: aleju/imgaug, aleju/ImageAugmenter
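
As a variation (my sketch, not part of the commit), both channels can be jittered in a single augmenter by nesting a Sequential inside WithHueAndSaturation:

# Hedged sketch: randomize hue and saturation together, using the same
# imgaug API as the check script above.
import imgaug as ia
import imgaug.augmenters as iaa

aug = iaa.WithHueAndSaturation(iaa.Sequential([
    iaa.WithChannels(0, iaa.Add((-30, 30))),  # hue
    iaa.WithChannels(1, iaa.Add((-30, 30))),  # saturation
]))
image = ia.quokka_square(size=(128, 128))
ia.imshow(aug.augment_image(image))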

commit: b05f95fb3e67ef2d13d67bf369e10c679a8b55cf
file: find_site_packages.py (new file)
import site
from distutils.sysconfig import get_python_lib

print("Your main site-packages directory:")
print(get_python_lib())
print()  # blank line; the original's bare `print` was a Python 2 statement
print("All global site-packages directories on your system:")
print(site.getsitepackages())

subject: Add script to find site-packages dirs.
lang: Python
license: bsd-3-clause
repos: audreyr/useful
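
distutils is deprecated in current Pythons; a minimal sketch of the same lookup via the stdlib sysconfig module (my substitution, not part of the commit):

# Hedged sketch: distutils-free variant of the script above.
import site
import sysconfig

print("Your main site-packages directory:")
print(sysconfig.get_paths()["purelib"])
print()
print("All global site-packages directories on your system:")
print(site.getsitepackages())  # note: absent in some older virtualenv environments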

commit: f12dba1134b8c9b42dc607e3e672de0e1cc2a4d1
file: examples/copy.py (new file)
from opterator import opterate


# Python 2 example; opterator builds the command-line interface from the
# function signature and the @param lines in the docstring.
@opterate
def main(filename1, filename2, recursive=False, backup=False,
         suffix='~', *other_filenames):
    '''An example copy script with some example parameters that might
    be used in a copy command.

    @param recursive store_true -r --recursive copy directories
        recursively
    @param backup store_true -b --backup backup any files you copy over
    @param suffix store -S --suffix override the usual backup
        suffix '''
    filenames = [filename1, filename2] + list(other_filenames)
    destination = filenames.pop()
    print "You asked to move %s to %s" % (filenames, destination)
    if recursive:
        print "You asked to copy directories recursively."
    if backup:
        print "You asked to backup any overwritten files."
        print "You would use the suffix %s" % suffix


if __name__ == '__main__':
    main()

subject: Add an examples directory to illustrate opterator usage.
lang: Python
license: mit
repos: buchuki/opterator
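
A sketch of driving the decorated main() (hypothetical file names; the decorator parses sys.argv when main() is called with no arguments, as in the __main__ block above):

# Hedged sketch: simulate a command line for the opterator example.
import sys

sys.argv = ["copy.py", "-r", "--backup", "a.txt", "b.txt", "dest"]
main()
# expected to report moving ['a.txt', 'b.txt'] to dest, recursively, with backups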

commit: 65bae0d38b96fde3e3dd54cdc915e99b661b974a
file: web_tools.py (new file)
import urllib, subprocess, random, re
from bs4 import BeautifulSoup  # http://www.crummy.com/software/BeautifulSoup/bs4/doc/#
import socket, struct, binascii


class Web():
    def getHeader(self, url):
        """Get header info."""
        http_r = urllib.urlopen(url)
        if http_r.code == 200:
            return http_r.headers
        else:
            return "No Header"

    def getBsObject(self, url):
        ht = urllib.urlopen(url)
        html_page = ht.read()
        bs_object = BeautifulSoup(html_page)
        return bs_object

    def getParticularClass(self, url):
        """Get all repositories from the GitHub page."""
        bs_object = self.getBsObject(url)
        repos = bs_object.findAll('span', {'class': "repo"})
        return repos

    def getLinks(self, url):
        bs_object = self.getBsObject(url)
        print bs_object.title.text
        for link in bs_object.find_all('a'):
            print(link.get('href'))

    def errorHandling(self, url):
        # Request a (most likely nonexistent) URL and inspect the error page.
        u = chr(random.randint(97, 122))
        url2 = url + u
        http_r = urllib.urlopen(url2)
        content = http_r.read()

        flag = 0
        i = 0
        list1 = []
        a_tag = "<*address>"  # loose pattern for <address> tags in default error pages
        file_text = open("web_error_handling_result.txt", 'a')

        while flag == 0:
            if http_r.code == 404:
                file_text.write("--------------")
                file_text.write(url)
                file_text.write("--------------\n")
                file_text.write(content)
                for match in re.finditer(a_tag, content):
                    i = i + 1
                    s = match.start()
                    e = match.end()
                    list1.append(s)
                    list1.append(e)
                if (i > 0):
                    print "Coding is not good"
                if len(list1) > 0:
                    a = list1[1]
                    b = list1[2]
                    print content[a:b]
                flag = 1  # fixed: without this the 404 branch looped forever
            elif http_r.code == 200:
                print "Web page is using custom Error Page"
                break
            else:
                print "Error handling seems to be OK."
                flag = 1

    def bannerGrabber(self):
        """Banner grabbing or OS fingerprinting is a method to determine the
        operating system that is running on a target web server."""
        # Raw-socket sniffing; Linux-only (PF_PACKET) and requires root.
        s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, socket.ntohs(0x0800))
        while True:
            pkt = s.recvfrom(2048)
            banner = pkt[0][54:533]
            print banner
            print "--" * 40


if __name__ == '__main__':
    subprocess.call('clear', shell=True)
    myWeb = Web()
    url = "https://github.com/rudolfvavra"
    print myWeb.getParticularClass(url)
    print myWeb.getHeader(url)
    myWeb.getLinks(url)
    myWeb.errorHandling(url)
    #myWeb.bannerGrabber()  # have to run as sudo user

subject: Add Web tools: getHeader, getParticularClass, getLinks, errorHandling, bannerGrabber
lang: Python
license: mit
repos: rudolfvavra/network
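
The module is Python 2 throughout (urllib.urlopen, print statements); a minimal Python 3 sketch of the header fetch, using only the stdlib (my adaptation, not part of the commit):

# Hedged sketch: Python 3 equivalent of Web.getHeader via urllib.request.
from urllib.request import urlopen

def get_header(url):
    # urlopen raises HTTPError for non-2xx responses, so reaching the
    # return means the request succeeded.
    with urlopen(url) as resp:
        return dict(resp.getheaders())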

commit: b264313d48a66b847a1cfa6459745f2d35e10cee
file: tests/conftest.py (new file)
import os
import subprocess
import sys


def _is_pip_installed():
    try:
        import pip  # NOQA
        return True
    except ImportError:
        return False


def _is_in_ci():
    ci_name = os.environ.get('CUPY_CI', '')
    return ci_name != ''


def pytest_configure(config):
    # Print installed packages
    if _is_in_ci() and _is_pip_installed():
        print("***** Installed packages *****", flush=True)
        subprocess.check_call([sys.executable, '-m', 'pip', 'freeze', '--all'])

subject: Print installed packages in pytest
lang: Python
license: mit
repos: cupy/cupy
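
The hook only fires when the CUPY_CI environment variable is non-empty; a sketch of triggering it outside real CI (any non-empty value works, per _is_in_ci above):

# Hedged sketch: run pytest with CUPY_CI set so the package listing prints.
import os
import subprocess
import sys

env = dict(os.environ, CUPY_CI="local")
subprocess.check_call([sys.executable, "-m", "pytest", "tests/"], env=env)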

commit: c31eae3d02889a5517782bcaf20406c72bc0de14
file: actions/cloudbolt_plugins/aws/poll_for_init_complete.py (new file)
import sys
import time

from infrastructure.models import Server
from jobs.models import Job

TIMEOUT = 600


def is_reachable(server):
    """
    :type server: Server
    """
    instance_id = server.ec2serverinfo.instance_id
    ec2_region = server.ec2serverinfo.ec2_region

    rh = server.resource_handler.cast()
    rh.connect_ec2(ec2_region)
    wc = rh.resource_technology.work_class

    instance = wc.get_instance(instance_id)
    conn = instance.connection
    status = conn.get_all_instance_status(instance_id)
    return status[0].instance_status.details[u'reachability'] == u'passed'


def run(job, logger=None):
    assert isinstance(job, Job)
    assert job.type == u'provision'
    server = job.server_set.first()

    timeout = time.time() + TIMEOUT
    while True:
        if is_reachable(server):
            job.set_progress("EC2 instance is reachable.")
            break
        elif time.time() > timeout:
            job.set_progress("Waited {} seconds. Continuing...".format(TIMEOUT))
            break
        else:
            time.sleep(2)

    return "", "", ""


if __name__ == '__main__':
    if len(sys.argv) != 2:
        print ' Usage: {} <job_id>'.format(sys.argv[0])
        sys.exit(1)
    print run(Job.objects.get(id=sys.argv[1]))

subject: Add AWS poll for init complete plug-in.
lang: Python
license: apache-2.0
repos: CloudBoltSoftware/cloudbolt-forge
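
The busy-wait in run() is a common poll-until-timeout pattern; factored out generically it might look like this (a sketch of the pattern, not CloudBolt API):

# Hedged sketch: generic poll-until-true-or-timeout helper.
import time

def wait_until(predicate, timeout_s=600, interval_s=2):
    """Return True as soon as predicate() is truthy, False on timeout."""
    deadline = time.time() + timeout_s
    while time.time() <= deadline:
        if predicate():
            return True
        time.sleep(interval_s)
    return False

# usage: wait_until(lambda: is_reachable(server))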

commit: ee91d78412ce57d35fbba3bbe13f646c90756027
file: addons/stock_account/migrations/8.0.1.1/post-migrate.py (new file)
# -*- coding: utf-8 -*-
##############################################################################
#
#    Author: Guewen Baconnier
#    Copyright 2014 Camptocamp SA
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.openupgrade import openupgrade
from openerp import pooler, SUPERUSER_ID


def create_properties(cr, pool):
    """ Fields moved to properties (cost_method).

    Write using the ORM so the cost_method will be written as properties.
    """
    template_obj = pool['product.template']
    company_obj = pool['res.company']
    company_ids = company_obj.search(cr, SUPERUSER_ID, [])
    sql = ("SELECT id, %s FROM product_template" %
           openupgrade.get_legacy_name('cost_method'))
    cr.execute(sql)
    for template_id, cost_method in cr.fetchall():
        for company_id in company_ids:
            ctx = {'force_company': company_id}
            template_obj.write(cr, SUPERUSER_ID, [template_id],
                               {'cost_method': cost_method},
                               context=ctx)


@openupgrade.migrate()
def migrate(cr, version):
    pool = pooler.get_pool(cr.dbname)
    create_properties(cr, pool)

subject: Migrate product_template.cost_method to property (in stock_account)
lang: Python
license: agpl-3.0
repos: 0k/OpenUpgrade, grap/OpenUpgrade, blaggacao/OpenUpgrade, csrocha/OpenUpgrade, damdam-s/OpenUpgrade, OpenUpgrade-dev/OpenUpgrade, Endika/OpenUpgrade, kirca/OpenUpgrade, OpenUpgrade/OpenUpgrade, hifly/OpenUpgrade, pedrobaeza/OpenUpgrade, bwrsandman/OpenUpgrade, sebalix/OpenUpgrade, florentx/OpenUpgrade, mvaled/OpenUpgrade
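
For orientation, a hedged sketch of the query the migration assembles; the real legacy column name comes from openupgrade.get_legacy_name('cost_method'), set when the pre-migration renamed the column, so the name below is a hypothetical placeholder:

# Hedged sketch: shape of the SQL built in create_properties().
legacy_col = "openupgrade_legacy_cost_method"  # hypothetical placeholder name
sql = "SELECT id, %s FROM product_template" % legacy_col
# -> SELECT id, openupgrade_legacy_cost_method FROM product_template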

commit: 8e75202872dd640c28173e2070b234165774f9db
file: buhmm/misc.py (new file)
# encoding: utf-8
"""
Miscellaneous functions.

"""
from __future__ import division

import numpy as np


def logspace_int(limit, num=50):
    """
    Returns (approximately) logarithmically spaced integers from 0 to `limit`.

    This is often more appropriate than calling `np.logspace(...).astype(int)`,
    or something similar, as those approaches will contain duplicate integers.

    One common use case is to generate logarithmically spaced indices.

    Parameters
    ----------
    limit : int
        The maximum possible integer.
    num : int, optional
        Number of samples to generate. Default is 50.

    Returns
    -------
    samples : NumPy array
        The `num` logarithmically spaced integer samples.

    References
    ----------
    .. [1] http://stackoverflow.com/a/12421820

    """
    if limit <= 0:
        raise Exception('`limit` must be greater than zero.')

    if num == 0:
        return np.array([], dtype=np.uint64)
    elif num == 1:
        return np.array([0], dtype=np.uint64)

    if limit < num:
        msg = "Not enough integers between 0 and {0}".format(limit)
        raise Exception(msg)

    result = [1]
    if num > 1:
        # Only calculate ratio if we avoid a ZeroDivisionError.
        ratio = (limit / result[-1]) ** (1 / (num - len(result)))

    while len(result) < num:
        next_value = result[-1] * ratio
        if next_value - result[-1] >= 1:
            # Safe zone. next_value will be a different integer.
            result.append(next_value)
        else:
            # Problem! Same integer. We need to find next_value by
            # artificially incrementing previous value.
            result.append(result[-1] + 1)
            # Recalculate the ratio so that remaining values scale correctly.
            ratio = (limit / (result[-1])) ** (1 / (num - len(result)))

    # Round, shift to start at 0, and return an integer array.
    # (Note: despite the uint64 early returns above, this path yields int64.)
    result = np.round(result) - 1
    return result.astype(np.int64)

subject: Add function to generate logarithmically spaced integers.
lang: Python
license: mit
repos: chebee7i/buhmm
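
A worked example (values traced by hand through the algorithm above, so treat them as illustrative): with limit=1000 and num=5 the initial ratio is 1000**(1/4) ≈ 5.62, giving [1, 5.62, 31.6, 177.8, 1000] before rounding and the final shift by -1.

# Hedged sketch: logspace_int in use.
samples = logspace_int(1000, num=5)
print(samples)  # expected: [  0   5  31 177 999]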

commit: b5db260f9499b7bdd5c13dc0c59c1ad6ad2d37fc
file: datastore/management/commands/dev_seed.py (new file)
from django.utils.timezone import now, timedelta
from django.core.management.base import BaseCommand
from datastore.models import *
from django.contrib.auth.models import User
from oauth2_provider.models import AccessToken, get_application_model

ApplicationModel = get_application_model()


class Command(BaseCommand):
    help = "Initialize the datastore for development"

    def handle(self, *args, **options):
        # create a superuser
        user = User.objects.create_superuser('demo', 'demo@example.com', 'demo-password')
        user.save()

        app = ApplicationModel.objects.create(
            name='app',
            client_type=ApplicationModel.CLIENT_CONFIDENTIAL,
            authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,
            user=user
        )

        token = AccessToken.objects.create(
            user=user,
            token='tokstr',
            application=app,
            expires=now() + timedelta(days=365),
            scope="read write"
        )

subject: Automate initial seeding of user and oauth for development
lang: Python
license: mit
repos: impactlab/oeem-energy-datastore
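
A sketch of exercising the command programmatically (call_command is standard Django; the command name comes from the module's filename):

# Hedged sketch: invoke the seeding command, e.g. from a test or a shell.
from django.core.management import call_command

call_command("dev_seed")
# afterwards the API can be exercised with the fixed bearer token:
#   Authorization: Bearer tokstr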
Automate initial seeding of user and oauth for development
|
from django.utils.timezone import now, timedelta
from django.core.management.base import BaseCommand
from datastore.models import *
from django.contrib.auth.models import User
from oauth2_provider.models import AccessToken, get_application_model
from datetime import datetime
ApplicationModel = get_application_model()
class Command(BaseCommand):
help = "Initialize the datastore for development"
def handle(self, *args, **options):
# create a superuser
user = User.objects.create_superuser('demo','demo@example.com','demo-password')
user.save()
app = ApplicationModel.objects.create(
name='app',
client_type=ApplicationModel.CLIENT_CONFIDENTIAL,
authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,
user=user
)
token = AccessToken.objects.create(
user=user,
token='tokstr',
application=app,
expires=now() + timedelta(days=365),
scope="read write"
)
|
<commit_before><commit_msg>Automate initial seeding of user and oauth for development<commit_after>
|
from django.utils.timezone import now, timedelta
from django.core.management.base import BaseCommand
from datastore.models import *
from django.contrib.auth.models import User
from oauth2_provider.models import AccessToken, get_application_model
from datetime import datetime
ApplicationModel = get_application_model()
class Command(BaseCommand):
help = "Initialize the datastore for development"
def handle(self, *args, **options):
# create a superuser
user = User.objects.create_superuser('demo','demo@example.com','demo-password')
user.save()
app = ApplicationModel.objects.create(
name='app',
client_type=ApplicationModel.CLIENT_CONFIDENTIAL,
authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,
user=user
)
token = AccessToken.objects.create(
user=user,
token='tokstr',
application=app,
expires=now() + timedelta(days=365),
scope="read write"
)
|
Automate initial seeding of user and oauth for developmentfrom django.utils.timezone import now, timedelta
from django.core.management.base import BaseCommand
from datastore.models import *
from django.contrib.auth.models import User
from oauth2_provider.models import AccessToken, get_application_model
from datetime import datetime
ApplicationModel = get_application_model()
class Command(BaseCommand):
help = "Initialize the datastore for development"
def handle(self, *args, **options):
# create a superuser
user = User.objects.create_superuser('demo','demo@example.com','demo-password')
user.save()
app = ApplicationModel.objects.create(
name='app',
client_type=ApplicationModel.CLIENT_CONFIDENTIAL,
authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,
user=user
)
token = AccessToken.objects.create(
user=user,
token='tokstr',
application=app,
expires=now() + timedelta(days=365),
scope="read write"
)
|
<commit_before><commit_msg>Automate initial seeding of user and oauth for development<commit_after>from django.utils.timezone import now, timedelta
from django.core.management.base import BaseCommand
from datastore.models import *
from django.contrib.auth.models import User
from oauth2_provider.models import AccessToken, get_application_model
from datetime import datetime
ApplicationModel = get_application_model()
class Command(BaseCommand):
help = "Initialize the datastore for development"
def handle(self, *args, **options):
# create a superuser
user = User.objects.create_superuser('demo','demo@example.com','demo-password')
user.save()
app = ApplicationModel.objects.create(
name='app',
client_type=ApplicationModel.CLIENT_CONFIDENTIAL,
authorization_grant_type=ApplicationModel.GRANT_CLIENT_CREDENTIALS,
user=user
)
token = AccessToken.objects.create(
user=user,
token='tokstr',
application=app,
expires=now() + timedelta(days=365),
scope="read write"
)
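A hedged sketch of exercising the seeded credentials once the dev server is running; the endpoint path is hypothetical, but the token string `tokstr` comes from the command above.
import requests
# After `python manage.py dev_seed` and starting the dev server:
resp = requests.get(
    "http://localhost:8000/api/",                # path is illustrative only
    headers={"Authorization": "Bearer tokstr"},  # token created by dev_seed
)
print(resp.status_code)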
|
|
88b9ab92c5fe3374dba27c1d8c1d0b0b0e7411b7
|
tests/test_footnotes.py
|
tests/test_footnotes.py
|
from MarkdownEditing.tests import DereferrablePanelTestCase
class TestMdeReferenceNewFootnoteCommand(DereferrablePanelTestCase):
def test_new_footnote_in_first_line(self):
self.setBlockText(
"""
# Test 1
First inline.
Second inline.
Third inline.
# Test 2
paragraph
"""
)
self.setCaretTo(3,6)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:\x20
"""
)
def test_new_footnote_in_second_line(self):
self.setBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
"""
)
self.setCaretTo(4,7)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second[^2] inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
[^2]:\x20
"""
)
|
Add simple test for `mde_reference_new_footnote` command
|
Tests: Add simple test for `mde_reference_new_footnote` command
Start to ensure not to break existing functions.
|
Python
|
mit
|
SublimeText-Markdown/MarkdownEditing
|
Tests: Add simple test for `mde_reference_new_footnote` command
Start to ensure not to break existing functions.
|
from MarkdownEditing.tests import DereferrablePanelTestCase
class TestMdeReferenceNewFootnoteCommand(DereferrablePanelTestCase):
def test_new_footnote_in_first_line(self):
self.setBlockText(
"""
# Test 1
First inline.
Second inline.
Third inline.
# Test 2
paragraph
"""
)
self.setCaretTo(3,6)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:\x20
"""
)
def test_new_footnote_in_second_line(self):
self.setBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
"""
)
self.setCaretTo(4,7)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second[^2] inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
[^2]:\x20
"""
)
|
<commit_before><commit_msg>Tests: Add simple test for `mde_reference_new_footnote` command
Start to ensure not to break existing functions.<commit_after>
|
from MarkdownEditing.tests import DereferrablePanelTestCase
class TestMdeReferenceNewFootnoteCommand(DereferrablePanelTestCase):
def test_new_footnote_in_first_line(self):
self.setBlockText(
"""
# Test 1
First inline.
Second inline.
Third inline.
# Test 2
paragraph
"""
)
self.setCaretTo(3,6)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:\x20
"""
)
def test_new_footnote_in_second_line(self):
self.setBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
"""
)
self.setCaretTo(4,7)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second[^2] inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
[^2]:\x20
"""
)
|
Tests: Add simple test for `mde_reference_new_footnote` command
Start to ensure not to break existing functions.from MarkdownEditing.tests import DereferrablePanelTestCase
class TestMdeReferenceNewFootnoteCommand(DereferrablePanelTestCase):
def test_new_footnote_in_first_line(self):
self.setBlockText(
"""
# Test 1
First inline.
Second inline.
Third inline.
# Test 2
paragraph
"""
)
self.setCaretTo(3,6)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:\x20
"""
)
def test_new_footnote_in_second_line(self):
self.setBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
"""
)
self.setCaretTo(4,7)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second[^2] inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
[^2]:\x20
"""
)
|
<commit_before><commit_msg>Tests: Add simple test for `mde_reference_new_footnote` command
Start to ensure not to break existing functions.<commit_after>from MarkdownEditing.tests import DereferrablePanelTestCase
class TestMdeReferenceNewFootnoteCommand(DereferrablePanelTestCase):
def test_new_footnote_in_first_line(self):
self.setBlockText(
"""
# Test 1
First inline.
Second inline.
Third inline.
# Test 2
paragraph
"""
)
self.setCaretTo(3,6)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:\x20
"""
)
def test_new_footnote_in_second_line(self):
self.setBlockText(
"""
# Test 1
First[^1] inline.
Second inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
"""
)
self.setCaretTo(4,7)
self.view.run_command("mde_reference_new_footnote")
self.assertEqualBlockText(
"""
# Test 1
First[^1] inline.
Second[^2] inline.
Third inline.
# Test 2
paragraph
[^1]:
Footnote text
with second line
[^2]:\x20
"""
)
|
|
74b31ba7fec330ec167c2e001f60695272da71b8
|
pages/views.py
|
pages/views.py
|
from django.views import generic
from django.contrib.auth.models import Group
from django_countries.fields import Country
from hosting.models import Profile, Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
|
from django.views import generic
from django.contrib.auth.models import Group
from hosting.models import Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.available_objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
|
Fix numbers in LO list.
|
Fix numbers in LO list.
|
Python
|
agpl-3.0
|
batisteo/pasportaservo,tejo-esperanto/pasportaservo,tejo-esperanto/pasportaservo,tejo-esperanto/pasportaservo,tejo-esperanto/pasportaservo,batisteo/pasportaservo,batisteo/pasportaservo,batisteo/pasportaservo
|
from django.views import generic
from django.contrib.auth.models import Group
from django_countries.fields import Country
from hosting.models import Profile, Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
Fix numbers in LO list.
|
from django.views import generic
from django.contrib.auth.models import Group
from hosting.models import Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.available_objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
|
<commit_before>from django.views import generic
from django.contrib.auth.models import Group
from django_countries.fields import Country
from hosting.models import Profile, Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
<commit_msg>Fix numbers in LO list.<commit_after>
|
from django.views import generic
from django.contrib.auth.models import Group
from hosting.models import Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.available_objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
|
from django.views import generic
from django.contrib.auth.models import Group
from django_countries.fields import Country
from hosting.models import Profile, Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
Fix numbers in LO list.from django.views import generic
from django.contrib.auth.models import Group
from hosting.models import Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.available_objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
|
<commit_before>from django.views import generic
from django.contrib.auth.models import Group
from django_countries.fields import Country
from hosting.models import Profile, Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
<commit_msg>Fix numbers in LO list.<commit_after>from django.views import generic
from django.contrib.auth.models import Group
from hosting.models import Place
from hosting.utils import sort_by_name
class AboutView(generic.TemplateView):
template_name = 'pages/about.html'
about = AboutView.as_view()
class TermsAndConditionsView(generic.TemplateView):
template_name = 'pages/terms_conditions.html'
terms_conditions = TermsAndConditionsView.as_view()
class SupervisorsView(generic.TemplateView):
template_name = 'pages/supervisors.html'
def countries(self):
places = Place.available_objects.filter(in_book=True)
groups = Group.objects.exclude(user=None)
countries = sort_by_name({p.country for p in places})
for country in countries:
try:
group = groups.get(name=str(country))
country.supervisors = sorted(user.profile for user in group.user_set.all())
except Group.DoesNotExist:
pass
country.place_count = places.filter(country=country).count()
return countries
supervisors = SupervisorsView.as_view()
class FaqView(generic.TemplateView):
template_name = 'pages/faq.html'
faq = FaqView.as_view()
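The fix above relies on a custom `available_objects` manager on `Place`; a minimal sketch of what such a manager could look like, assuming a soft-delete flag named `deleted` (the actual model may differ):
from django.db import models
class AvailableManager(models.Manager):
    """Hypothetical manager excluding soft-deleted rows from queries."""
    def get_queryset(self):
        return super(AvailableManager, self).get_queryset().filter(deleted=False)
class Place(models.Model):
    deleted = models.BooleanField(default=False)  # assumed soft-delete flag
    objects = models.Manager()                    # default manager, all rows
    available_objects = AvailableManager()        # used by SupervisorsView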
|
5c8c8585b19a9031cacad6a21fa4990560933e02
|
commpy/examples/wifi80211_conv_encode_decode.py
|
commpy/examples/wifi80211_conv_encode_decode.py
|
# Authors: CommPy contributors
# License: BSD 3-Clause
import math
import matplotlib.pyplot as plt
import numpy as np
import commpy.channels as chan
# ==================================================================================================
# Complete example using Commpy Wifi 802.11 physical parameters
# ==================================================================================================
from commpy.wifi80211 import Wifi80211
# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
w2 = Wifi80211(mcs=2)
w3 = Wifi80211(mcs=3)
# SNR range to test
SNRs2 = np.arange(0, 6) + 10 * math.log10(w2.get_modem().num_bits_symbol)
SNRs3 = np.arange(0, 6) + 10 * math.log10(w3.get_modem().num_bits_symbol)
BERs_mcs2 = w2.link_performance(channels, SNRs2, 10, 10, 600, stop_on_surpass_error=False)
BERs_mcs3 = w3.link_performance(channels, SNRs3, 10, 10, 600, stop_on_surpass_error=False)
# Test
plt.semilogy(SNRs2, BERs_mcs2, 'o-', SNRs3, BERs_mcs3, 'o-')
plt.grid()
plt.xlabel('Signal to Noise Ratio (dB)')
plt.ylabel('Bit Error Rate')
plt.legend(('MCS 2', 'MCS 3'))
plt.show()
|
Add example of usage of the simulation of 802.11
|
Add example of usage of the simulation of 802.11
|
Python
|
bsd-3-clause
|
veeresht/CommPy
|
Add example of usage of the simulation of 802.11
|
# Authors: CommPy contributors
# License: BSD 3-Clause
import math
import matplotlib.pyplot as plt
import numpy as np
import commpy.channels as chan
# ==================================================================================================
# Complete example using Commpy Wifi 802.11 physical parameters
# ==================================================================================================
from commpy.wifi80211 import Wifi80211
# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
w2 = Wifi80211(mcs=2)
w3 = Wifi80211(mcs=3)
# SNR range to test
SNRs2 = np.arange(0, 6) + 10 * math.log10(w2.get_modem().num_bits_symbol)
SNRs3 = np.arange(0, 6) + 10 * math.log10(w3.get_modem().num_bits_symbol)
BERs_mcs2 = w2.link_performance(channels, SNRs2, 10, 10, 600, stop_on_surpass_error=False)
BERs_mcs3 = w3.link_performance(channels, SNRs3, 10, 10, 600, stop_on_surpass_error=False)
# Test
plt.semilogy(SNRs2, BERs_mcs2, 'o-', SNRs3, BERs_mcs3, 'o-')
plt.grid()
plt.xlabel('Signal to Noise Ratio (dB)')
plt.ylabel('Bit Error Rate')
plt.legend(('MCS 2', 'MCS 3'))
plt.show()
|
<commit_before><commit_msg>Add example of usage of the simulation of 802.11<commit_after>
|
# Authors: CommPy contributors
# License: BSD 3-Clause
import math
import matplotlib.pyplot as plt
import numpy as np
import commpy.channels as chan
# ==================================================================================================
# Complete example using Commpy Wifi 802.11 physical parameters
# ==================================================================================================
from commpy.wifi80211 import Wifi80211
# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
w2 = Wifi80211(mcs=2)
w3 = Wifi80211(mcs=3)
# SNR range to test
SNRs2 = np.arange(0, 6) + 10 * math.log10(w2.get_modem().num_bits_symbol)
SNRs3 = np.arange(0, 6) + 10 * math.log10(w3.get_modem().num_bits_symbol)
BERs_mcs2 = w2.link_performance(channels, SNRs2, 10, 10, 600, stop_on_surpass_error=False)
BERs_mcs3 = w3.link_performance(channels, SNRs3, 10, 10, 600, stop_on_surpass_error=False)
# Test
plt.semilogy(SNRs2, BERs_mcs2, 'o-', SNRs3, BERs_mcs3, 'o-')
plt.grid()
plt.xlabel('Signal to Noise Ratio (dB)')
plt.ylabel('Bit Error Rate')
plt.legend(('MCS 2', 'MCS 3'))
plt.show()
|
Add example of usage of the simulation of 802.11# Authors: CommPy contributors
# License: BSD 3-Clause
import math
import matplotlib.pyplot as plt
import numpy as np
import commpy.channels as chan
# ==================================================================================================
# Complete example using Commpy Wifi 802.11 physical parameters
# ==================================================================================================
from commpy.wifi80211 import Wifi80211
# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
w2 = Wifi80211(mcs=2)
w3 = Wifi80211(mcs=3)
# SNR range to test
SNRs2 = np.arange(0, 6) + 10 * math.log10(w2.get_modem().num_bits_symbol)
SNRs3 = np.arange(0, 6) + 10 * math.log10(w3.get_modem().num_bits_symbol)
BERs_mcs2 = w2.link_performance(channels, SNRs2, 10, 10, 600, stop_on_surpass_error=False)
BERs_mcs3 = w3.link_performance(channels, SNRs3, 10, 10, 600, stop_on_surpass_error=False)
# Test
plt.semilogy(SNRs2, BERs_mcs2, 'o-', SNRs3, BERs_mcs3, 'o-')
plt.grid()
plt.xlabel('Signal to Noise Ratio (dB)')
plt.ylabel('Bit Error Rate')
plt.legend(('MCS 2', 'MCS 3'))
plt.show()
|
<commit_before><commit_msg>Add example of usage of the simulation of 802.11<commit_after># Authors: CommPy contributors
# License: BSD 3-Clause
import math
import matplotlib.pyplot as plt
import numpy as np
import commpy.channels as chan
# ==================================================================================================
# Complete example using Commpy Wifi 802.11 physical parameters
# ==================================================================================================
from commpy.wifi80211 import Wifi80211
# AWGN channel
channels = chan.SISOFlatChannel(None, (1 + 0j, 0j))
w2 = Wifi80211(mcs=2)
w3 = Wifi80211(mcs=3)
# SNR range to test
SNRs2 = np.arange(0, 6) + 10 * math.log10(w2.get_modem().num_bits_symbol)
SNRs3 = np.arange(0, 6) + 10 * math.log10(w3.get_modem().num_bits_symbol)
BERs_mcs2 = w2.link_performance(channels, SNRs2, 10, 10, 600, stop_on_surpass_error=False)
BERs_mcs3 = w3.link_performance(channels, SNRs3, 10, 10, 600, stop_on_surpass_error=False)
# Test
plt.semilogy(SNRs2, BERs_mcs2, 'o-', SNRs3, BERs_mcs3, 'o-')
plt.grid()
plt.xlabel('Signal to Noise Ration (dB)')
plt.ylabel('Bit Error Rate')
plt.legend(('MCS 2', 'MCS 3'))
plt.show()
|
|
a75effa9489e41915b624084c61fba144003b448
|
utils/test_h5py.py
|
utils/test_h5py.py
|
"""
Simple test of a parallel build of h5py (from h5py's documentation).
http://h5py.readthedocs.org/en/latest/mpi.html#using-parallel-hdf5-from-h5py
If you've built h5py properly against a parallel build of hdf5, you should be
able to run this code with::
$ mpiexec -n 4 python test_h5py.py
and then check the output with `h5dump`::
$ h5dump parallel_test.hdf5
HDF5 "parallel_test.hdf5" {
GROUP "/" {
DATASET "test" {
DATATYPE H5T_STD_I32LE
DATASPACE SIMPLE { ( 4 ) / ( 4 ) }
DATA {
(0): 0, 1, 2, 3
}
}
}
}
"""
from mpi4py import MPI
import h5py
rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
dset = f.create_dataset('test', (4,), dtype='i')
dset[rank] = rank
f.close()
|
Add a simple test of one's h5py installation.
|
Add a simple test of one's h5py installation.
|
Python
|
bsd-3-clause
|
RaoUmer/distarray,enthought/distarray,RaoUmer/distarray,enthought/distarray
|
Add a simple test of one's h5py installation.
|
"""
Simple test of a parallel build of h5py (from h5py's documentation).
http://h5py.readthedocs.org/en/latest/mpi.html#using-parallel-hdf5-from-h5py
If you've built h5py properly against a parallel build of hdf5, you should be
able to run this code with::
$ mpiexec -n 4 python test_h5py.py
and then check the output with `h5dump`::
$ h5dump parallel_test.hdf5
HDF5 "parallel_test.hdf5" {
GROUP "/" {
DATASET "test" {
DATATYPE H5T_STD_I32LE
DATASPACE SIMPLE { ( 4 ) / ( 4 ) }
DATA {
(0): 0, 1, 2, 3
}
}
}
}
"""
from mpi4py import MPI
import h5py
rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
dset = f.create_dataset('test', (4,), dtype='i')
dset[rank] = rank
f.close()
|
<commit_before><commit_msg>Add a simple test of one's h5py installation.<commit_after>
|
"""
Simple test of a parallel build of h5py (from h5py's documentation).
http://h5py.readthedocs.org/en/latest/mpi.html#using-parallel-hdf5-from-h5py
If you've built h5py properly against a parallel build of hdf5, you should be
able to run this code with::
$ mpiexec -n 4 python test_h5py.py
and then check the output with `h5dump`::
$ h5dump parallel_test.hdf5
HDF5 "parallel_test.hdf5" {
GROUP "/" {
DATASET "test" {
DATATYPE H5T_STD_I32LE
DATASPACE SIMPLE { ( 4 ) / ( 4 ) }
DATA {
(0): 0, 1, 2, 3
}
}
}
}
"""
from mpi4py import MPI
import h5py
rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
dset = f.create_dataset('test', (4,), dtype='i')
dset[rank] = rank
f.close()
|
Add a simple test of one's h5py installation."""
Simple test of a parallel build of h5py (from h5py's documentation).
http://h5py.readthedocs.org/en/latest/mpi.html#using-parallel-hdf5-from-h5py
If you've built h5py properly against a parallel build of hdf5, you should be
able to run this code with::
$ mpiexec -n 4 python test_h5py.py
and then check the output with `h5dump`::
$ h5dump parallel_test.hdf5
HDF5 "parallel_test.hdf5" {
GROUP "/" {
DATASET "test" {
DATATYPE H5T_STD_I32LE
DATASPACE SIMPLE { ( 4 ) / ( 4 ) }
DATA {
(0): 0, 1, 2, 3
}
}
}
}
"""
from mpi4py import MPI
import h5py
rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
dset = f.create_dataset('test', (4,), dtype='i')
dset[rank] = rank
f.close()
|
<commit_before><commit_msg>Add a simple test of one's h5py installation.<commit_after>"""
Simple test of a parallel build of h5py (from h5py's documentation).
http://h5py.readthedocs.org/en/latest/mpi.html#using-parallel-hdf5-from-h5py
If you've built h5py properly against a parallel build of hdf5, you should be
able to run this code with::
$ mpiexec -n 4 python test_h5py.py
and then check the output with `h5dump`::
$ h5dump parallel_test.hdf5
HDF5 "parallel_test.hdf5" {
GROUP "/" {
DATASET "test" {
DATATYPE H5T_STD_I32LE
DATASPACE SIMPLE { ( 4 ) / ( 4 ) }
DATA {
(0): 0, 1, 2, 3
}
}
}
}
"""
from mpi4py import MPI
import h5py
rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run)
f = h5py.File('parallel_test.hdf5', 'w', driver='mpio', comm=MPI.COMM_WORLD)
dset = f.create_dataset('test', (4,), dtype='i')
dset[rank] = rank
f.close()
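A read-back sketch under the same MPI setup (editorial addition; uses only documented h5py/mpi4py calls):
from mpi4py import MPI
import h5py
comm = MPI.COMM_WORLD
# Reopen collectively; each rank reads the slot it wrote above.
with h5py.File('parallel_test.hdf5', 'r', driver='mpio', comm=comm) as f:
    print(comm.rank, f['test'][comm.rank])  # expect rank i to print i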
|
|
736b1453087ebf3f813fcb499ba0fd5ccaad965d
|
login/migrations/0003_auto_20160805_1311.py
|
login/migrations/0003_auto_20160805_1311.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-05 11:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20160228_1805'),
]
operations = [
migrations.RenameModel(
old_name='myuser',
new_name='OepUser',
),
]
|
Add oep user for wiki auth
|
Add oep user for wiki auth
|
Python
|
agpl-3.0
|
openego/oeplatform,openego/oeplatform,tom-heimbrodt/oeplatform,openego/oeplatform,openego/oeplatform,tom-heimbrodt/oeplatform,tom-heimbrodt/oeplatform
|
Add oep user for wiki auth
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-05 11:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20160228_1805'),
]
operations = [
migrations.RenameModel(
old_name='myuser',
new_name='OepUser',
),
]
|
<commit_before><commit_msg>Add oep user for wiki auth<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-05 11:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20160228_1805'),
]
operations = [
migrations.RenameModel(
old_name='myuser',
new_name='OepUser',
),
]
|
Add oep user for wiki auth# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-05 11:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20160228_1805'),
]
operations = [
migrations.RenameModel(
old_name='myuser',
new_name='OepUser',
),
]
|
<commit_before><commit_msg>Add oep user for wiki auth<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-08-05 11:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('login', '0002_auto_20160228_1805'),
]
operations = [
migrations.RenameModel(
old_name='myuser',
new_name='OepUser',
),
]
|
|
ec742a7572f94d8864444379ce91a4de32c45715
|
actionizer.py
|
actionizer.py
|
#! /usr/bin/python
import numpy as np
import os
from sklearn import datasets
MESSAGES_DIR = "data/messages/"
JUDGMENTS_PATH = "data/judgments/judgments.txt"
def load_messages():
messages = []
for filename in os.listdir(MESSAGES_DIR):
with open(MESSAGES_DIR + filename) as message_file:
messages.append(message_file.read())
return messages
def tfidf(documents):
# TODO: Stub implementation
return [[]]
def load_judgments():
judgments = []
with open(JUDGMENTS_PATH) as judgments_file:
for line in judgments_file:
judgments.append(1 if len(line.split()) > 2 else 0)
return judgments
def main():
messages = load_messages()
target = load_judgments()
print target
data = tfidf(messages)
if __name__ == "__main__":
main()
|
Add functions to read in message data and parse targets.
|
Add functions to read in message data and parse targets.
|
Python
|
mit
|
chiubaka/actionizer
|
Add functions to read in message data and parse targets.
|
#! /usr/bin/python
import numpy as np
import os
from sklearn import datasets
MESSAGES_DIR = "data/messages/"
JUDGMENTS_PATH = "data/judgments/judgments.txt"
def load_messages():
messages = []
for filename in os.listdir(MESSAGES_DIR):
with open(MESSAGES_DIR + filename) as message_file:
messages.append(message_file.read())
return messages
def tfidf(documents):
# TODO: Stub implementation
return [[]]
def load_judgments():
judgments = []
with open(JUDGMENTS_PATH) as judgments_file:
for line in judgments_file:
judgments.append(1 if len(line.split()) > 2 else 0)
return judgments
def main():
messages = load_messages()
target = load_judgments()
print target
data = tfidf(messages)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add functions to read in message data and parse targets.<commit_after>
|
#! /usr/bin/python
import numpy as np
import os
from sklearn import datasets
MESSAGES_DIR = "data/messages/"
JUDGMENTS_PATH = "data/judgments/judgments.txt"
def load_messages():
messages = []
for filename in os.listdir(MESSAGES_DIR):
with open(MESSAGES_DIR + filename) as message_file:
messages.append(message_file.read())
return messages
def tfidf(documents):
# TODO: Stub implementation
return [[]]
def load_judgments():
judgments = []
with open(JUDGMENTS_PATH) as judgments_file:
for line in judgments_file:
judgments.append(1 if len(line.split()) > 2 else 0)
return judgments
def main():
messages = load_messages()
target = load_judgments()
print target
data = tfidf(messages)
if __name__ == "__main__":
main()
|
Add functions to read in message data and parse targets.#! /usr/bin/python
import numpy as np
import os
from sklearn import datasets
MESSAGES_DIR = "data/messages/"
JUDGMENTS_PATH = "data/judgments/judgments.txt"
def load_messages():
messages = []
for filename in os.listdir(MESSAGES_DIR):
with open(MESSAGES_DIR + filename) as message_file:
messages.append(message_file.read())
return messages
def tfidf(documents):
# TODO: Stub implementation
return [[]]
def load_judgments():
judgments = []
with open(JUDGMENTS_PATH) as judgments_file:
for line in judgments_file:
judgments.append(1 if len(line.split()) > 2 else 0)
return judgments
def main():
messages = load_messages()
target = load_judgments()
print target
data = tfidf(messages)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add functions to read in message data and parse targets.<commit_after>#! /usr/bin/python
import numpy as np
import os
from sklearn import datasets
MESSAGES_DIR = "data/messages/"
JUDGMENTS_PATH = "data/judgments/judgments.txt"
def load_messages():
messages = []
for filename in os.listdir(MESSAGES_DIR):
with open(MESSAGES_DIR + filename) as message_file:
messages.append(message_file.read())
return messages
def tfidf(documents):
# TODO: Stub implementation
return [[]]
def load_judgments():
judgments = []
with open(JUDGMENTS_PATH) as judgments_file:
for line in judgments_file:
judgments.append(1 if len(line.split()) > 2 else 0)
return judgments
def main():
messages = load_messages()
target = load_judgments()
print target
data = tfidf(messages)
if __name__ == "__main__":
main()
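One way the `tfidf` stub might later be filled in with scikit-learn, which the file already depends on; this is an assumption, since the project may intend a hand-rolled version:
from sklearn.feature_extraction.text import TfidfVectorizer
def tfidf(documents):
    # Returns a dense (n_documents, n_terms) matrix; sparse also works.
    vectorizer = TfidfVectorizer()
    return vectorizer.fit_transform(documents).toarray()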
|
|
564613521716161ca09ed127acfec984ca1ab48a
|
apirus_abc.py
|
apirus_abc.py
|
#!/usr/bin/env python
"""
The APIRUS API as an Abstract Base Class
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
class APIRUS:
"""
ABC for the API for Regular, Unstructured and Staggered model output
(APIRUS)
    This ABC serves as a way to document the API, and is designed to be
subclassed by py_ugrid, py_sgrid and any other future implementations.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
        this one is probably going to be very different for each subclass
"""
pass
    @classmethod
    @abstractmethod
def from_netcdf(cls, filename):
"""
load a data set from a netcdf file
"""
pass
|
Add a start for an ABC for the API
|
Add a start for an ABC for the API
|
Python
|
cc0-1.0
|
ocefpaf/APIRUS,ioos/APIRUS
|
Add a start for an ABC for the API
|
#!/usr/bin/env python
"""
The APIRUS API as an Abstract Base Class
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
class APIRUS:
"""
ABC for the API for Regular, Unstructured and Staggered model output
(APIRUS)
    This ABC serves as a way to document the API, and is designed to be
subclassed by py_ugrid, py_sgrid and any other future implementations.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
        this one is probably going to be very different for each subclass
"""
pass
    @classmethod
    @abstractmethod
def from_netcdf(cls, filename):
"""
load a data set from a netcdf file
"""
pass
|
<commit_before><commit_msg>Add a start for an ABC for the API<commit_after>
|
#!/usr/bin/env python
"""
The APIRUS API as an Abstract Base Class
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
class APIRUS:
"""
ABC for the API for Regular, Unstructured and Staggered model output
(APIRUS)
    This ABC serves as a way to document the API, and is designed to be
subclassed by py_ugrid, py_sgrid and any other future implementations.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
        this one is probably going to be very different for each subclass
"""
pass
    @classmethod
    @abstractmethod
def from_netcdf(cls, filename):
"""
load a data set from a netcdf file
"""
pass
|
Add a start for an ABC for the API#!/usr/bin/env python
"""
The APIRUS API as an Abstract Base Class
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
class APIRUS:
"""
ABC for the API for Regular, Unstructured and Staggered model output
(APIRUS)
    This ABC serves as a way to document the API, and is designed to be
subclassed by py_ugrid, py_sgrid and any other future implementations.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
        this one is probably going to be very different for each subclass
"""
pass
    @classmethod
    @abstractmethod
def from_netcdf(cls, filename):
"""
load a data set from a netcdf file
"""
pass
|
<commit_before><commit_msg>Add a start for an ABC for the API<commit_after>#!/usr/bin/env python
"""
The APIRUS API as an Abstract Base Class
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
class APIRUS:
"""
ABC for the API for Regular, Unstructured and Staggered model output
(APIRUS)
    This ABC serves as a way to document the API, and is designed to be
subclassed by py_ugrid, py_sgrid and any other future implementations.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(self):
"""
        this one is probably going to be very different for each subclass
"""
pass
    @classmethod
    @abstractmethod
def from_netcdf(cls, filename):
"""
load a data set from a netcdf file
"""
pass
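For illustration, a hypothetical concrete subclass satisfying the ABC (all names invented; not part of the proposal):
class SGridDataset(APIRUS):
    """Illustrative implementation for staggered-grid model output."""
    def __init__(self, grid):
        self.grid = grid
    @classmethod
    def from_netcdf(cls, filename):
        # A real implementation would parse `filename` with netCDF4 here.
        raise NotImplementedError("illustrative stub only")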
|
|
1f947d6cfe05f32ffa6566714e5aeeb74543a932
|
osf/migrations/0095_migration_comments.py
|
osf/migrations/0095_migration_comments.py
|
from __future__ import unicode_literals
from django.db import migrations
import logging
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.filter(is_deleted=False)
count = 0
for comment in comments:
if comment.root_target:
if hasattr(comment.root_target.referent, 'is_deleted') and comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
count += 1
if hasattr(comment.root_target.referent, 'deleted') and comment.root_target.referent.deleted:
comment.root_target = None
comment.save()
count += 1
logger.info('Total download number of commnet migrated is {}.'.format(count))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]
|
from __future__ import unicode_literals
import logging
from django.db import migrations
from django_bulk_update.helper import bulk_update
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.exclude(is_deleted=True).select_related('root_target')
logger.info('{} comments to check'.format(comments.count()))
comments_to_update = []
for comment in comments:
if comment.root_target:
root_target_ctype = comment.root_target.content_type
root_target_model_cls = state.get_model(root_target_ctype.app_label, root_target_ctype.model)
root_target = root_target_model_cls.objects.get(pk=comment.root_target.object_id)
if hasattr(root_target, 'is_deleted') and root_target.is_deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
if hasattr(root_target, 'deleted') and root_target.deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
bulk_update(comments_to_update, update_fields=['root_target'])
logger.info('Total comments migrated: {}.'.format(len(comments_to_update)))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]
|
Fix loading root_target in migration
|
Fix loading root_target in migration
Migrations don't allow accessing generic foreign keys, so we
need to load root_target manually.
Also:
* add more logging
* Bulk-update comments
|
Python
|
apache-2.0
|
caseyrollins/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,mfraezz/osf.io,mfraezz/osf.io,felliott/osf.io,sloria/osf.io,aaxelb/osf.io,icereval/osf.io,felliott/osf.io,mattclark/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,erinspace/osf.io,pattisdr/osf.io,sloria/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,mattclark/osf.io,brianjgeiger/osf.io,sloria/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,icereval/osf.io,cslzchen/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,adlius/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,mattclark/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,felliott/osf.io,adlius/osf.io,adlius/osf.io,baylee-d/osf.io,cslzchen/osf.io,aaxelb/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,saradbowman/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,erinspace/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,cslzchen/osf.io,mfraezz/osf.io,felliott/osf.io
|
from __future__ import unicode_literals
from django.db import migrations
import logging
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.filter(is_deleted=False)
count = 0
for comment in comments:
if comment.root_target:
if hasattr(comment.root_target.referent, 'is_deleted') and comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
count += 1
if hasattr(comment.root_target.referent, 'deleted') and comment.root_target.referent.deleted:
comment.root_target = None
comment.save()
count += 1
logger.info('Total download number of commnet migrated is {}.'.format(count))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]Fix loading root_target in migration
Migrations don't allow accessing generic foreign keys, so we
need to load root_target manually.
Also:
* add more logging
* Bulk-update comments
|
from __future__ import unicode_literals
import logging
from django.db import migrations
from django_bulk_update.helper import bulk_update
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.exclude(is_deleted=True).select_related('root_target')
logger.info('{} comments to check'.format(comments.count()))
comments_to_update = []
for comment in comments:
if comment.root_target:
root_target_ctype = comment.root_target.content_type
root_target_model_cls = state.get_model(root_target_ctype.app_label, root_target_ctype.model)
root_target = root_target_model_cls.objects.get(pk=comment.root_target.object_id)
if hasattr(root_target, 'is_deleted') and root_target.is_deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
if hasattr(root_target, 'deleted') and root_target.deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
bulk_update(comments_to_update, update_fields=['root_target'])
logger.info('Total comments migrated: {}.'.format(len(comments_to_update)))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]
|
<commit_before>from __future__ import unicode_literals
from django.db import migrations
import logging
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.filter(is_deleted=False)
count = 0
for comment in comments:
if comment.root_target:
if hasattr(comment.root_target.referent, 'is_deleted') and comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
count += 1
if hasattr(comment.root_target.referent, 'deleted') and comment.root_target.referent.deleted:
comment.root_target = None
comment.save()
count += 1
logger.info('Total download number of commnet migrated is {}.'.format(count))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]<commit_msg>Fix loading root_target in migration
Migrations don't allow accessing generic foreign keys, so we
need to load root_target manually.
Also:
* add more logging
* Bulk-update comments<commit_after>
|
from __future__ import unicode_literals
import logging
from django.db import migrations
from django_bulk_update.helper import bulk_update
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.exclude(is_deleted=True).select_related('root_target')
logger.info('{} comments to check'.format(comments.count()))
comments_to_update = []
for comment in comments:
if comment.root_target:
root_target_ctype = comment.root_target.content_type
root_target_model_cls = state.get_model(root_target_ctype.app_label, root_target_ctype.model)
root_target = root_target_model_cls.objects.get(pk=comment.root_target.object_id)
if hasattr(root_target, 'is_deleted') and root_target.is_deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
if hasattr(root_target, 'deleted') and root_target.deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
bulk_update(comments_to_update, update_fields=['root_target'])
logger.info('Total comments migrated: {}.'.format(len(comments_to_update)))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]
|
from __future__ import unicode_literals
from django.db import migrations
import logging
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.filter(is_deleted=False)
count = 0
for comment in comments:
if comment.root_target:
if hasattr(comment.root_target.referent, 'is_deleted') and comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
count += 1
if hasattr(comment.root_target.referent, 'deleted') and comment.root_target.referent.deleted:
comment.root_target = None
comment.save()
count += 1
logger.info('Total download number of commnet migrated is {}.'.format(count))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]Fix loading root_target in migration
Migrations don't allow accessing generic foreign keys, so we
need to load root_target manually.
Also:
* add more logging
* Bulk-update commentsfrom __future__ import unicode_literals
import logging
from django.db import migrations
from django_bulk_update.helper import bulk_update
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.exclude(is_deleted=True).select_related('root_target')
logger.info('{} comments to check'.format(comments.count()))
comments_to_update = []
for comment in comments:
if comment.root_target:
root_target_ctype = comment.root_target.content_type
root_target_model_cls = state.get_model(root_target_ctype.app_label, root_target_ctype.model)
root_target = root_target_model_cls.objects.get(pk=comment.root_target.object_id)
if hasattr(root_target, 'is_deleted') and root_target.is_deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
if hasattr(root_target, 'deleted') and root_target.deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
bulk_update(comments_to_update, update_fields=['root_target'])
logger.info('Total comments migrated: {}.'.format(len(comments_to_update)))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]
|
<commit_before>from __future__ import unicode_literals
from django.db import migrations
import logging
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.filter(is_deleted=False)
count = 0
for comment in comments:
if comment.root_target:
if hasattr(comment.root_target.referent, 'is_deleted') and comment.root_target.referent.is_deleted:
comment.root_target = None
comment.save()
count += 1
if hasattr(comment.root_target.referent, 'deleted') and comment.root_target.referent.deleted:
comment.root_target = None
comment.save()
count += 1
logger.info('Total download number of commnet migrated is {}.'.format(count))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]<commit_msg>Fix loading root_target in migration
Migrations don't allow accessing generic foreign keys, so we
need to load root_target manually.
Also:
* add more logging
* Bulk-update comments<commit_after>from __future__ import unicode_literals
import logging
from django.db import migrations
from django_bulk_update.helper import bulk_update
logger = logging.getLogger(__file__)
def update_comment_root_target(state, *args, **kwargs):
Comment = state.get_model('osf', 'comment')
comments = Comment.objects.exclude(is_deleted=True).select_related('root_target')
logger.info('{} comments to check'.format(comments.count()))
comments_to_update = []
for comment in comments:
if comment.root_target:
root_target_ctype = comment.root_target.content_type
root_target_model_cls = state.get_model(root_target_ctype.app_label, root_target_ctype.model)
root_target = root_target_model_cls.objects.get(pk=comment.root_target.object_id)
if hasattr(root_target, 'is_deleted') and root_target.is_deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
if hasattr(root_target, 'deleted') and root_target.deleted:
logger.info('{} is deleted. Setting Comment {} root_target to None'.format(root_target, comment.pk))
comment.root_target = None
comments_to_update.append(comment)
bulk_update(comments_to_update, update_fields=['root_target'])
logger.info('Total comments migrated: {}.'.format(len(comments_to_update)))
class Migration(migrations.Migration):
dependencies = [
('osf', '0094_update_preprintprovider_group_auth'),
]
operations = [
migrations.RunPython(update_comment_root_target)
]
|
bdf40e28126e9e18ac9ea069659686e37e70f34d
|
Lib/test/test_future3.py
|
Lib/test/test_future3.py
|
from __future__ import nested_scopes
from __future__ import division
from __future__ import nested_scopes
def f(x):
def g(y):
return y // x
return g
print f(2)(5)
|
Add test case to cover multiple future statements on separate lines of a module.
|
Add test case to cover multiple future statements on separate lines of
a module.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
Add test case to cover multiple future statements on separate lines of
a module.
|
from __future__ import nested_scopes
from __future__ import division
from __future__ import nested_scopes
def f(x):
def g(y):
return y // x
return g
print f(2)(5)
|
<commit_before><commit_msg>Add test case to cover multiple future statements on separate lines of
a module.<commit_after>
|
from __future__ import nested_scopes
from __future__ import division
from __future__ import nested_scopes
def f(x):
def g(y):
return y // x
return g
print f(2)(5)
|
Add test case to cover multiple future statements on separate lines of
a module.from __future__ import nested_scopes
from __future__ import division
from __future__ import nested_scopes
def f(x):
def g(y):
return y // x
return g
print f(2)(5)
|
<commit_before><commit_msg>Add test case to cover multiple future statements on separate lines of
a module.<commit_after>from __future__ import nested_scopes
from __future__ import division
from __future__ import nested_scopes
def f(x):
def g(y):
return y // x
return g
print f(2)(5)
|
|
00fc071687661a0f27e61cef74a3d6af0dadf38b
|
OnionLauncher/torctl.py
|
OnionLauncher/torctl.py
|
import stem.process
def startTor(config):
process = stem.process.launch_tor_with_config(config, take_ownership = True)
return process
def stopTor(process):
if "kil" in dir(process):
process.kill()
|
Add initial code for starting Tor
|
Add initial code for starting Tor
|
Python
|
bsd-2-clause
|
neelchauhan/OnionLauncher
|
Add initial code for starting Tor
|
import stem.process
def startTor(config):
process = stem.process.launch_tor_with_config(config, take_ownership = True)
return process
def stopTor(process):
if "kil" in dir(process):
process.kill()
|
<commit_before><commit_msg>Add initial code for starting Tor<commit_after>
|
import stem.process
def startTor(config):
process = stem.process.launch_tor_with_config(config, take_ownership = True)
return process
def stopTor(process):
if "kil" in dir(process):
process.kill()
|
Add initial code for starting Torimport stem.process
def startTor(config):
process = stem.process.launch_tor_with_config(config, take_ownership = True)
return process
def stopTor(process):
if "kill" in dir(process):
process.kill()
|
<commit_before><commit_msg>Add initial code for starting Tor<commit_after>import stem.process
def startTor(config):
process = stem.process.launch_tor_with_config(config, take_ownership = True)
return process
def stopTor(process):
if "kill" in dir(process):
process.kill()
|
|
85736989c7f3049d9d6c107e9dac385a751d0ba4
|
image_processing/steganography/python/steganography.py
|
image_processing/steganography/python/steganography.py
|
from __future__ import print_function
from PIL import Image
def encode(image, string_to_encode):
image_data = list(image.getdata())
bin_string = "".join(["{0:08b}".format(ord(c)) for c in string_to_encode])
print("Initial pixel values: ")
print(image_data[:25], end=" ")
print(",....")
j = 0
for i in range(0, len(bin_string), 2):
# Replace LSB by bits_to_replace
bits_to_replace = bin_string[i:i+2]
data = bin(image_data[j])[2:]
new_pixel = data[:len(data)-2] + bits_to_replace
image_data[j] = int(new_pixel, 2)
j += 1
print("Encoded pixel values: ")
print(image_data[:25], end=" ")
print(",....")
output_image = Image.new(image.mode, image.size)
output_image.putdata(image_data)
output_image.save('out.bmp')
print("Encoded successfully")
def decode(image):
image_data = list(image.getdata())
output_string = ""
temp_string = ""
bits = 0
for pixel in image_data:
binary_pixel = bin(pixel)[2:]
encoded_value = binary_pixel[-2:]
temp_string += encoded_value
bits += 2
if(bits == 8):
# Check if character is end of message
if chr(int(temp_string,2)) == '$':
break
output_string += temp_string
temp_string = ""
bits = 0
# Now convert binary string to ascii string
ans = ""
for i in range(0, len(output_string), 8):
ans += chr(int(output_string[i:i+8],2))
print("Decoded successfully: " + ans)
a = Image.open('bird.bmp', 'r')
print("Enter string to encode ended by $: ")
string_to_encode = raw_input()
encode(a, string_to_encode)
b = Image.open('out.bmp', 'r')
decode(b)
|
Change folder structure. Add Python folder
|
Change folder structure. Add Python folder
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Change folder structure. Add Python folder
|
from __future__ import print_function
from PIL import Image
def encode(image, string_to_encode):
image_data = list(image.getdata())
bin_string = "".join(["{0:08b}".format(ord(c)) for c in string_to_encode])
print("Initial pixel values: ")
print(image_data[:25], end=" ")
print(",....")
j = 0
for i in range(0, len(bin_string), 2):
# Replace LSB by bits_to_replace
bits_to_replace = bin_string[i:i+2]
data = bin(image_data[j])[2:]
new_pixel = data[:len(data)-2] + bits_to_replace
image_data[j] = int(new_pixel, 2)
j += 1
print("Encoded pixel values: ")
print(image_data[:25], end=" ")
print(",....")
output_image = Image.new(image.mode, image.size)
output_image.putdata(image_data)
output_image.save('out.bmp')
print("Encoded successfully")
def decode(image):
image_data = list(image.getdata())
output_string = ""
temp_string = ""
bits = 0
for pixel in image_data:
binary_pixel = bin(pixel)[2:]
encoded_value = binary_pixel[-2:]
temp_string += encoded_value
bits += 2
if(bits == 8):
# Check if character is end of message
if chr(int(temp_string,2)) == '$':
break
output_string += temp_string
temp_string = ""
bits = 0
# Now convert binary string to ascii string
ans = ""
for i in range(0, len(output_string), 8):
ans += chr(int(output_string[i:i+8],2))
print("Decoded successfully: " + ans)
a = Image.open('bird.bmp', 'r')
print("Enter string to encode ended by $: ")
string_to_encode = raw_input()
encode(a, string_to_encode)
b = Image.open('out.bmp', 'r')
decode(b)
|
<commit_before><commit_msg>Change folder structure. Add Python folder<commit_after>
|
from __future__ import print_function
from PIL import Image
def encode(image, string_to_encode):
image_data = list(image.getdata())
bin_string = "".join(["{0:08b}".format(ord(c)) for c in string_to_encode])
print("Initial pixel values: ")
print(image_data[:25], end=" ")
print(",....")
j = 0
for i in range(0, len(bin_string), 2):
# Replace LSB by bits_to_replace
bits_to_replace = bin_string[i:i+2]
data = bin(image_data[j])[2:]
new_pixel = data[:len(data)-2] + bits_to_replace
image_data[j] = int(new_pixel, 2)
j += 1
print("Encoded pixel values: ")
print(image_data[:25], end=" ")
print(",....")
output_image = Image.new(image.mode, image.size)
output_image.putdata(image_data)
output_image.save('out.bmp')
print("Encoded successfully")
def decode(image):
image_data = list(image.getdata())
output_string = ""
temp_string = ""
bits = 0
for pixel in image_data:
binary_pixel = bin(pixel)[2:]
encoded_value = binary_pixel[-2:]
temp_string += encoded_value
bits += 2
if(bits == 8):
# Check if character is end of message
if chr(int(temp_string,2)) == '$':
break
output_string += temp_string
temp_string = ""
bits = 0
# Now convert binary string to ascii string
ans = ""
for i in range(0, len(output_string), 8):
ans += chr(int(output_string[i:i+8],2))
print("Decoded successfully: " + ans)
a = Image.open('bird.bmp', 'r')
print("Enter string to encode ended by $: ")
string_to_encode = raw_input()
encode(a, string_to_encode)
b = Image.open('out.bmp', 'r')
decode(b)
|
Change folder structure. Add Python folderfrom __future__ import print_function
from PIL import Image
def encode(image, string_to_encode):
image_data = list(image.getdata())
bin_string = "".join(["{0:08b}".format(ord(c)) for c in string_to_encode])
print("Initial pixel values: ")
print(image_data[:25], end=" ")
print(",....")
j = 0
for i in range(0, len(bin_string), 2):
# Replace LSB by bits_to_replace
bits_to_replace = bin_string[i:i+2]
data = bin(image_data[j])[2:]
new_pixel = data[:len(data)-2] + bits_to_replace
image_data[j] = int(new_pixel, 2)
j += 1
print("Encoded pixel values: ")
print(image_data[:25], end=" ")
print(",....")
output_image = Image.new(image.mode, image.size)
output_image.putdata(image_data)
output_image.save('out.bmp')
print("Encoded successfully")
def decode(image):
image_data = list(image.getdata())
output_string = ""
temp_string = ""
bits = 0
for pixel in image_data:
binary_pixel = bin(pixel)[2:]
encoded_value = binary_pixel[-2:]
temp_string += encoded_value
bits += 2
if(bits == 8):
# Check if character is end of message
if chr(int(temp_string,2)) == '$':
break
output_string += temp_string
temp_string = ""
bits = 0
# Now convert binary string to ascii string
ans = ""
for i in range(0, len(output_string), 8):
ans += chr(int(output_string[i:i+8],2))
print("Decoded successfully: " + ans)
a = Image.open('bird.bmp', 'r')
print("Enter string to encode ended by $: ")
string_to_encode = raw_input()
encode(a, string_to_encode)
b = Image.open('out.bmp', 'r')
decode(b)
|
<commit_before><commit_msg>Change folder structure. Add Python folder<commit_after>from __future__ import print_function
from PIL import Image
def encode(image, string_to_encode):
image_data = list(image.getdata())
bin_string = "".join(["{0:08b}".format(ord(c)) for c in string_to_encode])
print("Initial pixel values: ")
print(image_data[:25], end=" ")
print(",....")
j = 0
for i in range(0, len(bin_string), 2):
# Replace LSB by bits_to_replace
bits_to_replace = bin_string[i:i+2]
data = bin(image_data[j])[2:]
new_pixel = data[:len(data)-2] + bits_to_replace
image_data[j] = int(new_pixel, 2)
j += 1
print("Encoded pixel values: ")
print(image_data[:25], end=" ")
print(",....")
output_image = Image.new(image.mode, image.size)
output_image.putdata(image_data)
output_image.save('out.bmp')
print("Encoded successfully")
def decode(image):
image_data = list(image.getdata())
output_string = ""
temp_string = ""
bits = 0
for pixel in image_data:
binary_pixel = bin(pixel)[2:]
encoded_value = binary_pixel[-2:]
temp_string += encoded_value
bits += 2
if(bits == 8):
# Check if character is end of message
if chr(int(temp_string,2)) == '$':
break
output_string += temp_string
temp_string = ""
bits = 0
# Now convert binary string to ascii string
ans = ""
for i in range(0, len(output_string), 8):
ans += chr(int(output_string[i:i+8],2))
print("Decoded successfully: " + ans)
a = Image.open('bird.bmp', 'r')
print("Enter string to encode ended by $: ")
string_to_encode = raw_input()
encode(a, string_to_encode)
b = Image.open('out.bmp', 'r')
decode(b)
|
|
e81f63798b84d26a29db79603efb0458ac25d374
|
accounts/migrations/0006_auto_20170325_1024.py
|
accounts/migrations/0006_auto_20170325_1024.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 10:24
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import migrations
from accounts.models import Account
def link_user_accounts(apps, schema_editor):
'''
Link every Django user objects to a new Account object
'''
for user in User.objects.all():
if user.account is None:
Account.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_account_cleaning'),
]
operations = [
migrations.RunPython(link_user_accounts),
]
|
Add auto-link between User and Account models in migration
|
Add auto-link between User and Account models in migration
|
Python
|
mit
|
Atilla106/members.atilla.org,Atilla106/members.atilla.org,Atilla106/members.atilla.org,Atilla106/members.atilla.org,Atilla106/members.atilla.org
|
Add auto-link between User and Account models in migration
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 10:24
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import migrations
from accounts.models import Account
def link_user_accounts(apps, schema_editor):
'''
Link every Django user objects to a new Account object
'''
for user in User.objects.all():
if user.account is None:
Account.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_account_cleaning'),
]
operations = [
migrations.RunPython(link_user_accounts),
]
|
<commit_before><commit_msg>Add auto-link between User and Account models in migration<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 10:24
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import migrations
from accounts.models import Account
def link_user_accounts(apps, schema_editor):
'''
Link every Django user objects to a new Account object
'''
for user in User.objects.all():
if user.account is None:
Account.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_account_cleaning'),
]
operations = [
migrations.RunPython(link_user_accounts),
]
|
Add auto-link between User and Account models in migration# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 10:24
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import migrations
from accounts.models import Account
def link_user_accounts(apps, schema_editor):
'''
Link every Django user objects to a new Account object
'''
for user in User.objects.all():
if user.account is None:
Account.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_account_cleaning'),
]
operations = [
migrations.RunPython(link_user_accounts),
]
|
<commit_before><commit_msg>Add auto-link between User and Account models in migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-25 10:24
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import migrations
from accounts.models import Account
def link_user_accounts(apps, schema_editor):
'''
Link every Django user objects to a new Account object
'''
for user in User.objects.all():
if user.account is None:
Account.objects.create(user=user)
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_account_cleaning'),
]
operations = [
migrations.RunPython(link_user_accounts),
]
|
|
3c49ca199fe41f0ca434b0fdc00ab76e7b134289
|
src/ggrc/migrations/versions/20160510122526_44ebc240800b_remove_response_relationships.py
|
src/ggrc/migrations/versions/20160510122526_44ebc240800b_remove_response_relationships.py
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Remove relationships related to deleted response objects
Create Date: 2016-05-10 12:25:26.383695
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '44ebc240800b'
down_revision = '3715694bd315'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
'DELETE FROM relationships '
'WHERE source_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse",'
' "PopulationSampleResponse") '
' OR destination_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse", '
' "PopulationSampleResponse")')
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
Fix error message when expanding data asset
|
Fix error message when expanding data asset
|
Python
|
apache-2.0
|
selahssea/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core
|
Fix error message when expanding data asset
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Remove relationships related to deleted response objects
Create Date: 2016-05-10 12:25:26.383695
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '44ebc240800b'
down_revision = '3715694bd315'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
'DELETE FROM relationships '
'WHERE source_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse",'
' "PopulationSampleResponse") '
' OR destination_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse", '
' "PopulationSampleResponse")')
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
<commit_before><commit_msg>Fix error message when expanding data asset<commit_after>
|
# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Remove relationships related to deleted response objects
Create Date: 2016-05-10 12:25:26.383695
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '44ebc240800b'
down_revision = '3715694bd315'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
'DELETE FROM relationships '
'WHERE source_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse",'
' "PopulationSampleResponse") '
' OR destination_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse", '
' "PopulationSampleResponse")')
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
Fix error message when expanding data asset# Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Remove relationships related to deleted response objects
Create Date: 2016-05-10 12:25:26.383695
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '44ebc240800b'
down_revision = '3715694bd315'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
'DELETE FROM relationships '
'WHERE source_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse",'
' "PopulationSampleResponse") '
' OR destination_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse", '
' "PopulationSampleResponse")')
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
<commit_before><commit_msg>Fix error message when expanding data asset<commit_after># Copyright (C) 2016 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: dan@reciprocitylabs.com
# Maintained By: peter@reciprocitylabs.com
"""
Remove relationships related to deleted response objects
Create Date: 2016-05-10 12:25:26.383695
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '44ebc240800b'
down_revision = '3715694bd315'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
op.execute(
'DELETE FROM relationships '
'WHERE source_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse",'
' "PopulationSampleResponse") '
' OR destination_type IN '
' ("Response", "DocumentationResponse", "InterviewResponse", '
' "PopulationSampleResponse")')
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
pass
|
|
e7aa94722c3657fb4b0dfacb4c1e432438e4670a
|
flexget/tests/test_move.py
|
flexget/tests/test_move.py
|
import pytest
@pytest.mark.usefixtures('tmpdir')
class TestMove:
config = """
tasks:
test_move:
mock:
- title: a movie
location: __tmp__/movie.mkv
accept_all: yes
move:
# Take advantage that path validation allows non-existent dirs if they are jinja
to: __tmp__/{{ 'newdir' }}/
"""
@pytest.mark.filecopy('movie.mkv', '__tmp__/movie.mkv')
def test_move(self, execute_task, tmpdir):
assert (tmpdir / 'movie.mkv').exists()
task = execute_task('test_move')
assert not (tmpdir / 'movie.mkv').exists()
assert (tmpdir / 'newdir/movie.mkv').exists()
|
Add very basic move plugin test
|
Add very basic move plugin test
|
Python
|
mit
|
crawln45/Flexget,ianstalk/Flexget,ianstalk/Flexget,crawln45/Flexget,Flexget/Flexget,malkavi/Flexget,Flexget/Flexget,malkavi/Flexget,ianstalk/Flexget,crawln45/Flexget,malkavi/Flexget,Flexget/Flexget,malkavi/Flexget,crawln45/Flexget,Flexget/Flexget
|
Add very basic move plugin test
|
import pytest
@pytest.mark.usefixtures('tmpdir')
class TestMove:
config = """
tasks:
test_move:
mock:
- title: a movie
location: __tmp__/movie.mkv
accept_all: yes
move:
# Take advantage that path validation allows non-existent dirs if they are jinja
to: __tmp__/{{ 'newdir' }}/
"""
@pytest.mark.filecopy('movie.mkv', '__tmp__/movie.mkv')
def test_move(self, execute_task, tmpdir):
assert (tmpdir / 'movie.mkv').exists()
task = execute_task('test_move')
assert not (tmpdir / 'movie.mkv').exists()
assert (tmpdir / 'newdir/movie.mkv').exists()
|
<commit_before><commit_msg>Add very basic move plugin test<commit_after>
|
import pytest
@pytest.mark.usefixtures('tmpdir')
class TestMove:
config = """
tasks:
test_move:
mock:
- title: a movie
location: __tmp__/movie.mkv
accept_all: yes
move:
# Take advantage that path validation allows non-existent dirs if they are jinja
to: __tmp__/{{ 'newdir' }}/
"""
@pytest.mark.filecopy('movie.mkv', '__tmp__/movie.mkv')
def test_move(self, execute_task, tmpdir):
assert (tmpdir / 'movie.mkv').exists()
task = execute_task('test_move')
assert not (tmpdir / 'movie.mkv').exists()
assert (tmpdir / 'newdir/movie.mkv').exists()
|
Add very basic move plugin testimport pytest
@pytest.mark.usefixtures('tmpdir')
class TestMove:
config = """
tasks:
test_move:
mock:
- title: a movie
location: __tmp__/movie.mkv
accept_all: yes
move:
# Take advantage that path validation allows non-existent dirs if they are jinja
to: __tmp__/{{ 'newdir' }}/
"""
@pytest.mark.filecopy('movie.mkv', '__tmp__/movie.mkv')
def test_move(self, execute_task, tmpdir):
assert (tmpdir / 'movie.mkv').exists()
task = execute_task('test_move')
assert not (tmpdir / 'movie.mkv').exists()
assert (tmpdir / 'newdir/movie.mkv').exists()
|
<commit_before><commit_msg>Add very basic move plugin test<commit_after>import pytest
@pytest.mark.usefixtures('tmpdir')
class TestMove:
config = """
tasks:
test_move:
mock:
- title: a movie
location: __tmp__/movie.mkv
accept_all: yes
move:
# Take advantage that path validation allows non-existent dirs if they are jinja
to: __tmp__/{{ 'newdir' }}/
"""
@pytest.mark.filecopy('movie.mkv', '__tmp__/movie.mkv')
def test_move(self, execute_task, tmpdir):
assert (tmpdir / 'movie.mkv').exists()
task = execute_task('test_move')
assert not (tmpdir / 'movie.mkv').exists()
assert (tmpdir / 'newdir/movie.mkv').exists()
|
|
f7f8d7c6a92aa5d2d491674df14df46768f8b037
|
show_sample_entry.py
|
show_sample_entry.py
|
#!/usr/bin/python3
# Copyright (c) 2016 Eric Eisenhart
# This software is released under an MIT-style license.
# See LICENSE.md for full details.
import sys
import pprint
import feedparser
import argparse
feedparser.PREFERRED_XML_PARSERS.remove('drv_libxml2')
# 0 is command itself:
if len(sys.argv) == 2:
feed_url = sys.argv[1]
feed_data = feedparser.parse(feed_url)
# depth=1 because we can't handle anything deeper
pp = pprint.PrettyPrinter(indent=4, depth=1)
pp.pprint(feed_data.entries[0])
else:
print("Give me 1 feed URL on the command-line, and I'll give the first entry from it.")
|
Add helper tool to help with examining entries in a feed.
|
Add helper tool to help with examining entries in a feed.
|
Python
|
mit
|
freiheit/discord_feedbot,freiheit/discord_rss_bot,freiheit/discord_feedbot,freiheit/discord_rss_bot
|
Add helper tool to help with examining entries in a feed.
|
#!/usr/bin/python3
# Copyright (c) 2016 Eric Eisenhart
# This software is released under an MIT-style license.
# See LICENSE.md for full details.
import sys
import pprint
import feedparser
import argparse
feedparser.PREFERRED_XML_PARSERS.remove('drv_libxml2')
# 0 is command itself:
if len(sys.argv) == 2:
feed_url = sys.argv[1]
feed_data = feedparser.parse(feed_url)
# depth=1 because we can't handle anything deeper
pp = pprint.PrettyPrinter(indent=4, depth=1)
pp.pprint(feed_data.entries[0])
else:
print("Give me 1 feed URL on the command-line, and I'll give the first entry from it.")
|
<commit_before><commit_msg>Add helper tool to help with examining entries in a feed.<commit_after>
|
#!/usr/bin/python3
# Copyright (c) 2016 Eric Eisenhart
# This software is released under an MIT-style license.
# See LICENSE.md for full details.
import sys
import pprint
import feedparser
import argparse
feedparser.PREFERRED_XML_PARSERS.remove('drv_libxml2')
# 0 is command itself:
if len(sys.argv) == 2:
feed_url = sys.argv[1]
feed_data = feedparser.parse(feed_url)
# depth=1 because we can't handle anything deeper
pp = pprint.PrettyPrinter(indent=4, depth=1)
pp.pprint(feed_data.entries[0])
else:
print("Give me 1 feed URL on the command-line, and I'll give the first entry from it.")
|
Add helper tool to help with examining entries in a feed.#!/usr/bin/python3
# Copyright (c) 2016 Eric Eisenhart
# This software is released under an MIT-style license.
# See LICENSE.md for full details.
import sys
import pprint
import feedparser
import argparse
feedparser.PREFERRED_XML_PARSERS.remove('drv_libxml2')
# 0 is command itself:
if len(sys.argv) == 2:
feed_url = sys.argv[1]
feed_data = feedparser.parse(feed_url)
# depth=1 because we can't handle anything deeper
pp = pprint.PrettyPrinter(indent=4, depth=1)
pp.pprint(feed_data.entries[0])
else:
print("Give me 1 feed URL on the command-line, and I'll give the first entry from it.")
|
<commit_before><commit_msg>Add helper tool to help with examining entries in a feed.<commit_after>#!/usr/bin/python3
# Copyright (c) 2016 Eric Eisenhart
# This software is released under an MIT-style license.
# See LICENSE.md for full details.
import sys
import pprint
import feedparser
import argparse
feedparser.PREFERRED_XML_PARSERS.remove('drv_libxml2')
# 0 is command itself:
if len(sys.argv) == 2:
feed_url = sys.argv[1]
feed_data = feedparser.parse(feed_url)
# depth=1 because we can't handle anything deeper
pp = pprint.PrettyPrinter(indent=4, depth=1)
pp.pprint(feed_data.entries[0])
else:
print("Give me 1 feed URL on the command-line, and I'll give the first entry from it.")
|
|
31e1b7837f101ead043b1d85eaa281d32c00c13b
|
django_auth_policy/migrations/0003_auto_20150410_0408.py
|
django_auth_policy/migrations/0003_auto_20150410_0408.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_auth_policy', '0002_users_nullable'),
]
operations = [
migrations.AlterField(
model_name='passwordchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='by_user_repr',
field=models.CharField(max_length=200, verbose_name='by user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
]
|
Add migration which no longer allows blank user representation fields
|
Add migration which no longer allows blank user representation fields
|
Python
|
bsd-3-clause
|
Dreamsolution/django-auth-policy,mcella/django-auth-policy,Dreamsolution/django-auth-policy,mcella/django-auth-policy
|
Add migration which no longer allows blank user representation fields
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_auth_policy', '0002_users_nullable'),
]
operations = [
migrations.AlterField(
model_name='passwordchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='by_user_repr',
field=models.CharField(max_length=200, verbose_name='by user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration which no longer allows blank user representation fields<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_auth_policy', '0002_users_nullable'),
]
operations = [
migrations.AlterField(
model_name='passwordchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='by_user_repr',
field=models.CharField(max_length=200, verbose_name='by user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
]
|
Add migration which no longer allows blank user representation fields# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_auth_policy', '0002_users_nullable'),
]
operations = [
migrations.AlterField(
model_name='passwordchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='by_user_repr',
field=models.CharField(max_length=200, verbose_name='by user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add migration which no longer allows blank user representation fields<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('django_auth_policy', '0002_users_nullable'),
]
operations = [
migrations.AlterField(
model_name='passwordchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='by_user_repr',
field=models.CharField(max_length=200, verbose_name='by user'),
preserve_default=True,
),
migrations.AlterField(
model_name='userchange',
name='user_repr',
field=models.CharField(max_length=200, verbose_name='user'),
preserve_default=True,
),
]
|
|
15013cde32597c6414ba731461d881080083bf24
|
lib/ansible/modules/cloud/openstack/os_server_facts.py
|
lib/ansible/modules/cloud/openstack/os_server_facts.py
|
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
extends_documentation_fragment: openstack
version_added: "1.10"
description:
- Retrieve facts about a server instance from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
options:
server:
description:
- Name or ID of the instance
required: true
requirements: ["shade"]
'''
EXAMPLES = '''
# Gather facts about a previously created server named vm1
- os_server_facts:
cloud: rax-dfw
server: vm1
- debug: var=openstack
'''
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
hostvars = dict(openstack=meta.get_hostvars_from_server(
cloud, server))
module.exit_json(changed=False, ansible_facts=hostvars)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
|
Add OpenStack Server facts module
|
Add OpenStack Server facts module
|
Python
|
mit
|
thaim/ansible,thaim/ansible
|
Add OpenStack Server facts module
|
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
extends_documentation_fragment: openstack
version_added: "1.10"
description:
- Retrieve facts about a server instance from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
options:
server:
description:
- Name or ID of the instance
required: true
requirements: ["shade"]
'''
EXAMPLES = '''
# Gather facts about a previously created server named vm1
- os_server_facts:
cloud: rax-dfw
server: vm1
- debug: var=openstack
'''
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
hostvars = dict(openstack=meta.get_hostvars_from_server(
cloud, server))
module.exit_json(changed=False, ansible_facts=hostvars)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
|
<commit_before><commit_msg>Add OpenStack Server facts module<commit_after>
|
#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
extends_documentation_fragment: openstack
version_added: "1.10"
description:
- Retrieve facts about a server instance from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
options:
server:
description:
- Name or ID of the instance
required: true
requirements: ["shade"]
'''
EXAMPLES = '''
# Gather facts about a previously created server named vm1
- os_server_facts:
cloud: rax-dfw
server: vm1
- debug: var=openstack
'''
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
hostvars = dict(openstack=meta.get_hostvars_from_server(
cloud, server))
module.exit_json(changed=False, ansible_facts=hostvars)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
|
Add OpenStack Server facts module#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
extends_documentation_fragment: openstack
version_added: "1.10"
description:
- Retrieve facts about a server instance from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
options:
server:
description:
- Name or ID of the instance
required: true
requirements: ["shade"]
'''
EXAMPLES = '''
# Gather facts about a previously created server named vm1
- os_server_facts:
cloud: rax-dfw
server: vm1
- debug: var=openstack
'''
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
hostvars = dict(openstack=meta.get_hostvars_from_server(
cloud, server))
module.exit_json(changed=False, ansible_facts=hostvars)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
|
<commit_before><commit_msg>Add OpenStack Server facts module<commit_after>#!/usr/bin/python
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
from shade import meta
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
extends_documentation_fragment: openstack
version_added: "1.10"
description:
- Retrieve facts about a server instance from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
options:
server:
description:
- Name or ID of the instance
required: true
requirements: ["shade"]
'''
EXAMPLES = '''
# Gather facts about a previously created server named vm1
- os_server_facts:
cloud: rax-dfw
server: vm1
- debug: var=openstack
'''
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
hostvars = dict(openstack=meta.get_hostvars_from_server(
cloud, server))
module.exit_json(changed=False, ansible_facts=hostvars)
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
|
|
c11b0f905ad844ef95bd779d3277318e357fdef0
|
oscar/management/commands/oscar_fork_statics.py
|
oscar/management/commands/oscar_fork_statics.py
|
import logging
import os
import shutil
from django.db.models import get_model
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
ProductAlert = get_model('customer', 'ProductAlert')
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Copy Oscar's statics into local project so they can be used as a base for
styling a new site.
"""
args = '<destination folder>'
help = "Copy Oscar's static files"
def handle(self, *args, **options):
# Determine where to copy to
folder = args[0] if args else 'static'
if not folder.startswith('/'):
destination = os.path.join(os.getcwd(), folder)
else:
destination = folder
if os.path.exists(destination):
raise CommandError(
"The folder %s already exists - aborting!" % destination)
source = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../../static'))
print "Copying Oscar's static files from %s to %s" % (source, destination)
shutil.copytree(source, destination)
# Check if this new folder is in STATICFILES_DIRS
if destination not in settings.STATICFILES_DIRS:
print ("You need to add %s to STATICFILES_DIRS in order for your "
"local overrides to be picked up") % destination
|
Add management command for forking Oscar's statics
|
Add management command for forking Oscar's statics
This makes it easier for FEDs to get started styling a new Oscar
project. It copies all of Oscar's statics into a specified folder and
checks whether the new folder is in STATICFILES_DIRS.
Fixes #463
|
Python
|
bsd-3-clause
|
jlmadurga/django-oscar,spartonia/django-oscar,WadeYuChen/django-oscar,makielab/django-oscar,josesanch/django-oscar,taedori81/django-oscar,faratro/django-oscar,DrOctogon/unwash_ecom,anentropic/django-oscar,mexeniz/django-oscar,kapt/django-oscar,WillisXChen/django-oscar,jinnykoo/wuyisj.com,sasha0/django-oscar,ahmetdaglarbas/e-commerce,anentropic/django-oscar,amirrpp/django-oscar,vovanbo/django-oscar,DrOctogon/unwash_ecom,WillisXChen/django-oscar,rocopartners/django-oscar,kapt/django-oscar,django-oscar/django-oscar,bschuon/django-oscar,manevant/django-oscar,nfletton/django-oscar,elliotthill/django-oscar,makielab/django-oscar,WillisXChen/django-oscar,nickpack/django-oscar,ahmetdaglarbas/e-commerce,dongguangming/django-oscar,rocopartners/django-oscar,sonofatailor/django-oscar,nfletton/django-oscar,makielab/django-oscar,monikasulik/django-oscar,pasqualguerrero/django-oscar,kapari/django-oscar,monikasulik/django-oscar,saadatqadri/django-oscar,Jannes123/django-oscar,sasha0/django-oscar,mexeniz/django-oscar,jinnykoo/wuyisj,WadeYuChen/django-oscar,thechampanurag/django-oscar,kapari/django-oscar,ahmetdaglarbas/e-commerce,faratro/django-oscar,lijoantony/django-oscar,rocopartners/django-oscar,kapt/django-oscar,john-parton/django-oscar,Jannes123/django-oscar,adamend/django-oscar,bnprk/django-oscar,amirrpp/django-oscar,anentropic/django-oscar,marcoantoniooliveira/labweb,michaelkuty/django-oscar,taedori81/django-oscar,sasha0/django-oscar,jmt4/django-oscar,WillisXChen/django-oscar,ademuk/django-oscar,thechampanurag/django-oscar,pasqualguerrero/django-oscar,QLGu/django-oscar,bnprk/django-oscar,itbabu/django-oscar,eddiep1101/django-oscar,jmt4/django-oscar,ademuk/django-oscar,spartonia/django-oscar,eddiep1101/django-oscar,lijoantony/django-oscar,bnprk/django-oscar,ka7eh/django-oscar,machtfit/django-oscar,john-parton/django-oscar,bschuon/django-oscar,Jannes123/django-oscar,okfish/django-oscar,rocopartners/django-oscar,vovanbo/django-oscar,manevant/django-oscar,pdonadeo/django-oscar,marcoantoniooliveira/labweb,WadeYuChen/django-oscar,john-parton/django-oscar,jinnykoo/wuyisj,django-oscar/django-oscar,ademuk/django-oscar,jlmadurga/django-oscar,solarissmoke/django-oscar,sasha0/django-oscar,WillisXChen/django-oscar,MatthewWilkes/django-oscar,adamend/django-oscar,dongguangming/django-oscar,binarydud/django-oscar,MatthewWilkes/django-oscar,Bogh/django-oscar,nickpack/django-oscar,okfish/django-oscar,bschuon/django-oscar,amirrpp/django-oscar,john-parton/django-oscar,jinnykoo/christmas,binarydud/django-oscar,nickpack/django-oscar,jinnykoo/wuyisj.com,pdonadeo/django-oscar,nfletton/django-oscar,makielab/django-oscar,spartonia/django-oscar,Idematica/django-oscar,itbabu/django-oscar,DrOctogon/unwash_ecom,MatthewWilkes/django-oscar,pdonadeo/django-oscar,michaelkuty/django-oscar,adamend/django-oscar,Bogh/django-oscar,elliotthill/django-oscar,Idematica/django-oscar,michaelkuty/django-oscar,manevant/django-oscar,ka7eh/django-oscar,bnprk/django-oscar,pdonadeo/django-oscar,jinnykoo/wuyisj,taedori81/django-oscar,jinnykoo/wuyisj.com,machtfit/django-oscar,QLGu/django-oscar,ka7eh/django-oscar,MatthewWilkes/django-oscar,nickpack/django-oscar,solarissmoke/django-oscar,itbabu/django-oscar,itbabu/django-oscar,jmt4/django-oscar,Idematica/django-oscar,ka7eh/django-oscar,jlmadurga/django-oscar,mexeniz/django-oscar,jinnykoo/christmas,jmt4/django-oscar,marcoantoniooliveira/labweb,nfletton/django-oscar,monikasulik/django-oscar,anentropic/django-oscar,ahmetdaglarbas/e-commerce,kapari/django-oscar,pasqualguerrero/django-oscar,okfish/django-oscar,adamend/django-oscar,django-oscar/django-oscar,jinnykoo/christmas,josesanch/django-oscar,dongguangming/django-oscar,sonofatailor/django-oscar,machtfit/django-oscar,QLGu/django-oscar,solarissmoke/django-oscar,kapari/django-oscar,django-oscar/django-oscar,taedori81/django-oscar,jinnykoo/wuyisj.com,lijoantony/django-oscar,lijoantony/django-oscar,binarydud/django-oscar,marcoantoniooliveira/labweb,jlmadurga/django-oscar,bschuon/django-oscar,mexeniz/django-oscar,Bogh/django-oscar,michaelkuty/django-oscar,binarydud/django-oscar
|
Add management command for forking Oscar's statics
This makes it easier for FEDs to get started styling a new Oscar
project. It copies all of Oscar's statics into a specified folder and
checks whether the new folder is in STATICFILES_DIRS.
Fixes #463
|
import logging
import os
import shutil
from django.db.models import get_model
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
ProductAlert = get_model('customer', 'ProductAlert')
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Copy Oscar's statics into local project so they can be used as a base for
styling a new site.
"""
args = '<destination folder>'
help = "Copy Oscar's static files"
def handle(self, *args, **options):
# Determine where to copy to
folder = args[0] if args else 'static'
if not folder.startswith('/'):
destination = os.path.join(os.getcwd(), folder)
else:
destination = folder
if os.path.exists(destination):
raise CommandError(
"The folder %s already exists - aborting!" % destination)
source = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../../static'))
print "Copying Oscar's static files from %s to %s" % (source, destination)
shutil.copytree(source, destination)
# Check if this new folder is in STATICFILES_DIRS
if destination not in settings.STATICFILES_DIRS:
print ("You need to add %s to STATICFILES_DIRS in order for your "
"local overrides to be picked up") % destination
|
<commit_before><commit_msg>Add management command for forking Oscar's statics
This makes it easier for FEDs to get started styling a new Oscar
project. It copies all of Oscar's statics into a specified folder and
checks whether the new folder is in STATICFILES_DIRS.
Fixes #463<commit_after>
|
import logging
import os
import shutil
from django.db.models import get_model
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
ProductAlert = get_model('customer', 'ProductAlert')
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Copy Oscar's statics into local project so they can be used as a base for
styling a new site.
"""
args = '<destination folder>'
help = "Copy Oscar's static files"
def handle(self, *args, **options):
# Determine where to copy to
folder = args[0] if args else 'static'
if not folder.startswith('/'):
destination = os.path.join(os.getcwd(), folder)
else:
destination = folder
if os.path.exists(destination):
raise CommandError(
"The folder %s already exists - aborting!" % destination)
source = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../../static'))
print "Copying Oscar's static files from %s to %s" % (source, destination)
shutil.copytree(source, destination)
# Check if this new folder is in STATICFILES_DIRS
if destination not in settings.STATICFILES_DIRS:
print ("You need to add %s to STATICFILES_DIRS in order for your "
"local overrides to be picked up") % destination
|
Add management command for forking Oscar's statics
This makes it easier for FEDs to get started styling a new Oscar
project. It copies all of Oscar's statics into a specified folder and
checks whether the new folder is in STATICFILES_DIRS.
Fixes #463import logging
import os
import shutil
from django.db.models import get_model
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
ProductAlert = get_model('customer', 'ProductAlert')
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Copy Oscar's statics into local project so they can be used as a base for
styling a new site.
"""
args = '<destination folder>'
help = "Copy Oscar's static files"
def handle(self, *args, **options):
# Determine where to copy to
folder = args[0] if args else 'static'
if not folder.startswith('/'):
destination = os.path.join(os.getcwd(), folder)
else:
destination = folder
if os.path.exists(destination):
raise CommandError(
"The folder %s already exists - aborting!" % destination)
source = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../../static'))
print "Copying Oscar's static files from %s to %s" % (source, destination)
shutil.copytree(source, destination)
# Check if this new folder is in STATICFILES_DIRS
if destination not in settings.STATICFILES_DIRS:
print ("You need to add %s to STATICFILES_DIRS in order for your "
"local overrides to be picked up") % destination
|
<commit_before><commit_msg>Add management command for forking Oscar's statics
This makes it easier for FEDs to get started styling a new Oscar
project. It copies all of Oscar's statics into a specified folder and
checks whether the new folder is in STATICFILES_DIRS.
Fixes #463<commit_after>import logging
import os
import shutil
from django.db.models import get_model
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
ProductAlert = get_model('customer', 'ProductAlert')
logger = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Copy Oscar's statics into local project so they can be used as a base for
styling a new site.
"""
args = '<destination folder>'
help = "Copy Oscar's static files"
def handle(self, *args, **options):
# Determine where to copy to
folder = args[0] if args else 'static'
if not folder.startswith('/'):
destination = os.path.join(os.getcwd(), folder)
else:
destination = folder
if os.path.exists(destination):
raise CommandError(
"The folder %s already exists - aborting!" % destination)
source = os.path.realpath(
os.path.join(os.path.dirname(__file__), '../../static'))
print "Copying Oscar's static files from %s to %s" % (source, destination)
shutil.copytree(source, destination)
# Check if this new folder is in STATICFILES_DIRS
if destination not in settings.STATICFILES_DIRS:
print ("You need to add %s to STATICFILES_DIRS in order for your "
"local overrides to be picked up") % destination
|
|
a4eb88bcd351c65aee85a2586b1c2eeb52dcd0e4
|
tests/terminal_tests/DifferentGrammarsTest.py
|
tests/terminal_tests/DifferentGrammarsTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class TerminalCreationTest(TestCase):
def test_noneClass(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass)
self.assertNotEqual(ter1, ter2)
def test_noneInstance(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_noneString(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_noneNumber(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_classInstance(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_classString(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_classNumber(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_instanceString(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_instanceNumber(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_stringNumber(self):
ter1 = Terminal(0, 'str')
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_differentInstances(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
|
Add tests for Terminal when grammar is different so terminals should not be equal
|
Add tests for Terminal when grammar is different so terminals should not be equal
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add tests for Terminal when grammar is different so terminals should not be equal
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class TerminalCreationTest(TestCase):
def test_noneClass(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass)
self.assertNotEqual(ter1, ter2)
def test_noneInstance(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_noneString(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_noneNumber(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_classInstance(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_classString(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_classNumber(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_instanceString(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_instanceNumber(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_stringNumber(self):
ter1 = Terminal(0, 'str')
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_differentInstances(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
|
<commit_before><commit_msg>Add tests for Terminal when grammar is different so terminals should not be equal<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class TerminalCreationTest(TestCase):
def test_noneClass(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass)
self.assertNotEqual(ter1, ter2)
def test_noneInstance(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_noneString(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_noneNumber(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_classInstance(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_classString(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_classNumber(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_instanceString(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_instanceNumber(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_stringNumber(self):
ter1 = Terminal(0, 'str')
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_differentInstances(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
|
Add tests for Terminal when grammar is different so terminals should not be equal#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class TerminalCreationTest(TestCase):
def test_noneClass(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass)
self.assertNotEqual(ter1, ter2)
def test_noneInstance(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_noneString(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_noneNumber(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_classInstance(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_classString(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_classNumber(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_instanceString(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_instanceNumber(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_stringNumber(self):
ter1 = Terminal(0, 'str')
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_differentInstances(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
|
<commit_before><commit_msg>Add tests for Terminal when grammar is different so terminals should not be equal<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal
class TempClass:
pass
class TerminalCreationTest(TestCase):
def test_noneClass(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass)
self.assertNotEqual(ter1, ter2)
def test_noneInstance(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_noneString(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_noneNumber(self):
ter1 = Terminal(0, None)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_classInstance(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
def test_classString(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_classNumber(self):
ter1 = Terminal(0, TempClass)
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_instanceString(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 'str')
self.assertNotEqual(ter1, ter2)
def test_instanceNumber(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_stringNumber(self):
ter1 = Terminal(0, 'str')
ter2 = Terminal(0, 5)
self.assertNotEqual(ter1, ter2)
def test_differentInstances(self):
ter1 = Terminal(0, TempClass())
ter2 = Terminal(0, TempClass())
self.assertNotEqual(ter1, ter2)
|
|
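Taken together, these tests pin down Terminal equality: two terminals wrapping the same symbol (0 throughout) must compare unequal whenever the second constructor argument, the owning grammar, differs, including two distinct instances of the same class. A minimal sketch of equality semantics consistent with that, in the Python 2 style of the file; this is an illustration, not grammpy's actual implementation.

# Sketch only: symbol equality plus grammar *identity*, so two TempClass()
# instances count as different grammars, as test_differentInstances demands.
class SketchTerminal(object):
    def __init__(self, symbol, grammar):
        self._symbol = symbol
        self._grammar = grammar

    def __eq__(self, other):
        return (isinstance(other, SketchTerminal) and
                self._symbol == other._symbol and
                self._grammar is other._grammar)

    def __ne__(self, other):
        return not self == other  # Python 2 does not derive __ne__ from __eq__

    def __hash__(self):
        return hash((self._symbol, id(self._grammar)))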
101cdcaf3fff6c10e5c519f1ed409feefeff313a
|
tests/views/test_admin_committee_questions.py
|
tests/views/test_admin_committee_questions.py
|
import os
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from werkzeug.datastructures import FileStorage
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def test_upload_committee_question_document(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path("../data/RNW190-200303.docx")
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# TODO: check fields were parsed correctly
# TODO: delete created objects
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
|
Add first test for committee question uploads
|
Add first test for committee question uploads
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add first test for committee question uploads
|
import os
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from werkzeug.datastructures import FileStorage
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def test_upload_committee_question_document(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path("../data/RNW190-200303.docx")
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# TODO: check fields were parsed correctly
# TODO: delete created objects
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
|
<commit_before><commit_msg>Add first test for committee question uploads<commit_after>
|
import os
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from werkzeug.datastructures import FileStorage
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def test_upload_committee_question_document(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path("../data/RNW190-200303.docx")
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# TODO: check fields were parsed correctly
# TODO: delete created objects
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
|
Add first test for committee question uploadsimport os
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from werkzeug.datastructures import FileStorage
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def test_upload_committee_question_document(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path("../data/RNW190-200303.docx")
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# TODO: check fields were parsed correctly
# TODO: delete created objects
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
|
<commit_before><commit_msg>Add first test for committee question uploads<commit_after>import os
from builtins import str
from tests import PMGLiveServerTestCase
from pmg.models import db, Committee
from tests.fixtures import dbfixture, UserData, CommitteeData, MembershipData
from werkzeug.datastructures import FileStorage
from io import BytesIO
class TestAdminCommitteeQuestions(PMGLiveServerTestCase):
def setUp(self):
super().setUp()
self.fx = dbfixture.data(UserData)
self.fx.setup()
self.user = self.fx.UserData.admin
def test_upload_committee_question_document(self):
"""
Upload committee question document (/admin/committee-question/upload)
"""
url = "/admin/committee-question/upload"
data = {}
path = self.get_absolute_file_path("../data/RNW190-200303.docx")
with open(path, "rb") as f:
data["file"] = (f, "RNW190-200303.docx")
response = self.make_request(
url,
self.user,
data=data,
method="POST",
headers={"Referer": "/somethingelse"},
content_type="multipart/form-data",
follow_redirects=True,
)
self.assertEqual(200, response.status_code)
# TODO: check fields were parsed correctly
# TODO: delete created objects
def get_absolute_file_path(self, relative_path):
dir_name = os.path.dirname(__file__)
return os.path.join(dir_name, relative_path)
|
|
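The test leans on a Werkzeug convention that is easy to miss: passing a (file_object, filename) tuple in the data dict makes the test client send that entry as a multipart file part. A self-contained sketch of the same pattern against a throwaway Flask app; BytesIO avoids needing a fixture file, and the route here is hypothetical.

# Minimal multipart-upload round trip with Flask's test client.
from io import BytesIO
from flask import Flask, request

app = Flask(__name__)

@app.route("/upload", methods=["POST"])
def upload():
    return request.files["file"].filename  # echo the uploaded part's name

client = app.test_client()
resp = client.post(
    "/upload",
    data={"file": (BytesIO(b"fake docx bytes"), "RNW190-200303.docx")},
    content_type="multipart/form-data",
)
assert resp.data == b"RNW190-200303.docx"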
d4bfe2f59d2552d211caddf90ceefcf27a06f08a
|
spdx/writers/yaml.py
|
spdx/writers/yaml.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ruamel.yaml as yaml
from spdx.writers.tagvalue import InvalidDocumentError
from spdx.writers.jsonyaml import Writer
import rdflib
def write_document(document, out, validate=True):
if validate:
messages = []
messages = document.validate(messages)
if messages:
raise InvalidDocumentError(messages)
writer = Writer(document)
document_object = writer.create_document()
yaml.round_trip_dump(document_object, out, indent=2, explicit_start=True)
|
Add YAML documents creating capability.
|
Add YAML documents creating capability.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com>
|
Python
|
apache-2.0
|
spdx/tools-python
|
Add YAML documents creating capability.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com>
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ruamel.yaml as yaml
from spdx.writers.tagvalue import InvalidDocumentError
from spdx.writers.jsonyaml import Writer
import rdflib
def write_document(document, out, validate=True):
if validate:
messages = []
messages = document.validate(messages)
if messages:
raise InvalidDocumentError(messages)
writer = Writer(document)
document_object = writer.create_document()
yaml.round_trip_dump(document_object, out, indent=2, explicit_start=True)
|
<commit_before><commit_msg>Add YAML documents creating capability.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com><commit_after>
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ruamel.yaml as yaml
from spdx.writers.tagvalue import InvalidDocumentError
from spdx.writers.jsonyaml import Writer
import rdflib
def write_document(document, out, validate=True):
if validate:
messages = []
messages = document.validate(messages)
if messages:
raise InvalidDocumentError(messages)
writer = Writer(document)
document_object = writer.create_document()
yaml.round_trip_dump(document_object, out, indent=2, explicit_start=True)
|
Add YAML documents creating capability.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ruamel.yaml as yaml
from spdx.writers.tagvalue import InvalidDocumentError
from spdx.writers.jsonyaml import Writer
import rdflib
def write_document(document, out, validate=True):
if validate:
messages = []
messages = document.validate(messages)
if messages:
raise InvalidDocumentError(messages)
writer = Writer(document)
document_object = writer.create_document()
yaml.round_trip_dump(document_object, out, indent=2, explicit_start=True)
|
<commit_before><commit_msg>Add YAML documents creating capability.
Signed-off-by: Xavier Figueroa <2556c6f4facc1e5829d3394b9ce7316a84702c26@gmail.com><commit_after>from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import ruamel.yaml as yaml
from spdx.writers.tagvalue import InvalidDocumentError
from spdx.writers.jsonyaml import Writer
import rdflib
def write_document(document, out, validate=True):
if validate:
messages = []
messages = document.validate(messages)
if messages:
raise InvalidDocumentError(messages)
writer = Writer(document)
document_object = writer.create_document()
yaml.round_trip_dump(document_object, out, indent=2, explicit_start=True)
|
|
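write_document is the only function added here, so usage is short; a hedged sketch follows. The caller must already hold a populated spdx Document, and building one is out of scope for this writer.

# Usage sketch; only write_document's own signature from above is used.
from spdx.writers.yaml import write_document

def dump_yaml(doc, path):
    """Serialize an in-memory spdx Document to a YAML file."""
    with open(path, "w") as out:
        # Raises InvalidDocumentError carrying the validation messages when
        # document.validate() fails; pass validate=False to skip the check.
        write_document(doc, out)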
079be3496b22d1ac08408dcee13bca02bb20babd
|
articles/migrations/0010_auto_20180817_2321.py
|
articles/migrations/0010_auto_20180817_2321.py
|
# Generated by Django 2.0.8 on 2018-08-17 23:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0009_auto_20180727_1437'),
]
operations = [
migrations.AlterField(
model_name='articlepage',
name='kicker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='articles.Kicker'),
),
]
|
Add migration for blank kicker
|
Add migration for blank kicker
|
Python
|
mit
|
thepoly/Pipeline,thepoly/Pipeline,thepoly/Pipeline,thepoly/Pipeline,thepoly/Pipeline
|
Add migration for blank kicker
|
# Generated by Django 2.0.8 on 2018-08-17 23:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0009_auto_20180727_1437'),
]
operations = [
migrations.AlterField(
model_name='articlepage',
name='kicker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='articles.Kicker'),
),
]
|
<commit_before><commit_msg>Add migration for blank kicker<commit_after>
|
# Generated by Django 2.0.8 on 2018-08-17 23:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0009_auto_20180727_1437'),
]
operations = [
migrations.AlterField(
model_name='articlepage',
name='kicker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='articles.Kicker'),
),
]
|
Add migration for blank kicker# Generated by Django 2.0.8 on 2018-08-17 23:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0009_auto_20180727_1437'),
]
operations = [
migrations.AlterField(
model_name='articlepage',
name='kicker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='articles.Kicker'),
),
]
|
<commit_before><commit_msg>Add migration for blank kicker<commit_after># Generated by Django 2.0.8 on 2018-08-17 23:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0009_auto_20180727_1437'),
]
operations = [
migrations.AlterField(
model_name='articlepage',
name='kicker',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, to='articles.Kicker'),
),
]
|
|
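The migration is the database-side record of a one-word model change. The model-side counterpart would look like the sketch below; field placement inside ArticlePage is inferred from the migration, not taken from the app's models.py.

# articles/models.py excerpt (sketch)
from django.db import models

class ArticlePage(models.Model):
    kicker = models.ForeignKey(
        "articles.Kicker",
        blank=True,   # the addition this migration records: forms may leave it empty
        null=True,    # already allowed at the database level
        on_delete=models.PROTECT,
    )

    class Meta:
        app_label = "articles"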
237ce6e8bc8534411825c9477f7cc8d6f3bfa570
|
tests/functional/test_contact_page.py
|
tests/functional/test_contact_page.py
|
from .base import FunctionalTest
class ContactPageTest(FunctionalTest):
def test_contact_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
contact_link = self.browser.find_element_by_link_text('CONTACT US')
contact_link.click()
# Assert that the Contact Us link in the navbar works
self.assertIn("Contact Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'Contact Us')
|
Add Contact Page functional tests
|
Add Contact Page functional tests
|
Python
|
bsd-3-clause
|
andela-kndungu/compshop,kevgathuku/compshop,kevgathuku/compshop,andela-kndungu/compshop,andela-kndungu/compshop,kevgathuku/compshop,kevgathuku/compshop,andela-kndungu/compshop
|
Add Contact Page functional tests
|
from .base import FunctionalTest
class ContactPageTest(FunctionalTest):
def test_contact_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
contact_link = self.browser.find_element_by_link_text('CONTACT US')
contact_link.click()
# Assert that the Contact Us link in the navbar works
self.assertIn("Contact Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'Contact Us')
|
<commit_before><commit_msg>Add Contact Page functional tests<commit_after>
|
from .base import FunctionalTest
class ContactPageTest(FunctionalTest):
def test_contact_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
contact_link = self.browser.find_element_by_link_text('CONTACT US')
contact_link.click()
# Assert that the Contact Us link in the navbar works
self.assertIn("Contact Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'Contact Us')
|
Add Contact Page functional testsfrom .base import FunctionalTest
class ContactPageTest(FunctionalTest):
def test_contact_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
contact_link = self.browser.find_element_by_link_text('CONTACT US')
contact_link.click()
# Assert that the Contact Us link in the navbar works
self.assertIn("Contact Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'Contact Us')
|
<commit_before><commit_msg>Add Contact Page functional tests<commit_after>from .base import FunctionalTest
class ContactPageTest(FunctionalTest):
def test_contact_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
contact_link = self.browser.find_element_by_link_text('CONTACT US')
contact_link.click()
# Assert that the Contact Us link in the navbar works
self.assertIn("Contact Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'Contact Us')
|
|
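Everything interesting lives in the imported FunctionalTest base, which this record does not show. For orientation, here is a common shape for such a base class; this is purely an assumption about the project, not its actual tests/functional/base.py.

# Hypothetical tests/functional/base.py
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from selenium import webdriver

class FunctionalTest(StaticLiveServerTestCase):
    def setUp(self):
        self.browser = webdriver.Firefox()   # provides self.browser to tests

    def tearDown(self):
        self.browser.quit()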
42fc7eda507037de7df4bc148b3d798b2871bba5
|
scripts/make_user_admin.py
|
scripts/make_user_admin.py
|
#!/usr/bin/env python
"""
Promote the given user to a global admin by assigning the admin group to
the user's global (instance-less) membership.
"""
from datetime import datetime
from sqlalchemy import and_, or_
from adhocracy.model import Group, Membership, meta, User
# boilerplate code. copy that
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from common import create_parser, get_instances, load_from_args
# /end boilerplate code
def main():
parser = create_parser(description=__doc__, use_instance=False)
parser.add_argument(
"username",
help=("The name of the user who should become a global admin"))
args = parser.parse_args()
load_from_args(args)
user = User.find(args.username)
if user is None:
print 'Cannot find user %s\n' % args.username
parser.exit()
global_membership = [membership for membership in user.memberships if
membership.instance is None][0]
admin_group = Group.by_code(Group.CODE_ADMIN)
global_membership.group = admin_group
meta.Session.commit()
if __name__ == '__main__':
sys.exit(main())
|
Add script to make a user the global admin
|
Add script to make a user the global admin
|
Python
|
agpl-3.0
|
liqd/adhocracy,liqd/adhocracy,SysTheron/adhocracy,phihag/adhocracy,phihag/adhocracy,alkadis/vcv,alkadis/vcv,SysTheron/adhocracy,SysTheron/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,liqd/adhocracy,alkadis/vcv,phihag/adhocracy,phihag/adhocracy,phihag/adhocracy,liqd/adhocracy,DanielNeugebauer/adhocracy,DanielNeugebauer/adhocracy,alkadis/vcv,DanielNeugebauer/adhocracy,DanielNeugebauer/adhocracy
|
Add script to make a user the global admin
|
#!/usr/bin/env python
"""
Promote the given user to a global admin by assigning the admin group to
the user's global (instance-less) membership.
"""
from datetime import datetime
from sqlalchemy import and_, or_
from adhocracy.model import Group, Membership, meta, User
# boilerplate code. copy that
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from common import create_parser, get_instances, load_from_args
# /end boilerplate code
def main():
parser = create_parser(description=__doc__, use_instance=False)
parser.add_argument(
"username",
help=("The name of the user who should become a global admin"))
args = parser.parse_args()
load_from_args(args)
user = User.find(args.username)
if user is None:
print 'Cannot find user %s\n' % args.username
parser.exit()
global_membership = [membership for membership in user.memberships if
membership.instance is None][0]
admin_group = Group.by_code(Group.CODE_ADMIN)
global_membership.group = admin_group
meta.Session.commit()
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to make a user the global admin<commit_after>
|
#!/usr/bin/env python
"""
Promote the given user to a global admin by assigning the admin group to
the user's global (instance-less) membership.
"""
from datetime import datetime
from sqlalchemy import and_, or_
from adhocracy.model import Group, Membership, meta, User
# boilerplate code. copy that
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from common import create_parser, get_instances, load_from_args
# /end boilerplate code
def main():
parser = create_parser(description=__doc__, use_instance=False)
parser.add_argument(
"username",
help=("The name of the user who should become a global admin"))
args = parser.parse_args()
load_from_args(args)
user = User.find(args.username)
if user is None:
print 'Cannot find user %s\n' % args.username
parser.exit()
global_membership = [membership for membership in user.memberships if
membership.instance is None][0]
admin_group = Group.by_code(Group.CODE_ADMIN)
global_membership.group = admin_group
meta.Session.commit()
if __name__ == '__main__':
sys.exit(main())
|
Add script to make a user the global admin#!/usr/bin/env python
"""
Promote the given user to a global admin by assigning the admin group to
the user's global (instance-less) membership.
"""
from datetime import datetime
from sqlalchemy import and_, or_
from adhocracy.model import Group, Membership, meta, User
# boilerplate code. copy that
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from common import create_parser, get_instances, load_from_args
# /end boilerplate code
def main():
parser = create_parser(description=__doc__, use_instance=False)
parser.add_argument(
"username",
help=("The name of the user who should become a global admin"))
args = parser.parse_args()
load_from_args(args)
user = User.find(args.username)
if user is None:
print 'Cannot find user %s\n' % args.username
parser.exit()
global_membership = [membership for membership in user.memberships if
membership.instance is None][0]
admin_group = Group.by_code(Group.CODE_ADMIN)
global_membership.group = admin_group
meta.Session.commit()
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add script to make a user the global admin<commit_after>#!/usr/bin/env python
"""
Promote the given user to a global admin by assigning the admin group to
the user's global (instance-less) membership.
"""
from datetime import datetime
from sqlalchemy import and_, or_
from adhocracy.model import Group, Membership, meta, User
# boilerplate code. copy that
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
from common import create_parser, get_instances, load_from_args
# /end boilerplate code
def main():
parser = create_parser(description=__doc__, use_instance=False)
parser.add_argument(
"username",
help=("The name of the user who should become a global admin"))
args = parser.parse_args()
load_from_args(args)
user = User.find(args.username)
if user is None:
print 'Cannot find user %s\n' % args.username
parser.exit()
global_membership = [membership for membership in user.memberships if
membership.instance is None][0]
admin_group = Group.by_code(Group.CODE_ADMIN)
global_membership.group = admin_group
meta.Session.commit()
if __name__ == '__main__':
sys.exit(main())
|
|
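The script relies on one invariant: a user's global membership is the one whose instance is None, and only its group is mutated. A quick Python 2 sanity check one could run afterwards, using only lookups the script itself performs; the username is a placeholder.

# Verify the promotion took effect (sketch).
from adhocracy.model import Group, User

user = User.find('some_username')          # hypothetical username
is_global_admin = any(
    m.instance is None and m.group == Group.by_code(Group.CODE_ADMIN)
    for m in user.memberships
)
print is_global_admin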
1692e53b86f830cdde631437de3ad746d5225fc6
|
clover/__init__.py
|
clover/__init__.py
|
# This is here simply to aid migration to trefoil. It will be removed in a future version!
import sys
import warnings
import trefoil
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"the package name 'clover' has been deprecated; use 'trefoil' instead",
DeprecationWarning)
sys.modules['clover'] = trefoil
|
Add shim for clover package name and deprecation warning
|
Add shim for clover package name and deprecation warning
|
Python
|
bsd-3-clause
|
consbio/clover,consbio/clover
|
Add shim for clover package name and deprecation warning
|
# This is here simply to aid migration to trefoil. It will be removed in a future version!
import sys
import warnings
import trefoil
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"the package name 'clover' has been deprecated; use 'trefoil' instead",
DeprecationWarning)
sys.modules['clover'] = trefoil
|
<commit_before><commit_msg>Add shim for clover package name and deprecation warning<commit_after>
|
# This is here simply to aid migration to trefoil. It will be removed in a future version!
import sys
import warnings
import trefoil
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"the package name 'clover' has been deprecated; use 'trefoil' instead",
DeprecationWarning)
sys.modules['clover'] = trefoil
|
Add shim for clover package name and deprecation warning# This is here simply to aid migration to trefoil. It will be removed in a future version!
import sys
import warnings
import trefoil
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"the package name 'clover' has been deprecated; use 'trefoil' instead",
DeprecationWarning)
sys.modules['clover'] = trefoil
|
<commit_before><commit_msg>Add shim for clover package name and deprecation warning<commit_after># This is here simply to aid migration to trefoil. It will be removed in a future version!
import sys
import warnings
import trefoil
warnings.simplefilter('always', DeprecationWarning)
warnings.warn(
"the package name 'clover' has been deprecated; use 'trefoil' instead",
DeprecationWarning)
sys.modules['clover'] = trefoil
|
|
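The sys.modules replacement at the end is what makes the alias seamless: Python binds the imported name from sys.modules after the shim finishes executing, so callers receive trefoil itself. A demonstration, runnable in a fresh interpreter with trefoil installed:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import clover                      # executes the shim, emits the warning

assert any(issubclass(w.category, DeprecationWarning) for w in caught)

import trefoil
assert clover is trefoil               # the alias is the module object itself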
ce31b68b98802291c2d6e6871ba20f4711617f45
|
test/on_yubikey/test_interfaces.py
|
test/on_yubikey/test_interfaces.py
|
from .util import DestructiveYubikeyTestCase
from ykman import driver_fido, driver_otp, driver_ccid
class TestInterfaces(DestructiveYubikeyTestCase):
def test_switch_interfaces(self):
next(driver_fido.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
|
Add integration test for switching interfaces
|
Add integration test for switching interfaces
|
Python
|
bsd-2-clause
|
Yubico/yubikey-manager,Yubico/yubikey-manager
|
Add integration test for switching interfaces
|
from .util import DestructiveYubikeyTestCase
from ykman import driver_fido, driver_otp, driver_ccid
class TestInterfaces(DestructiveYubikeyTestCase):
def test_switch_interfaces(self):
next(driver_fido.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
|
<commit_before><commit_msg>Add integration test for switching interfaces<commit_after>
|
from .util import DestructiveYubikeyTestCase
from ykman import driver_fido, driver_otp, driver_ccid
class TestInterfaces(DestructiveYubikeyTestCase):
def test_switch_interfaces(self):
next(driver_fido.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
|
Add integration test for switching interfacesfrom .util import DestructiveYubikeyTestCase
from ykman import driver_fido, driver_otp, driver_ccid
class TestInterfaces(DestructiveYubikeyTestCase):
def test_switch_interfaces(self):
next(driver_fido.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
|
<commit_before><commit_msg>Add integration test for switching interfaces<commit_after>from .util import DestructiveYubikeyTestCase
from ykman import driver_fido, driver_otp, driver_ccid
class TestInterfaces(DestructiveYubikeyTestCase):
def test_switch_interfaces(self):
next(driver_fido.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_fido.open_devices()).read_config()
next(driver_ccid.open_devices()).read_config()
next(driver_otp.open_devices()).read_config()
|
|
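The twelve back-to-back hops cover every ordered transition among the three transports, but that intent is buried in repetition. An equivalent, self-documenting variant using itertools; this is a refactoring sketch, not a change the commit makes.

from itertools import permutations

from .util import DestructiveYubikeyTestCase
from ykman import driver_fido, driver_otp, driver_ccid

class TestInterfacePairs(DestructiveYubikeyTestCase):
    def test_every_ordered_transition(self):
        drivers = (driver_fido, driver_otp, driver_ccid)
        for first, second in permutations(drivers, 2):  # all 6 ordered pairs
            next(first.open_devices()).read_config()
            next(second.open_devices()).read_config()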
887a81f21033a0694bccec9ac07470b95569646b
|
tests/uri/test_uri_query.py
|
tests/uri/test_uri_query.py
|
import pytest
from httoop import URI
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
(b'&', ()),
(b'&&', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
(b'a=', ((u'a', u''),)),
(b'&a=b', ((u'a', u'b'),)),
(b'&&a=b&&b=c&d=f&', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_parse(query_string, query):
uri = URI(b'http://example.com/?%s' % (query_string,))
assert uri.query == query
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
# (b'a=', ((u'a', u''),)),
(b'a=b', ((u'a', u'b'),)),
(b'a=b&b=c&d=f', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_compose(query_string, query):
uri = URI(b'http://example.com/')
uri.query = query
assert uri.query_string == query_string
|
Add basic query string tests
|
Add basic query string tests
|
Python
|
mit
|
spaceone/httoop,spaceone/httoop,spaceone/httoop
|
Add basic query string tests
|
import pytest
from httoop import URI
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
(b'&', ()),
(b'&&', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
(b'a=', ((u'a', u''),)),
(b'&a=b', ((u'a', u'b'),)),
(b'&&a=b&&b=c&d=f&', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_parse(query_string, query):
uri = URI(b'http://example.com/?%s' % (query_string,))
assert uri.query == query
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
# (b'a=', ((u'a', u''),)),
(b'a=b', ((u'a', u'b'),)),
(b'a=b&b=c&d=f', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_compose(query_string, query):
uri = URI(b'http://example.com/')
uri.query = query
assert uri.query_string == query_string
|
<commit_before><commit_msg>Add basic query string tests<commit_after>
|
import pytest
from httoop import URI
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
(b'&', ()),
(b'&&', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
(b'a=', ((u'a', u''),)),
(b'&a=b', ((u'a', u'b'),)),
(b'&&a=b&&b=c&d=f&', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_parse(query_string, query):
uri = URI(b'http://example.com/?%s' % (query_string,))
assert uri.query == query
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
# (b'a=', ((u'a', u''),)),
(b'a=b', ((u'a', u'b'),)),
(b'a=b&b=c&d=f', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_compose(query_string, query):
uri = URI(b'http://example.com/')
uri.query = query
assert uri.query_string == query_string
|
Add basic query string testsimport pytest
from httoop import URI
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
(b'&', ()),
(b'&&', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
(b'a=', ((u'a', u''),)),
(b'&a=b', ((u'a', u'b'),)),
(b'&&a=b&&b=c&d=f&', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_parse(query_string, query):
uri = URI(b'http://example.com/?%s' % (query_string,))
assert uri.query == query
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
# (b'a=', ((u'a', u''),)),
(b'a=b', ((u'a', u'b'),)),
(b'a=b&b=c&d=f', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_compose(query_string, query):
uri = URI(b'http://example.com/')
uri.query = query
assert uri.query_string == query_string
|
<commit_before><commit_msg>Add basic query string tests<commit_after>import pytest
from httoop import URI
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
(b'&', ()),
(b'&&', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
(b'a=', ((u'a', u''),)),
(b'&a=b', ((u'a', u'b'),)),
(b'&&a=b&&b=c&d=f&', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_parse(query_string, query):
uri = URI(b'http://example.com/?%s' % (query_string,))
assert uri.query == query
@pytest.mark.parametrize('query_string,query', [
(b'', ()),
# (b'=', ((u'', u''),)),
# (b'=a', ((u'', u'a'),)),
(b'a', ((u'a', u''),)),
# (b'a=', ((u'a', u''),)),
(b'a=b', ((u'a', u'b'),)),
(b'a=b&b=c&d=f', ((u'a', u'b'), (u'b', u'c'), (u'd', u'f'))),
(b'a=a+b&b=b+c', ((u'a', u'a b'), (u'b', u'b c'))),
(b'a=1&a=2', ((u'a', u'1'), (u'a', u'2'))),
])
def test_query_string_compose(query_string, query):
uri = URI(b'http://example.com/')
uri.query = query
assert uri.query_string == query_string
|
|
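Read as documentation, the two parametrized tests specify URI.query round-tripping; in direct use that looks like this, with values lifted straight from the tables above:

from httoop import URI

uri = URI(b'http://example.com/?a=a+b&b=b+c')
assert uri.query == ((u'a', u'a b'), (u'b', u'b c'))   # '+' decodes to a space

uri.query = ((u'a', u'1'), (u'a', u'2'))               # repeated keys survive
assert uri.query_string == b'a=1&a=2'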
523e98fd4d91beff0f3e1b25bf5e61209bcc00ee
|
unittests/test_networking.py
|
unittests/test_networking.py
|
import pytest
import ipaddress
from lab import network
@pytest.mark.parametrize('ip', [
'127.0.0.1',
'::1'
])
def test_ping(ip):
network.ping(ip)
@pytest.mark.parametrize('ip, expected', [
('127.0.0.1', 4),
('::1', 6)
])
def test_addr_version_detection(ip, expected):
assert network.get_addr_version(ip) == expected
@pytest.mark.parametrize('ip, version, expected', [
('127.0.0.1', 4, ipaddress.IPv4Address(u'127.0.0.1')),
('::1', 6, ipaddress.IPv6Address(u'::1')),
('localhost.localdomain', 4, ipaddress.IPv4Address(u'127.0.0.1'))
])
def test_check_ipaddr(ip, version, expected):
assert network.check_ipaddr(ip, version=version) == (expected, ip)
@pytest.mark.parametrize('destination, version, expected', [
('localhost', 4, 'lo'),
])
def test_find_best_route(destination, version, expected):
iface, addr = network.find_best_route(destination, version=version)
assert iface == expected
assert addr.version == version
|
Add a set of tests for the network module
|
Add a set of tests for the network module
|
Python
|
mpl-2.0
|
sangoma/pytestlab
|
Add a set of tests for the network module
|
import pytest
import ipaddress
from lab import network
@pytest.mark.parametrize('ip', [
'127.0.0.1',
'::1'
])
def test_ping(ip):
network.ping(ip)
@pytest.mark.parametrize('ip, expected', [
('127.0.0.1', 4),
('::1', 6)
])
def test_addr_version_detection(ip, expected):
assert network.get_addr_version(ip) == expected
@pytest.mark.parametrize('ip, version, expected', [
('127.0.0.1', 4, ipaddress.IPv4Address(u'127.0.0.1')),
('::1', 6, ipaddress.IPv6Address(u'::1')),
('localhost.localdomain', 4, ipaddress.IPv4Address(u'127.0.0.1'))
])
def test_check_ipaddr(ip, version, expected):
assert network.check_ipaddr(ip, version=version) == (expected, ip)
@pytest.mark.parametrize('destination, version, expected', [
('localhost', 4, 'lo'),
])
def test_find_best_route(destination, version, expected):
iface, addr = network.find_best_route(destination, version=version)
assert iface == expected
assert addr.version == version
|
<commit_before><commit_msg>Add a set of tests for the network module<commit_after>
|
import pytest
import ipaddress
from lab import network
@pytest.mark.parametrize('ip', [
'127.0.0.1',
'::1'
])
def test_ping(ip):
network.ping(ip)
@pytest.mark.parametrize('ip, expected', [
('127.0.0.1', 4),
('::1', 6)
])
def test_addr_version_detection(ip, expected):
assert network.get_addr_version(ip) == expected
@pytest.mark.parametrize('ip, version, expected', [
('127.0.0.1', 4, ipaddress.IPv4Address(u'127.0.0.1')),
('::1', 6, ipaddress.IPv6Address(u'::1')),
('localhost.localdomain', 4, ipaddress.IPv4Address(u'127.0.0.1'))
])
def test_check_ipaddr(ip, version, expected):
assert network.check_ipaddr(ip, version=version) == (expected, ip)
@pytest.mark.parametrize('destination, version, expected', [
('localhost', 4, 'lo'),
])
def test_find_best_route(destination, version, expected):
iface, addr = network.find_best_route(destination, version=version)
assert iface == expected
assert addr.version == version
|
Add a set of tests for the network moduleimport pytest
import ipaddress
from lab import network
@pytest.mark.parametrize('ip', [
'127.0.0.1',
'::1'
])
def test_ping(ip):
network.ping(ip)
@pytest.mark.parametrize('ip, expected', [
('127.0.0.1', 4),
('::1', 6)
])
def test_addr_version_detection(ip, expected):
assert network.get_addr_version(ip) == expected
@pytest.mark.parametrize('ip, version, expected', [
('127.0.0.1', 4, ipaddress.IPv4Address(u'127.0.0.1')),
('::1', 6, ipaddress.IPv6Address(u'::1')),
('localhost.localdomain', 4, ipaddress.IPv4Address(u'127.0.0.1'))
])
def test_check_ipaddr(ip, version, expected):
assert network.check_ipaddr(ip, version=version) == (expected, ip)
@pytest.mark.parametrize('destination, version, expected', [
('localhost', 4, 'lo'),
])
def test_find_best_route(destination, version, expected):
iface, addr = network.find_best_route(destination, version=version)
assert iface == expected
assert addr.version == version
|
<commit_before><commit_msg>Add a set of tests for the network module<commit_after>import pytest
import ipaddress
from lab import network
@pytest.mark.parametrize('ip', [
'127.0.0.1',
'::1'
])
def test_ping(ip):
network.ping(ip)
@pytest.mark.parametrize('ip, expected', [
('127.0.0.1', 4),
('::1', 6)
])
def test_addr_version_detection(ip, expected):
assert network.get_addr_version(ip) == expected
@pytest.mark.parametrize('ip, version, expected', [
('127.0.0.1', 4, ipaddress.IPv4Address(u'127.0.0.1')),
('::1', 6, ipaddress.IPv6Address(u'::1')),
('localhost.localdomain', 4, ipaddress.IPv4Address(u'127.0.0.1'))
])
def test_check_ipaddr(ip, version, expected):
assert network.check_ipaddr(ip, version=version) == (expected, ip)
@pytest.mark.parametrize('destination, version, expected', [
('localhost', 4, 'lo'),
])
def test_find_best_route(destination, version, expected):
iface, addr = network.find_best_route(destination, version=version)
assert iface == expected
assert addr.version == version
|
|
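As a specification, the tests say get_addr_version sniffs 4 versus 6, check_ipaddr resolves hostnames, and find_best_route picks the interface plus source address for a destination. Typical use, with expected values taken from the parametrize tables:

from lab import network

assert network.get_addr_version('::1') == 6
iface, addr = network.find_best_route('localhost', version=4)
assert iface == 'lo' and addr.version == 4   # loopback route on Linux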
17c39350788c9d371b6810dcc0bf82dd1fd93be1
|
tests/rules/test_brew_uninstall.py
|
tests/rules/test_brew_uninstall.py
|
import pytest
from tests.utils import Command
from thefuck.rules.brew_uninstall import get_new_command, match
@pytest.fixture
def stdout():
return ("Uninstalling /usr/local/Cellar/tbb/4.4-20160916... (118 files, 1.9M)\n"
"tbb 4.4-20160526, 4.4-20160722 are still installed.\n"
"Remove all versions with `brew uninstall --force tbb`.\n")
@pytest.fixture
def new_command(formula):
return 'brew uninstall --force {}'.format(formula)
@pytest.mark.parametrize('script', ['brew uninstall tbb', 'brew rm tbb', 'brew remove tbb'])
def test_match(stdout, script):
assert match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script', ['brew remove gnuplot'])
def test_not_match(script):
stdout = 'Uninstalling /usr/local/Cellar/gnuplot/5.0.4_1... (44 files, 2.3M)\n'
assert not match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script, formula, ', [('brew uninstall tbb', 'tbb')])
def test_get_new_command(stdout, new_command, script, formula):
assert get_new_command(Command(script=script, stdout=stdout)) == new_command
|
Test `brew uninstall --force` suggestion
|
Test `brew uninstall --force` suggestion
|
Python
|
mit
|
Clpsplug/thefuck,SimenB/thefuck,SimenB/thefuck,mlk/thefuck,scorphus/thefuck,nvbn/thefuck,Clpsplug/thefuck,mlk/thefuck,nvbn/thefuck,scorphus/thefuck
|
Test `brew uninstall --force` suggestion
|
import pytest
from tests.utils import Command
from thefuck.rules.brew_uninstall import get_new_command, match
@pytest.fixture
def stdout():
return ("Uninstalling /usr/local/Cellar/tbb/4.4-20160916... (118 files, 1.9M)\n"
"tbb 4.4-20160526, 4.4-20160722 are still installed.\n"
"Remove all versions with `brew uninstall --force tbb`.\n")
@pytest.fixture
def new_command(formula):
return 'brew uninstall --force {}'.format(formula)
@pytest.mark.parametrize('script', ['brew uninstall tbb', 'brew rm tbb', 'brew remove tbb'])
def test_match(stdout, script):
assert match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script', ['brew remove gnuplot'])
def test_not_match(script):
stdout = 'Uninstalling /usr/local/Cellar/gnuplot/5.0.4_1... (44 files, 2.3M)\n'
assert not match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script, formula, ', [('brew uninstall tbb', 'tbb')])
def test_get_new_command(stdout, new_command, script, formula):
assert get_new_command(Command(script=script, stdout=stdout)) == new_command
|
<commit_before><commit_msg>Test `brew uninstall --force` suggestion<commit_after>
|
import pytest
from tests.utils import Command
from thefuck.rules.brew_uninstall import get_new_command, match
@pytest.fixture
def stdout():
return ("Uninstalling /usr/local/Cellar/tbb/4.4-20160916... (118 files, 1.9M)\n"
"tbb 4.4-20160526, 4.4-20160722 are still installed.\n"
"Remove all versions with `brew uninstall --force tbb`.\n")
@pytest.fixture
def new_command(formula):
return 'brew uninstall --force {}'.format(formula)
@pytest.mark.parametrize('script', ['brew uninstall tbb', 'brew rm tbb', 'brew remove tbb'])
def test_match(stdout, script):
assert match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script', ['brew remove gnuplot'])
def test_not_match(script):
stdout = 'Uninstalling /usr/local/Cellar/gnuplot/5.0.4_1... (44 files, 2.3M)\n'
assert not match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script, formula, ', [('brew uninstall tbb', 'tbb')])
def test_get_new_command(stdout, new_command, script, formula):
assert get_new_command(Command(script=script, stdout=stdout)) == new_command
|
Test `brew uninstall --force` suggestionimport pytest
from tests.utils import Command
from thefuck.rules.brew_uninstall import get_new_command, match
@pytest.fixture
def stdout():
return ("Uninstalling /usr/local/Cellar/tbb/4.4-20160916... (118 files, 1.9M)\n"
"tbb 4.4-20160526, 4.4-20160722 are still installed.\n"
"Remove all versions with `brew uninstall --force tbb`.\n")
@pytest.fixture
def new_command(formula):
return 'brew uninstall --force {}'.format(formula)
@pytest.mark.parametrize('script', ['brew uninstall tbb', 'brew rm tbb', 'brew remove tbb'])
def test_match(stdout, script):
assert match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script', ['brew remove gnuplot'])
def test_not_match(script):
stdout = 'Uninstalling /usr/local/Cellar/gnuplot/5.0.4_1... (44 files, 2.3M)\n'
assert not match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script, formula, ', [('brew uninstall tbb', 'tbb')])
def test_get_new_command(stdout, new_command, script, formula):
assert get_new_command(Command(script=script, stdout=stdout)) == new_command
|
<commit_before><commit_msg>Test `brew uninstall --force` suggestion<commit_after>import pytest
from tests.utils import Command
from thefuck.rules.brew_uninstall import get_new_command, match
@pytest.fixture
def stdout():
return ("Uninstalling /usr/local/Cellar/tbb/4.4-20160916... (118 files, 1.9M)\n"
"tbb 4.4-20160526, 4.4-20160722 are still installed.\n"
"Remove all versions with `brew uninstall --force tbb`.\n")
@pytest.fixture
def new_command(formula):
return 'brew uninstall --force {}'.format(formula)
@pytest.mark.parametrize('script', ['brew uninstall tbb', 'brew rm tbb', 'brew remove tbb'])
def test_match(stdout, script):
assert match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script', ['brew remove gnuplot'])
def test_not_match(script):
stdout = 'Uninstalling /usr/local/Cellar/gnuplot/5.0.4_1... (44 files, 2.3M)\n'
assert not match(Command(script=script, stdout=stdout))
@pytest.mark.parametrize('script, formula, ', [('brew uninstall tbb', 'tbb')])
def test_get_new_command(stdout, new_command, script, formula):
assert get_new_command(Command(script=script, stdout=stdout)) == new_command
|
|
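The tests fully determine the rule's behavior: match only when brew's output offers the --force command, and lift that command verbatim from the backticks. One way brew_uninstall could be written to satisfy them; a sketch, not necessarily thefuck's actual implementation.

import re

def match(command):
    return (command.script.startswith('brew')
            and 'brew uninstall --force' in command.stdout)

def get_new_command(command):
    # Pull the suggestion out of: Remove all versions with `brew uninstall --force tbb`.
    return re.search(r'`(brew uninstall --force .+?)`', command.stdout).group(1)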
c44ce1e29e8af7808dda8f80e65a92842dd8144a
|
py/binary-watch.py
|
py/binary-watch.py
|
from operator import mul
class Solution(object):
def dfs(self, rem, cur, depth):
if depth == 10:
h = sum(map(mul, cur[:4], [1, 2, 4, 8]))
m = sum(map(mul, cur[4:], [1, 2, 4, 8, 16, 32]))
if h < 12 and m < 60:
yield '{}:{:02d}'.format(h, m)
else:
for i, r in enumerate(rem):
if r > 0:
rem[i] -= 1
cur[depth] = i
for ans in self.dfs(rem, cur, depth + 1):
yield ans
rem[i] += 1
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
return list(self.dfs([10 - num, num], [None] * 10, 0))
|
Add py solution for 401. Binary Watch
|
Add py solution for 401. Binary Watch
401. Binary Watch: https://leetcode.com/problems/binary-watch/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 401. Binary Watch
401. Binary Watch: https://leetcode.com/problems/binary-watch/
|
from operator import mul
class Solution(object):
def dfs(self, rem, cur, depth):
if depth == 10:
h = sum(map(mul, cur[:4], [1, 2, 4, 8]))
m = sum(map(mul, cur[4:], [1, 2, 4, 8, 16, 32]))
if h < 12 and m < 60:
yield '{}:{:02d}'.format(h, m)
else:
for i, r in enumerate(rem):
if r > 0:
rem[i] -= 1
cur[depth] = i
for ans in self.dfs(rem, cur, depth + 1):
yield ans
rem[i] += 1
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
return list(self.dfs([10 - num, num], [None] * 10, 0))
|
<commit_before><commit_msg>Add py solution for 401. Binary Watch
401. Binary Watch: https://leetcode.com/problems/binary-watch/<commit_after>
|
from operator import mul
class Solution(object):
def dfs(self, rem, cur, depth):
if depth == 10:
h = sum(map(mul, cur[:4], [1, 2, 4, 8]))
m = sum(map(mul, cur[4:], [1, 2, 4, 8, 16, 32]))
if h < 12 and m < 60:
yield '{}:{:02d}'.format(h, m)
else:
for i, r in enumerate(rem):
if r > 0:
rem[i] -= 1
cur[depth] = i
for ans in self.dfs(rem, cur, depth + 1):
yield ans
rem[i] += 1
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
return list(self.dfs([10 - num, num], [None] * 10, 0))
|
Add py solution for 401. Binary Watch
401. Binary Watch: https://leetcode.com/problems/binary-watch/from operator import mul
class Solution(object):
def dfs(self, rem, cur, depth):
if depth == 10:
h = sum(map(mul, cur[:4], [1, 2, 4, 8]))
m = sum(map(mul, cur[4:], [1, 2, 4, 8, 16, 32]))
if h < 12 and m < 60:
yield '{}:{:02d}'.format(h, m)
else:
for i, r in enumerate(rem):
if r > 0:
rem[i] -= 1
cur[depth] = i
for ans in self.dfs(rem, cur, depth + 1):
yield ans
rem[i] += 1
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
return list(self.dfs([10 - num, num], [None] * 10, 0))
|
<commit_before><commit_msg>Add py solution for 401. Binary Watch
401. Binary Watch: https://leetcode.com/problems/binary-watch/<commit_after>from operator import mul
class Solution(object):
def dfs(self, rem, cur, depth):
if depth == 10:
h = sum(map(mul, cur[:4], [1, 2, 4, 8]))
m = sum(map(mul, cur[4:], [1, 2, 4, 8, 16, 32]))
if h < 12 and m < 60:
yield '{}:{:02d}'.format(h, m)
else:
for i, r in enumerate(rem):
if r > 0:
rem[i] -= 1
cur[depth] = i
for ans in self.dfs(rem, cur, depth + 1):
yield ans
rem[i] += 1
def readBinaryWatch(self, num):
"""
:type num: int
:rtype: List[str]
"""
return list(self.dfs([10 - num, num], [None] * 10, 0))
|
|
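The DFS distributes num lit LEDs across the ten bit positions; since the search space is only 12 x 60 times anyway, a popcount scan is an equally valid and shorter route, shown here as a point of comparison rather than as the committed solution.

class PopcountSolution(object):
    def readBinaryWatch(self, num):
        # A time is valid when hour bits plus minute bits sum to num.
        return ['{}:{:02d}'.format(h, m)
                for h in range(12)
                for m in range(60)
                if bin(h).count('1') + bin(m).count('1') == num]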
c83ec7a33334c253362f93b991c396d3d1338ea4
|
tests/ChromskyForm/OneRuleWithMultipleNonterminalsTest.py
|
tests/ChromskyForm/OneRuleWithMultipleNonterminalsTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 28.08.2017 14:37
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A,B,C])]
class OneRuleWithMultipleNonterminalsTest(TestCase):
def test_transform(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
com = ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(com.rules_count(), 2)
self.assertEqual(len(com.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, com.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], com.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(com.nonterms_count(), 5)
self.assertEqual(len(com.nonterms()), 5)
def test_transformShouldNotChange(self):
g = Grammar(nonterminals=[S, A,B,C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(g.rules_count(), 1)
self.assertEqual(len(g.rules()), 1)
self.assertEqual(g.rules()[0], Rules)
def test_transformShouldChange(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g, transform_grammar=True)
self.assertEqual(g.rules_count(), 2)
self.assertEqual(len(g.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, g.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], g.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(g.nonterms_count(), 5)
self.assertEqual(len(g.nonterms()), 5)
if __name__ == '__main__':
main()
|
Add test with single rule with three nonterminals for Chomsky normal form transformation
|
Add test with single rule with three nonterminals for Chomsky normal form transformation
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add test with single rule with three nonterminals for Chomsky normal form transformation
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 28.08.2017 14:37
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A,B,C])]
class OneRuleWithMultipleNonterminalsTest(TestCase):
def test_transform(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
com = ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(com.rules_count(), 2)
self.assertEqual(len(com.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, com.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], com.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(com.nonterms_count(), 5)
self.assertEqual(len(com.nonterms()), 5)
def test_transformShouldNotChange(self):
g = Grammar(nonterminals=[S, A,B,C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(g.rules_count(), 1)
self.assertEqual(len(g.rules()), 1)
self.assertEqual(g.rules()[0], Rules)
def test_transformShouldChange(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g, transform_grammar=True)
self.assertEqual(g.rules_count(), 2)
self.assertEqual(len(g.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, g.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], g.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(g.nonterms_count(), 5)
self.assertEqual(len(g.nonterms()), 5)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test with single rule with three nonterminals for Chomsky normal form transformation<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 28.08.2017 14:37
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A,B,C])]
class OneRuleWithMultipleNonterminalsTest(TestCase):
def test_transform(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
com = ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(com.rules_count(), 2)
self.assertEqual(len(com.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, com.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], com.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(com.nonterms_count(), 5)
self.assertEqual(len(com.nonterms()), 5)
def test_transformShouldNotChange(self):
g = Grammar(nonterminals=[S, A,B,C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(g.rules_count(), 1)
self.assertEqual(len(g.rules()), 1)
self.assertEqual(g.rules()[0], Rules)
def test_transformShouldChange(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g, transform_grammar=True)
self.assertEqual(g.rules_count(), 2)
self.assertEqual(len(g.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, g.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], g.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(g.nonterms_count(), 5)
self.assertEqual(len(g.nonterms()), 5)
if __name__ == '__main__':
main()
|
Add test with single rule with three nonterminals for Chomsky normal form transformation#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 28.08.2017 14:37
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A,B,C])]
class OneRuleWithMultipleNonterminalsTest(TestCase):
def test_transform(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
com = ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(com.rules_count(), 2)
self.assertEqual(len(com.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, com.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], com.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(com.nonterms_count(), 5)
self.assertEqual(len(com.nonterms()), 5)
def test_transformShouldNotChange(self):
g = Grammar(nonterminals=[S, A,B,C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(g.rules_count(), 1)
self.assertEqual(len(g.rules()), 1)
self.assertEqual(g.rules()[0], Rules)
def test_transformShouldChange(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g, transform_grammar=True)
self.assertEqual(g.rules_count(), 2)
self.assertEqual(len(g.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, g.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], g.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(g.nonterms_count(), 5)
self.assertEqual(len(g.nonterms()), 5)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test with single rule with three nonterminals for Chomsky normal form transformation<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 28.08.2017 14:37
:Licence GNUv3
Part of grammpy-transforms
"""
from unittest import TestCase, main
from grammpy import *
from grammpy_transforms import ContextFree
class S(Nonterminal): pass
class A(Nonterminal): pass
class B(Nonterminal): pass
class C(Nonterminal): pass
class Rules(Rule):
rules = [([S], [A,B,C])]
class OneRuleWithMultipleNonterminalsTest(TestCase):
def test_transform(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
com = ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(com.rules_count(), 2)
self.assertEqual(len(com.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, com.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], com.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(com.nonterms_count(), 5)
self.assertEqual(len(com.nonterms()), 5)
def test_transformShouldNotChange(self):
g = Grammar(nonterminals=[S, A,B,C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g)
self.assertEqual(g.rules_count(), 1)
self.assertEqual(len(g.rules()), 1)
self.assertEqual(g.rules()[0], Rules)
def test_transformShouldChange(self):
g = Grammar(nonterminals=[S, A, B, C],
rules=[Rules])
ContextFree.transform_to_chomsky_normal_form(g, transform_grammar=True)
self.assertEqual(g.rules_count(), 2)
self.assertEqual(len(g.rules()), 2)
fromS = list(filter(lambda r: r.fromSymbol == S, g.rules()))[0]
self.assertEqual(fromS.right[0], A)
temp = fromS.right[1]
fromTemp = list(filter(lambda r: r.right == [B, C], g.rules()))[0]
self.assertEqual(temp, fromTemp.fromSymbol)
self.assertEqual(g.nonterms_count(), 5)
self.assertEqual(len(g.nonterms()), 5)
if __name__ == '__main__':
main()
|
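For context on the record above: the transformation under test rewrites any rule whose right-hand side has more than two symbols into a chain of binary rules, so S -> A B C becomes S -> A <BC> plus <BC> -> B C, with <BC> a fresh helper nonterminal (hence the two rules and five nonterminals the assertions expect). The sketch below is a minimal, library-independent illustration; the binarize helper and the (lhs, rhs) tuple encoding are assumptions for this example, not grammpy's API.

# Hedged sketch of the binarization step behind Chomsky normal form.
# Rules are (lhs, rhs) tuples here purely for illustration.
def binarize(rules):
    """Split every rule whose right side has more than two symbols."""
    out = []
    for lhs, rhs in rules:
        while len(rhs) > 2:
            head, rest = rhs[0], rhs[1:]
            temp = '<' + ''.join(rest) + '>'  # fresh helper nonterminal
            out.append((lhs, (head, temp)))
            lhs, rhs = temp, rest
        out.append((lhs, tuple(rhs)))
    return out

print(binarize([('S', ('A', 'B', 'C'))]))
# [('S', ('A', '<BC>')), ('<BC>', ('B', 'C'))]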
|
62ef8f6c90d3d1ed0d850a893e9711d410dfeb16
|
entities/migrations/0045_auto_20160922_1330.py
|
entities/migrations/0045_auto_20160922_1330.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:30
from __future__ import unicode_literals
from django.db import migrations
def set_groupcontent_group(apps, schema_editor):
Group1 = apps.get_model('entities.Group')
Group2 = apps.get_model('groups.Group')
GroupContent = apps.get_model('entities.GroupContent')
for gc in GroupContent.objects.values('id', 'group_id'):
g1 = Group1.objects.get(id=gc['group_id'])
g2 = Group2.objects.get(slug=g1.slug)
GroupContent.objects.filter(id=gc['id']).update(group_id=g2.id)
class Migration(migrations.Migration):
dependencies = [
('entities', '0044_auto_20160922_1118'),
]
operations = [
migrations.RunPython(set_groupcontent_group)
]
|
Add migration to fix groupcontent foreign key
|
Add migration to fix groupcontent foreign key
|
Python
|
agpl-3.0
|
stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten,stadtgestalten/stadtgestalten
|
Add migration to fix groupcontent foreign key
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:30
from __future__ import unicode_literals
from django.db import migrations
def set_groupcontent_group(apps, schema_editor):
Group1 = apps.get_model('entities.Group')
Group2 = apps.get_model('groups.Group')
GroupContent = apps.get_model('entities.GroupContent')
for gc in GroupContent.objects.values('id', 'group_id'):
g1 = Group1.objects.get(id=gc['group_id'])
g2 = Group2.objects.get(slug=g1.slug)
GroupContent.objects.filter(id=gc['id']).update(group_id=g2.id)
class Migration(migrations.Migration):
dependencies = [
('entities', '0044_auto_20160922_1118'),
]
operations = [
migrations.RunPython(set_groupcontent_group)
]
|
<commit_before><commit_msg>Add migration to fix groupcontent foreign key<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:30
from __future__ import unicode_literals
from django.db import migrations
def set_groupcontent_group(apps, schema_editor):
Group1 = apps.get_model('entities.Group')
Group2 = apps.get_model('groups.Group')
GroupContent = apps.get_model('entities.GroupContent')
for gc in GroupContent.objects.values('id', 'group_id'):
g1 = Group1.objects.get(id=gc['group_id'])
g2 = Group2.objects.get(slug=g1.slug)
GroupContent.objects.filter(id=gc['id']).update(group_id=g2.id)
class Migration(migrations.Migration):
dependencies = [
('entities', '0044_auto_20160922_1118'),
]
operations = [
migrations.RunPython(set_groupcontent_group)
]
|
Add migration to fix groupcontent foreign key# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:30
from __future__ import unicode_literals
from django.db import migrations
def set_groupcontent_group(apps, schema_editor):
Group1 = apps.get_model('entities.Group')
Group2 = apps.get_model('groups.Group')
GroupContent = apps.get_model('entities.GroupContent')
for gc in GroupContent.objects.values('id', 'group_id'):
g1 = Group1.objects.get(id=gc['group_id'])
g2 = Group2.objects.get(slug=g1.slug)
GroupContent.objects.filter(id=gc['id']).update(group_id=g2.id)
class Migration(migrations.Migration):
dependencies = [
('entities', '0044_auto_20160922_1118'),
]
operations = [
migrations.RunPython(set_groupcontent_group)
]
|
<commit_before><commit_msg>Add migration to fix groupcontent foreign key<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-22 11:30
from __future__ import unicode_literals
from django.db import migrations
def set_groupcontent_group(apps, schema_editor):
Group1 = apps.get_model('entities.Group')
Group2 = apps.get_model('groups.Group')
GroupContent = apps.get_model('entities.GroupContent')
for gc in GroupContent.objects.values('id', 'group_id'):
g1 = Group1.objects.get(id=gc['group_id'])
g2 = Group2.objects.get(slug=g1.slug)
GroupContent.objects.filter(id=gc['id']).update(group_id=g2.id)
class Migration(migrations.Migration):
dependencies = [
('entities', '0044_auto_20160922_1118'),
]
operations = [
migrations.RunPython(set_groupcontent_group)
]
|
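A side note on the migration above: RunPython with only a forward function is irreversible, so migrating the entities app backwards past 0045 would fail. A common refinement — an assumption here, not part of the original commit — is to pass migrations.RunPython.noop as the reverse callable:

from django.db import migrations

def set_groupcontent_group(apps, schema_editor):
    ...  # forward remap via the shared slug, exactly as in the migration above

class Migration(migrations.Migration):
    dependencies = [('entities', '0044_auto_20160922_1118')]
    operations = [
        # RunPython.noop makes the reverse direction a harmless no-op,
        # so the migration can be unapplied without raising.
        migrations.RunPython(set_groupcontent_group, migrations.RunPython.noop),
    ]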
|
eb5f3db33bf607b21dfc132127e873c9722c687e
|
workalendar/brazil_holidays_set.py
|
workalendar/brazil_holidays_set.py
|
from .registry import registry
from workalendar.america import Brazil, BrazilBankCalendar
# Commemorative holidays list
COMMEMORATIVE_HOLIDAYS = [
    'Consciência Negra',
]
def brazil_all_holidays_set(year):
"Returns all holidays in brazil with their respective type and coverage"
holidays_set = []
# Get brazilian national holidays
cal = Brazil()
for national_holidays in cal.holidays(year):
if national_holidays in COMMEMORATIVE_HOLIDAYS:
tipo_feriado = 'C'
else:
tipo_feriado = 'F'
if [national_holidays, 'N', tipo_feriado] not in holidays_set:
holidays_set.append([national_holidays, 'N', tipo_feriado])
# Get brazilian bank holidays
cal = BrazilBankCalendar()
for bank_holidays in cal.holidays(year):
if [bank_holidays, 'N', 'B'] not in holidays_set:
holidays_set.append([bank_holidays, 'N', 'B'])
# Get holidays from brazilian state
for state in registry.get_subregions('BR'):
cal_state = registry.get_calendar_class(state)()
for state_holidays in cal_state.holidays(year):
if [state_holidays, 'E', 'F'] not in holidays_set \
and [state_holidays, 'M', 'F'] not in holidays_set:
holidays_set.append([state_holidays, 'E', 'F'])
# Get brazilian municipal holidays
for city in registry.get_subregions(state):
cal_city = registry.get_calendar_class(city)()
for city_holiday in cal_city.holidays(year):
if [city_holiday, 'M', 'F'] not in holidays_set \
and [city_holiday, 'E', 'F'] not in holidays_set:
holidays_set.append([city_holiday, 'M', 'F'])
return holidays_set
|
Add script to get all brazilian state with their respective type and coverage
|
Add script to get all brazilian state with their respective type and coverage
|
Python
|
mit
|
novafloss/workalendar,novapost/workalendar,jaraco/calendra
|
Add script to get all brazilian state with their respective type and coverage
|
from .registry import registry
from workalendar.america import Brazil, BrazilBankCalendar
# Commemorative holidays list
COMMEMORATIVE_HOLIDAYS = [
    'Consciência Negra',
]
def brazil_all_holidays_set(year):
"Returns all holidays in brazil with their respective type and coverage"
holidays_set = []
# Get brazilian national holidays
cal = Brazil()
for national_holidays in cal.holidays(year):
if national_holidays in COMMEMORATIVE_HOLIDAYS:
tipo_feriado = 'C'
else:
tipo_feriado = 'F'
if [national_holidays, 'N', tipo_feriado] not in holidays_set:
holidays_set.append([national_holidays, 'N', tipo_feriado])
# Get brazilian bank holidays
cal = BrazilBankCalendar()
for bank_holidays in cal.holidays(year):
if [bank_holidays, 'N', 'B'] not in holidays_set:
holidays_set.append([bank_holidays, 'N', 'B'])
# Get holidays from brazilian state
for state in registry.get_subregions('BR'):
cal_state = registry.get_calendar_class(state)()
for state_holidays in cal_state.holidays(year):
if [state_holidays, 'E', 'F'] not in holidays_set \
and [state_holidays, 'M', 'F'] not in holidays_set:
holidays_set.append([state_holidays, 'E', 'F'])
# Get brazilian municipal holidays
for city in registry.get_subregions(state):
cal_city = registry.get_calendar_class(city)()
for city_holiday in cal_city.holidays(year):
if [city_holiday, 'M', 'F'] not in holidays_set \
and [city_holiday, 'E', 'F'] not in holidays_set:
holidays_set.append([city_holiday, 'M', 'F'])
return holidays_set
|
<commit_before><commit_msg>Add script to get all brazilian state with their respective type and coverage<commit_after>
|
from .registry import registry
from workalendar.america import Brazil, BrazilBankCalendar
# Commemorative holidays list
COMMEMORATIVE_HOLIDAYS = [
    'Consciência Negra',
]
def brazil_all_holidays_set(year):
"Returns all holidays in brazil with their respective type and coverage"
holidays_set = []
# Get brazilian national holidays
cal = Brazil()
for national_holidays in cal.holidays(year):
if national_holidays in COMMEMORATIVE_HOLIDAYS:
tipo_feriado = 'C'
else:
tipo_feriado = 'F'
if [national_holidays, 'N', tipo_feriado] not in holidays_set:
holidays_set.append([national_holidays, 'N', tipo_feriado])
# Get brazilian bank holidays
cal = BrazilBankCalendar()
for bank_holidays in cal.holidays(year):
if [bank_holidays, 'N', 'B'] not in holidays_set:
holidays_set.append([bank_holidays, 'N', 'B'])
# Get holidays from brazilian state
for state in registry.get_subregions('BR'):
cal_state = registry.get_calendar_class(state)()
for state_holidays in cal_state.holidays(year):
if [state_holidays, 'E', 'F'] not in holidays_set \
and [state_holidays, 'M', 'F'] not in holidays_set:
holidays_set.append([state_holidays, 'E', 'F'])
# Get brazilian municipal holidays
for city in registry.get_subregions(state):
cal_city = registry.get_calendar_class(city)()
for city_holiday in cal_city.holidays(year):
if [city_holiday, 'M', 'F'] not in holidays_set \
and [city_holiday, 'E', 'F'] not in holidays_set:
holidays_set.append([city_holiday, 'M', 'F'])
return holidays_set
|
Add script to get all brazilian state with their respective type and coveragefrom .registry import registry
from workalendar.america import Brazil, BrazilBankCalendar
# Commemorative holidays list
COMMEMORATIVE_HOLIDAYS = [
    'Consciência Negra',
]
def brazil_all_holidays_set(year):
"Returns all holidays in brazil with their respective type and coverage"
holidays_set = []
# Get brazilian national holidays
cal = Brazil()
for national_holidays in cal.holidays(year):
if national_holidays in COMMEMORATIVE_HOLIDAYS:
tipo_feriado = 'C'
else:
tipo_feriado = 'F'
if [national_holidays, 'N', tipo_feriado] not in holidays_set:
holidays_set.append([national_holidays, 'N', tipo_feriado])
# Get brazilian bank holidays
cal = BrazilBankCalendar()
for bank_holidays in cal.holidays(year):
if [bank_holidays, 'N', 'B'] not in holidays_set:
holidays_set.append([bank_holidays, 'N', 'B'])
# Get holidays from brazilian state
for state in registry.get_subregions('BR'):
cal_state = registry.get_calendar_class(state)()
for state_holidays in cal_state.holidays(year):
if [state_holidays, 'E', 'F'] not in holidays_set \
and [state_holidays, 'M', 'F'] not in holidays_set:
holidays_set.append([state_holidays, 'E', 'F'])
# Get brazilian municipal holidays
for city in registry.get_subregions(state):
cal_city = registry.get_calendar_class(city)()
for city_holiday in cal_city.holidays(year):
if [city_holiday, 'M', 'F'] not in holidays_set \
and [city_holiday, 'E', 'F'] not in holidays_set:
holidays_set.append([city_holiday, 'M', 'F'])
return holidays_set
|
<commit_before><commit_msg>Add script to get all brazilian state with their respective type and coverage<commit_after>from .registry import registry
from workalendar.america import Brazil, BrazilBankCalendar
# Commemorative holidays list
COMMEMORATIVE_HOLIDAYS = [
    'Consciência Negra',
]
def brazil_all_holidays_set(year):
"Returns all holidays in brazil with their respective type and coverage"
holidays_set = []
# Get brazilian national holidays
cal = Brazil()
for national_holidays in cal.holidays(year):
if national_holidays in COMMEMORATIVE_HOLIDAYS:
tipo_feriado = 'C'
else:
tipo_feriado = 'F'
if [national_holidays, 'N', tipo_feriado] not in holidays_set:
holidays_set.append([national_holidays, 'N', tipo_feriado])
# Get brazilian bank holidays
cal = BrazilBankCalendar()
for bank_holidays in cal.holidays(year):
if [bank_holidays, 'N', 'B'] not in holidays_set:
holidays_set.append([bank_holidays, 'N', 'B'])
# Get holidays from brazilian state
for state in registry.get_subregions('BR'):
cal_state = registry.get_calendar_class(state)()
for state_holidays in cal_state.holidays(year):
if [state_holidays, 'E', 'F'] not in holidays_set \
and [state_holidays, 'M', 'F'] not in holidays_set:
holidays_set.append([state_holidays, 'E', 'F'])
# Get brazilian municipal holidays
for city in registry.get_subregions(state):
cal_city = registry.get_calendar_class(city)()
for city_holiday in cal_city.holidays(year):
if [city_holiday, 'M', 'F'] not in holidays_set \
and [city_holiday, 'E', 'F'] not in holidays_set:
holidays_set.append([city_holiday, 'M', 'F'])
return holidays_set
|
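One implementation note on the script above: each `not in holidays_set` check scans a list of lists, so building the result is quadratic in the number of holidays. A hedged alternative (not from the commit) tracks seen entries in a set of tuples for O(1) membership tests:

# Illustrative helper, not part of workalendar: deduplicate
# (holiday, coverage, kind) entries with a set instead of list scans.
def dedup_holidays(entries):
    seen = set()
    out = []
    for entry in entries:
        if entry not in seen:
            seen.add(entry)
            out.append(list(entry))  # keep the list-of-lists output shape
    return out

print(dedup_holidays([('Tiradentes', 'N', 'F'), ('Tiradentes', 'N', 'F')]))
# [['Tiradentes', 'N', 'F']]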
|
3134a061d9bd1d80fd99d223537f6c046a862d31
|
release_notes_prep.py
|
release_notes_prep.py
|
# Find head of current branch and merge commit of last release
# Get merges in between
current_branch_head = '8dfeae34'
last_release_head = '3550d330'
cmd = 'git log --oneline --merges %s ^%s' % (current_branch_head, last_release_head)
import subprocess
merge_commits = subprocess.check_output(cmd, shell=True).split('\n')
# remove all merges of master or origin/master
master_merges = ["Merge branch 'master'", "Merge remote-tracking branch 'origin/master'"]
for master_merge in master_merges:
merge_commits = [commit for commit in merge_commits if commit.find(master_merge) == -1 and commit.strip()]
import re
# map pr #s to commit messages
prs_to_commits = {re.search('#\d+', commit).group(0):commit for commit in merge_commits}
# Get PRs from CHANGELOG.rst
changelog_prs = []
changelog = open('CHANGELOG.rst', 'r')
for line in changelog.readlines():
changelog_prs.extend(re.findall('#\d+', line))
for pr in changelog_prs:
if pr in prs_to_commits:
del prs_to_commits[pr]
# These should now be the PRs that do not appear in the changelog
for pr, commit in prs_to_commits.items():
# print out lines that can be pasted into GitHub
print '- [ ] %s' % commit[commit.find('#'):]
|
Add script to id missing CHANGELOG entries
|
Add script to id missing CHANGELOG entries
|
Python
|
apache-2.0
|
Kitware/girder,Xarthisius/girder,Xarthisius/girder,girder/girder,data-exp-lab/girder,manthey/girder,manthey/girder,jbeezley/girder,RafaelPalomar/girder,Kitware/girder,kotfic/girder,girder/girder,girder/girder,RafaelPalomar/girder,kotfic/girder,Xarthisius/girder,Xarthisius/girder,manthey/girder,data-exp-lab/girder,Kitware/girder,data-exp-lab/girder,jbeezley/girder,data-exp-lab/girder,kotfic/girder,jbeezley/girder,RafaelPalomar/girder,kotfic/girder,manthey/girder,data-exp-lab/girder,girder/girder,Xarthisius/girder,RafaelPalomar/girder,jbeezley/girder,RafaelPalomar/girder,kotfic/girder,Kitware/girder
|
Add script to id missing CHANGELOG entries
|
# Find head of current branch and merge commit of last release
# Get merges in between
current_branch_head = '8dfeae34'
last_release_head = '3550d330'
cmd = 'git log --oneline --merges %s ^%s' % (current_branch_head, last_release_head)
import subprocess
merge_commits = subprocess.check_output(cmd, shell=True).split('\n')
# remove all merges of master or origin/master
master_merges = ["Merge branch 'master'", "Merge remote-tracking branch 'origin/master'"]
for master_merge in master_merges:
merge_commits = [commit for commit in merge_commits if commit.find(master_merge) == -1 and commit.strip()]
import re
# map pr #s to commit messages
prs_to_commits = {re.search('#\d+', commit).group(0):commit for commit in merge_commits}
# Get PRs from CHANGELOG.rst
changelog_prs = []
changelog = open('CHANGELOG.rst', 'r')
for line in changelog.readlines():
changelog_prs.extend(re.findall('#\d+', line))
for pr in changelog_prs:
if pr in prs_to_commits:
del prs_to_commits[pr]
# These should now be the PRs that do not appear in the changelog
for pr, commit in prs_to_commits.items():
# print out lines that can be pasted into GitHub
print '- [ ] %s' % commit[commit.find('#'):]
|
<commit_before><commit_msg>Add script to id missing CHANGELOG entries<commit_after>
|
# Find head of current branch and merge commit of last release
# Get merges in between
current_branch_head = '8dfeae34'
last_release_head = '3550d330'
cmd = 'git log --oneline --merges %s ^%s' % (current_branch_head, last_release_head)
import subprocess
merge_commits = subprocess.check_output(cmd, shell=True).split('\n')
# remove all merges of master or origin/master
master_merges = ["Merge branch 'master'", "Merge remote-tracking branch 'origin/master'"]
for master_merge in master_merges:
merge_commits = [commit for commit in merge_commits if commit.find(master_merge) == -1 and commit.strip()]
import re
# map pr #s to commit messages
prs_to_commits = {re.search('#\d+', commit).group(0):commit for commit in merge_commits}
# Get PRs from CHANGELOG.rst
changelog_prs = []
changelog = open('CHANGELOG.rst', 'r')
for line in changelog.readlines():
changelog_prs.extend(re.findall('#\d+', line))
for pr in changelog_prs:
if pr in prs_to_commits:
del prs_to_commits[pr]
# These should now be the PRs that do not appear in the changelog
for pr, commit in prs_to_commits.items():
# print out lines that can be pasted into GitHub
print '- [ ] %s' % commit[commit.find('#'):]
|
Add script to id missing CHANGELOG entries# Find head of current branch and merge commit of last release
# Get merges in between
current_branch_head = '8dfeae34'
last_release_head = '3550d330'
cmd = 'git log --oneline --merges %s ^%s' % (current_branch_head, last_release_head)
import subprocess
merge_commits = subprocess.check_output(cmd, shell=True).split('\n')
# remove all merges of master or origin/master
master_merges = ["Merge branch 'master'", "Merge remote-tracking branch 'origin/master'"]
for master_merge in master_merges:
merge_commits = [commit for commit in merge_commits if commit.find(master_merge) == -1 and commit.strip()]
import re
# map pr #s to commit messages
prs_to_commits = {re.search('#\d+', commit).group(0):commit for commit in merge_commits}
# Get PRs from CHANGELOG.rst
changelog_prs = []
changelog = open('CHANGELOG.rst', 'r')
for line in changelog.readlines():
changelog_prs.extend(re.findall('#\d+', line))
for pr in changelog_prs:
if pr in prs_to_commits:
del prs_to_commits[pr]
# These should now be the PRs that do not appear in the changelog
for pr, commit in prs_to_commits.items():
# print out lines that can be pasted into GitHub
print '- [ ] %s' % commit[commit.find('#'):]
|
<commit_before><commit_msg>Add script to id missing CHANGELOG entries<commit_after># Find head of current branch and merge commit of last release
# Get merges in between
current_branch_head = '8dfeae34'
last_release_head = '3550d330'
cmd = 'git log --oneline --merges %s ^%s' % (current_branch_head, last_release_head)
import subprocess
merge_commits = subprocess.check_output(cmd, shell=True).split('\n')
# remove all merges of master or origin/master
master_merges = ["Merge branch 'master'", "Merge remote-tracking branch 'origin/master'"]
for master_merge in master_merges:
merge_commits = [commit for commit in merge_commits if commit.find(master_merge) == -1 and commit.strip()]
import re
# map pr #s to commit messages
prs_to_commits = {re.search('#\d+', commit).group(0):commit for commit in merge_commits}
# Get PRs from CHANGELOG.rst
changelog_prs = []
changelog = open('CHANGELOG.rst', 'r')
for line in changelog.readlines():
changelog_prs.extend(re.findall('#\d+', line))
for pr in changelog_prs:
if pr in prs_to_commits:
del prs_to_commits[pr]
# These should now be the PRs that do not appear in the changelog
for pr, commit in prs_to_commits.items():
# print out lines that can be pasted into GitHub
print '- [ ] %s' % commit[commit.find('#'):]
|
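One fragility in the script above is worth flagging: re.search('#\d+', commit) returns None for a merge commit whose message carries no #NN reference, so the immediate .group(0) would raise AttributeError. A defensive variant — an assumption, not the committed code — simply skips such commits:

# Hedged hardening of the PR-number extraction; commit strings are made up.
import re

merge_commits = [
    'abc123 Merge pull request #42 from org/branch',
    "def456 Merge branch 'hotfix'",  # no PR number anywhere
]
prs_to_commits = {}
for commit in merge_commits:
    match = re.search(r'#\d+', commit)
    if match:  # ignore merges that carry no PR reference
        prs_to_commits[match.group(0)] = commit
print(prs_to_commits)  # {'#42': 'abc123 Merge pull request #42 from org/branch'}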
|
98f37bb6fff90d9a4385ceea5454f0b5146e6dee
|
wagtail/wagtailimages/rich_text.py
|
wagtail/wagtailimages/rich_text.py
|
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
format = get_image_format(attrs['format'])
if for_editor:
try:
return format.image_to_editor_html(image, attrs['alt'])
except:
return ''
else:
return format.image_to_html(image, attrs['alt'])
except Image.DoesNotExist:
return "<img>"
|
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
except Image.DoesNotExist:
return "<img>"
format = get_image_format(attrs['format'])
if for_editor:
return format.image_to_editor_html(image, attrs['alt'])
else:
return format.image_to_html(image, attrs['alt'])
|
Remove bare except in unnecessary try block
|
Remove bare except in unnecessary try block
From a comment by @gasman on #1684:
> It appears the try/catch in ImageEmbedHandler was added here: 74b9f43
>
> Since the rest of the commit doesn't deal with images at all, and the
> commit makes it clear that the corresponding change to
> MediaEmbedHandler was intended to preserve the existing 'fail
> silently' behaviour in the light of the new exceptions added for media
> embeds - I think this try/catch is redundant. `Format.image_to_html`
> does its own catching of image IO errors, which seems to be
> sufficiently robust (if it wasn't, we'd be seeing errors on front-end
> page rendering), so I think this try/catch can reasonably be deleted.
https://github.com/torchbox/wagtail/pull/1684#issuecomment-140695060
|
Python
|
bsd-3-clause
|
mikedingjan/wagtail,Toshakins/wagtail,nutztherookie/wagtail,quru/wagtail,mayapurmedia/wagtail,iansprice/wagtail,mixxorz/wagtail,zerolab/wagtail,iansprice/wagtail,takeflight/wagtail,gogobook/wagtail,mixxorz/wagtail,hamsterbacke23/wagtail,FlipperPA/wagtail,kurtw/wagtail,davecranwell/wagtail,chrxr/wagtail,nrsimha/wagtail,kaedroho/wagtail,hanpama/wagtail,kurtw/wagtail,rsalmaso/wagtail,rsalmaso/wagtail,serzans/wagtail,nealtodd/wagtail,nealtodd/wagtail,mayapurmedia/wagtail,gasman/wagtail,gasman/wagtail,thenewguy/wagtail,mayapurmedia/wagtail,Toshakins/wagtail,wagtail/wagtail,timorieber/wagtail,gogobook/wagtail,davecranwell/wagtail,FlipperPA/wagtail,takeflight/wagtail,nilnvoid/wagtail,thenewguy/wagtail,gogobook/wagtail,torchbox/wagtail,hamsterbacke23/wagtail,iansprice/wagtail,Tivix/wagtail,davecranwell/wagtail,quru/wagtail,nimasmi/wagtail,thenewguy/wagtail,davecranwell/wagtail,kurtrwall/wagtail,takeflight/wagtail,nutztherookie/wagtail,gogobook/wagtail,serzans/wagtail,zerolab/wagtail,iansprice/wagtail,nrsimha/wagtail,nealtodd/wagtail,nutztherookie/wagtail,JoshBarr/wagtail,inonit/wagtail,kurtrwall/wagtail,torchbox/wagtail,FlipperPA/wagtail,JoshBarr/wagtail,wagtail/wagtail,chrxr/wagtail,torchbox/wagtail,nimasmi/wagtail,zerolab/wagtail,wagtail/wagtail,timorieber/wagtail,nimasmi/wagtail,FlipperPA/wagtail,Toshakins/wagtail,thenewguy/wagtail,timorieber/wagtail,mayapurmedia/wagtail,jnns/wagtail,rsalmaso/wagtail,inonit/wagtail,gasman/wagtail,kaedroho/wagtail,nrsimha/wagtail,Tivix/wagtail,rsalmaso/wagtail,Toshakins/wagtail,Tivix/wagtail,hanpama/wagtail,inonit/wagtail,kurtw/wagtail,kaedroho/wagtail,takeflight/wagtail,hanpama/wagtail,serzans/wagtail,chrxr/wagtail,mixxorz/wagtail,mikedingjan/wagtail,hamsterbacke23/wagtail,zerolab/wagtail,mikedingjan/wagtail,mixxorz/wagtail,wagtail/wagtail,kaedroho/wagtail,mikedingjan/wagtail,kaedroho/wagtail,zerolab/wagtail,nrsimha/wagtail,mixxorz/wagtail,kurtrwall/wagtail,JoshBarr/wagtail,inonit/wagtail,nealtodd/wagtail,quru/wagtail,nilnvoid/wagtail,timorieber/wagtail,nimasmi/wagtail,thenewguy/wagtail,hamsterbacke23/wagtail,gasman/wagtail,nilnvoid/wagtail,chrxr/wagtail,kurtrwall/wagtail,kurtw/wagtail,Tivix/wagtail,jnns/wagtail,torchbox/wagtail,nutztherookie/wagtail,hanpama/wagtail,jnns/wagtail,nilnvoid/wagtail,jnns/wagtail,JoshBarr/wagtail,wagtail/wagtail,quru/wagtail,rsalmaso/wagtail,serzans/wagtail,gasman/wagtail
|
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
format = get_image_format(attrs['format'])
if for_editor:
try:
return format.image_to_editor_html(image, attrs['alt'])
except:
return ''
else:
return format.image_to_html(image, attrs['alt'])
except Image.DoesNotExist:
return "<img>"
Remove bare except in unnecessary try block
From a comment by @gasman on #1684:
> It appears the try/catch in ImageEmbedHandler was added here: 74b9f43
>
> Since the rest of the commit doesn't deal with images at all, and the
> commit makes it clear that the corresponding change to
> MediaEmbedHandler was intended to preserve the existing 'fail
> silently' behaviour in the light of the new exceptions added for media
> embeds - I think this try/catch is redundant. `Format.image_to_html`
> does its own catching of image IO errors, which seems to be
> sufficiently robust (if it wasn't, we'd be seeing errors on front-end
> page rendering), so I think this try/catch can reasonably be deleted.
https://github.com/torchbox/wagtail/pull/1684#issuecomment-140695060
|
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
except Image.DoesNotExist:
return "<img>"
format = get_image_format(attrs['format'])
if for_editor:
return format.image_to_editor_html(image, attrs['alt'])
else:
return format.image_to_html(image, attrs['alt'])
|
<commit_before>from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
format = get_image_format(attrs['format'])
if for_editor:
try:
return format.image_to_editor_html(image, attrs['alt'])
except:
return ''
else:
return format.image_to_html(image, attrs['alt'])
except Image.DoesNotExist:
return "<img>"
<commit_msg>Remove bare except in unnecessary try block
From a comment by @gasman on #1684:
> It appears the try/catch in ImageEmbedHandler was added here: 74b9f43
>
> Since the rest of the commit doesn't deal with images at all, and the
> commit makes it clear that the corresponding change to
> MediaEmbedHandler was intended to preserve the existing 'fail
> silently' behaviour in the light of the new exceptions added for media
> embeds - I think this try/catch is redundant. `Format.image_to_html`
> does its own catching of image IO errors, which seems to be
> sufficiently robust (if it wasn't, we'd be seeing errors on front-end
> page rendering), so I think this try/catch can reasonably be deleted.
https://github.com/torchbox/wagtail/pull/1684#issuecomment-140695060<commit_after>
|
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
except Image.DoesNotExist:
return "<img>"
format = get_image_format(attrs['format'])
if for_editor:
return format.image_to_editor_html(image, attrs['alt'])
else:
return format.image_to_html(image, attrs['alt'])
|
from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
format = get_image_format(attrs['format'])
if for_editor:
try:
return format.image_to_editor_html(image, attrs['alt'])
except:
return ''
else:
return format.image_to_html(image, attrs['alt'])
except Image.DoesNotExist:
return "<img>"
Remove bare except in unnecessary try block
From a comment by @gasman on #1684:
> It appears the try/catch in ImageEmbedHandler was added here: 74b9f43
>
> Since the rest of the commit doesn't deal with images at all, and the
> commit makes it clear that the corresponding change to
> MediaEmbedHandler was intended to preserve the existing 'fail
> silently' behaviour in the light of the new exceptions added for media
> embeds - I think this try/catch is redundant. `Format.image_to_html`
> does its own catching of image IO errors, which seems to be
> sufficiently robust (if it wasn't, we'd be seeing errors on front-end
> page rendering), so I think this try/catch can reasonably be deleted.
https://github.com/torchbox/wagtail/pull/1684#issuecomment-140695060from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
except Image.DoesNotExist:
return "<img>"
format = get_image_format(attrs['format'])
if for_editor:
return format.image_to_editor_html(image, attrs['alt'])
else:
return format.image_to_html(image, attrs['alt'])
|
<commit_before>from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
format = get_image_format(attrs['format'])
if for_editor:
try:
return format.image_to_editor_html(image, attrs['alt'])
except:
return ''
else:
return format.image_to_html(image, attrs['alt'])
except Image.DoesNotExist:
return "<img>"
<commit_msg>Remove bare except in unnecessary try block
From a comment by @gasman on #1684:
> It appears the try/catch in ImageEmbedHandler was added here: 74b9f43
>
> Since the rest of the commit doesn't deal with images at all, and the
> commit makes it clear that the corresponding change to
> MediaEmbedHandler was intended to preserve the existing 'fail
> silently' behaviour in the light of the new exceptions added for media
> embeds - I think this try/catch is redundant. `Format.image_to_html`
> does its own catching of image IO errors, which seems to be
> sufficiently robust (if it wasn't, we'd be seeing errors on front-end
> page rendering), so I think this try/catch can reasonably be deleted.
https://github.com/torchbox/wagtail/pull/1684#issuecomment-140695060<commit_after>from wagtail.wagtailimages.models import get_image_model
from wagtail.wagtailimages.formats import get_image_format
class ImageEmbedHandler(object):
"""
ImageEmbedHandler will be invoked whenever we encounter an element in HTML content
with an attribute of data-embedtype="image". The resulting element in the database
representation will be:
<embed embedtype="image" id="42" format="thumb" alt="some custom alt text">
"""
@staticmethod
def get_db_attributes(tag):
"""
Given a tag that we've identified as an image embed (because it has a
data-embedtype="image" attribute), return a dict of the attributes we should
have on the resulting <embed> element.
"""
return {
'id': tag['data-id'],
'format': tag['data-format'],
'alt': tag['data-alt'],
}
@staticmethod
def expand_db_attributes(attrs, for_editor):
"""
Given a dict of attributes from the <embed> tag, return the real HTML
representation.
"""
Image = get_image_model()
try:
image = Image.objects.get(id=attrs['id'])
except Image.DoesNotExist:
return "<img>"
format = get_image_format(attrs['format'])
if for_editor:
return format.image_to_editor_html(image, attrs['alt'])
else:
return format.image_to_html(image, attrs['alt'])
|
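The refactor above is a specific instance of a general rule: keep the try block as narrow as the exception you actually expect, and avoid a bare except:, which swallows real bugs along with the anticipated failure. A minimal, wagtail-free sketch of the same pattern:

# Generic illustration only; the names here are invented for the example.
def render(lookup, key):
    try:
        value = lookup[key]  # only this lookup is expected to raise KeyError
    except KeyError:
        return '<missing>'
    return value.upper()  # a bug here (e.g. a None value) now surfaces loudly

print(render({'a': 'hi'}, 'a'))  # HI
print(render({'a': 'hi'}, 'b'))  # <missing>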
487a4c070d37fc201c9863119d6b0cee405439e4
|
ckanofworms/upgrades/20130100501_drop_indexes.py
|
ckanofworms/upgrades/20130100501_drop_indexes.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# CKAN-of-Worms -- A logger for errors found in CKAN datasets
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/ckan-of-worms
#
# This file is part of CKAN-of-Worms.
#
# CKAN-of-Worms is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-of-Worms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Drop indexes to ensure that they are fully rebuild."""
import logging
import os
import sys
from ckanofworms import objects
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def main():
import argparse
import paste.deploy
from ckanofworms import environment, model
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config_path', help = 'path of CKAN-of-Worms Paste configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
site_conf = paste.deploy.appconfig('config:{}'.format(os.path.abspath(args.config_path)))
environment.load_environment(site_conf.global_conf, site_conf.local_conf)
status = model.Status.find_one()
if status is None:
status = model.Status()
upgrade(status)
return 0
def upgrade(status):
db = objects.Wrapper.db
for collection_name in db.collection_names():
if collection_name.startswith('system.'):
continue
db[collection_name].drop_indexes()
if status.last_upgrade_name is None or status.last_upgrade_name < app_name:
status.last_upgrade_name = app_name
status.save()
if __name__ == "__main__":
sys.exit(main())
|
Add upgrade script to fully rebuild indexes.
|
Add upgrade script to fully rebuild indexes.
|
Python
|
agpl-3.0
|
etalab/ckan-of-worms,etalab/ckan-of-worms
|
Add upgrade script to fully rebuild indexes.
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# CKAN-of-Worms -- A logger for errors found in CKAN datasets
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/ckan-of-worms
#
# This file is part of CKAN-of-Worms.
#
# CKAN-of-Worms is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-of-Worms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Drop indexes to ensure that they are fully rebuild."""
import logging
import os
import sys
from ckanofworms import objects
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def main():
import argparse
import paste.deploy
from ckanofworms import environment, model
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config_path', help = 'path of CKAN-of-Worms Paste configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
site_conf = paste.deploy.appconfig('config:{}'.format(os.path.abspath(args.config_path)))
environment.load_environment(site_conf.global_conf, site_conf.local_conf)
status = model.Status.find_one()
if status is None:
status = model.Status()
upgrade(status)
return 0
def upgrade(status):
db = objects.Wrapper.db
for collection_name in db.collection_names():
if collection_name.startswith('system.'):
continue
db[collection_name].drop_indexes()
if status.last_upgrade_name is None or status.last_upgrade_name < app_name:
status.last_upgrade_name = app_name
status.save()
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add upgrade script to fully rebuild indexes.<commit_after>
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# CKAN-of-Worms -- A logger for errors found in CKAN datasets
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/ckan-of-worms
#
# This file is part of CKAN-of-Worms.
#
# CKAN-of-Worms is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-of-Worms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Drop indexes to ensure that they are fully rebuild."""
import logging
import os
import sys
from ckanofworms import objects
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def main():
import argparse
import paste.deploy
from ckanofworms import environment, model
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config_path', help = 'path of CKAN-of-Worms Paste configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
site_conf = paste.deploy.appconfig('config:{}'.format(os.path.abspath(args.config_path)))
environment.load_environment(site_conf.global_conf, site_conf.local_conf)
status = model.Status.find_one()
if status is None:
status = model.Status()
upgrade(status)
return 0
def upgrade(status):
db = objects.Wrapper.db
for collection_name in db.collection_names():
if collection_name.startswith('system.'):
continue
db[collection_name].drop_indexes()
if status.last_upgrade_name is None or status.last_upgrade_name < app_name:
status.last_upgrade_name = app_name
status.save()
if __name__ == "__main__":
sys.exit(main())
|
Add upgrade script to fully rebuild indexes.#! /usr/bin/env python
# -*- coding: utf-8 -*-
# CKAN-of-Worms -- A logger for errors found in CKAN datasets
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/ckan-of-worms
#
# This file is part of CKAN-of-Worms.
#
# CKAN-of-Worms is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-of-Worms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Drop indexes to ensure that they are fully rebuild."""
import logging
import os
import sys
from ckanofworms import objects
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def main():
import argparse
import paste.deploy
from ckanofworms import environment, model
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config_path', help = 'path of CKAN-of-Worms Paste configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
site_conf = paste.deploy.appconfig('config:{}'.format(os.path.abspath(args.config_path)))
environment.load_environment(site_conf.global_conf, site_conf.local_conf)
status = model.Status.find_one()
if status is None:
status = model.Status()
upgrade(status)
return 0
def upgrade(status):
db = objects.Wrapper.db
for collection_name in db.collection_names():
if collection_name.startswith('system.'):
continue
db[collection_name].drop_indexes()
if status.last_upgrade_name is None or status.last_upgrade_name < app_name:
status.last_upgrade_name = app_name
status.save()
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add upgrade script to fully rebuild indexes.<commit_after>#! /usr/bin/env python
# -*- coding: utf-8 -*-
# CKAN-of-Worms -- A logger for errors found in CKAN datasets
# By: Emmanuel Raviart <emmanuel@raviart.com>
#
# Copyright (C) 2013 Etalab
# http://github.com/etalab/ckan-of-worms
#
# This file is part of CKAN-of-Worms.
#
# CKAN-of-Worms is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# CKAN-of-Worms is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Drop indexes to ensure that they are fully rebuild."""
import logging
import os
import sys
from ckanofworms import objects
app_name = os.path.splitext(os.path.basename(__file__))[0]
log = logging.getLogger(app_name)
def main():
import argparse
import paste.deploy
from ckanofworms import environment, model
parser = argparse.ArgumentParser(description = __doc__)
parser.add_argument('config_path', help = 'path of CKAN-of-Worms Paste configuration file')
parser.add_argument('-v', '--verbose', action = 'store_true', help = 'increase output verbosity')
args = parser.parse_args()
logging.basicConfig(level = logging.DEBUG if args.verbose else logging.WARNING, stream = sys.stdout)
site_conf = paste.deploy.appconfig('config:{}'.format(os.path.abspath(args.config_path)))
environment.load_environment(site_conf.global_conf, site_conf.local_conf)
status = model.Status.find_one()
if status is None:
status = model.Status()
upgrade(status)
return 0
def upgrade(status):
db = objects.Wrapper.db
for collection_name in db.collection_names():
if collection_name.startswith('system.'):
continue
db[collection_name].drop_indexes()
if status.last_upgrade_name is None or status.last_upgrade_name < app_name:
status.last_upgrade_name = app_name
status.save()
if __name__ == "__main__":
sys.exit(main())
|
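For readers without the CKAN-of-Worms wrappers, the heart of the upgrade above reduces to a few lines of plain pymongo. The sketch below is an assumption for illustration — the database name and the modern list_collection_names() call are not taken from the original script:

from pymongo import MongoClient

def drop_all_indexes(db):
    """Drop every index (except the mandatory _id) on non-system collections."""
    for name in db.list_collection_names():
        if name.startswith('system.'):
            continue
        db[name].drop_indexes()  # indexes get rebuilt by the next ensure pass

if __name__ == '__main__':
    drop_all_indexes(MongoClient()['ckan_of_worms'])  # db name is hypothetical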
|
e42718a59d9c0bffed524f5764ee119cb616085e
|
py/accounts-merge.py
|
py/accounts-merge.py
|
from collections import defaultdict
class Solution(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
"""
parent = dict()
def find_root(a):
if parent[a] != a:
parent[a] = find_root(parent[a])
return parent[a]
def link(ra, rb):
parent[ra] = rb
for acct in accounts:
name = acct[0]
for email in acct[1:]:
parent.setdefault((email, name), (email, name))
for email in acct[2:]:
ra, rb = find_root((email, name)), find_root((acct[1], name))
if ra != rb:
link(ra, rb)
ans_mapping = defaultdict(list)
for email, name in parent:
ans_mapping[find_root((email, name))].append(email)
ans = []
for (email, name), l in ans_mapping.iteritems():
acct = [name] + sorted(l)
ans.append(acct)
return ans
|
Add py solution for 721. Accounts Merge
|
Add py solution for 721. Accounts Merge
721. Accounts Merge: https://leetcode.com/problems/accounts-merge/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 721. Accounts Merge
721. Accounts Merge: https://leetcode.com/problems/accounts-merge/
|
from collections import defaultdict
class Solution(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
"""
parent = dict()
def find_root(a):
if parent[a] != a:
parent[a] = find_root(parent[a])
return parent[a]
def link(ra, rb):
parent[ra] = rb
for acct in accounts:
name = acct[0]
for email in acct[1:]:
parent.setdefault((email, name), (email, name))
for email in acct[2:]:
ra, rb = find_root((email, name)), find_root((acct[1], name))
if ra != rb:
link(ra, rb)
ans_mapping = defaultdict(list)
for email, name in parent:
ans_mapping[find_root((email, name))].append(email)
ans = []
for (email, name), l in ans_mapping.iteritems():
acct = [name] + sorted(l)
ans.append(acct)
return ans
|
<commit_before><commit_msg>Add py solution for 721. Accounts Merge
721. Accounts Merge: https://leetcode.com/problems/accounts-merge/<commit_after>
|
from collections import defaultdict
class Solution(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
"""
parent = dict()
def find_root(a):
if parent[a] != a:
parent[a] = find_root(parent[a])
return parent[a]
def link(ra, rb):
parent[ra] = rb
for acct in accounts:
name = acct[0]
for email in acct[1:]:
parent.setdefault((email, name), (email, name))
for email in acct[2:]:
ra, rb = find_root((email, name)), find_root((acct[1], name))
if ra != rb:
link(ra, rb)
ans_mapping = defaultdict(list)
for email, name in parent:
ans_mapping[find_root((email, name))].append(email)
ans = []
for (email, name), l in ans_mapping.iteritems():
acct = [name] + sorted(l)
ans.append(acct)
return ans
|
Add py solution for 721. Accounts Merge
721. Accounts Merge: https://leetcode.com/problems/accounts-merge/from collections import defaultdict
class Solution(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
"""
parent = dict()
def find_root(a):
if parent[a] != a:
parent[a] = find_root(parent[a])
return parent[a]
def link(ra, rb):
parent[ra] = rb
for acct in accounts:
name = acct[0]
for email in acct[1:]:
parent.setdefault((email, name), (email, name))
for email in acct[2:]:
ra, rb = find_root((email, name)), find_root((acct[1], name))
if ra != rb:
link(ra, rb)
ans_mapping = defaultdict(list)
for email, name in parent:
ans_mapping[find_root((email, name))].append(email)
ans = []
for (email, name), l in ans_mapping.iteritems():
acct = [name] + sorted(l)
ans.append(acct)
return ans
|
<commit_before><commit_msg>Add py solution for 721. Accounts Merge
721. Accounts Merge: https://leetcode.com/problems/accounts-merge/<commit_after>from collections import defaultdict
class Solution(object):
def accountsMerge(self, accounts):
"""
:type accounts: List[List[str]]
:rtype: List[List[str]]
"""
parent = dict()
def find_root(a):
if parent[a] != a:
parent[a] = find_root(parent[a])
return parent[a]
def link(ra, rb):
parent[ra] = rb
for acct in accounts:
name = acct[0]
for email in acct[1:]:
parent.setdefault((email, name), (email, name))
for email in acct[2:]:
ra, rb = find_root((email, name)), find_root((acct[1], name))
if ra != rb:
link(ra, rb)
ans_mapping = defaultdict(list)
for email, name in parent:
ans_mapping[find_root((email, name))].append(email)
ans = []
for (email, name), l in ans_mapping.iteritems():
acct = [name] + sorted(l)
ans.append(acct)
return ans
|
|
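A quick hedged check of the union-find solution above (Python 2, matching the record's use of iteritems); the sample accounts are illustrative and the order of merged accounts may vary:

accounts = [["John", "j@x.com", "john@x.com"],
            ["John", "j@x.com", "jj@x.com"],
            ["Mary", "mary@x.com"]]
print(Solution().accountsMerge(accounts))
# e.g. [['John', 'j@x.com', 'jj@x.com', 'john@x.com'], ['Mary', 'mary@x.com']]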
bbd07d4cd43eb9c6a5d60dca2b815d0286b2eda3
|
list_issues_by_milestone.py
|
list_issues_by_milestone.py
|
from gitlab import Gitlab
from configparser import ConfigParser
import sys
__author__ = 'Cedric RICARD'
config = ConfigParser()
config.read_file(open('defaults.cfg'))
def connect_to_gitlab():
gl = Gitlab(config.get('gitlab', 'url'), config.get('gitlab', 'key'))
gl.auth()
return gl
def list_issues(gl, project_name, state=None, group_by_milestone=True):
tab = " " * 4
gl_project_id = None
gl_project = None
for p in gl.Project(per_page=1000):
# print(p.path_with_namespace)
if p.path_with_namespace == project_name:
gl_project_id = p.id
gl_project = p
break
if gl_project:
milestone_dict = {}
labels = gl_project.Label(per_page=1000)
milestones = gl_project.Milestone(per_page=1000)
issues = gl_project.Issue(per_page=1000)
for issue in issues:
if state and issue.state == state:
if group_by_milestone:
if issue.milestone:
milestone_dict.setdefault(issue.milestone.title, []).append(issue)
else:
print(issue)
if milestone_dict:
for ms in milestones:  # note: raises KeyError for milestones with no matching issues
print(ms.title)
print("state: %s" % ms.state, "/ %d issues" % len(milestone_dict[ms.title]))
print("expired at %s" % ms.due_date)
print("details: ", ms.description)
for issue in milestone_dict[ms.title]:
print(tab, "#%(id)d\t%(title)s" % issue.__dict__)
print()
if __name__ == '__main__':
# project_name = config.get('gitlab', 'project')
project_name = sys.argv[1]
issue_state = sys.argv[2]
gl = connect_to_gitlab()
list_issues(gl, project_name, state=issue_state)
|
Add script to produce a changelog
|
Add script to produce a changelog
|
Python
|
agpl-3.0
|
11h42/gitlab-tools
|
Add script to produce a changelog
|
from gitlab import Gitlab
from configparser import ConfigParser
import sys
__author__ = 'Cedric RICARD'
config = ConfigParser()
config.read_file(open('defaults.cfg'))
def connect_to_gitlab():
gl = Gitlab(config.get('gitlab', 'url'), config.get('gitlab', 'key'))
gl.auth()
return gl
def list_issues(gl, project_name, state=None, group_by_milestone=True):
tab = " " * 4
gl_project_id = None
gl_project = None
for p in gl.Project(per_page=1000):
# print(p.path_with_namespace)
if p.path_with_namespace == project_name:
gl_project_id = p.id
gl_project = p
break
if gl_project:
milestone_dict = {}
labels = gl_project.Label(per_page=1000)
milestones = gl_project.Milestone(per_page=1000)
issues = gl_project.Issue(per_page=1000)
for issue in issues:
if state and issue.state == state:
if group_by_milestone:
if issue.milestone:
milestone_dict.setdefault(issue.milestone.title, []).append(issue)
else:
print(issue)
if milestone_dict:
for ms in milestones:  # note: raises KeyError for milestones with no matching issues
print(ms.title)
print("state: %s" % ms.state, "/ %d issues" % len(milestone_dict[ms.title]))
print("expired at %s" % ms.due_date)
print("details: ", ms.description)
for issue in milestone_dict[ms.title]:
print(tab, "#%(id)d\t%(title)s" % issue.__dict__)
print()
if __name__ == '__main__':
# project_name = config.get('gitlab', 'project')
project_name = sys.argv[1]
issue_state = sys.argv[2]
gl = connect_to_gitlab()
list_issues(gl, project_name, state=issue_state)
|
<commit_before><commit_msg>Add script to produce a changelog<commit_after>
|
from gitlab import Gitlab
from configparser import ConfigParser
import sys
__author__ = 'Cedric RICARD'
config = ConfigParser()
config.read_file(open('defaults.cfg'))
def connect_to_gitlab():
gl = Gitlab(config.get('gitlab', 'url'), config.get('gitlab', 'key'))
gl.auth()
return gl
def list_issues(gl, project_name, state=None, group_by_milestone=True):
tab = " " * 4
gl_project_id = None
gl_project = None
for p in gl.Project(per_page=1000):
# print(p.path_with_namespace)
if p.path_with_namespace == project_name:
gl_project_id = p.id
gl_project = p
break
if gl_project:
milestone_dict = {}
labels = gl_project.Label(per_page=1000)
milestones = gl_project.Milestone(per_page=1000)
issues = gl_project.Issue(per_page=1000)
for issue in issues:
if state and issue.state == state:
if group_by_milestone:
if issue.milestone:
milestone_dict.setdefault(issue.milestone.title, []).append(issue)
else:
print(issue)
if milestone_dict:
for ms in milestones:  # note: raises KeyError for milestones with no matching issues
print(ms.title)
print("state: %s" % ms.state, "/ %d issues" % len(milestone_dict[ms.title]))
print("expired at %s" % ms.due_date)
print("details: ", ms.description)
for issue in milestone_dict[ms.title]:
print(tab, "#%(id)d\t%(title)s" % issue.__dict__)
print()
if __name__ == '__main__':
# project_name = config.get('gitlab', 'project')
project_name = sys.argv[1]
issue_state = sys.argv[2]
gl = connect_to_gitlab()
list_issues(gl, project_name, state=issue_state)
|
Add script to produce a changelogfrom gitlab import Gitlab
from configparser import ConfigParser
import sys
__author__ = 'Cedric RICARD'
config = ConfigParser()
config.read_file(open('defaults.cfg'))
def connect_to_gitlab():
gl = Gitlab(config.get('gitlab', 'url'), config.get('gitlab', 'key'))
gl.auth()
return gl
def list_issues(gl, project_name, state=None, group_by_milestone=True):
tab = " " * 4
gl_project_id = None
gl_project = None
for p in gl.Project(per_page=1000):
# print(p.path_with_namespace)
if p.path_with_namespace == project_name:
gl_project_id = p.id
gl_project = p
break
if gl_project:
milestone_dict = {}
labels = gl_project.Label(per_page=1000)
milestones = gl_project.Milestone(per_page=1000)
issues = gl_project.Issue(per_page=1000)
for issue in issues:
if state and issue.state == state:
if group_by_milestone:
if issue.milestone:
milestone_dict.setdefault(issue.milestone.title, []).append(issue)
else:
print(issue)
if milestone_dict:
for ms in milestones:  # note: raises KeyError for milestones with no matching issues
print(ms.title)
print("state: %s" % ms.state, "/ %d issues" % len(milestone_dict[ms.title]))
print("expired at %s" % ms.due_date)
print("details: ", ms.description)
for issue in milestone_dict[ms.title]:
print(tab, "#%(id)d\t%(title)s" % issue.__dict__)
print()
if __name__ == '__main__':
# project_name = config.get('gitlab', 'project')
project_name = sys.argv[1]
issue_state = sys.argv[2]
gl = connect_to_gitlab()
list_issues(gl, project_name, state=issue_state)
|
<commit_before><commit_msg>Add script to produce a changelog<commit_after>from gitlab import Gitlab
from configparser import ConfigParser
import sys
__author__ = 'Cedric RICARD'
config = ConfigParser()
config.read_file(open('defaults.cfg'))
def connect_to_gitlab():
gl = Gitlab(config.get('gitlab', 'url'), config.get('gitlab', 'key'))
gl.auth()
return gl
def list_issues(gl, project_name, state=None, group_by_milestone=True):
tab = " " * 4
gl_project_id = None
gl_project = None
for p in gl.Project(per_page=1000):
# print(p.path_with_namespace)
if p.path_with_namespace == project_name:
gl_project_id = p.id
gl_project = p
break
if gl_project:
milestone_dict = {}
labels = gl_project.Label(per_page=1000)
milestones = gl_project.Milestone(per_page=1000)
issues = gl_project.Issue(per_page=1000)
for issue in issues:
if state and issue.state == state:
if group_by_milestone:
if issue.milestone:
milestone_dict.setdefault(issue.milestone.title, []).append(issue)
else:
print(issue)
if milestone_dict:
for ms in milestones:  # note: raises KeyError for milestones with no matching issues
print(ms.title)
print("state: %s" % ms.state, "/ %d issues" % len(milestone_dict[ms.title]))
print("expired at %s" % ms.due_date)
print("details: ", ms.description)
for issue in milestone_dict[ms.title]:
print(tab, "#%(id)d\t%(title)s" % issue.__dict__)
print()
if __name__ == '__main__':
# project_name = config.get('gitlab', 'project')
project_name = sys.argv[1]
issue_state = sys.argv[2]
gl = connect_to_gitlab()
list_issues(gl, project_name, state=issue_state)
|
|
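An illustrative invocation sketch; the GitLab URL, token placeholder, project path, and issue state below are assumptions, not values from the record:

# defaults.cfg (read by the ConfigParser call above)
#   [gitlab]
#   url = https://gitlab.example.com
#   key = <private token>
#
# python list_issues_by_milestone.py mygroup/myproject closed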
86c8f3311efaf6bc4f1ef5bda4213ec07f36a7fd
|
tests/cpydiff/core_function_moduleattr.py
|
tests/cpydiff/core_function_moduleattr.py
|
"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
Add test and workaround for function.__module__ attr.
|
tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org>
|
Python
|
mit
|
pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython
|
tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org>
|
"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
<commit_before><commit_msg>tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org><commit_after>
|
"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org>"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
<commit_before><commit_msg>tests/cpydiff: Add test and workaround for function.__module__ attr.
MicroPython does not store any reference from a function object to the
module it was defined in, but there is a way to use function.__globals__ to
indirectly get the module.
See issue #7259.
Signed-off-by: Damien George <99e6b749acbfbe2a596df99e91d24d6e1fdbee00@micropython.org><commit_after>"""
categories: Core,Functions
description: Function objects do not have the ``__module__`` attribute
cause: MicroPython is optimized for reduced code size and RAM usage.
workaround: Use ``sys.modules[function.__globals__['__name__']]`` for non-builtin modules.
"""
def f():
pass
print(f.__module__)
|
|
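A short sketch of the documented workaround; it assumes the MicroPython port exposes function.__globals__ as the test expects, and applies to non-builtin modules only:

import sys

def f():
    pass

# Recover the defining module without relying on f.__module__.
print(sys.modules[f.__globals__['__name__']])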
4e53ea6ea9e3f5f409014221c000486bc188769f
|
tools/heapcheck/PRESUBMIT.py
|
tools/heapcheck/PRESUBMIT.py
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
Add presubmit checks for suppressions.
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: http://src.chromium.org/svn/trunk/src@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: 8ac37d84cf01f879652cb455c925bb75bee0bc34
|
Python
|
bsd-3-clause
|
meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser,meego-tablet-ux/meego-app-browser
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: http://src.chromium.org/svn/trunk/src@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: 8ac37d84cf01f879652cb455c925bb75bee0bc34
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
<commit_before><commit_msg>Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: http://src.chromium.org/svn/trunk/src@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: 8ac37d84cf01f879652cb455c925bb75bee0bc34<commit_after>
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: http://src.chromium.org/svn/trunk/src@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: 8ac37d84cf01f879652cb455c925bb75bee0bc34# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
<commit_before><commit_msg>Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: http://src.chromium.org/svn/trunk/src@57132 4ff67af0-8c30-449e-8e8b-ad334ec8d88c
Former-commit-id: 8ac37d84cf01f879652cb455c925bb75bee0bc34<commit_after># Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
|
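Illustrative suppression-file lines the checker above accepts (the contents are made up for the example; per-line validation only, so ordering is not enforced):

# {
# <suppression name -- skipped via skip_next_line>
# Heapcheck:Leak
# fun:do_evil_things
# obj:/lib/libfoo.so
# ...
# tc_new(unsigned long)   <- matches func_re
# }
# A stray line such as "TODO fix this" would be reported as probably wrong.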
f0a721f2dbc4b8b9354e459d45325a08a646e09b
|
py/longest-increasing-path-in-a-matrix.py
|
py/longest-increasing-path-in-a-matrix.py
|
from collections import Counter
class Solution(object):
def dfs(self, dp, matrix, x, y, w, h):
v = matrix[x][y]
if dp[x, y] == 0:
dp[x, y] = 1 + max(
0 if x == 0 or matrix[x - 1][y] <= v else self.dfs(dp, matrix, x - 1, y, w, h),
0 if y == 0 or matrix[x][y - 1] <= v else self.dfs(dp, matrix, x, y - 1, w, h),
0 if x == h - 1 or matrix[x + 1][y] <= v else self.dfs(dp, matrix, x + 1, y, w, h),
0 if y == w - 1 or matrix[x][y + 1] <= v else self.dfs(dp, matrix, x, y + 1, w, h)
)
return dp[x, y]
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
ans = 1
starts = set(xrange(h * w))
dp = Counter()
for x, row in enumerate(matrix):
for y, v in enumerate(row):
ans = max(ans, self.dfs(dp, matrix, x, y, w, h))
return ans
|
Add py solution for 329. Longest Increasing Path in a Matrix
|
Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/
|
from collections import Counter
class Solution(object):
def dfs(self, dp, matrix, x, y, w, h):
v = matrix[x][y]
if dp[x, y] == 0:
dp[x, y] = 1 + max(
0 if x == 0 or matrix[x - 1][y] <= v else self.dfs(dp, matrix, x - 1, y, w, h),
0 if y == 0 or matrix[x][y - 1] <= v else self.dfs(dp, matrix, x, y - 1, w, h),
0 if x == h - 1 or matrix[x + 1][y] <= v else self.dfs(dp, matrix, x + 1, y, w, h),
0 if y == w - 1 or matrix[x][y + 1] <= v else self.dfs(dp, matrix, x, y + 1, w, h)
)
return dp[x, y]
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
ans = 1
starts = set(xrange(h * w))
dp = Counter()
for x, row in enumerate(matrix):
for y, v in enumerate(row):
ans = max(ans, self.dfs(dp, matrix, x, y, w, h))
return ans
|
<commit_before><commit_msg>Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/<commit_after>
|
from collections import Counter
class Solution(object):
def dfs(self, dp, matrix, x, y, w, h):
v = matrix[x][y]
if dp[x, y] == 0:
dp[x, y] = 1 + max(
0 if x == 0 or matrix[x - 1][y] <= v else self.dfs(dp, matrix, x - 1, y, w, h),
0 if y == 0 or matrix[x][y - 1] <= v else self.dfs(dp, matrix, x, y - 1, w, h),
0 if x == h - 1 or matrix[x + 1][y] <= v else self.dfs(dp, matrix, x + 1, y, w, h),
0 if y == w - 1 or matrix[x][y + 1] <= v else self.dfs(dp, matrix, x, y + 1, w, h)
)
return dp[x, y]
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
ans = 1
starts = set(xrange(h * w))
dp = Counter()
for x, row in enumerate(matrix):
for y, v in enumerate(row):
ans = max(ans, self.dfs(dp, matrix, x, y, w, h))
return ans
|
Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/from collections import Counter
class Solution(object):
def dfs(self, dp, matrix, x, y, w, h):
v = matrix[x][y]
if dp[x, y] == 0:
dp[x, y] = 1 + max(
0 if x == 0 or matrix[x - 1][y] <= v else self.dfs(dp, matrix, x - 1, y, w, h),
0 if y == 0 or matrix[x][y - 1] <= v else self.dfs(dp, matrix, x, y - 1, w, h),
0 if x == h - 1 or matrix[x + 1][y] <= v else self.dfs(dp, matrix, x + 1, y, w, h),
0 if y == w - 1 or matrix[x][y + 1] <= v else self.dfs(dp, matrix, x, y + 1, w, h)
)
return dp[x, y]
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
ans = 1
starts = set(xrange(h * w))
dp = Counter()
for x, row in enumerate(matrix):
for y, v in enumerate(row):
ans = max(ans, self.dfs(dp, matrix, x, y, w, h))
return ans
|
<commit_before><commit_msg>Add py solution for 329. Longest Increasing Path in a Matrix
329. Longest Increasing Path in a Matrix: https://leetcode.com/problems/longest-increasing-path-in-a-matrix/<commit_after>from collections import Counter
class Solution(object):
def dfs(self, dp, matrix, x, y, w, h):
v = matrix[x][y]
if dp[x, y] == 0:
dp[x, y] = 1 + max(
0 if x == 0 or matrix[x - 1][y] <= v else self.dfs(dp, matrix, x - 1, y, w, h),
0 if y == 0 or matrix[x][y - 1] <= v else self.dfs(dp, matrix, x, y - 1, w, h),
0 if x == h - 1 or matrix[x + 1][y] <= v else self.dfs(dp, matrix, x + 1, y, w, h),
0 if y == w - 1 or matrix[x][y + 1] <= v else self.dfs(dp, matrix, x, y + 1, w, h)
)
return dp[x, y]
def longestIncreasingPath(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
if not matrix:
return 0
h = len(matrix)
w = len(matrix[0])
ans = 1
starts = set(xrange(h * w))
dp = Counter()
for x, row in enumerate(matrix):
for y, v in enumerate(row):
ans = max(ans, self.dfs(dp, matrix, x, y, w, h))
return ans
|
|
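A quick hedged check with the classic example matrix for this problem (values illustrative; the solution above is Python 2 because of xrange):

m = [[9, 9, 4],
     [6, 6, 8],
     [2, 1, 1]]
print(Solution().longestIncreasingPath(m))  # expected 4: 1 -> 2 -> 6 -> 9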
5c3cdc7a23fc1b0733b8b8bb9266c6d8c419dcd4
|
py/minimum-ascii-delete-sum-for-two-strings.py
|
py/minimum-ascii-delete-sum-for-two-strings.py
|
class Solution(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
prev = [0]
for c in s2:
prev.append(prev[-1] + ord(c))
for i1, c1 in enumerate(s1, 1):
nxtRow = [0] * (len(s2) + 1)
o1 = ord(c1)
nxtRow[0] = o1 + prev[0]
for i2, c2 in enumerate(s2, 1):
if c1 == c2:
nxtRow[i2] = prev[i2 - 1]
else:
o2 = ord(c2)
nxtRow[i2] = min(prev[i2] + o1, nxtRow[i2 - 1] + o2)
prev = nxtRow
return nxtRow[len(s2)]
|
Add py solution for 712. Minimum ASCII Delete Sum for Two Strings
|
Add py solution for 712. Minimum ASCII Delete Sum for Two Strings
712. Minimum ASCII Delete Sum for Two Strings: https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 712. Minimum ASCII Delete Sum for Two Strings
712. Minimum ASCII Delete Sum for Two Strings: https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/
|
class Solution(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
prev = [0]
for c in s2:
prev.append(prev[-1] + ord(c))
for i1, c1 in enumerate(s1, 1):
nxtRow = [0] * (len(s2) + 1)
o1 = ord(c1)
nxtRow[0] = o1 + prev[0]
for i2, c2 in enumerate(s2, 1):
if c1 == c2:
nxtRow[i2] = prev[i2 - 1]
else:
o2 = ord(c2)
nxtRow[i2] = min(prev[i2] + o1, nxtRow[i2 - 1] + o2)
prev = nxtRow
return nxtRow[len(s2)]
|
<commit_before><commit_msg>Add py solution for 712. Minimum ASCII Delete Sum for Two Strings
712. Minimum ASCII Delete Sum for Two Strings: https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/<commit_after>
|
class Solution(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
prev = [0]
for c in s2:
prev.append(prev[-1] + ord(c))
for i1, c1 in enumerate(s1, 1):
nxtRow = [0] * (len(s2) + 1)
o1 = ord(c1)
nxtRow[0] = o1 + prev[0]
for i2, c2 in enumerate(s2, 1):
if c1 == c2:
nxtRow[i2] = prev[i2 - 1]
else:
o2 = ord(c2)
nxtRow[i2] = min(prev[i2] + o1, nxtRow[i2 - 1] + o2)
prev = nxtRow
return nxtRow[len(s2)]
|
Add py solution for 712. Minimum ASCII Delete Sum for Two Strings
712. Minimum ASCII Delete Sum for Two Strings: https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/class Solution(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
prev = [0]
for c in s2:
prev.append(prev[-1] + ord(c))
for i1, c1 in enumerate(s1, 1):
nxtRow = [0] * (len(s2) + 1)
o1 = ord(c1)
nxtRow[0] = o1 + prev[0]
for i2, c2 in enumerate(s2, 1):
if c1 == c2:
nxtRow[i2] = prev[i2 - 1]
else:
o2 = ord(c2)
nxtRow[i2] = min(prev[i2] + o1, nxtRow[i2 - 1] + o2)
prev = nxtRow
return nxtRow[len(s2)]
|
<commit_before><commit_msg>Add py solution for 712. Minimum ASCII Delete Sum for Two Strings
712. Minimum ASCII Delete Sum for Two Strings: https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/<commit_after>class Solution(object):
def minimumDeleteSum(self, s1, s2):
"""
:type s1: str
:type s2: str
:rtype: int
"""
prev = [0]
for c in s2:
prev.append(prev[-1] + ord(c))
for i1, c1 in enumerate(s1, 1):
nxtRow = [0] * (len(s2) + 1)
o1 = ord(c1)
nxtRow[0] = o1 + prev[0]
for i2, c2 in enumerate(s2, 1):
if c1 == c2:
nxtRow[i2] = prev[i2 - 1]
else:
o2 = ord(c2)
nxtRow[i2] = min(prev[i2] + o1, nxtRow[i2 - 1] + o2)
prev = nxtRow
return nxtRow[len(s2)]
|
|
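A quick hedged check against the well-known examples for this problem:

print(Solution().minimumDeleteSum("sea", "eat"))      # expected 231 (drop 's' and 't')
print(Solution().minimumDeleteSum("delete", "leet"))  # expected 403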
d77cff9cc75b949b63d9497db54a5d86aabf9281
|
tests/test_manual.py
|
tests/test_manual.py
|
from tests.helper import ExternalVersionTestCase
class ManualTest(ExternalVersionTestCase):
def test_manual(self):
self.assertEqual(self.sync_get_version("example", {"manual": "Meow"}), "Meow")
|
Add a testcase for Manual
|
Add a testcase for Manual
|
Python
|
mit
|
lilydjwg/nvchecker
|
Add a testcase for Manual
|
from tests.helper import ExternalVersionTestCase
class ManualTest(ExternalVersionTestCase):
def test_manual(self):
self.assertEqual(self.sync_get_version("example", {"manual": "Meow"}), "Meow")
|
<commit_before><commit_msg>Add a testcase for Manual<commit_after>
|
from tests.helper import ExternalVersionTestCase
class ManualTest(ExternalVersionTestCase):
def test_manual(self):
self.assertEqual(self.sync_get_version("example", {"manual": "Meow"}), "Meow")
|
Add a testcase for Manualfrom tests.helper import ExternalVersionTestCase
class ManualTest(ExternalVersionTestCase):
def test_manual(self):
self.assertEqual(self.sync_get_version("example", {"manual": "Meow"}), "Meow")
|
<commit_before><commit_msg>Add a testcase for Manual<commit_after>from tests.helper import ExternalVersionTestCase
class ManualTest(ExternalVersionTestCase):
def test_manual(self):
self.assertEqual(self.sync_get_version("example", {"manual": "Meow"}), "Meow")
|
|
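A sketch of the equivalent nvchecker configuration entry implied by the test; the section name and ini layout are assumptions for illustration:

# [example]
# manual = Meow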
d3731c0b3c15b6f321a91121300de4da7d47acca
|
tests/test_module.py
|
tests/test_module.py
|
#!/usr/bin/env python
#
# Copyright 2011-2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import unittest
def read_baseline(filename):
fd = open(filename, "r")
baseline = fd.read().replace("\n", "")
fd.close()
return baseline
class TestCase(unittest.TestCase):
def check_module(self, modulename):
__import__(modulename)
module = sys.modules[modulename]
names = str(dir(module))
baseline = read_baseline(modulename + ".baseline")
self.assertEqual(names, baseline)
def test_splunklib(self):
self.check_module("splunklib")
def test_binding(self):
self.check_module("splunklib.binding")
def test_client(self):
self.check_module("splunklib.client")
def test_data(self):
self.check_module("splunklib.data")
def test_results(self):
self.check_module("splunklib.results")
if __name__ == "__main__":
unittest.main()
|
Move module namespace test to a separate test script.
|
Move module namespace test to a separate test script.
|
Python
|
apache-2.0
|
splunk/splunk-sdk-python,ww9rivers/splunk-sdk-python,kkirsche/splunk-sdk-python,lowtalker/splunk-sdk-python,sullivanmatt/splunk-sdk-python,kkirsche/splunk-sdk-python,kkirsche/splunk-sdk-python,kkirsche/splunk-sdk-python,splunk/splunk-sdk-python
|
Move module namespace test to a separate test script.
|
#!/usr/bin/env python
#
# Copyright 2011-2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import unittest
def read_baseline(filename):
fd = open(filename, "r")
baseline = fd.read().replace("\n", "")
fd.close()
return baseline
class TestCase(unittest.TestCase):
def check_module(self, modulename):
__import__(modulename)
module = sys.modules[modulename]
names = str(dir(module))
baseline = read_baseline(modulename + ".baseline")
self.assertEqual(names, baseline)
def test_splunklib(self):
self.check_module("splunklib")
def test_binding(self):
self.check_module("splunklib.binding")
def test_client(self):
self.check_module("splunklib.client")
def test_data(self):
self.check_module("splunklib.data")
def test_results(self):
self.check_module("splunklib.results")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Move module namespace test to a separate test script.<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2011-2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import unittest
def read_baseline(filename):
fd = open(filename, "r")
baseline = fd.read().replace("\n", "")
fd.close()
return baseline
class TestCase(unittest.TestCase):
def check_module(self, modulename):
__import__(modulename)
module = sys.modules[modulename]
names = str(dir(module))
baseline = read_baseline(modulename + ".baseline")
self.assertEqual(names, baseline)
def test_splunklib(self):
self.check_module("splunklib")
def test_binding(self):
self.check_module("splunklib.binding")
def test_client(self):
self.check_module("splunklib.client")
def test_data(self):
self.check_module("splunklib.data")
def test_results(self):
self.check_module("splunklib.results")
if __name__ == "__main__":
unittest.main()
|
Move module namespace test to a separate test script.#!/usr/bin/env python
#
# Copyright 2011-2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import unittest
def read_baseline(filename):
fd = open(filename, "r")
baseline = fd.read().replace("\n", "")
fd.close()
return baseline
class TestCase(unittest.TestCase):
def check_module(self, modulename):
__import__(modulename)
module = sys.modules[modulename]
names = str(dir(module))
baseline = read_baseline(modulename + ".baseline")
self.assertEqual(names, baseline)
def test_splunklib(self):
self.check_module("splunklib")
def test_binding(self):
self.check_module("splunklib.binding")
def test_client(self):
self.check_module("splunklib.client")
def test_data(self):
self.check_module("splunklib.data")
def test_results(self):
self.check_module("splunklib.results")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Move module namespace test to a separate test script.<commit_after>#!/usr/bin/env python
#
# Copyright 2011-2012 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import unittest
def read_baseline(filename):
fd = open(filename, "r")
baseline = fd.read().replace("\n", "")
fd.close()
return baseline
class TestCase(unittest.TestCase):
def check_module(self, modulename):
__import__(modulename)
module = sys.modules[modulename]
names = str(dir(module))
baseline = read_baseline(modulename + ".baseline")
self.assertEqual(names, baseline)
def test_splunklib(self):
self.check_module("splunklib")
def test_binding(self):
self.check_module("splunklib.binding")
def test_client(self):
self.check_module("splunklib.client")
def test_data(self):
self.check_module("splunklib.data")
def test_results(self):
self.check_module("splunklib.results")
if __name__ == "__main__":
unittest.main()
|
|
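A one-off sketch for (re)generating a baseline file the test reads; it assumes splunklib is importable and mirrors the read_baseline() convention above:

import sys

modulename = "splunklib"  # or "splunklib.binding", "splunklib.client", ...
__import__(modulename)
with open(modulename + ".baseline", "w") as fd:
    fd.write(str(dir(sys.modules[modulename])))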
7e4aab6980519fd8124e36a6f8fd4415eaf8a4e7
|
tests/test_tracer.py
|
tests/test_tracer.py
|
import os
import nose
import tracer
import logging
l = logging.getLogger("tracer.tests.test_tracer")
bin_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries'))
pov_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), "povs"))
test_data_location = str(os.path.dirname(os.path.realpath(__file__)))
def test_cgc_0b32aa01_01_raw():
'''
Test CGC Scored Event 1's palindrome challenge with raw input
'''
# test a valid palindrome
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "racecar\n")
result_path, crash_state = t.run()
# make sure there is no crash state
nose.tools.assert_equal(crash_state, None)
result_state = result_path.state
# make sure angr modeled the correct output
stdout_dump = result_state.posix.dumps(1)
nose.tools.assert_true(stdout_dump.startswith("\t\tYes, that's a palindrome!\n\n"))
# make sure there were no 'Nope's from non-palindromes
nose.tools.assert_false("Nope" in stdout_dump)
# now test crashing input
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "A" * 129)
crash_path, crash_state = t.run()
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
def run_all():
functions = globals()
all_functions = dict(filter((lambda (k, v): k.startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angrop.rop").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
Add a single testcase for the tracer
|
Add a single testcase for the tracer
|
Python
|
bsd-2-clause
|
schieb/angr,tyb0807/angr,tyb0807/angr,f-prettyland/angr,iamahuman/angr,angr/angr,angr/tracer,schieb/angr,iamahuman/angr,angr/angr,iamahuman/angr,schieb/angr,f-prettyland/angr,tyb0807/angr,f-prettyland/angr,angr/angr
|
Add a single testcase for the tracer
|
import os
import nose
import tracer
import logging
l = logging.getLogger("tracer.tests.test_tracer")
bin_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries'))
pov_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), "povs"))
test_data_location = str(os.path.dirname(os.path.realpath(__file__)))
def test_cgc_0b32aa01_01_raw():
'''
Test CGC Scored Event 1's palindrome challenge with raw input
'''
# test a valid palindrome
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "racecar\n")
result_path, crash_state = t.run()
# make sure there is no crash state
nose.tools.assert_equal(crash_state, None)
result_state = result_path.state
# make sure angr modeled the correct output
stdout_dump = result_state.posix.dumps(1)
nose.tools.assert_true(stdout_dump.startswith("\t\tYes, that's a palindrome!\n\n"))
# make sure there were no 'Nope's from non-palindromes
nose.tools.assert_false("Nope" in stdout_dump)
# now test crashing input
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "A" * 129)
crash_path, crash_state = t.run()
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
def run_all():
functions = globals()
all_functions = dict(filter((lambda (k, v): k.startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angrop.rop").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
<commit_before><commit_msg>Add a single testcase for the tracer<commit_after>
|
import os
import nose
import tracer
import logging
l = logging.getLogger("tracer.tests.test_tracer")
bin_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries'))
pov_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), "povs"))
test_data_location = str(os.path.dirname(os.path.realpath(__file__)))
def test_cgc_0b32aa01_01_raw():
'''
Test CGC Scored Event 1's palindrome challenge with raw input
'''
# test a valid palindrome
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "racecar\n")
result_path, crash_state = t.run()
# make sure there is no crash state
nose.tools.assert_equal(crash_state, None)
result_state = result_path.state
# make sure angr modeled the correct output
stdout_dump = result_state.posix.dumps(1)
nose.tools.assert_true(stdout_dump.startswith("\t\tYes, that's a palindrome!\n\n"))
# make sure there were no 'Nope's from non-palindromes
nose.tools.assert_false("Nope" in stdout_dump)
# now test crashing input
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "A" * 129)
crash_path, crash_state = t.run()
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
def run_all():
functions = globals()
all_functions = dict(filter((lambda (k, v): k.startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angrop.rop").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
Add a single testcase for the tracerimport os
import nose
import tracer
import logging
l = logging.getLogger("tracer.tests.test_tracer")
bin_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries'))
pov_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), "povs"))
test_data_location = str(os.path.dirname(os.path.realpath(__file__)))
def test_cgc_0b32aa01_01_raw():
'''
Test CGC Scored Event 1's palindrome challenge with raw input
'''
# test a valid palindrome
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "racecar\n")
result_path, crash_state = t.run()
# make sure there is no crash state
nose.tools.assert_equal(crash_state, None)
result_state = result_path.state
# make sure angr modeled the correct output
stdout_dump = result_state.posix.dumps(1)
nose.tools.assert_true(stdout_dump.startswith("\t\tYes, that's a palindrome!\n\n"))
# make sure there were no 'Nope's from non-palindromes
nose.tools.assert_false("Nope" in stdout_dump)
# now test crashing input
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "A" * 129)
crash_path, crash_state = t.run()
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
def run_all():
functions = globals()
all_functions = dict(filter((lambda (k, v): k.startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angrop.rop").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
<commit_before><commit_msg>Add a single testcase for the tracer<commit_after>import os
import nose
import tracer
import logging
l = logging.getLogger("tracer.tests.test_tracer")
bin_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries'))
pov_location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), "povs"))
test_data_location = str(os.path.dirname(os.path.realpath(__file__)))
def test_cgc_0b32aa01_01_raw():
'''
Test CGC Scored Event 1's palindrome challenge with raw input
'''
# test a valid palindrome
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "racecar\n")
result_path, crash_state = t.run()
# make sure there is no crash state
nose.tools.assert_equal(crash_state, None)
result_state = result_path.state
# make sure angr modeled the correct output
stdout_dump = result_state.posix.dumps(1)
nose.tools.assert_true(stdout_dump.startswith("\t\tYes, that's a palindrome!\n\n"))
# make sure there were no 'Nope's from non-palindromes
nose.tools.assert_false("Nope" in stdout_dump)
# now test crashing input
t = tracer.Tracer(os.path.join(bin_location, "cgc_scored_event_1/cgc/0b32aa01_01"), "A" * 129)
crash_path, crash_state = t.run()
nose.tools.assert_not_equal(crash_path, None)
nose.tools.assert_not_equal(crash_state, None)
def run_all():
functions = globals()
all_functions = dict(filter((lambda (k, v): k.startswith('test_')), functions.items()))
for f in sorted(all_functions.keys()):
if hasattr(all_functions[f], '__call__'):
all_functions[f]()
if __name__ == "__main__":
logging.getLogger("angrop.rop").setLevel(logging.DEBUG)
import sys
if len(sys.argv) > 1:
globals()['test_' + sys.argv[1]]()
else:
run_all()
|
|
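An invocation sketch matching the __main__ dispatch above (the argument is the test function name minus its "test_" prefix):

# python test_tracer.py cgc_0b32aa01_01_raw   # run one test
# python test_tracer.py                       # run all tests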
c108147bd1d421443010640ea4ed206d4d0131e5
|
mrfilesys.py
|
mrfilesys.py
|
#!/usr/bin/env python
#
## mrfilesys
##
## This program provides an interface to retrieve information about Moira filesystems.
## It could also provide the writing functions, but the developer of this software does
## not have access to test that functionality nor is insane enough to set up a test Moira
## server.
#
from pymoira import Filesys
import common
def show_info():
"""Handle 'mrfilesys info'."""
fs = Filesys(client, args.filesys)
fs.loadInfo()
show_info_for_fs(fs)
# FIXME: here, if fs is FSGROUP this should expand all included filesystems
# Unfortunately, for some reasons the query to get filesystems in a group
# is not publicly accessible, so I am unable to test it.
def show_info_for_fs(fs):
"""Outputs an information about a filesystem. Used by 'mrfilesys info'
to show info about filesytems and FS groups."""
common_fields = (
('Description', fs.description),
('Owner (user)', fs.owner_user),
('Owner (list)', fs.owner_group),
('Type code', fs.locker_type),
('Quota', "%i megabytes" % (fs.quota / 1000) if fs.quota else False ),
('Last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.lastmod_datetime), fs.lastmod_by, fs.lastmod_with))
)
if fs.quota:
common_fields += ( ('Quota last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.quota_lastmod_datetime), fs.quota_lastmod_by, fs.quota_lastmod_with) ), )
if fs.type == 'AFS':
common.section_header( 'AFS filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Cell', fs.machine),
('Location', fs.name),
('Mount point', fs.mountpoint),
) + common_fields) )
else:
common.section_header( 'Filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Filesystem type', fs.type),
('Machine', fs.machine),
('Location/Name', fs.name),
('Mount point', fs.mountpoint),
('Description', fs.description),
) + common_fields) )
def setup_subcommands(argparser):
"""Sets up all the subcommands."""
subparsers = argparser.add_subparsers()
parser_info = subparsers.add_parser('info', help = 'Provide the information about the filesystem')
parser_info.add_argument('filesys', help = 'The filesystem to inspect')
parser_info.set_defaults(handler = show_info)
if __name__ == '__main__':
client, args = common.init('mrfilesys', 'Inspect Moira filesystems', setup_subcommands)
common.main()
|
Add a tool to inspect filesystems.
|
Add a tool to inspect filesystems.
|
Python
|
mit
|
vasilvv/mrtools,vasilvv/mrtools
|
Add a tool to inspect filesystems.
|
#!/usr/bin/env python
#
## mrfilesys
##
## This program provides an interface to retrieve information about Moira filesystems.
## It could also provide the writing functions, but the developer of this software does
## not have access to test that functionality nor is insane enough to set up a test Moira
## server.
#
from pymoira import Filesys
import common
def show_info():
"""Handle 'mrfilesys info'."""
fs = Filesys(client, args.filesys)
fs.loadInfo()
show_info_for_fs(fs)
# FIXME: here, if fs is FSGROUP this should expand all included filesystems
# Unfortunately, for some reasons the query to get filesystems in a group
# is not publicly accessible, so I am unable to test it.
def show_info_for_fs(fs):
"""Outputs an information about a filesystem. Used by 'mrfilesys info'
to show info about filesytems and FS groups."""
common_fields = (
('Description', fs.description),
('Owner (user)', fs.owner_user),
('Owner (list)', fs.owner_group),
('Type code', fs.locker_type),
('Quota', "%i megabytes" % (fs.quota / 1000) if fs.quota else False ),
('Last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.lastmod_datetime), fs.lastmod_by, fs.lastmod_with))
)
if fs.quota:
common_fields += ( ('Quota last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.quota_lastmod_datetime), fs.quota_lastmod_by, fs.quota_lastmod_with) ), )
if fs.type == 'AFS':
common.section_header( 'AFS filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Cell', fs.machine),
('Location', fs.name),
('Mount point', fs.mountpoint),
) + common_fields) )
else:
common.section_header( 'Filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Filesystem type', fs.type),
('Machine', fs.machine),
('Location/Name', fs.name),
('Mount point', fs.mountpoint),
('Description', fs.description),
) + common_fields) )
def setup_subcommands(argparser):
"""Sets up all the subcommands."""
subparsers = argparser.add_subparsers()
parser_info = subparsers.add_parser('info', help = 'Provide the information about the filesystem')
parser_info.add_argument('filesys', help = 'The filesystem to inspect')
parser_info.set_defaults(handler = show_info)
if __name__ == '__main__':
client, args = common.init('mrfilesys', 'Inspect Moira filesystems', setup_subcommands)
common.main()
|
<commit_before><commit_msg>Add a tool to inspect filesystems.<commit_after>
|
#!/usr/bin/env python
#
## mrfilesys
##
## This program provides an interface to retrieve information about Moira filesystems.
## It could also provide the writing functions, but the developer of this software does
## not have access to test that functionality nor is insane enough to set up a test Moira
## server.
#
from pymoira import Filesys
import common
def show_info():
"""Handle 'mrfilesys info'."""
fs = Filesys(client, args.filesys)
fs.loadInfo()
show_info_for_fs(fs)
# FIXME: here, if fs is FSGROUP this should expand all included filesystems
# Unfortunately, for some reasons the query to get filesystems in a group
# is not publicly accessible, so I am unable to test it.
def show_info_for_fs(fs):
"""Outputs an information about a filesystem. Used by 'mrfilesys info'
to show info about filesytems and FS groups."""
common_fields = (
('Description', fs.description),
('Owner (user)', fs.owner_user),
('Owner (list)', fs.owner_group),
('Type code', fs.locker_type),
('Quota', "%i megabytes" % (fs.quota / 1000) if fs.quota else False ),
('Last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.lastmod_datetime), fs.lastmod_by, fs.lastmod_with))
)
if fs.quota:
common_fields += ( ('Quota last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.quota_lastmod_datetime), fs.quota_lastmod_by, fs.quota_lastmod_with) ), )
if fs.type == 'AFS':
common.section_header( 'AFS filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Cell', fs.machine),
('Location', fs.name),
('Mount point', fs.mountpoint),
) + common_fields) )
else:
common.section_header( 'Filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Filesystem type', fs.type),
('Machine', fs.machine),
('Location/Name', fs.name),
('Mount point', fs.mountpoint),
('Description', fs.description),
) + common_fields) )
def setup_subcommands(argparser):
"""Sets up all the subcommands."""
subparsers = argparser.add_subparsers()
parser_info = subparsers.add_parser('info', help = 'Provide the information about the filesystem')
parser_info.add_argument('filesys', help = 'The filesystem to inspect')
parser_info.set_defaults(handler = show_info)
if __name__ == '__main__':
client, args = common.init('mrfilesys', 'Inspect Moira filesystems', setup_subcommands)
common.main()
|
Add a tool to inspect filesystems.#!/usr/bin/env python
#
## mrfilesys
##
## This program provides an interface to retrieve information about Moira filesystems.
## It could also provide the writing functions, but the developer of this software does
## not have access to test that functionality nor is insane enough to set up a test Moira
## server.
#
from pymoira import Filesys
import common
def show_info():
"""Handle 'mrfilesys info'."""
fs = Filesys(client, args.filesys)
fs.loadInfo()
show_info_for_fs(fs)
# FIXME: here, if fs is FSGROUP this should expand all included filesystems
# Unfortunately, for some reasons the query to get filesystems in a group
# is not publicly accessible, so I am unable to test it.
def show_info_for_fs(fs):
"""Outputs an information about a filesystem. Used by 'mrfilesys info'
to show info about filesytems and FS groups."""
common_fields = (
('Description', fs.description),
('Owner (user)', fs.owner_user),
('Owner (list)', fs.owner_group),
('Type code', fs.locker_type),
('Quota', "%i megabytes" % (fs.quota / 1000) if fs.quota else False ),
('Last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.lastmod_datetime), fs.lastmod_by, fs.lastmod_with))
)
if fs.quota:
common_fields += ( ('Quota last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.quota_lastmod_datetime), fs.quota_lastmod_by, fs.quota_lastmod_with) ), )
if fs.type == 'AFS':
common.section_header( 'AFS filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Cell', fs.machine),
('Location', fs.name),
('Mount point', fs.mountpoint),
) + common_fields) )
else:
common.section_header( 'Filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Filesystem type', fs.type),
('Machine', fs.machine),
('Location/Name', fs.name),
('Mount point', fs.mountpoint),
) + common_fields) )
def setup_subcommands(argparser):
"""Sets up all the subcommands."""
subparsers = argparser.add_subparsers()
parser_info = subparsers.add_parser('info', help = 'Provide the information about the filesystem')
parser_info.add_argument('filesys', help = 'The filesystem to inspect')
parser_info.set_defaults(handler = show_info)
if __name__ == '__main__':
client, args = common.init('mrfilesys', 'Inspect Moira filesystems', setup_subcommands)
common.main()
|
<commit_before><commit_msg>Add a tool to inspect filesystems.<commit_after>#!/usr/bin/env python
#
## mrfilesys
##
## This program provides an interface to retrieve information about Moira filesystems.
## It could also provide the writing functions, but the developer of this software does
## not have access to test that functionality nor is insane enough to set up a test Moira
## server.
#
from pymoira import Filesys
import common
def show_info():
"""Handle 'mrfilesys info'."""
fs = Filesys(client, args.filesys)
fs.loadInfo()
show_info_for_fs(fs)
# FIXME: here, if fs is FSGROUP this should expand all included filesystems
    # Unfortunately, for some reason the query to get filesystems in a group
# is not publicly accessible, so I am unable to test it.
def show_info_for_fs(fs):
"""Outputs an information about a filesystem. Used by 'mrfilesys info'
to show info about filesytems and FS groups."""
common_fields = (
('Description', fs.description),
('Owner (user)', fs.owner_user),
('Owner (list)', fs.owner_group),
('Type code', fs.locker_type),
('Quota', "%i megabytes" % (fs.quota / 1000) if fs.quota else False ),
('Last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.lastmod_datetime), fs.lastmod_by, fs.lastmod_with))
)
if fs.quota:
common_fields += ( ('Quota last modified', "%s ago by %s using %s" % (common.last_modified_date(fs.quota_lastmod_datetime), fs.quota_lastmod_by, fs.quota_lastmod_with) ), )
if fs.type == 'AFS':
common.section_header( 'AFS filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Cell', fs.machine),
('Location', fs.name),
('Mount point', fs.mountpoint),
) + common_fields) )
else:
common.section_header( 'Filesystem %s' % common.emph_text(fs.label) )
common.show_fields( *((
('Filesystem type', fs.type),
('Machine', fs.machine),
('Location/Name', fs.name),
('Mount point', fs.mountpoint),
) + common_fields) )
def setup_subcommands(argparser):
"""Sets up all the subcommands."""
subparsers = argparser.add_subparsers()
parser_info = subparsers.add_parser('info', help = 'Provide the information about the filesystem')
parser_info.add_argument('filesys', help = 'The filesystem to inspect')
parser_info.set_defaults(handler = show_info)
if __name__ == '__main__':
client, args = common.init('mrfilesys', 'Inspect Moira filesystems', setup_subcommands)
common.main()
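Editor's note on the FIXME above: once the FSGROUP member query becomes publicly accessible, the expansion could look like the sketch below. This is an untested assumption — get_members() is a hypothetical pymoira call, not a verified one.
# Hypothetical sketch only: assumes an FSGROUP exposes a get_members()-style
# call, which could not be tested (see the FIXME in show_info above).
def show_info_expanded(fs):
    show_info_for_fs(fs)
    if fs.type == 'FSGROUP':
        for member in fs.get_members():  # assumed API, not verified
            member.loadInfo()
            show_info_for_fs(member)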
|
|
7a0a82ea85fa9f09af7dc63437744db5d199c902
|
Clean_Energy_Outlook/Ridge_pred_solar.py
|
Clean_Energy_Outlook/Ridge_pred_solar.py
|
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict solar data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['SOEGP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict SOEGP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
Add code for ridge prediction for solar energy
|
Add code for ridge prediction for solar energy
|
Python
|
mit
|
uwkejia/Clean-Energy-Outlook
|
Add code for ridge prediction for solar energy
|
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict solar data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['SOEGP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict SOEGP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
<commit_before><commit_msg>Add code for ridge prediction for solar energy<commit_after>
|
# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict solar data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['SOEGP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict SOEGP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
Add code for ridge prediction for solar energy# import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict solar data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['SOEGP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict SOEGP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
|
<commit_before><commit_msg>Add code for ridge prediction for solar energy<commit_after># import libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.model_selection import train_test_split
# define the function to predict solar data in the future
def pred_nuclear(samplefile, filelist):
# read data
data = pd.read_csv(samplefile)
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# predict nuclear for future
year3 = year2 = data[['Year']][-6:]
year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#statelist=["AK","AL","AR","AZ","CA","CO","CT","DE","FL","GA","IA","ID","IL","IN","KS","KY","LA","MA","MD","ME","MI","MN","MO","MS","MT","NC","ND","NE","NH","NJ","NM","NV","NY","OH","OK","OR","PA","RI","SC","SD","TN","TX","UT","VA","VT","WA","WI","WV","WY"]
#print(len(statelist))
future = year3
# do ridge regression on train data
for i in range(49):
data = pd.read_csv('%s.csv' % (statelist[i]))
year1 = data[['Year']][:44]
#print(year1.shape)
year2 = data[['Year']][-11:]
# Split data for train and test
#print(i)
all_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][0:55]
all_y = data[['SOEGP']][0:55]
train_x, test_x, train_y, test_y = train_test_split(all_x, all_y, test_size=0.2)
regr2 = linear_model.Ridge(alpha = 0.75)
regr2.fit(train_x, train_y)
# predict SOEGP for future
#year3 = data[['Year']][-6:]
#year3 = year3.set_index([[0, 1, 2, 3, 4, 5]])
#print(year3)
future_x = data[['GDP','CLPRB','EMFDB','ENPRP','NGMPB','PAPRB','PCP','ZNDX','Nominal Price', 'Inflation Adjusted Price']][-6:]
pred = pd.DataFrame(regr2.predict(future_x))
pred.columns = [statelist[i]]
#print(pred)
future = pd.concat([future, pred], axis=1)
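As context for the loop above, here is a self-contained sketch of the same estimator on synthetic data; the feature columns and target are invented stand-ins for the state CSV fields, and alpha = 0.75 matches the value used above.
import numpy as np
import pandas as pd
from sklearn import linear_model
from sklearn.model_selection import train_test_split

# Synthetic stand-in for one state's file: 10 predictors, one target.
rng = np.random.RandomState(0)
X = pd.DataFrame(rng.rand(55, 10), columns=['f%d' % i for i in range(10)])
y = pd.Series(X.values @ rng.rand(10) + 0.1 * rng.randn(55), name='SOEGP')

train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2)
model = linear_model.Ridge(alpha=0.75)
model.fit(train_x, train_y)
print(model.score(test_x, test_y))  # R^2 on the held-out 20%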
|
|
b87c9002aeabde283f2cf5e37de311fb3469af2b
|
lib/ansible/modules/cloud/openstack/os_client_config.py
|
lib/ansible/modules/cloud/openstack/os_client_config.py
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
options:
regions:
description:
- Include regions in the returned data
required: false
default: 'yes'
version_added: "2.0"
requirements: [ os-client-config ]
author: Monty Taylor
'''
EXAMPLES = '''
# Inject facts about OpenStack clouds
- os-client-config
'''
def main():
module = AnsibleModule(
argument_spec=dict(
regions = dict(default=True, required=False, type='bool'),
action = dict(default='list', choices=['list']),
),
)
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = {}
for cloud in config.get_all_clouds():
if p['regions']:
cloud_region = clouds.get(cloud.name, {})
cloud_region[cloud.region] = cloud.config
clouds[cloud.name] = cloud_region
else:
clouds[cloud.name] = cloud.config
module.exit_json(clouds=clouds)
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
|
Add OpenStack Client Config module
|
Add OpenStack Client Config module
All of the ansible OpenStack modules are driven by a clouds.yaml config
file which is processed by os-client-config. Expose the data returned by
that library to enable playbooks to iterate over available clouds.
|
Python
|
mit
|
thaim/ansible,thaim/ansible
|
Add OpenStack Client Config module
All of the ansible OpenStack modules are driven by a clouds.yaml config
file which is processed by os-client-config. Expose the data returned by
that library to enable playbooks to iterate over available clouds.
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
options:
regions:
description:
- Include regions in the returned data
required: false
default: 'yes'
version_added: "2.0"
requirements: [ os-client-config ]
author: Monty Taylor
'''
EXAMPLES = '''
# Inject facts about OpenStack clouds
- os-client-config
'''
def main():
module = AnsibleModule(
argument_spec=dict(
regions = dict(default=True, required=False, type='bool'),
action = dict(default='list', choices=['list']),
),
)
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = {}
for cloud in config.get_all_clouds():
if p['regions']:
cloud_region = clouds.get(cloud.name, {})
cloud_region[cloud.region] = cloud.config
clouds[cloud.name] = cloud_region
else:
clouds[cloud.name] = cloud.config
module.exit_json(clouds=clouds)
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
|
<commit_before><commit_msg>Add OpenStack Client Config module
All of the ansible OpenStack modules are driven by a clouds.yaml config
file which is processed by os-client-config. Expose the data returned by
that library to enable playbooks to iterate over available clouds.<commit_after>
|
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
options:
regions:
description:
- Include regions in the returned data
required: false
default: 'yes'
version_added: "2.0"
requirements: [ os-client-config ]
author: Monty Taylor
'''
EXAMPLES = '''
# Inject facts about OpenStack clouds
- os-client-config
'''
def main():
module = AnsibleModule(
argument_spec=dict(
regions = dict(default=True, required=False, type='bool'),
action = dict(default='list', choices=['list']),
),
)
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = {}
for cloud in config.get_all_clouds():
if p['regions']:
cloud_region = clouds.get(cloud.name, {})
cloud_region[cloud.region] = cloud.config
clouds[cloud.name] = cloud_region
else:
clouds[cloud.name] = cloud.config
module.exit_json(clouds=clouds)
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
|
Add OpenStack Client Config module
All of the ansible OpenStack modules are driven by a clouds.yaml config
file which is processed by os-client-config. Expose the data returned by
that library to enable playbooks to iterate over available clouds.#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
options:
regions:
description:
- Include regions in the returned data
required: false
default: 'yes'
version_added: "2.0"
requirements: [ os-client-config ]
author: Monty Taylor
'''
EXAMPLES = '''
# Inject facts about OpenStack clouds
- os-client-config
'''
def main():
module = AnsibleModule(
argument_spec=dict(
regions = dict(default=True, required=False, type='bool'),
action = dict(default='list', choices=['list']),
),
)
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = {}
for cloud in config.get_all_clouds():
if p['regions']:
cloud_region = clouds.get(cloud.name, {})
cloud_region[cloud.region] = cloud.config
clouds[cloud.name] = cloud_region
else:
clouds[cloud.name] = cloud.config
module.exit_json(clouds=clouds)
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
|
<commit_before><commit_msg>Add OpenStack Client Config module
All of the ansible OpenStack modules are driven by a clouds.yaml config
file which is processed by os-client-config. Expose the data returned by
that library to enable playbooks to iterate over available clouds.<commit_after>#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os_client_config
from os_client_config import exceptions
DOCUMENTATION = '''
---
module: os_client_config
short_description: Get OpenStack Client config
description:
- Get I(openstack) client config data from clouds.yaml or environment
options:
regions:
description:
- Include regions in the returned data
required: false
default: 'yes'
version_added: "2.0"
requirements: [ os-client-config ]
author: Monty Taylor
'''
EXAMPLES = '''
# Inject facts about OpenStack clouds
- os-client-config
'''
def main():
module = AnsibleModule(
argument_spec=dict(
regions = dict(default=True, required=False, type='bool'),
action = dict(default='list', choices=['list']),
),
)
p = module.params
try:
config = os_client_config.OpenStackConfig()
clouds = {}
for cloud in config.get_all_clouds():
if p['regions']:
cloud_region = clouds.get(cloud.name, {})
cloud_region[cloud.region] = cloud.config
clouds[cloud.name] = cloud_region
else:
clouds[cloud.name] = cloud.config
module.exit_json(clouds=clouds)
except exceptions.OpenStackConfigException as e:
module.fail_json(msg=str(e))
# import module snippets
from ansible.module_utils.basic import *
main()
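For reference, the consuming side of the same library can be sketched in a few lines. Only OpenStackConfig() and get_all_clouds() are taken from the module above; the print is illustrative.
import os_client_config

# Iterate over every cloud defined in clouds.yaml or the environment,
# mirroring the data the module returns as the 'clouds' fact.
config = os_client_config.OpenStackConfig()
for cloud in config.get_all_clouds():
    print(cloud.name, cloud.region)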
|
|
0da1480903e965597fd4450c8dea28a7d8bd6a44
|
graph/Python/kahn_algorithm.py
|
graph/Python/kahn_algorithm.py
|
from collections import defaultdict
#Class to represent a graph
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list) #dictionary containing adjacency List
self.V = vertices #No. of vertices
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
def topological_sort(self):
#initialise in_degrees
in_degree = [0 for i in range(self.V)]
#calculate in_degrees of all vertices
for i in self.graph:
for j in self.graph[i]:
in_degree[j]+=1
#queue to keep track of vertices with 0 in_degree
queue = []
for i in range(self.V):
if(in_degree[i] == 0):
queue.append(i)
top_order = []
#run loop until all vertices are added
cnt = 0
while queue:
u = queue.pop(0)
top_order.append(u)
#remove edges outgoing from u
for vertex in self.graph[u]:
in_degree[vertex] -= 1
if in_degree[vertex] == 0:
queue.append(vertex)
cnt += 1
        if cnt != self.V:
            print("No topological ordering exists.")
        else:
            print(top_order)
#Normal case
g= Graph(6)
g.addEdge(5, 2);
g.addEdge(5, 0);
g.addEdge(4, 0);
g.addEdge(4, 1);
g.addEdge(2, 3);
g.addEdge(3, 1);
print "Following is a Topological Sort of the given graph"
g.topological_sort()
# Cyclic graph
g2= Graph(6)
g2.addEdge(5, 2);
g2.addEdge(2, 5);
g2.addEdge(4, 0);
g2.addEdge(4, 1);
g2.addEdge(2, 3);
g2.addEdge(3, 1);
g2.addEdge(5, 0);
print "Following is a Topological Sort of the given graph"
g2.topological_sort()
|
Add Kahn's algorithm in python
|
Add Kahn's algorithm in python
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Add Kahn's algorithm in python
|
from collections import defaultdict
#Class to represent a graph
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list) #dictionary containing adjacency List
self.V = vertices #No. of vertices
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
def topological_sort(self):
#initialise in_degrees
in_degree = [0 for i in range(self.V)]
#calculate in_degrees of all vertices
for i in self.graph:
for j in self.graph[i]:
in_degree[j]+=1
#queue to keep track of vertices with 0 in_degree
queue = []
for i in range(self.V):
if(in_degree[i] == 0):
queue.append(i)
top_order = []
#run loop until all vertices are added
cnt = 0
while queue:
u = queue.pop(0)
top_order.append(u)
#remove edges outgoing from u
for vertex in self.graph[u]:
in_degree[vertex] -= 1
if in_degree[vertex] == 0:
queue.append(vertex)
cnt += 1
        if cnt != self.V:
            print("No topological ordering exists.")
        else:
            print(top_order)
#Normal case
g= Graph(6)
g.addEdge(5, 2);
g.addEdge(5, 0);
g.addEdge(4, 0);
g.addEdge(4, 1);
g.addEdge(2, 3);
g.addEdge(3, 1);
print "Following is a Topological Sort of the given graph"
g.topological_sort()
# Cyclic graph
g2= Graph(6)
g2.addEdge(5, 2);
g2.addEdge(2, 5);
g2.addEdge(4, 0);
g2.addEdge(4, 1);
g2.addEdge(2, 3);
g2.addEdge(3, 1);
g2.addEdge(5, 0);
print "Following is a Topological Sort of the given graph"
g2.topological_sort()
|
<commit_before><commit_msg>Add Kahn's algorithm in python<commit_after>
|
from collections import defaultdict
#Class to represent a graph
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list) #dictionary containing adjacency List
self.V = vertices #No. of vertices
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
def topological_sort(self):
#initialise in_degrees
in_degree = [0 for i in range(self.V)]
#calculate in_degrees of all vertices
for i in self.graph:
for j in self.graph[i]:
in_degree[j]+=1
#queue to keep track of vertices with 0 in_degree
queue = []
for i in range(self.V):
if(in_degree[i] == 0):
queue.append(i)
top_order = []
#run loop until all vertices are added
cnt = 0
while queue:
u = queue.pop(0)
top_order.append(u)
#remove edges outgoing from u
for vertex in self.graph[u]:
in_degree[vertex] -= 1
if in_degree[vertex] == 0:
queue.append(vertex)
cnt += 1
        if cnt != self.V:
            print("No topological ordering exists.")
        else:
            print(top_order)
#Normal case
g= Graph(6)
g.addEdge(5, 2);
g.addEdge(5, 0);
g.addEdge(4, 0);
g.addEdge(4, 1);
g.addEdge(2, 3);
g.addEdge(3, 1);
print "Following is a Topological Sort of the given graph"
g.topological_sort()
# Cyclic graph
g2= Graph(6)
g2.addEdge(5, 2);
g2.addEdge(2, 5);
g2.addEdge(4, 0);
g2.addEdge(4, 1);
g2.addEdge(2, 3);
g2.addEdge(3, 1);
g2.addEdge(5, 0);
print "Following is a Topological Sort of the given graph"
g2.topological_sort()
|
Add Kahn's algorithm in python
from collections import defaultdict
#Class to represent a graph
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list) #dictionary containing adjacency List
self.V = vertices #No. of vertices
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
def topological_sort(self):
#initialise in_degrees
in_degree = [0 for i in range(self.V)]
#calculate in_degrees of all vertices
for i in self.graph:
for j in self.graph[i]:
in_degree[j]+=1
#queue to keep track of vertices with 0 in_degree
queue = []
for i in range(self.V):
if(in_degree[i] == 0):
queue.append(i)
top_order = []
#run loop until all vertices are added
cnt = 0
while queue:
u = queue.pop(0)
top_order.append(u)
#remove edges outgoing from u
for vertex in self.graph[u]:
in_degree[vertex] -= 1
if in_degree[vertex] == 0:
queue.append(vertex)
cnt += 1
        if cnt != self.V:
            print("No topological ordering exists.")
        else:
            print(top_order)
#Normal case
g= Graph(6)
g.addEdge(5, 2);
g.addEdge(5, 0);
g.addEdge(4, 0);
g.addEdge(4, 1);
g.addEdge(2, 3);
g.addEdge(3, 1);
print "Following is a Topological Sort of the given graph"
g.topological_sort()
# Cyclic graph
g2= Graph(6)
g2.addEdge(5, 2);
g2.addEdge(2, 5);
g2.addEdge(4, 0);
g2.addEdge(4, 1);
g2.addEdge(2, 3);
g2.addEdge(3, 1);
g2.addEdge(5, 0);
print "Following is a Topological Sort of the given graph"
g2.topological_sort()
|
<commit_before><commit_msg>Add Kahn's algorithm in python<commit_after>
from collections import defaultdict
#Class to represent a graph
class Graph:
def __init__(self,vertices):
self.graph = defaultdict(list) #dictionary containing adjacency List
self.V = vertices #No. of vertices
# function to add an edge to graph
def addEdge(self,u,v):
self.graph[u].append(v)
def topological_sort(self):
#initialise in_degrees
in_degree = [0 for i in range(self.V)]
#calculate in_degrees of all vertices
for i in self.graph:
for j in self.graph[i]:
in_degree[j]+=1
#queue to keep track of vertices with 0 in_degree
queue = []
for i in range(self.V):
if(in_degree[i] == 0):
queue.append(i)
top_order = []
#run loop until all vertices are added
cnt = 0
while queue:
u = queue.pop(0)
top_order.append(u)
#remove edges outgoing from u
for vertex in self.graph[u]:
in_degree[vertex] -= 1
if in_degree[vertex] == 0:
queue.append(vertex)
cnt += 1
        if cnt != self.V:
            print("No topological ordering exists.")
        else:
            print(top_order)
#Normal case
g= Graph(6)
g.addEdge(5, 2);
g.addEdge(5, 0);
g.addEdge(4, 0);
g.addEdge(4, 1);
g.addEdge(2, 3);
g.addEdge(3, 1);
print "Following is a Topological Sort of the given graph"
g.topological_sort()
# Cyclic graph
g2= Graph(6)
g2.addEdge(5, 2);
g2.addEdge(2, 5);
g2.addEdge(4, 0);
g2.addEdge(4, 1);
g2.addEdge(2, 3);
g2.addEdge(3, 1);
g2.addEdge(5, 0);
print "Following is a Topological Sort of the given graph"
g2.topological_sort()
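One well-known refinement, shown as a sketch: list.pop(0) above is O(n) per dequeue, so a collections.deque keeps each dequeue O(1) without changing the algorithm. kahn_order below is a rewritten helper, not part of the original commit.
from collections import deque

# Same Kahn loop as above, but with an O(1) popleft instead of list.pop(0).
def kahn_order(num_vertices, edges):
    adj = [[] for _ in range(num_vertices)]
    in_degree = [0] * num_vertices
    for u, v in edges:
        adj[u].append(v)
        in_degree[v] += 1
    queue = deque(i for i in range(num_vertices) if in_degree[i] == 0)
    order = []
    while queue:
        u = queue.popleft()
        order.append(u)
        for v in adj[u]:
            in_degree[v] -= 1
            if in_degree[v] == 0:
                queue.append(v)
    return order if len(order) == num_vertices else None  # None => cycle

print(kahn_order(6, [(5, 2), (5, 0), (4, 0), (4, 1), (2, 3), (3, 1)]))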
|
|
71cd470f727c61330d22fda8b9a9f94e47b6353f
|
avena/tests/test-logistic.py
|
avena/tests/test-logistic.py
|
#!/usr/bin/env python
from numpy import all, array, random
from .. import logistic
def test_logistic():
x = random.random_sample(100)
x *= 10.0
x -= 5.0
for k in [1.0, 2.0, 5.0, 10.0]:
y = logistic._logistic(k, x)
assert all(y >= 0.0) and all(y <= 1.0)
if __name__ == '__main__':
pass
|
Add a unit test for the logistic module.
|
Add a unit test for the logistic module.
|
Python
|
isc
|
eliteraspberries/avena
|
Add a unit test for the logistic module.
|
#!/usr/bin/env python
from numpy import all, array, random
from .. import logistic
def test_logistic():
x = random.random_sample(100)
x *= 10.0
x -= 5.0
for k in [1.0, 2.0, 5.0, 10.0]:
y = logistic._logistic(k, x)
assert all(y >= 0.0) and all(y <= 1.0)
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add a unit test for the logistic module.<commit_after>
|
#!/usr/bin/env python
from numpy import all, array, random
from .. import logistic
def test_logistic():
x = random.random_sample(100)
x *= 10.0
x -= 5.0
for k in [1.0, 2.0, 5.0, 10.0]:
y = logistic._logistic(k, x)
assert all(y >= 0.0) and all(y <= 1.0)
if __name__ == '__main__':
pass
|
Add a unit test for the logistic module.#!/usr/bin/env python
from numpy import all, array, random
from .. import logistic
def test_logistic():
x = random.random_sample(100)
x *= 10.0
x -= 5.0
for k in [1.0, 2.0, 5.0, 10.0]:
y = logistic._logistic(k, x)
assert all(y >= 0.0) and all(y <= 1.0)
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add a unit test for the logistic module.<commit_after>#!/usr/bin/env python
from numpy import all, array, random
from .. import logistic
def test_logistic():
x = random.random_sample(100)
x *= 10.0
x -= 5.0
for k in [1.0, 2.0, 5.0, 10.0]:
y = logistic._logistic(k, x)
assert all(y >= 0.0) and all(y <= 1.0)
if __name__ == '__main__':
pass
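The module under test is not shown in this record; a plausible implementation consistent with the assertion (outputs bounded in [0, 1] for every k and x) is the standard logistic curve below. This is an assumption for illustration, not avena's actual code.
import numpy as np

def _logistic(k, x):
    # Standard logistic function; lies in (0, 1) for all real k and x,
    # which is what the test above asserts.
    return 1.0 / (1.0 + np.exp(-k * np.asarray(x)))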
|
|
24cf12a47c95883fc431d4ee295bbe763a107c93
|
snippet_parser/cs.py
|
snippet_parser/cs.py
|
from base import *
class SnippetParser(SnippetParserBase):
def strip_template(self, template, normalize, collapse):
if self.is_citation_needed(template):
# These templates often contain other information
# (date/justification), so we drop it all here
return CITATION_NEEDED_MARKER
return ''
|
Drop dates and comments from Czech templates.
|
Drop dates and comments from Czech templates.
Former-commit-id: 2291964afbbb8f2d896bb069bb37f24afc2fb081
|
Python
|
mit
|
eggpi/citationhunt,guilherme-pg/citationhunt,eggpi/citationhunt,eggpi/citationhunt,guilherme-pg/citationhunt,eggpi/citationhunt,guilherme-pg/citationhunt,guilherme-pg/citationhunt
|
Drop dates and comments from Czech templates.
Former-commit-id: 2291964afbbb8f2d896bb069bb37f24afc2fb081
|
from base import *
class SnippetParser(SnippetParserBase):
def strip_template(self, template, normalize, collapse):
if self.is_citation_needed(template):
# These templates often contain other information
# (date/justification), so we drop it all here
return CITATION_NEEDED_MARKER
return ''
|
<commit_before><commit_msg>Drop dates and comments from Czech templates.
Former-commit-id: 2291964afbbb8f2d896bb069bb37f24afc2fb081<commit_after>
|
from base import *
class SnippetParser(SnippetParserBase):
def strip_template(self, template, normalize, collapse):
if self.is_citation_needed(template):
# These templates often contain other information
# (date/justification), so we drop it all here
return CITATION_NEEDED_MARKER
return ''
|
Drop dates and comments from Czech templates.
Former-commit-id: 2291964afbbb8f2d896bb069bb37f24afc2fb081from base import *
class SnippetParser(SnippetParserBase):
def strip_template(self, template, normalize, collapse):
if self.is_citation_needed(template):
# These templates often contain other information
# (date/justification), so we drop it all here
return CITATION_NEEDED_MARKER
return ''
|
<commit_before><commit_msg>Drop dates and comments from Czech templates.
Former-commit-id: 2291964afbbb8f2d896bb069bb37f24afc2fb081<commit_after>from base import *
class SnippetParser(SnippetParserBase):
def strip_template(self, template, normalize, collapse):
if self.is_citation_needed(template):
# These templates often contain other information
# (date/justification), so we drop it all here
return CITATION_NEEDED_MARKER
return ''
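Neither SnippetParserBase nor CITATION_NEEDED_MARKER is defined in this record, so the stand-alone mock below only illustrates the intended behavior: any citation-needed template, dates and justifications included, collapses to the marker. The marker value and the Czech template name are illustrative guesses.
CITATION_NEEDED_MARKER = '[citation needed]'  # placeholder value

def strip_template_mock(template_text, is_citation_needed):
    # Mirrors SnippetParser.strip_template: keep only the marker and drop
    # any date/justification parameters the template carried.
    return CITATION_NEEDED_MARKER if is_citation_needed(template_text) else ''

print(strip_template_mock('{{Doplňte zdroj|datum=2016}}',
                          lambda t: t.startswith('{{Doplňte zdroj')))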
|
|
682f942799c15acadea1a707261ed606b4c1e245
|
mailqueue/migrations/0003_auto__add_field_mailermessage_bcc_address__add_field_mailermessage_las.py
|
mailqueue/migrations/0003_auto__add_field_mailermessage_bcc_address__add_field_mailermessage_las.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MailerMessage.bcc_address'
db.add_column('mailqueue_mailermessage', 'bcc_address',
self.gf('django.db.models.fields.EmailField')(max_length=250, null=True, blank=True),
keep_default=False)
# Adding field 'MailerMessage.last_attempt'
db.add_column('mailqueue_mailermessage', 'last_attempt',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MailerMessage.bcc_address'
db.delete_column('mailqueue_mailermessage', 'bcc_address')
# Deleting field 'MailerMessage.last_attempt'
db.delete_column('mailqueue_mailermessage', 'last_attempt')
models = {
'mailqueue.mailermessage': {
'Meta': {'object_name': 'MailerMessage'},
'app': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bcc_address': ('django.db.models.fields.EmailField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'from_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
'html_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'to_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'})
}
}
complete_apps = ['mailqueue']
|
Add migrations for new fields.
|
Add migrations for new fields.
|
Python
|
mit
|
winfieldco/django-mail-queue,styrmis/django-mail-queue,Goury/django-mail-queue,dstegelman/django-mail-queue,dstegelman/django-mail-queue,Goury/django-mail-queue
|
Add migrations for new fields.
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MailerMessage.bcc_address'
db.add_column('mailqueue_mailermessage', 'bcc_address',
self.gf('django.db.models.fields.EmailField')(max_length=250, null=True, blank=True),
keep_default=False)
# Adding field 'MailerMessage.last_attempt'
db.add_column('mailqueue_mailermessage', 'last_attempt',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MailerMessage.bcc_address'
db.delete_column('mailqueue_mailermessage', 'bcc_address')
# Deleting field 'MailerMessage.last_attempt'
db.delete_column('mailqueue_mailermessage', 'last_attempt')
models = {
'mailqueue.mailermessage': {
'Meta': {'object_name': 'MailerMessage'},
'app': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bcc_address': ('django.db.models.fields.EmailField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'from_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
'html_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'to_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'})
}
}
complete_apps = ['mailqueue']
|
<commit_before><commit_msg>Add migrations for new fields.<commit_after>
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MailerMessage.bcc_address'
db.add_column('mailqueue_mailermessage', 'bcc_address',
self.gf('django.db.models.fields.EmailField')(max_length=250, null=True, blank=True),
keep_default=False)
# Adding field 'MailerMessage.last_attempt'
db.add_column('mailqueue_mailermessage', 'last_attempt',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MailerMessage.bcc_address'
db.delete_column('mailqueue_mailermessage', 'bcc_address')
# Deleting field 'MailerMessage.last_attempt'
db.delete_column('mailqueue_mailermessage', 'last_attempt')
models = {
'mailqueue.mailermessage': {
'Meta': {'object_name': 'MailerMessage'},
'app': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bcc_address': ('django.db.models.fields.EmailField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'from_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
'html_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'to_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'})
}
}
complete_apps = ['mailqueue']
|
Add migrations for new fields.# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MailerMessage.bcc_address'
db.add_column('mailqueue_mailermessage', 'bcc_address',
self.gf('django.db.models.fields.EmailField')(max_length=250, null=True, blank=True),
keep_default=False)
# Adding field 'MailerMessage.last_attempt'
db.add_column('mailqueue_mailermessage', 'last_attempt',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MailerMessage.bcc_address'
db.delete_column('mailqueue_mailermessage', 'bcc_address')
# Deleting field 'MailerMessage.last_attempt'
db.delete_column('mailqueue_mailermessage', 'last_attempt')
models = {
'mailqueue.mailermessage': {
'Meta': {'object_name': 'MailerMessage'},
'app': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bcc_address': ('django.db.models.fields.EmailField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'from_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
'html_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'to_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'})
}
}
complete_apps = ['mailqueue']
|
<commit_before><commit_msg>Add migrations for new fields.<commit_after># -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'MailerMessage.bcc_address'
db.add_column('mailqueue_mailermessage', 'bcc_address',
self.gf('django.db.models.fields.EmailField')(max_length=250, null=True, blank=True),
keep_default=False)
# Adding field 'MailerMessage.last_attempt'
db.add_column('mailqueue_mailermessage', 'last_attempt',
self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'MailerMessage.bcc_address'
db.delete_column('mailqueue_mailermessage', 'bcc_address')
# Deleting field 'MailerMessage.last_attempt'
db.delete_column('mailqueue_mailermessage', 'last_attempt')
models = {
'mailqueue.mailermessage': {
'Meta': {'object_name': 'MailerMessage'},
'app': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'bcc_address': ('django.db.models.fields.EmailField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'from_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
'html_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_attempt': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject': ('django.db.models.fields.CharField', [], {'max_length': '250', 'null': 'True', 'blank': 'True'}),
'to_address': ('django.db.models.fields.EmailField', [], {'max_length': '250'})
}
}
complete_apps = ['mailqueue']
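For orientation, the model fields this South migration implies would look roughly like the sketch below — an inference from the migration, not the package's actual models.py.
from django.db import models

class MailerMessage(models.Model):
    # ...existing fields elided; only the two columns added above are shown.
    bcc_address = models.EmailField(max_length=250, null=True, blank=True)
    last_attempt = models.DateTimeField(null=True, blank=True)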
|
|
358f94e58557f9c2cec21a2ec8fb55cb8def0e34
|
plot_prior_misses.py
|
plot_prior_misses.py
|
# Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: [c == '0' for c in open('/tmp/' + p).read()] for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
|
Add pyplot script for plotting prior miss timeseries
|
Add pyplot script for plotting prior miss timeseries
|
Python
|
bsd-2-clause
|
eastlhu/losslessh264,erillfire/wusunyasuo,itplanes/losslessh264,xiangshuai/losslessh264,xiangshuai/losslessh264,noname007/losslessh264,TonySheh/losslessh264,common2015/losslessh264,krsjoseph/losslessh264,krsjoseph/losslessh264,sunfei/losslessh264,sunfei/losslessh264,aquar25/losslessh264,yurenyong123/losslessh264,lioonline/losslessh264,erillfire/wusunyasuo,xiangshuai/losslessh264,hj3938/losslessh264,hcxyzlm/losslessh264,eastlhu/losslessh264,SunGuo/losslessh264,dahebolangkuan/losslessh264,treble37/losslessh264,lioonline/losslessh264,WuYaoWang/losslessh264,froggatt/losslessh264,treble37/losslessh264,dahebolangkuan/losslessh264,eastlhu/losslessh264,heavenlw/losslessh264,bitwing/losslessh264,erillfire/wusunyasuo,froggatt/losslessh264,hanchl/losslessh264,krsjoseph/losslessh264,joncampbell123/losslessh264,hj3938/losslessh264,dahebolangkuan/losslessh264,jasonzhong/losslessh264,joncampbell123/losslessh264,zyhh/losslessh264,hioop/losslessh264,hanchl/losslessh264,heavenlw/losslessh264,itplanes/losslessh264,zyhh/losslessh264,hanchl/losslessh264,hcxyzlm/losslessh264,abhishekgahlot/losslessh264,kevinzhang1986/losslessh264,erillfire/wusunyasuo,subailong/losslessh264,abhishekgahlot/losslessh264,hgl888/losslessh264,xiangshuai/losslessh264,hj3938/losslessh264,zyhh/losslessh264,itplanes/losslessh264,LaurenLuoYun/losslessh264,hgl888/losslessh264,PeterBLITZ/losslessh264,hioop/losslessh264,mazalet/losslessh264,jlhbaseball15/losslessh264,kevinzhang1986/losslessh264,hioop/losslessh264,aquar25/losslessh264,danielrh/losslessh264,danielrh/losslessh264,treble37/losslessh264,LaurenLuoYun/losslessh264,xiangshuai/losslessh264,krsjoseph/losslessh264,yurenyong123/losslessh264,hanchl/losslessh264,hj3938/losslessh264,bowlofstew/losslessh264,heavenlw/losslessh264,hgl888/losslessh264,alihalabyah/losslessh264,noname007/losslessh264,danielrh/losslessh264,hcxyzlm/losslessh264,SunGuo/losslessh264,yurenyong123/losslessh264,bitwing/losslessh264,Ghimtim/losslessh264,jlhbaseball15/losslessh264,heavenlw/losslessh264,maxming2333/losslessh264,itplanes/losslessh264,legendtkl/losslessh264,Ghimtim/losslessh264,xiangshuai/losslessh264,krsjoseph/losslessh264,dahebolangkuan/losslessh264,hgl888/losslessh264,hanchl/losslessh264,danielrh/losslessh264,froggatt/losslessh264,legendtkl/losslessh264,hj3938/losslessh264,dahebolangkuan/losslessh264,jlhbaseball15/losslessh264,froggatt/losslessh264,xiangshuai/losslessh264,erillfire/wusunyasuo,mazalet/losslessh264,hj3938/losslessh264,alihalabyah/losslessh264,itplanes/losslessh264,hioop/losslessh264,bowlofstew/losslessh264,LaurenLuoYun/losslessh264,WuYaoWang/losslessh264,noname007/losslessh264,hanchl/losslessh264,aquar25/losslessh264,PeterBLITZ/losslessh264,aquar25/losslessh264,froggatt/losslessh264,joncampbell123/losslessh264,joncampbell123/losslessh264,lioonline/losslessh264,sunfei/losslessh264,bowlofstew/losslessh264,LaurenLuoYun/losslessh264,TonySheh/losslessh264,LaurenLuoYun/losslessh264,alihalabyah/losslessh264,legendtkl/losslessh264,alihalabyah/losslessh264,Ghimtim/losslessh264,itplanes/losslessh264,treble37/losslessh264,lioonline/losslessh264,hj3938/losslessh264,kevinzhang1986/losslessh264,joncampbell123/losslessh264,abhishekgahlot/losslessh264,bowlofstew/losslessh264,Ghimtim/losslessh264,hgl888/losslessh264,eastlhu/losslessh264,jasonzhong/losslessh264,kevinzhang1986/losslessh264,treble37/losslessh264,TonySheh/losslessh264,noname007/losslessh264,common2015/losslessh264,legendtkl/losslessh264,danielrh/losslessh264,Ghimtim/losslessh264,TonySheh/losslessh264,sunfei/losslessh264,kevinzhang1986/losslessh264,noname007/losslessh264,PeterBLITZ/losslessh264,SunGuo/losslessh264,noname007/losslessh264,dahebolangkuan/losslessh264,bitwing/losslessh264,aquar25/losslessh264,mazalet/losslessh264,bitwing/losslessh264,PeterBLITZ/losslessh264,jasonzhong/losslessh264,maxming2333/losslessh264,yurenyong123/losslessh264,lioonline/losslessh264,itplanes/losslessh264,SunGuo/losslessh264,sunfei/losslessh264,zyhh/losslessh264,subailong/losslessh264,hioop/losslessh264,WuYaoWang/losslessh264,mazalet/losslessh264,SunGuo/losslessh264,WuYaoWang/losslessh264,maxming2333/losslessh264,bitwing/losslessh264,sunfei/losslessh264,jasonzhong/losslessh264,subailong/losslessh264,bowlofstew/losslessh264,legendtkl/losslessh264,hgl888/losslessh264,TonySheh/losslessh264,WuYaoWang/losslessh264,hioop/losslessh264,SunGuo/losslessh264,legendtkl/losslessh264,treble37/losslessh264,jlhbaseball15/losslessh264,maxming2333/losslessh264,alihalabyah/losslessh264,LaurenLuoYun/losslessh264,subailong/losslessh264,danielrh/losslessh264,eastlhu/losslessh264,abhishekgahlot/losslessh264,Ghimtim/losslessh264,hcxyzlm/losslessh264,krsjoseph/losslessh264,common2015/losslessh264,common2015/losslessh264,zyhh/losslessh264,hanchl/losslessh264,dahebolangkuan/losslessh264,heavenlw/losslessh264,hioop/losslessh264,common2015/losslessh264,alihalabyah/losslessh264,WuYaoWang/losslessh264,jlhbaseball15/losslessh264,eastlhu/losslessh264,maxming2333/losslessh264,treble37/losslessh264,mazalet/losslessh264,PeterBLITZ/losslessh264,jlhbaseball15/losslessh264,common2015/losslessh264,PeterBLITZ/losslessh264,heavenlw/losslessh264,maxming2333/losslessh264,WuYaoWang/losslessh264,subailong/losslessh264,yurenyong123/losslessh264,abhishekgahlot/losslessh264,froggatt/losslessh264,joncampbell123/losslessh264,noname007/losslessh264,bowlofstew/losslessh264,SunGuo/losslessh264,Ghimtim/losslessh264,subailong/losslessh264,aquar25/losslessh264,heavenlw/losslessh264,subailong/losslessh264,hcxyzlm/losslessh264,yurenyong123/losslessh264,joncampbell123/losslessh264,bitwing/losslessh264,jasonzhong/losslessh264,mazalet/losslessh264,common2015/losslessh264,alihalabyah/losslessh264,zyhh/losslessh264,jasonzhong/losslessh264,abhishekgahlot/losslessh264,TonySheh/losslessh264,erillfire/wusunyasuo,maxming2333/losslessh264,hcxyzlm/losslessh264,LaurenLuoYun/losslessh264,bitwing/losslessh264,kevinzhang1986/losslessh264,mazalet/losslessh264,aquar25/losslessh264,kevinzhang1986/losslessh264,eastlhu/losslessh264,froggatt/losslessh264,bowlofstew/losslessh264,lioonline/losslessh264,sunfei/losslessh264,yurenyong123/losslessh264,legendtkl/losslessh264,lioonline/losslessh264,hcxyzlm/losslessh264,hgl888/losslessh264,jasonzhong/losslessh264,PeterBLITZ/losslessh264,jlhbaseball15/losslessh264,krsjoseph/losslessh264,abhishekgahlot/losslessh264,danielrh/losslessh264
|
Add pyplot script for plotting prior miss timeseries
|
# Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: [c == '0' for c in open('/tmp/' + p).read()] for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
|
<commit_before><commit_msg>Add pyplot script for plotting prior miss timeseries<commit_after>
|
# Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: [c == '0' for c in open('/tmp/' + p).read()] for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
|
Add pyplot script for plotting prior miss timeseries# Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: [c == '0' for c in open('/tmp/' + p).read()] for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
|
<commit_before><commit_msg>Add pyplot script for plotting prior miss timeseries<commit_after># Run h264dec on a single file compiled with PRIOR_STATS and then run this script
# Outputs timeseries plot at /tmp/misses.pdf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import os
def temporal_misses(key):
values = data[key]
numbins = 100
binsize = len(values) // numbins
bins = [[]]
for v in values:
if len(bins[-1]) >= binsize:
bins.append([])
bins[-1].append(v)
x = range(len(bins))
total_misses = float(sum(values))
y = [100 * float(sum(b)) / total_misses for b in bins]
return plt.plot(x, y, label=key)[0]
paths = filter(lambda s: 'misses.log' in s, os.listdir('/tmp/'))
data = {p.split('_misses.')[0]: map(lambda c: c == '0', open('/tmp/' + p).read()) for p in paths}
handles = []
plt.figure(figsize=(20,10))
keys = data.keys()
for k in keys:
handles.append(temporal_misses(k))
plt.axis((0, 100, 0, 2))
plt.xlabel('temporal %')
plt.ylabel('% total misses')
plt.legend(handles, keys, bbox_to_anchor=(1, 1), bbox_transform=plt.gcf().transFigure)
out = PdfPages('/tmp/misses.pdf')
out.savefig()
out.close()
|
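A hedged fixture sketch for the plot script above: it assumes Python 2 semantics (map/filter returning lists, which the len() call on `values` requires) and that each /tmp/*_misses.log file is a string of '0'/'1' characters, one per prior lookup — both inferred from the parsing, not confirmed by the source.

# Hypothetical input generator so the plot script has something to read;
# the log file names are made up for illustration.
import random
random.seed(42)
for name in ('intra4x4_misses.log', 'cbp_misses.log'):
    with open('/tmp/' + name, 'w') as f:
        f.write(''.join(random.choice('01') for _ in range(10000)))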
|
91d37572785bb3a336407fb085ba47ea281f6729
|
recordlinkage/rl_logging.py
|
recordlinkage/rl_logging.py
|
"""Logging utilities."""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# Modifications copyright Jonathan de Bruin 2017
import logging as _logging
import sys as _sys
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# Scope the recordlinkage logger so it does not conflict with users' loggers
_logger = _logging.getLogger('recordlinkage')
# If we are in an interactive environment (like jupyter), set loglevel to info
# and pipe the output to stdout
if _interactive:
_logger.setLevel(_logging.INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
_logger.addHandler(_handler)
log = _logger.log
debug = _logger.debug
error = _logger.error
fatal = _logger.fatal
info = _logger.info
warn = _logger.warn
warning = _logger.warning
def get_verbosity():
"""Return how much logging output will be produced."""
return _logger.getEffectiveLevel()
def set_verbosity(verbosity):
"""Sets the threshold for what messages will be logged."""
_logger.setLevel(verbosity)
|
Add logging module based on tensorflow
|
Add logging module based on tensorflow
|
Python
|
bsd-3-clause
|
J535D165/recordlinkage,J535D165/recordlinkage
|
Add logging module based on tensorflow
|
"""Logging utilities."""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# Modifications copyright Jonathan de Bruin 2017
import logging as _logging
import sys as _sys
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# Scope the recordlinkage logger so it does not conflict with users' loggers
_logger = _logging.getLogger('recordlinkage')
# If we are in an interactive environment (like jupyter), set loglevel to info
# and pipe the output to stdout
if _interactive:
_logger.setLevel(_logging.INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
_logger.addHandler(_handler)
log = _logger.log
debug = _logger.debug
error = _logger.error
fatal = _logger.fatal
info = _logger.info
warn = _logger.warn
warning = _logger.warning
def get_verbosity():
"""Return how much logging output will be produced."""
return _logger.getEffectiveLevel()
def set_verbosity(verbosity):
"""Sets the threshold for what messages will be logged."""
_logger.setLevel(verbosity)
|
<commit_before><commit_msg>Add logging module based on tensorflow<commit_after>
|
"""Logging utilities."""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# Modifications copyright Jonathan de Bruin 2017
import logging as _logging
import sys as _sys
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# Scope the recordlinkage logger so it does not conflict with users' loggers
_logger = _logging.getLogger('recordlinkage')
# If we are in an interactive environment (like jupyter), set loglevel to info
# and pipe the output to stdout
if _interactive:
_logger.setLevel(_logging.INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
_logger.addHandler(_handler)
log = _logger.log
debug = _logger.debug
error = _logger.error
fatal = _logger.fatal
info = _logger.info
warn = _logger.warn
warning = _logger.warning
def get_verbosity():
"""Return how much logging output will be produced."""
return _logger.getEffectiveLevel()
def set_verbosity(verbosity):
"""Sets the threshold for what messages will be logged."""
_logger.setLevel(verbosity)
|
Add logging module based on tensorflow"""Logging utilities."""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# Modifications copyright Jonathan de Bruin 2017
import logging as _logging
import sys as _sys
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# Scope the recordlinkage logger so it does not conflict with users' loggers
_logger = _logging.getLogger('recordlinkage')
# If we are in an interactive environment (like jupyter), set loglevel to info
# and pipe the output to stdout
if _interactive:
_logger.setLevel(_logging.INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
_logger.addHandler(_handler)
log = _logger.log
debug = _logger.debug
error = _logger.error
fatal = _logger.fatal
info = _logger.info
warn = _logger.warn
warning = _logger.warning
def get_verbosity():
"""Return how much logging output will be produced."""
return _logger.getEffectiveLevel()
def set_verbosity(verbosity):
"""Sets the threshold for what messages will be logged."""
_logger.setLevel(verbosity)
|
<commit_before><commit_msg>Add logging module based on tensorflow<commit_after>"""Logging utilities."""
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# Modifications copyright Jonathan de Bruin 2017
import logging as _logging
import sys as _sys
# Determine whether we are in an interactive environment
_interactive = False
try:
# This is only defined in interactive shells
if _sys.ps1:
_interactive = True
except AttributeError:
# Even now, we may be in an interactive shell with `python -i`.
_interactive = _sys.flags.interactive
# Scope the recordlinkage logger so it does not conflict with users' loggers
_logger = _logging.getLogger('recordlinkage')
# If we are in an interactive environment (like jupyter), set loglevel to info
# and pipe the output to stdout
if _interactive:
_logger.setLevel(_logging.INFO)
_logging_target = _sys.stdout
else:
_logging_target = _sys.stderr
# Add the output handler
_handler = _logging.StreamHandler(_logging_target)
_handler.setFormatter(_logging.Formatter(_logging.BASIC_FORMAT, None))
_logger.addHandler(_handler)
log = _logger.log
debug = _logger.debug
error = _logger.error
fatal = _logger.fatal
info = _logger.info
warn = _logger.warn
warning = _logger.warning
def get_verbosity():
"""Return how much logging output will be produced."""
return _logger.getEffectiveLevel()
def set_verbosity(verbosity):
"""Sets the threshold for what messages will be logged."""
_logger.setLevel(verbosity)
|
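A minimal usage sketch of the module above — the import path is assumed from the file location (recordlinkage/rl_logging.py) and is not confirmed by the source:

import logging
from recordlinkage import rl_logging  # assumed import path
rl_logging.set_verbosity(logging.DEBUG)        # accepts stdlib level constants
rl_logging.info('comparing %d record pairs', 1000)
assert rl_logging.get_verbosity() == logging.DEBUG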
|
5e7866a897562b87f0c0ffa763c08f91c1aacccd
|
tests/test_cli.py
|
tests/test_cli.py
|
from __future__ import print_function, division, absolute_import
import glob
from os.path import basename
import py.path
import pytest
from psautohint.autohint import ACFontError
from psautohint.__main__ import main as psautohint
from . import DATA_DIR
UFO_FONTS = glob.glob("%s/*/*/font.ufo" % DATA_DIR)
OTF_FONTS = glob.glob("%s/*/*/font.otf" % DATA_DIR)
FONTS = (UFO_FONTS[0], OTF_FONTS[0])
@pytest.mark.parametrize("path", FONTS)
def test_basic(path, tmpdir):
# the input font is modified in-place, make a temp copy first
pathSrc = py.path.local(path)
pathDst = tmpdir / pathSrc.basename
pathSrc.copy(pathDst)
psautohint([str(pathDst)])
@pytest.mark.parametrize("path", FONTS)
def test_outpath(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
psautohint([path, '-o', out])
@pytest.mark.parametrize("path", glob.glob("%s/*/*/font.pfa" % DATA_DIR))
def test_pfa(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
with pytest.raises(ACFontError):
psautohint([path, '-o', out])
|
Add some basic CLI tests
|
Add some basic CLI tests
Wasn't that hard!
|
Python
|
apache-2.0
|
khaledhosny/psautohint,khaledhosny/psautohint
|
Add some basic CLI tests
Wasn't that hard!
|
from __future__ import print_function, division, absolute_import
import glob
from os.path import basename
import py.path
import pytest
from psautohint.autohint import ACFontError
from psautohint.__main__ import main as psautohint
from . import DATA_DIR
UFO_FONTS = glob.glob("%s/*/*/font.ufo" % DATA_DIR)
OTF_FONTS = glob.glob("%s/*/*/font.otf" % DATA_DIR)
FONTS = (UFO_FONTS[0], OTF_FONTS[0])
@pytest.mark.parametrize("path", FONTS)
def test_basic(path, tmpdir):
# the input font is modified in-place, make a temp copy first
pathSrc = py.path.local(path)
pathDst = tmpdir / pathSrc.basename
pathSrc.copy(pathDst)
psautohint([str(pathDst)])
@pytest.mark.parametrize("path", FONTS)
def test_outpath(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
psautohint([path, '-o', out])
@pytest.mark.parametrize("path", glob.glob("%s/*/*/font.pfa" % DATA_DIR))
def test_pfa(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
with pytest.raises(ACFontError):
psautohint([path, '-o', out])
|
<commit_before><commit_msg>Add some basic CLI tests
Wasn't that hard!<commit_after>
|
from __future__ import print_function, division, absolute_import
import glob
from os.path import basename
import py.path
import pytest
from psautohint.autohint import ACFontError
from psautohint.__main__ import main as psautohint
from . import DATA_DIR
UFO_FONTS = glob.glob("%s/*/*/font.ufo" % DATA_DIR)
OTF_FONTS = glob.glob("%s/*/*/font.otf" % DATA_DIR)
FONTS = (UFO_FONTS[0], OTF_FONTS[0])
@pytest.mark.parametrize("path", FONTS)
def test_basic(path, tmpdir):
# the input font is modified in-place, make a temp copy first
pathSrc = py.path.local(path)
pathDst = tmpdir / pathSrc.basename
pathSrc.copy(pathDst)
psautohint([str(pathDst)])
@pytest.mark.parametrize("path", FONTS)
def test_outpath(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
psautohint([path, '-o', out])
@pytest.mark.parametrize("path", glob.glob("%s/*/*/font.pfa" % DATA_DIR))
def test_pfa(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
with pytest.raises(ACFontError):
psautohint([path, '-o', out])
|
Add some basic CLI tests
Wasn't that hard!from __future__ import print_function, division, absolute_import
import glob
from os.path import basename
import py.path
import pytest
from psautohint.autohint import ACFontError
from psautohint.__main__ import main as psautohint
from . import DATA_DIR
UFO_FONTS = glob.glob("%s/*/*/font.ufo" % DATA_DIR)
OTF_FONTS = glob.glob("%s/*/*/font.otf" % DATA_DIR)
FONTS = (UFO_FONTS[0], OTF_FONTS[0])
@pytest.mark.parametrize("path", FONTS)
def test_basic(path, tmpdir):
# the input font is modified in-place, make a temp copy first
pathSrc = py.path.local(path)
pathDst = tmpdir / pathSrc.basename
pathSrc.copy(pathDst)
psautohint([str(pathDst)])
@pytest.mark.parametrize("path", FONTS)
def test_outpath(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
psautohint([path, '-o', out])
@pytest.mark.parametrize("path", glob.glob("%s/*/*/font.pfa" % DATA_DIR))
def test_pfa(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
with pytest.raises(ACFontError):
psautohint([path, '-o', out])
|
<commit_before><commit_msg>Add some basic CLI tests
Wasn't that hard!<commit_after>from __future__ import print_function, division, absolute_import
import glob
from os.path import basename
import py.path
import pytest
from psautohint.autohint import ACFontError
from psautohint.__main__ import main as psautohint
from . import DATA_DIR
UFO_FONTS = glob.glob("%s/*/*/font.ufo" % DATA_DIR)
OTF_FONTS = glob.glob("%s/*/*/font.otf" % DATA_DIR)
FONTS = (UFO_FONTS[0], OTF_FONTS[0])
@pytest.mark.parametrize("path", FONTS)
def test_basic(path, tmpdir):
# the input font is modified in-place, make a temp copy first
pathSrc = py.path.local(path)
pathDst = tmpdir / pathSrc.basename
pathSrc.copy(pathDst)
psautohint([str(pathDst)])
@pytest.mark.parametrize("path", FONTS)
def test_outpath(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
psautohint([path, '-o', out])
@pytest.mark.parametrize("path", glob.glob("%s/*/*/font.pfa" % DATA_DIR))
def test_pfa(path, tmpdir):
out = str(tmpdir / basename(path)) + ".out"
with pytest.raises(ACFontError):
psautohint([path, '-o', out])
|
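How the suite might be invoked (the pytest entry point is standard; the test path is assumed from tests/test_cli.py):

import pytest
# Hypothetical programmatic run, equivalent to `pytest tests/test_cli.py -v`;
# each UFO/OTF/PFA glob hit above becomes one parametrized case.
pytest.main(['tests/test_cli.py', '-v'])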
|
e77bb03ecb85bd90cd542eb5dddbda8c2af3df53
|
tp/netlib/objects/ObjectExtra/Wormhole.py
|
tp/netlib/objects/ObjectExtra/Wormhole.py
|
from xstruct import pack
from objects import Object
class Wormhole(Object):
"""\
The Wormhole is a top-level object that links two locations together.
It was added as a quick hack to make the Risk ruleset a little easier to play.
It has 3 int64 arguments which are the "other end" of the wormhole.
"""
subtype = 5
substruct = "qqq"
def __init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time, \
endx, endy, endz):
Object.__init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time)
self.length += 8*3
self.start = self.pos
self.end = (endx, endy, endz)
def __str__(self):
output = Object.__str__(self)
output += pack(self.substruct, *self.end)
return output
|
Add a wormhole object as a quick-hack to make risk connections work. (Pending tp04 being finished.)
|
Add a wormhole object as a quick-hack to make risk connections work.
(Pending tp04 being finished.)
|
Python
|
lgpl-2.1
|
thousandparsec/libtpproto-py,thousandparsec/libtpproto-py
|
Add a wormhole object as a quick-hack to make risk connections work.
(Pending tp04 being finished.)
|
from xstruct import pack
from objects import Object
class Wormhole(Object):
"""\
The Wormhole is a top-level object that links two locations together.
It was added as a quick hack to make the Risk ruleset a little easier to play.
It has 3 int64 arguments which are the "other end" of the wormhole.
"""
subtype = 5
substruct = "qqq"
def __init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time, \
endx, endy, endz):
Object.__init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time)
self.length += 8*3
self.start = self.pos
self.end = (endx, endy, endz)
def __str__(self):
output = Object.__str__(self)
output += pack(self.substruct, *self.end)
return output
|
<commit_before><commit_msg>Add a wormhole object as a quick-hack to make risk connections work.
(Pending tp04 being finished.)<commit_after>
|
from xstruct import pack
from objects import Object
class Wormhole(Object):
"""\
The Wormhole is a top level object that links to locations together.
It was added as a quick hack to make the Risk ruleset a little easier to play.
It has 3 int64 arguments which are the "other end" of the wormhole.
"""
subtype = 5
substruct = "qqq"
def __init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time, \
endx, endy, endz):
Object.__init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time)
self.length += 8*3
self.start = self.pos
self.end = (endx, endy, endz)
def __str__(self):
output = Object.__str__(self)
output += pack(self.substruct, *self.end)
return output
|
Add a wormhole object as a quick-hack to make risk connections work.
(Pending tp04 being finished.)
from xstruct import pack
from objects import Object
class Wormhole(Object):
"""\
The Wormhole is a top level object that links to locations together.
It was added as a quick hack to make the Risk ruleset a little easier to play.
It has 3 int64 arguments which are the "other end" of the wormhole.
"""
subtype = 5
substruct = "qqq"
def __init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time, \
endx, endy, endz):
Object.__init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time)
self.length += 8*3
self.start = self.pos
self.end = (endx, endy, endz)
def __str__(self):
output = Object.__str__(self)
output += pack(self.substruct, *self.end)
return output
|
<commit_before><commit_msg>Add a wormhole object as a quick-hack to make risk connections work.
(Pending tp04 being finished.)<commit_after>
from xstruct import pack
from objects import Object
class Wormhole(Object):
"""\
The Wormhole is a top level object that links to locations together.
It was added as a quick hack to make the Risk ruleset a little easier to play.
It has 3 int64 arguments which are the "other end" of the wormhole.
"""
subtype = 5
substruct = "qqq"
def __init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time, \
endx, endy, endz):
Object.__init__(self, sequence, \
id, type, name, \
size, \
startx, starty, startz, \
velx, vely, velz, \
contains, \
order_types, \
order_number, \
modify_time)
self.length += 8*3
self.start = self.pos
self.end = (endx, endy, endz)
def __str__(self):
output = Object.__str__(self)
output += pack(self.substruct, *self.end)
return output
|
|
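A hedged construction sketch — every value below is invented for illustration; only the parameter order comes from __init__ above, and it assumes the base Object class accepts the same leading arguments:

wh = Wormhole(sequence=1, id=42, type=5, name='wormhole-42',
              size=1000, startx=0, starty=0, startz=0,
              velx=0, vely=0, velz=0, contains=[],
              order_types=[], order_number=0, modify_time=0,
              endx=10, endy=20, endz=30)
payload = str(wh)  # base Object bytes plus the packed (endx, endy, endz)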
15fec2dc39dbac26f006171b7a5d0547fb40f254
|
get_google_token.py
|
get_google_token.py
|
#!/usr/bin/env python
import os
import sys
import webbrowser
from oauth2client.client import OAuth2WebServerFlow
def get_credentials(scopes):
flow = OAuth2WebServerFlow(
client_id=os.environ['GOOGLE_CLIENT_ID'],
client_secret=os.environ['GOOGLE_CLIENT_SECRET'],
scope=' '.join(scopes),
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_uri = flow.step1_get_authorize_url()
webbrowser.open(auth_uri)
auth_code = input('Enter the authorization code you receive here: ')
credentials = flow.step2_exchange(auth_code)
return credentials
def main(*scopes):
if not scopes:
sys.stderr.write('You need to specify at least one scope.\n')
sys.exit(1)
credentials = get_credentials(scopes)
refresh_token = credentials.refresh_token
sys.stdout.write('Refresh token: {0}\n'.format(refresh_token))
if __name__ == '__main__':
main(*sys.argv[1:])
|
Add first draft of Google refresh-token script
|
Add first draft of Google refresh-token script
|
Python
|
bsd-2-clause
|
myersjustinc/stitchbot
|
Add first draft of Google refresh-token script
|
#!/usr/bin/env python
import os
import sys
import webbrowser
from oauth2client.client import OAuth2WebServerFlow
def get_credentials(scopes):
flow = OAuth2WebServerFlow(
client_id=os.environ['GOOGLE_CLIENT_ID'],
client_secret=os.environ['GOOGLE_CLIENT_SECRET'],
scope=' '.join(scopes),
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_uri = flow.step1_get_authorize_url()
webbrowser.open(auth_uri)
auth_code = input('Enter the authorization code you receive here: ')
credentials = flow.step2_exchange(auth_code)
return credentials
def main(*scopes):
if not scopes:
sys.stderr.write('You need to specify at least one scope.\n')
sys.exit(1)
credentials = get_credentials(scopes)
refresh_token = credentials.refresh_token
sys.stdout.write('Refresh token: {0}\n'.format(refresh_token))
if __name__ == '__main__':
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add first draft of Google refresh-token script<commit_after>
|
#!/usr/bin/env python
import os
import sys
import webbrowser
from oauth2client.client import OAuth2WebServerFlow
def get_credentials(scopes):
flow = OAuth2WebServerFlow(
client_id=os.environ['GOOGLE_CLIENT_ID'],
client_secret=os.environ['GOOGLE_CLIENT_SECRET'],
scope=' '.join(scopes),
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_uri = flow.step1_get_authorize_url()
webbrowser.open(auth_uri)
auth_code = input('Enter the authorization code you receive here: ')
credentials = flow.step2_exchange(auth_code)
return credentials
def main(*scopes):
if not scopes:
sys.stderr.write('You need to specify at least one scope.\n')
sys.exit(1)
credentials = get_credentials(scopes)
refresh_token = credentials.refresh_token
sys.stdout.write('Refresh token: {0}\n'.format(refresh_token))
if __name__ == '__main__':
main(*sys.argv[1:])
|
Add first draft of Google refresh-token script#!/usr/bin/env python
import os
import sys
import webbrowser
from oauth2client.client import OAuth2WebServerFlow
def get_credentials(scopes):
flow = OAuth2WebServerFlow(
client_id=os.environ['GOOGLE_CLIENT_ID'],
client_secret=os.environ['GOOGLE_CLIENT_SECRET'],
scope=' '.join(scopes),
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_uri = flow.step1_get_authorize_url()
webbrowser.open(auth_uri)
auth_code = input('Enter the authorization code you receive here: ')
credentials = flow.step2_exchange(auth_code)
return credentials
def main(*scopes):
if not scopes:
sys.stderr.write('You need to specify at least one scope.\n')
sys.exit(1)
credentials = get_credentials(scopes)
refresh_token = credentials.refresh_token
sys.stdout.write('Refresh token: {0}\n'.format(refresh_token))
if __name__ == '__main__':
main(*sys.argv[1:])
|
<commit_before><commit_msg>Add first draft of Google refresh-token script<commit_after>#!/usr/bin/env python
import os
import sys
import webbrowser
from oauth2client.client import OAuth2WebServerFlow
def get_credentials(scopes):
flow = OAuth2WebServerFlow(
client_id=os.environ['GOOGLE_CLIENT_ID'],
client_secret=os.environ['GOOGLE_CLIENT_SECRET'],
scope=' '.join(scopes),
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
auth_uri = flow.step1_get_authorize_url()
webbrowser.open(auth_uri)
auth_code = input('Enter the authorization code you receive here: ')
credentials = flow.step2_exchange(auth_code)
return credentials
def main(*scopes):
if not scopes:
sys.stderr.write('You need to specify at least one scope.\n')
sys.exit(1)
credentials = get_credentials(scopes)
refresh_token = credentials.refresh_token
sys.stdout.write('Refresh token: {0}\n'.format(refresh_token))
if __name__ == '__main__':
main(*sys.argv[1:])
|
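Usage sketch, assuming both environment variables are exported; the scope URL is an example only. Note that under Python 2 the bare input() call evaluates whatever is pasted, so raw_input() would be the safer choice there:

#   GOOGLE_CLIENT_ID=... GOOGLE_CLIENT_SECRET=... \
#   python get_google_token.py https://www.googleapis.com/auth/drive.readonly
# Programmatic equivalent, reusing get_credentials defined above:
creds = get_credentials(['https://www.googleapis.com/auth/drive.readonly'])
print(creds.refresh_token)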
|
bbfc5549fb632d535ed1934e0d2bd1226ccd4507
|
openquake/commands/webui.py
|
openquake/commands/webui.py
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
|
Python
|
agpl-3.0
|
gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine,gem/oq-engine
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.<commit_after>
|
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
<commit_before><commit_msg>Add a command to start the WebUI using oq
The django development server is started in foreground
on localhost:8800. Meant to be used with multi_user = false
but works also if true.<commit_after># -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2016, GEM Foundation
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
import sys
import subprocess
from openquake.risklib import valid
from openquake.commonlib import sap
from openquake.engine import config
from openquake.server.dbserver import get_status
from openquake.commands.dbserver import runserver
def rundjango(subcmd):
subprocess.call([sys.executable, '-m', 'openquake.server.manage', subcmd])
def webui(cmd):
"""
start the webui server in foreground or perform other operation on the
django application
"""
dbstatus = get_status()
if dbstatus == 'not-running':
if valid.boolean(config.get('dbserver', 'multi_user')):
sys.exit('Please start the DbServer: '
'see the documentation for details')
runserver()
if cmd == 'start':
rundjango('runserver')
elif cmd == 'syncdb':
rundjango(cmd)
parser = sap.Parser(webui)
parser.arg('cmd', 'webui command',
choices='start syncdb'.split())
|
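A direct-call sketch of the flow above (the `oq webui start` command-line wiring is an assumption, since only the sap parser is visible here):

# Hypothetical direct use, bypassing the command-line parser:
webui('start')   # exits if multi_user and the DbServer is down; otherwise
                 # starts it, then runs Django's dev server in the foreground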
|
0433c32cd4212947517e57b6dc24d7d215123df9
|
workers/data_refinery_workers/downloaders/test_utils.py
|
workers/data_refinery_workers/downloaders/test_utils.py
|
import os
from django.test import TestCase, tag
from typing import List
from unittest.mock import patch, call
from urllib.error import URLError
from data_refinery_workers.downloaders import utils
class UtilsTestCase(TestCase):
def test_no_jobs_to_create(self):
"""Make sure this function doesn't raise an exception with no files."""
utils.create_processor_job_for_original_files([])
self.assertTrue(True)
|
Add unit test to make sure no work isn't an error.
|
Add unit test to make sure no work isn't an error.
|
Python
|
bsd-3-clause
|
data-refinery/data_refinery,data-refinery/data_refinery,data-refinery/data_refinery
|
Add unit test to make sure no work isn't an error.
|
import os
from django.test import TestCase, tag
from typing import List
from unittest.mock import patch, call
from urllib.error import URLError
from data_refinery_workers.downloaders import utils
class UtilsTestCase(TestCase):
def test_no_jobs_to_create(self):
"""Make sure this function doesn't raise an exception with no files."""
utils.create_processor_job_for_original_files([])
self.assertTrue(True)
|
<commit_before><commit_msg>Add unit test to make sure no work isn't an error.<commit_after>
|
import os
from django.test import TestCase, tag
from typing import List
from unittest.mock import patch, call
from urllib.error import URLError
from data_refinery_workers.downloaders import utils
class UtilsTestCase(TestCase):
def test_no_jobs_to_create(self):
"""Make sure this function doesn't raise an exception with no files."""
utils.create_processor_job_for_original_files([])
self.assertTrue(True)
|
Add unit test to make sure no work isn't an error.import os
from django.test import TestCase, tag
from typing import List
from unittest.mock import patch, call
from urllib.error import URLError
from data_refinery_workers.downloaders import utils
class UtilsTestCase(TestCase):
def test_no_jobs_to_create(self):
"""Make sure this function doesn't raise an exception with no files."""
utils.create_processor_job_for_original_files([])
self.assertTrue(True)
|
<commit_before><commit_msg>Add unit test to make sure no work isn't an error.<commit_after>import os
from django.test import TestCase, tag
from typing import List
from unittest.mock import patch, call
from urllib.error import URLError
from data_refinery_workers.downloaders import utils
class UtilsTestCase(TestCase):
def test_no_jobs_to_create(self):
"""Make sure this function doesn't raise an exception with no files."""
utils.create_processor_job_for_original_files([])
self.assertTrue(True)
|
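The call above is qualified as utils.create_processor_job_for_original_files because only the module is imported, not the function. A hedged way to run just this module (the test label is assumed from the file path):

from django.core.management import call_command
call_command('test', 'data_refinery_workers.downloaders.test_utils')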
|
8f21fcc4611bba391761df517de8dec3c8e53d9a
|
scripts/data_download/rais/create_all_files.py
|
scripts/data_download/rais/create_all_files.py
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/rais/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2015):
logging.info("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
Add file to create all files to rais.
|
Add file to create all files to rais.
|
Python
|
mit
|
DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site,DataViva/dataviva-site
|
Add file to create all files to rais.
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/rais/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2015):
logging.info("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
<commit_before><commit_msg>Add file to create all files to rais.<commit_after>
|
import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/rais/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2015):
logging.info("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
Add file to create all files to rais.import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/rais/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2015):
logging.info("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
<commit_before><commit_msg>Add file to create all files to rais.<commit_after>import os
import commands
import time
import logging
import sys
if len(sys.argv) != 3 or (sys.argv[1:][0] not in ['pt', 'en']):
print "ERROR! Use:\n python scripts/data_download/rais/create_files.py en/pt output_path\n"
exit()
logging.basicConfig(filename=os.path.abspath(os.path.join(sys.argv[2],str(sys.argv[0].split('/')[2]) + '-all-data-download.log' )),level=logging.DEBUG)
for year in range(2002, 2015):
logging.info("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year) + "\n")
ret = commands.getoutput("python scripts/data_download/rais/create_files.py "+str(sys.argv[1])+" "+str(sys.argv[2])+" "+ str(year))
logging.info(str(ret) + "\nYear: " + str(year) + " ok =D\n\n")
|
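A sketch of the command the loop builds for one year (arguments illustrative); subprocess is the modern stand-in for the Python-2-only commands module:

import subprocess
# Equivalent of commands.getoutput(...) for year 2002 with `pt /tmp/out`:
out = subprocess.check_output(
    ['python', 'scripts/data_download/rais/create_files.py',
     'pt', '/tmp/out', '2002'])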
|
177a1fdb394eee4a41a3667b0f138a1f2d8b59ca
|
viewer_examples/plugins/probabilistic_hough.py
|
viewer_examples/plugins/probabilistic_hough.py
|
import numpy as np
from skimage import data
from skimage import draw
from skimage.transform import probabilistic_hough_line
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.overlayplugin import OverlayPlugin
from skimage.viewer.plugins.canny import CannyPlugin
def line_image(shape, lines):
image = np.zeros(shape, dtype=bool)
for end_points in lines:
# hough lines returns (x, y) points, draw.line wants (row, columns)
end_points = np.asarray(end_points)[:, ::-1]
image[draw.line(*np.ravel(end_points))] = 1
return image
def hough_lines(image, *args, **kwargs):
# Set threshold to 0.5 since we're working with a binary image (from canny)
lines = probabilistic_hough_line(image, threshold=0.5, *args, **kwargs)
image = line_image(image.shape, lines)
return image
image = data.camera()
canny_viewer = ImageViewer(image)
canny_plugin = CannyPlugin()
canny_viewer += canny_plugin
hough_plugin = OverlayPlugin(image_filter=hough_lines)
hough_plugin += Slider('line length', 0, 100, update_on='release')
hough_plugin += Slider('line gap', 0, 20, update_on='release')
# Passing a plugin to a viewer connects the output of the plugin to the viewer.
hough_viewer = ImageViewer(canny_plugin)
hough_viewer += hough_plugin
# canny_viewer.show() displays both viewers since `hough_viewer` is connected
# to `canny_viewer` through `canny_plugin`
canny_viewer.show()
|
Add example of connected viewers/plugins
|
Add example of connected viewers/plugins
|
Python
|
bsd-3-clause
|
chintak/scikit-image,blink1073/scikit-image,michaelpacer/scikit-image,rjeli/scikit-image,dpshelio/scikit-image,michaelaye/scikit-image,oew1v07/scikit-image,almarklein/scikit-image,paalge/scikit-image,robintw/scikit-image,bsipocz/scikit-image,ClinicalGraphics/scikit-image,SamHames/scikit-image,chintak/scikit-image,jwiggins/scikit-image,SamHames/scikit-image,warmspringwinds/scikit-image,juliusbierk/scikit-image,ofgulban/scikit-image,SamHames/scikit-image,ajaybhat/scikit-image,Midafi/scikit-image,dpshelio/scikit-image,WarrenWeckesser/scikits-image,emon10005/scikit-image,youprofit/scikit-image,Hiyorimi/scikit-image,chintak/scikit-image,bsipocz/scikit-image,WarrenWeckesser/scikits-image,SamHames/scikit-image,oew1v07/scikit-image,Hiyorimi/scikit-image,ajaybhat/scikit-image,chriscrosscutler/scikit-image,jwiggins/scikit-image,paalge/scikit-image,robintw/scikit-image,bennlich/scikit-image,newville/scikit-image,almarklein/scikit-image,michaelaye/scikit-image,pratapvardhan/scikit-image,emon10005/scikit-image,vighneshbirodkar/scikit-image,Britefury/scikit-image,vighneshbirodkar/scikit-image,juliusbierk/scikit-image,paalge/scikit-image,Midafi/scikit-image,almarklein/scikit-image,michaelpacer/scikit-image,chintak/scikit-image,chriscrosscutler/scikit-image,ClinicalGraphics/scikit-image,newville/scikit-image,keflavich/scikit-image,rjeli/scikit-image,GaZ3ll3/scikit-image,pratapvardhan/scikit-image,warmspringwinds/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,GaZ3ll3/scikit-image,bennlich/scikit-image,youprofit/scikit-image,Britefury/scikit-image,rjeli/scikit-image,blink1073/scikit-image,vighneshbirodkar/scikit-image,keflavich/scikit-image,ofgulban/scikit-image
|
Add example of connected viewers/plugins
|
import numpy as np
from skimage import data
from skimage import draw
from skimage.transform import probabilistic_hough_line
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.overlayplugin import OverlayPlugin
from skimage.viewer.plugins.canny import CannyPlugin
def line_image(shape, lines):
image = np.zeros(shape, dtype=bool)
for end_points in lines:
# hough lines returns (x, y) points, draw.line wants (row, columns)
end_points = np.asarray(end_points)[:, ::-1]
image[draw.line(*np.ravel(end_points))] = 1
return image
def hough_lines(image, *args, **kwargs):
# Set threshold to 0.5 since we're working with a binary image (from canny)
lines = probabilistic_hough_line(image, threshold=0.5, *args, **kwargs)
image = line_image(image.shape, lines)
return image
image = data.camera()
canny_viewer = ImageViewer(image)
canny_plugin = CannyPlugin()
canny_viewer += canny_plugin
hough_plugin = OverlayPlugin(image_filter=hough_lines)
hough_plugin += Slider('line length', 0, 100, update_on='release')
hough_plugin += Slider('line gap', 0, 20, update_on='release')
# Passing a plugin to a viewer connects the output of the plugin to the viewer.
hough_viewer = ImageViewer(canny_plugin)
hough_viewer += hough_plugin
# canny_viewer.show() displays both viewers since `hough_viewer` is connected
# to `canny_viewer` through `canny_plugin`
canny_viewer.show()
|
<commit_before><commit_msg>Add example of connected viewers/plugins<commit_after>
|
import numpy as np
from skimage import data
from skimage import draw
from skimage.transform import probabilistic_hough_line
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.overlayplugin import OverlayPlugin
from skimage.viewer.plugins.canny import CannyPlugin
def line_image(shape, lines):
image = np.zeros(shape, dtype=bool)
for end_points in lines:
# hough lines returns (x, y) points, draw.line wants (row, columns)
end_points = np.asarray(end_points)[:, ::-1]
image[draw.line(*np.ravel(end_points))] = 1
return image
def hough_lines(image, *args, **kwargs):
# Set threshold to 0.5 since we're working with a binary image (from canny)
lines = probabilistic_hough_line(image, threshold=0.5, *args, **kwargs)
image = line_image(image.shape, lines)
return image
image = data.camera()
canny_viewer = ImageViewer(image)
canny_plugin = CannyPlugin()
canny_viewer += canny_plugin
hough_plugin = OverlayPlugin(image_filter=hough_lines)
hough_plugin += Slider('line length', 0, 100, update_on='release')
hough_plugin += Slider('line gap', 0, 20, update_on='release')
# Passing a plugin to a viewer connects the output of the plugin to the viewer.
hough_viewer = ImageViewer(canny_plugin)
hough_viewer += hough_plugin
# canny_viewer.show() displays both viewers since `hough_viewer` is connected
# to `canny_viewer` through `canny_plugin`
canny_viewer.show()
|
Add example of connected viewers/pluginsimport numpy as np
from skimage import data
from skimage import draw
from skimage.transform import probabilistic_hough_line
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.overlayplugin import OverlayPlugin
from skimage.viewer.plugins.canny import CannyPlugin
def line_image(shape, lines):
image = np.zeros(shape, dtype=bool)
for end_points in lines:
# hough lines returns (x, y) points, draw.line wants (row, columns)
end_points = np.asarray(end_points)[:, ::-1]
image[draw.line(*np.ravel(end_points))] = 1
return image
def hough_lines(image, *args, **kwargs):
# Set threshold to 0.5 since we're working with a binary image (from canny)
lines = probabilistic_hough_line(image, threshold=0.5, *args, **kwargs)
image = line_image(image.shape, lines)
return image
image = data.camera()
canny_viewer = ImageViewer(image)
canny_plugin = CannyPlugin()
canny_viewer += canny_plugin
hough_plugin = OverlayPlugin(image_filter=hough_lines)
hough_plugin += Slider('line length', 0, 100, update_on='release')
hough_plugin += Slider('line gap', 0, 20, update_on='release')
# Passing a plugin to a viewer connects the output of the plugin to the viewer.
hough_viewer = ImageViewer(canny_plugin)
hough_viewer += hough_plugin
# canny_viewer.show() displays both viewers since `hough_viewer` is connected
# to `canny_viewer` through `canny_plugin`
canny_viewer.show()
|
<commit_before><commit_msg>Add example of connected viewers/plugins<commit_after>import numpy as np
from skimage import data
from skimage import draw
from skimage.transform import probabilistic_hough_line
from skimage.viewer import ImageViewer
from skimage.viewer.widgets import Slider
from skimage.viewer.plugins.overlayplugin import OverlayPlugin
from skimage.viewer.plugins.canny import CannyPlugin
def line_image(shape, lines):
image = np.zeros(shape, dtype=bool)
for end_points in lines:
# hough lines returns (x, y) points, draw.line wants (row, columns)
end_points = np.asarray(end_points)[:, ::-1]
image[draw.line(*np.ravel(end_points))] = 1
return image
def hough_lines(image, *args, **kwargs):
# Set threshold to 0.5 since we're working with a binary image (from canny)
lines = probabilistic_hough_line(image, threshold=0.5, *args, **kwargs)
image = line_image(image.shape, lines)
return image
image = data.camera()
canny_viewer = ImageViewer(image)
canny_plugin = CannyPlugin()
canny_viewer += canny_plugin
hough_plugin = OverlayPlugin(image_filter=hough_lines)
hough_plugin += Slider('line length', 0, 100, update_on='release')
hough_plugin += Slider('line gap', 0, 20, update_on='release')
# Passing a plugin to a viewer connects the output of the plugin to the viewer.
hough_viewer = ImageViewer(canny_plugin)
hough_viewer += hough_plugin
# canny_viewer.show() displays both viewers since `hough_viewer` is connected
# to `canny_viewer` through `canny_plugin`
canny_viewer.show()
|
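A standalone sanity check of line_image as defined above — the endpoint values are invented; it only verifies the (x, y) to (row, col) swap:

img = line_image((50, 50), [((0, 0), (40, 30))])  # one (x, y) segment
assert img[draw.line(0, 0, 30, 40)].all()          # stored as (row, col)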
|
47410ebd3ae30cc22df106d233e2184d417c2b42
|
cerbero/tools/depstracker.py
|
cerbero/tools/depstracker.py
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2013 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class ObjdumpLister():
def list_deps(self, prefix, path):
pass
class OtoolLister():
def list_deps(self, prefix, path):
pass
class LddLister():
def list_deps(self, prefix, path):
files = shell.check_call('ldd %s' % path).split('\n')
return [x.split(' ')[2] for x in files if prefix in x]
class DepsTracker():
BACKENDS = {
Platform.WINDOWS: ObjdumpLister,
Platform.LINUX: LddLister,
Platform.DARWIN: OtoolLister}
def __init__(self, platform, prefix):
self.libs_deps = {}
self.prefix = prefix
if self.prefix[-1] != '/':
self.prefix += '/'
self.lister = self.BACKENDS[platform]()
def list_deps(self, path):
deps = self.lister.list_deps(self.prefix, path)
rdeps = []
for d in deps:
if os.path.islink(d):
rdeps.append(os.path.realpath(d))
return [x.replace(self.prefix, '') for x in deps + rdeps]
|
Add a new tool to list libraries deps
|
Add a new tool to list libraries deps
|
Python
|
lgpl-2.1
|
freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,BigBrother-International/gst-cerbero,BigBrother-International/gst-cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,BigBrother-International/gst-cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,BigBrother-International/gst-cerbero,freedesktop-unofficial-mirror/gstreamer-sdk__cerbero,BigBrother-International/gst-cerbero
|
Add a new tool to list libraries deps
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2013 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class ObjdumpLister():
def list_deps(self, prefix, path):
pass
class OtoolLister():
def list_deps(self, prefix, path):
pass
class LddLister():
def list_deps(self, prefix, path):
files = shell.check_call('ldd %s' % path).split('\n')
return [x.split(' ')[2] for x in files if prefix in x]
class DepsTracker():
BACKENDS = {
Platform.WINDOWS: ObjdumpLister,
Platform.LINUX: LddLister,
Platform.DARWIN: OtoolLister}
def __init__(self, platform, prefix):
self.libs_deps = {}
self.prefix = prefix
if self.prefix[-1] != '/':
self.prefix += '/'
self.lister = self.BACKENDS[platform]()
def list_deps(self, path):
deps = self.lister.list_deps(self.prefix, path)
rdeps = []
for d in deps:
if os.path.islink(d):
rdeps.append(os.path.realpath(d))
return [x.replace(self.prefix, '') for x in deps + rdeps]
|
<commit_before><commit_msg>Add a new tool to list libraries deps<commit_after>
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2013 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class ObjdumpLister():
    def list_deps(self, prefix, path):
        pass
class OtoolLister():
    def list_deps(self, prefix, path):
        pass
class LddLister():
def list_deps(self, prefix, path):
files = shell.check_call('ldd %s' % path).split('\n')
return [x.split(' ')[2] for x in files if prefix in x]
class DepsTracker():
BACKENDS = {
Platform.WINDOWS: ObjdumpLister,
Platform.LINUX: LddLister,
Platform.DARWIN: OtoolLister}
def __init__(self, platform, prefix):
self.libs_deps = {}
self.prefix = prefix
        if self.prefix[-1] != '/':
self.prefix += '/'
self.lister = self.BACKENDS[platform]()
def list_deps(self, path):
deps = self.lister.list_deps(self.prefix, path)
rdeps = []
for d in deps:
if os.path.islink(d):
rdeps.append(os.path.realpath(d))
return [x.replace(self.prefix, '') for x in deps + rdeps]
|
Add a new tool to list libraries deps# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2013 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class ObjdumpLister():
    def list_deps(self, prefix, path):
        pass
class OtoolLister():
    def list_deps(self, prefix, path):
        pass
class LddLister():
def list_deps(self, prefix, path):
files = shell.check_call('ldd %s' % path).split('\n')
return [x.split(' ')[2] for x in files if prefix in x]
class DepsTracker():
BACKENDS = {
Platform.WINDOWS: ObjdumpLister,
Platform.LINUX: LddLister,
Platform.DARWIN: OtoolLister}
def __init__(self, platform, prefix):
self.libs_deps = {}
self.prefix = prefix
        if self.prefix[-1] != '/':
self.prefix += '/'
self.lister = self.BACKENDS[platform]()
def list_deps(self, path):
deps = self.lister.list_deps(self.prefix, path)
rdeps = []
for d in deps:
if os.path.islink(d):
rdeps.append(os.path.realpath(d))
return [x.replace(self.prefix, '') for x in deps + rdeps]
|
<commit_before><commit_msg>Add a new tool to list libraries deps<commit_after># cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2013 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Platform
from cerbero.utils import shell
class ObjdumpLister():
    def list_deps(self, prefix, path):
        pass
class OtoolLister():
    def list_deps(self, prefix, path):
        pass
class LddLister():
def list_deps(self, prefix, path):
files = shell.check_call('ldd %s' % path).split('\n')
return [x.split(' ')[2] for x in files if prefix in x]
class DepsTracker():
BACKENDS = {
Platform.WINDOWS: ObjdumpLister,
Platform.LINUX: LddLister,
Platform.DARWIN: OtoolLister}
def __init__(self, platform, prefix):
self.libs_deps = {}
self.prefix = prefix
        if self.prefix[-1] != '/':
self.prefix += '/'
self.lister = self.BACKENDS[platform]()
def list_deps(self, path):
deps = self.lister.list_deps(self.prefix, path)
rdeps = []
for d in deps:
if os.path.islink(d):
rdeps.append(os.path.realpath(d))
return [x.replace(self.prefix, '') for x in deps + rdeps]
|
|
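A minimal usage sketch for the DepsTracker defined in the record above, assuming a Linux host (LddLister backend); the prefix and binary path are illustrative, not taken from the commit:
from cerbero.config import Platform

tracker = DepsTracker(Platform.LINUX, '/opt/cerbero/dist')
# Dependencies of the binary that live under the prefix, prefix-stripped;
# symlinked libraries are additionally resolved to their real paths.
deps = tracker.list_deps('/opt/cerbero/dist/bin/gst-launch-1.0')
print(deps)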
6038c707fe91a4cbf54a364c12f7086b19505f8b
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='datacube-experiments',
description='Experimental Datacube v2 Ingestor',
version='0.0.1',
packages=['ingestor'],
url='http://github.com/omad/datacube-experiments',
install_requires=[
'click',
'eodatasets',
'gdal',
'pathlib',
'pyyaml'
],
entry_points='''
[console_scripts]
datacube_ingest=ingestor.ingest_from_yaml:main
''',
)
|
Allow package to be installed with command line ingestor tool
|
Allow package to be installed with command line ingestor tool
|
Python
|
bsd-3-clause
|
omad/datacube-experiments
|
Allow package to be installed with command line ingestor tool
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='datacube-experiments',
description='Experimental Datacube v2 Ingestor',
version='0.0.1',
packages=['ingestor'],
url='http://github.com/omad/datacube-experiments',
install_requires=[
'click',
'eodatasets',
'gdal',
'pathlib',
'pyyaml'
],
entry_points='''
[console_scripts]
datacube_ingest=ingestor.ingest_from_yaml:main
''',
)
|
<commit_before><commit_msg>Allow package to be installed with command line ingestor tool<commit_after>
|
#!/usr/bin/env python
from setuptools import setup
setup(
name='datacube-experiments',
description='Experimental Datacube v2 Ingestor',
version='0.0.1',
packages=['ingestor'],
url='http://github.com/omad/datacube-experiments',
install_requires=[
'click',
'eodatasets',
'gdal',
'pathlib',
'pyyaml'
],
entry_points='''
[console_scripts]
datacube_ingest=ingestor.ingest_from_yaml:main
''',
)
|
Allow package to be installed with command line ingestor tool#!/usr/bin/env python
from setuptools import setup
setup(
name='datacube-experiments',
description='Experimental Datacube v2 Ingestor',
version='0.0.1',
packages=['ingestor'],
url='http://github.com/omad/datacube-experiments',
install_requires=[
'click',
'eodatasets',
'gdal',
'pathlib',
'pyyaml'
],
entry_points='''
[console_scripts]
datacube_ingest=ingestor.ingest_from_yaml:main
''',
)
|
<commit_before><commit_msg>Allow package to be installed with command line ingestor tool<commit_after>#!/usr/bin/env python
from setuptools import setup
setup(
name='datacube-experiments',
description='Experimental Datacube v2 Ingestor',
version='0.0.1',
packages=['ingestor'],
url='http://github.com/omad/datacube-experiments',
install_requires=[
'click',
'eodatasets',
'gdal',
'pathlib',
'pyyaml'
],
entry_points='''
[console_scripts]
datacube_ingest=ingestor.ingest_from_yaml:main
''',
)
|
|
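The entry_points stanza above is what makes setuptools generate a datacube_ingest console script at install time; calling that script is roughly equivalent to this sketch (assumes the package and its dependencies are installed):
from ingestor import ingest_from_yaml

# What the generated console script does under the hood.
ingest_from_yaml.main()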
87e8d128fcd944c265c06bc82d8947fcdbb2c360
|
pyconcz_2016/team/models.py
|
pyconcz_2016/team/models.py
|
from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
|
from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
def __str__(self):
return self.full_name
|
Revert "Revert "Add string representation of organizer object""
|
Revert "Revert "Add string representation of organizer object""
This reverts commit 36aff772ba6720acb8f629e89954162f98a932e3.
|
Python
|
mit
|
benabraham/cz.pycon.org-2017,pyvec/cz.pycon.org-2017,pyvec/cz.pycon.org-2016,benabraham/cz.pycon.org-2017,benabraham/cz.pycon.org-2017,pyvec/cz.pycon.org-2016,pyvec/cz.pycon.org-2016,pyvec/cz.pycon.org-2017,pyvec/cz.pycon.org-2017
|
from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
Revert "Revert "Add string representation of organizer object""
This reverts commit 36aff772ba6720acb8f629e89954162f98a932e3.
|
from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
def __str__(self):
return self.full_name
|
<commit_before>from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
<commit_msg>Revert "Revert "Add string representation of organizer object""
This reverts commit 36aff772ba6720acb8f629e89954162f98a932e3.<commit_after>
|
from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
def __str__(self):
return self.full_name
|
from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
Revert "Revert "Add string representation of organizer object""
This reverts commit 36aff772ba6720acb8f629e89954162f98a932e3.from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
def __str__(self):
return self.full_name
|
<commit_before>from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
<commit_msg>Revert "Revert "Add string representation of organizer object""
This reverts commit 36aff772ba6720acb8f629e89954162f98a932e3.<commit_after>from django.db import models
class Organizer(models.Model):
full_name = models.CharField(max_length=200)
email = models.EmailField(
default='', blank=True,
help_text="This is private")
twitter = models.CharField(max_length=255, blank=True)
github = models.CharField(max_length=255, blank=True)
photo = models.ImageField(upload_to='team/pyconcz2016/')
published = models.BooleanField(default=False)
def __str__(self):
return self.full_name
|
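For context on the change above: Django's admin and shell use __str__ when rendering model instances, so with the new method an organizer displays by name. Illustrative data, not from the commit:
o = Organizer(full_name='Jane Doe')
str(o)  # 'Jane Doe' rather than the default 'Organizer object' repr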
e9fe49f04c23580755f3c828e01fcdd4ddb9385f
|
intern/migrations/0038_auto_20190525_2221.py
|
intern/migrations/0038_auto_20190525_2221.py
|
# Generated by Django 2.2.1 on 2019-05-25 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('intern', '0037_auto_20190525_2142'),
]
operations = [
migrations.AlterField(
model_name='date',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
migrations.AlterField(
model_name='state',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
]
|
Add auto now fields to intern models
|
Add auto now fields to intern models
|
Python
|
mit
|
n2o/dpb,n2o/dpb,n2o/dpb,n2o/dpb,n2o/dpb,n2o/dpb,n2o/dpb,n2o/dpb
|
Add auto now fields to intern models
|
# Generated by Django 2.2.1 on 2019-05-25 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('intern', '0037_auto_20190525_2142'),
]
operations = [
migrations.AlterField(
model_name='date',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
migrations.AlterField(
model_name='state',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
]
|
<commit_before><commit_msg>Add auto now fields to intern models<commit_after>
|
# Generated by Django 2.2.1 on 2019-05-25 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('intern', '0037_auto_20190525_2142'),
]
operations = [
migrations.AlterField(
model_name='date',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
migrations.AlterField(
model_name='state',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
]
|
Add auto now fields to intern models# Generated by Django 2.2.1 on 2019-05-25 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('intern', '0037_auto_20190525_2142'),
]
operations = [
migrations.AlterField(
model_name='date',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
migrations.AlterField(
model_name='state',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
]
|
<commit_before><commit_msg>Add auto now fields to intern models<commit_after># Generated by Django 2.2.1 on 2019-05-25 20:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('intern', '0037_auto_20190525_2142'),
]
operations = [
migrations.AlterField(
model_name='date',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
migrations.AlterField(
model_name='state',
name='created',
field=models.DateTimeField(auto_now_add=True, verbose_name='Erstellt am'),
),
]
|
|
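The AlterField operations above imply model declarations roughly like the following sketch; auto_now_add stamps the field once, when the row is created (the surrounding model bodies are assumptions):
from django.db import models

class Date(models.Model):
    created = models.DateTimeField('Erstellt am', auto_now_add=True)

class State(models.Model):
    created = models.DateTimeField('Erstellt am', auto_now_add=True)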
c9ddb5075e5157ddecaea5b1a975700faabdfb4b
|
tests/api/test_committee_meeting_attendance_excel_export.py
|
tests/api/test_committee_meeting_attendance_excel_export.py
|
from tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeMeetingData
class TestCommitteeMeetingAttendanceExcelExport(PMGTestCase):
def setUp(self):
super(TestCommitteeMeetingAttendanceExcelExport, self).setUp()
self.fx = dbfixture.data(CommitteeMeetingData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestCommitteeMeetingAttendanceExcelExport, self).tearDown()
def test_get_committee_meeting_attendance_xlsx_smoke_test(self):
res = self.client.get("committee-meeting-attendance/data.xlsx",
base_url="http://api.pmg.test:5000/",
)
self.assertEqual(200, res.status_code)
|
Add smoke test for attendance excel export
|
Add smoke test for attendance excel export
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Add smoke test for attendance excel export
|
from tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeMeetingData
class TestCommitteeMeetingAttendanceExcelExport(PMGTestCase):
def setUp(self):
super(TestCommitteeMeetingAttendanceExcelExport, self).setUp()
self.fx = dbfixture.data(CommitteeMeetingData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestCommitteeMeetingAttendanceExcelExport, self).tearDown()
def test_get_committee_meeting_attendance_xlsx_smoke_test(self):
res = self.client.get("committee-meeting-attendance/data.xlsx",
base_url="http://api.pmg.test:5000/",
)
self.assertEqual(200, res.status_code)
|
<commit_before><commit_msg>Add smoke test for attendance excel export<commit_after>
|
from tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeMeetingData
class TestCommitteeMeetingAttendanceExcelExport(PMGTestCase):
def setUp(self):
super(TestCommitteeMeetingAttendanceExcelExport, self).setUp()
self.fx = dbfixture.data(CommitteeMeetingData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestCommitteeMeetingAttendanceExcelExport, self).tearDown()
def test_get_committee_meeting_attendance_xlsx_smoke_test(self):
res = self.client.get("committee-meeting-attendance/data.xlsx",
base_url="http://api.pmg.test:5000/",
)
self.assertEqual(200, res.status_code)
|
Add smoke test for attendance excel exportfrom tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeMeetingData
class TestCommitteeMeetingAttendanceExcelExport(PMGTestCase):
def setUp(self):
super(TestCommitteeMeetingAttendanceExcelExport, self).setUp()
self.fx = dbfixture.data(CommitteeMeetingData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestCommitteeMeetingAttendanceExcelExport, self).tearDown()
def test_get_committee_meeting_attendance_xlsx_smoke_test(self):
res = self.client.get("committee-meeting-attendance/data.xlsx",
base_url="http://api.pmg.test:5000/",
)
self.assertEqual(200, res.status_code)
|
<commit_before><commit_msg>Add smoke test for attendance excel export<commit_after>from tests import PMGTestCase
from tests.fixtures import dbfixture, CommitteeMeetingData
class TestCommitteeMeetingAttendanceExcelExport(PMGTestCase):
def setUp(self):
super(TestCommitteeMeetingAttendanceExcelExport, self).setUp()
self.fx = dbfixture.data(CommitteeMeetingData)
self.fx.setup()
def tearDown(self):
self.fx.teardown()
super(TestCommitteeMeetingAttendanceExcelExport, self).tearDown()
def test_get_committee_meeting_attendance_xlsx_smoke_test(self):
res = self.client.get("committee-meeting-attendance/data.xlsx",
base_url="http://api.pmg.test:5000/",
)
self.assertEqual(200, res.status_code)
|
|
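For the subdomain-style base_url in the test above to route through Flask's URL map, the app under test presumably carries a matching SERVER_NAME; a sketch of that assumed configuration:
# Assumed test configuration so http://api.pmg.test:5000/ resolves:
app.config['SERVER_NAME'] = 'pmg.test:5000'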
81b1cf6973dde3ca23bbe5ac071d3decad81079a
|
pydsa/sleep_sort.py
|
pydsa/sleep_sort.py
|
from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v: mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
|
from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v:
mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
|
Format code according to PEP8
|
Format code according to PEP8
|
Python
|
bsd-3-clause
|
rehassachdeva/pydsa,aktech/pydsa
|
from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v: mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
Format code according to PEP8
|
from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v:
mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
|
<commit_before>from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v: mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
<commit_msg>Format code according to PEP8<commit_after>
|
from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v:
mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
|
from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v: mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
Format code according to PEP8from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v:
mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
|
<commit_before>from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v: mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
<commit_msg>Format code according to PEP8<commit_after>from time import sleep
from threading import Timer
# Sleep Sort ;)
# Complexity: O(max(input)+n)
def sleep_sort(a):
"""
Sorts the list 'a' using Sleep sort algorithm
>>> from pydsa import sleep_sort
>>> a = [3, 4, 2]
>>> sleep_sort(a)
[2, 3, 4]
"""
sleep_sort.result = []
def add1(x):
sleep_sort.result.append(x)
mx = a[0]
for v in a:
if mx < v:
mx = v
Timer(v, add1, [v]).start()
sleep(mx + 1)
return sleep_sort.result
|
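One practical note on the algorithm above: the wall-clock cost is dominated by max(input) seconds, so demonstrations often scale the delays down. A sketch of that variant; the 0.01 factor is arbitrary, trades robustness for speed, and still assumes non-negative numbers:
from time import sleep
from threading import Timer

def fast_sleep_sort(a, scale=0.01):
    # Same idea as sleep_sort, with every delay multiplied by `scale`.
    result = []
    for v in a:
        Timer(v * scale, result.append, [v]).start()
    sleep(max(a) * scale + 0.5)
    return result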
f5c654941f8dd5eb64775d680d677993a829963c
|
lookupdict.py
|
lookupdict.py
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
"""
Simple dictionary lookup script.
Finds given word in mokujin dictionary and returns its id.
Usage:
$ python lookupdict.py <path_to_index> <word>
"""
import sys
import logging
import argparse
from mokujin.index import DepTupleIndex
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
try:
_, index_path, term = sys.argv
except:
logging.error("Wrong syntax. Usage:\n\t lookupdict.py <path_to_index> <word>")
exit(0)
indexer = DepTupleIndex(index_path)
term_id = indexer.term2id.get(term)
if term_id is not None:
sys.stdout.write("\n\tFound term '%s' with id=%d\n\n" % (term, term_id))
else:
        sys.stdout.write("\n\tTerm '%s' not found in dictionary.\n\n" % term)
|
Add script to lookup term dictionary.
|
Add script to lookup term dictionary.
|
Python
|
apache-2.0
|
zaycev/mokujin
|
Add script to lookup term dictionary.
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
"""
Simple dictionary lookup script.
Finds given word in mokujin dictionary and returns its id.
Usage:
$ python lookupdict.py <path_to_index> <word>
"""
import sys
import logging
import argparse
from mokujin.index import DepTupleIndex
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
try:
_, index_path, term = sys.argv
except:
logging.error("Wrong syntax. Usage:\n\t lookupdict.py <path_to_index> <word>")
exit(0)
indexer = DepTupleIndex(index_path)
term_id = indexer.term2id.get(term)
if term_id is not None:
sys.stdout.write("\n\tFound term '%s' with id=%d\n\n" % (term, term_id))
else:
        sys.stdout.write("\n\tTerm '%s' not found in dictionary.\n\n" % term)
|
<commit_before><commit_msg>Add script to lookup term dictionary.<commit_after>
|
#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
"""
Simple dictionary lookup script.
Finds given word in mokujin dictionary and returns its id.
Usage:
$ python lookupdict.py <path_to_index> <word>
"""
import sys
import logging
import argparse
from mokujin.index import DepTupleIndex
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
try:
_, index_path, term = sys.argv
except:
logging.error("Wrong syntax. Usage:\n\t lookupdict.py <path_to_index> <word>")
exit(0)
indexer = DepTupleIndex(index_path)
term_id = indexer.term2id.get(term)
if term_id is not None:
sys.stdout.write("\n\tFound term '%s' with id=%d\n\n" % (term, term_id))
else:
        sys.stdout.write("\n\tTerm '%s' not found in dictionary.\n\n" % term)
|
Add script to lookup term dictionary.#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
"""
Simple dictionary lookup script.
Finds given word in mokujin dictionary and returns its id.
Usage:
$ python lookupdict.py <path_to_index> <word>
"""
import sys
import logging
import argparse
from mokujin.index import DepTupleIndex
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
try:
_, index_path, term = sys.argv
except:
logging.error("Wrong syntax. Usage:\n\t lookupdict.py <path_to_index> <word>")
exit(0)
indexer = DepTupleIndex(index_path)
term_id = indexer.term2id.get(term)
if term_id is not None:
sys.stdout.write("\n\tFound term '%s' with id=%d\n\n" % (term, term_id))
else:
        sys.stdout.write("\n\tTerm '%s' not found in dictionary.\n\n" % term)
|
<commit_before><commit_msg>Add script to lookup term dictionary.<commit_after>#!/usr/bin/env python
# coding: utf-8
# Copyright (C) USC Information Sciences Institute
# Author: Vladimir M. Zaytsev <zaytsev@usc.edu>
# URL: <http://nlg.isi.edu/>
# For more information, see README.md
# For license information, see LICENSE
"""
Simple dictionary lookup script.
Finds given word in mokujin dictionary and returns its id.
Usage:
$ python lookupdict.py <path_to_index> <word>
"""
import sys
import logging
import argparse
from mokujin.index import DepTupleIndex
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
try:
_, index_path, term = sys.argv
except:
logging.error("Wrong syntax. Usage:\n\t lookupdict.py <path_to_index> <word>")
exit(0)
indexer = DepTupleIndex(index_path)
term_id = indexer.term2id.get(term)
if term_id is not None:
sys.stdout.write("\n\tFound term '%s' with id=%d\n\n" % (term, term_id))
else:
        sys.stdout.write("\n\tTerm '%s' not found in dictionary.\n\n" % term)
|
|
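Beyond the command-line entry point above, the same lookup can be done directly against the index object; a sketch with placeholder path and word:
from mokujin.index import DepTupleIndex

indexer = DepTupleIndex('path/to/index')
term_id = indexer.term2id.get('word')  # integer id, or None if absent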
cc1c0d386b2e657c9f1f80a0e1ac1a4375df377b
|
mygpoauth/login/migrations/0001_case_insensitive_username.py
|
mygpoauth/login/migrations/0001_case_insensitive_username.py
|
from django.db import migrations
class Migration(migrations.Migration):
""" Create a unique case-insensitive index on the username column """
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.RunSQL(
'CREATE UNIQUE INDEX user_case_insensitive_unique '
'ON auth_user ((lower(username)));',
'DROP INDEX user_case_insensitive_unique',
),
]
|
Add case-insensitive unique index for username
|
Add case-insensitive unique index for username
|
Python
|
agpl-3.0
|
gpodder/mygpo-auth,gpodder/mygpo-auth
|
Add case-insensitive unique index for username
|
from django.db import migrations
class Migration(migrations.Migration):
""" Create a unique case-insensitive index on the username column """
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.RunSQL(
'CREATE UNIQUE INDEX user_case_insensitive_unique '
'ON auth_user ((lower(username)));',
'DROP INDEX user_case_insensitive_unique',
),
]
|
<commit_before><commit_msg>Add case-insensitive unique index for username<commit_after>
|
from django.db import migrations
class Migration(migrations.Migration):
""" Create a unique case-insensitive index on the username column """
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.RunSQL(
'CREATE UNIQUE INDEX user_case_insensitive_unique '
'ON auth_user ((lower(username)));',
'DROP INDEX user_case_insensitive_unique',
),
]
|
Add case-insensitive unique index for usernamefrom django.db import migrations
class Migration(migrations.Migration):
""" Create a unique case-insensitive index on the username column """
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.RunSQL(
'CREATE UNIQUE INDEX user_case_insensitive_unique '
'ON auth_user ((lower(username)));',
'DROP INDEX user_case_insensitive_unique',
),
]
|
<commit_before><commit_msg>Add case-insensitive unique index for username<commit_after>from django.db import migrations
class Migration(migrations.Migration):
""" Create a unique case-insensitive index on the username column """
dependencies = [
('auth', '0001_initial'),
]
operations = [
migrations.RunSQL(
'CREATE UNIQUE INDEX user_case_insensitive_unique '
'ON auth_user ((lower(username)));',
'DROP INDEX user_case_insensitive_unique',
),
]
|
|
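The functional index above enforces uniqueness on lower(username) at the database level, which a plain unique=True column constraint cannot express; the matching lookup semantics in the ORM are the case-insensitive kind, sketched here:
from django.contrib.auth.models import User

# At most one row can match, thanks to the new index.
user = User.objects.get(username__iexact='alice')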
422b5c763039865d69ee15f3b95a84596ba41c77
|
api/migrations/0006_alter_site_metadata_fields.py
|
api/migrations/0006_alter_site_metadata_fields.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_sitemetadata'),
]
operations = [
migrations.AlterModelOptions(
name='sitemetadata',
options={'verbose_name': 'Site metadata', 'verbose_name_plural': 'Site metadata'},
),
migrations.AlterField(
model_name='sitemetadata',
name='user_portal_link_text',
field=models.CharField(default=b'CyVerse User Management Portal', help_text=b'\n Text used for User Portal hyperlink; state exactly as should appear.\n', max_length=254),
),
]
|
Apply Meta changed & field default to SiteMetadata
|
Apply Meta changed & field default to SiteMetadata
|
Python
|
apache-2.0
|
CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend,CCI-MOC/GUI-Frontend
|
Apply Meta changed & field default to SiteMetadata
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_sitemetadata'),
]
operations = [
migrations.AlterModelOptions(
name='sitemetadata',
options={'verbose_name': 'Site metadata', 'verbose_name_plural': 'Site metadata'},
),
migrations.AlterField(
model_name='sitemetadata',
name='user_portal_link_text',
field=models.CharField(default=b'CyVerse User Management Portal', help_text=b'\n Text used for User Portal hyperlink; state exactly as should appear.\n', max_length=254),
),
]
|
<commit_before><commit_msg>Apply Meta changed & field default to SiteMetadata<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_sitemetadata'),
]
operations = [
migrations.AlterModelOptions(
name='sitemetadata',
options={'verbose_name': 'Site metadata', 'verbose_name_plural': 'Site metadata'},
),
migrations.AlterField(
model_name='sitemetadata',
name='user_portal_link_text',
field=models.CharField(default=b'CyVerse User Management Portal', help_text=b'\n Text used for User Portal hyperlink; state exactly as should appear.\n', max_length=254),
),
]
|
Apply Meta changed & field default to SiteMetadata# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_sitemetadata'),
]
operations = [
migrations.AlterModelOptions(
name='sitemetadata',
options={'verbose_name': 'Site metadata', 'verbose_name_plural': 'Site metadata'},
),
migrations.AlterField(
model_name='sitemetadata',
name='user_portal_link_text',
field=models.CharField(default=b'CyVerse User Management Portal', help_text=b'\n Text used for User Portal hyperlink; state exactly as should appear.\n', max_length=254),
),
]
|
<commit_before><commit_msg>Apply Meta changed & field default to SiteMetadata<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0005_sitemetadata'),
]
operations = [
migrations.AlterModelOptions(
name='sitemetadata',
options={'verbose_name': 'Site metadata', 'verbose_name_plural': 'Site metadata'},
),
migrations.AlterField(
model_name='sitemetadata',
name='user_portal_link_text',
field=models.CharField(default=b'CyVerse User Management Portal', help_text=b'\n Text used for User Portal hyperlink; state exactly as should appear.\n', max_length=254),
),
]
|
|
1c5f2bd553666607328ca16816db882cd5496364
|
tests/test_client.py
|
tests/test_client.py
|
from flask import url_for
from app import user_datastore
from app.models import Post
from tests.general import AppTestCase
class TestClient(AppTestCase):
def setUp(self):
super().setUp()
self.client = self.app.test_client(use_cookies=True)
# Create user and log in
user_datastore.create_user(email='foo@bar.com', password='foobar')
self.client.post(url_for('security.login'), data={
'email': 'foo@bar.com',
'password': 'foobar'
}, follow_redirects=True)
def test_new_post(self):
self.client.post(url_for('admin.new_post'), data={
'title': 'foo',
'body': 'bar',
'tags': 'foobar'
})
post = Post.query.first()
self.assertIsNotNone(post)
|
Add test case for post creation
|
Add test case for post creation
|
Python
|
mit
|
Encrylize/flask-blogger,Encrylize/flask-blogger,Encrylize/flask-blogger
|
Add test case for post creation
|
from flask import url_for
from app import user_datastore
from app.models import Post
from tests.general import AppTestCase
class TestClient(AppTestCase):
def setUp(self):
super().setUp()
self.client = self.app.test_client(use_cookies=True)
# Create user and log in
user_datastore.create_user(email='foo@bar.com', password='foobar')
self.client.post(url_for('security.login'), data={
'email': 'foo@bar.com',
'password': 'foobar'
}, follow_redirects=True)
def test_new_post(self):
self.client.post(url_for('admin.new_post'), data={
'title': 'foo',
'body': 'bar',
'tags': 'foobar'
})
post = Post.query.first()
self.assertIsNotNone(post)
|
<commit_before><commit_msg>Add test case for post creation<commit_after>
|
from flask import url_for
from app import user_datastore
from app.models import Post
from tests.general import AppTestCase
class TestClient(AppTestCase):
def setUp(self):
super().setUp()
self.client = self.app.test_client(use_cookies=True)
# Create user and log in
user_datastore.create_user(email='foo@bar.com', password='foobar')
self.client.post(url_for('security.login'), data={
'email': 'foo@bar.com',
'password': 'foobar'
}, follow_redirects=True)
def test_new_post(self):
self.client.post(url_for('admin.new_post'), data={
'title': 'foo',
'body': 'bar',
'tags': 'foobar'
})
post = Post.query.first()
self.assertIsNotNone(post)
|
Add test case for post creationfrom flask import url_for
from app import user_datastore
from app.models import Post
from tests.general import AppTestCase
class TestClient(AppTestCase):
def setUp(self):
super().setUp()
self.client = self.app.test_client(use_cookies=True)
# Create user and log in
user_datastore.create_user(email='foo@bar.com', password='foobar')
self.client.post(url_for('security.login'), data={
'email': 'foo@bar.com',
'password': 'foobar'
}, follow_redirects=True)
def test_new_post(self):
self.client.post(url_for('admin.new_post'), data={
'title': 'foo',
'body': 'bar',
'tags': 'foobar'
})
post = Post.query.first()
self.assertIsNotNone(post)
|
<commit_before><commit_msg>Add test case for post creation<commit_after>from flask import url_for
from app import user_datastore
from app.models import Post
from tests.general import AppTestCase
class TestClient(AppTestCase):
def setUp(self):
super().setUp()
self.client = self.app.test_client(use_cookies=True)
# Create user and log in
user_datastore.create_user(email='foo@bar.com', password='foobar')
self.client.post(url_for('security.login'), data={
'email': 'foo@bar.com',
'password': 'foobar'
}, follow_redirects=True)
def test_new_post(self):
self.client.post(url_for('admin.new_post'), data={
'title': 'foo',
'body': 'bar',
'tags': 'foobar'
})
post = Post.query.first()
self.assertIsNotNone(post)
|
|
aff3f5dab0d01c1b7e74adca88ba3096cb0b9106
|
tests/people/test_utils.py
|
tests/people/test_utils.py
|
import datetime
from components.people.utils import calculate_age, calculate_average_age
def test_calculate_age():
birthdate = datetime.date.today() - datetime.timedelta(days=731)
target = datetime.date.today() - datetime.timedelta(days=365)
assert calculate_age(birthdate) == 2
assert calculate_age(birthdate, target) == 1
def test_calculate_average_age():
birthdates = [
datetime.date.today() - datetime.timedelta(days=731),
datetime.date.today() - datetime.timedelta(days=366)
]
assert calculate_average_age(birthdates) == 1.5
|
Test our age calculation utilities.
|
Test our age calculation utilities.
|
Python
|
apache-2.0
|
hello-base/web,hello-base/web,hello-base/web,hello-base/web
|
Test our age calculation utilities.
|
import datetime
from components.people.utils import calculate_age, calculate_average_age
def test_calculate_age():
birthdate = datetime.date.today() - datetime.timedelta(days=731)
target = datetime.date.today() - datetime.timedelta(days=365)
assert calculate_age(birthdate) == 2
assert calculate_age(birthdate, target) == 1
def test_calculate_average_age():
birthdates = [
datetime.date.today() - datetime.timedelta(days=731),
datetime.date.today() - datetime.timedelta(days=366)
]
assert calculate_average_age(birthdates) == 1.5
|
<commit_before><commit_msg>Test our age calculation utilities.<commit_after>
|
import datetime
from components.people.utils import calculate_age, calculate_average_age
def test_calculate_age():
birthdate = datetime.date.today() - datetime.timedelta(days=731)
target = datetime.date.today() - datetime.timedelta(days=365)
assert calculate_age(birthdate) == 2
assert calculate_age(birthdate, target) == 1
def test_calculate_average_age():
birthdates = [
datetime.date.today() - datetime.timedelta(days=731),
datetime.date.today() - datetime.timedelta(days=366)
]
assert calculate_average_age(birthdates) == 1.5
|
Test our age calculation utilities.import datetime
from components.people.utils import calculate_age, calculate_average_age
def test_calculate_age():
birthdate = datetime.date.today() - datetime.timedelta(days=731)
target = datetime.date.today() - datetime.timedelta(days=365)
assert calculate_age(birthdate) == 2
assert calculate_age(birthdate, target) == 1
def test_calculate_average_age():
birthdates = [
datetime.date.today() - datetime.timedelta(days=731),
datetime.date.today() - datetime.timedelta(days=366)
]
assert calculate_average_age(birthdates) == 1.5
|
<commit_before><commit_msg>Test our age calculation utilities.<commit_after>import datetime
from components.people.utils import calculate_age, calculate_average_age
def test_calculate_age():
birthdate = datetime.date.today() - datetime.timedelta(days=731)
target = datetime.date.today() - datetime.timedelta(days=365)
assert calculate_age(birthdate) == 2
assert calculate_age(birthdate, target) == 1
def test_calculate_average_age():
birthdates = [
datetime.date.today() - datetime.timedelta(days=731),
datetime.date.today() - datetime.timedelta(days=366)
]
assert calculate_average_age(birthdates) == 1.5
|
|
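The two utilities exercised above are not shown in the record; implementations consistent with these assertions might look like this sketch (the names match the imports, the bodies are assumptions):
import datetime

def calculate_age(birthdate, target=None):
    # Whole years elapsed from birthdate to target (default: today).
    target = target or datetime.date.today()
    had_birthday = (target.month, target.day) >= (birthdate.month, birthdate.day)
    return target.year - birthdate.year - (not had_birthday)

def calculate_average_age(birthdates, target=None):
    # Arithmetic mean of the individual ages.
    return sum(calculate_age(b, target) for b in birthdates) / len(birthdates)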
7caf1988d5bf3c509f8c05c12cc3c9c6aa1ab33a
|
poster/graphics/export_tensorboard_values.py
|
poster/graphics/export_tensorboard_values.py
|
"""
This script has to be called with the event file as argument
The resulting values can be plotted
"""
import sys
import tensorflow as tf
CEEs = []
my_tag = "CAE/cross_entropy_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
CEEs.append((e.step, v.simple_value))
print(CEEs)
MSEs = []
my_tag = "CAE/mean_squared_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
MSEs.append((e.step, v.simple_value))
print(MSEs)
|
Add script for exporting values from tensorboard event file
|
Add script for exporting values from tensorboard event file
|
Python
|
apache-2.0
|
gangchill/nip-convnet,gangchill/nip-convnet
|
Add script for exporting values from tensorboard event file
|
"""
This script has to be called with the event file as argument
The resulting values can be plotted
"""
import sys
import tensorflow as tf
CEEs = []
my_tag = "CAE/cross_entropy_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
CEEs.append((e.step, v.simple_value))
print(CEEs)
MSEs = []
my_tag = "CAE/mean_squared_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
MSEs.append((e.step, v.simple_value))
print(MSEs)
|
<commit_before><commit_msg>Add script for exporting values from tensorboard event file<commit_after>
|
"""
This script has to be called with the event file as argument
The resulting values can be plotted
"""
import sys
import tensorflow as tf
CEEs = []
my_tag = "CAE/cross_entropy_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
CEEs.append((e.step, v.simple_value))
print(CEEs)
MSEs = []
my_tag = "CAE/mean_squared_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
MSEs.append((e.step, v.simple_value))
print(MSEs)
|
Add script for exporting values from tensorboard event file"""
This script has to be called with the event file as argument
The resulting values can be plotted
"""
import sys
import tensorflow as tf
CEEs = []
my_tag = "CAE/cross_entropy_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
CEEs.append((e.step, v.simple_value))
print(CEEs)
MSEs = []
my_tag = "CAE/mean_squared_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
MSEs.append((e.step, v.simple_value))
print(MSEs)
|
<commit_before><commit_msg>Add script for exporting values from tensorboard event file<commit_after>"""
This script has to be called with the event file as argument
The resulting values can be plotted
"""
import sys
import tensorflow as tf
CEEs = []
my_tag = "CAE/cross_entropy_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
CEEs.append((e.step, v.simple_value))
print(CEEs)
MSEs = []
my_tag = "CAE/mean_squared_error"
for e in tf.train.summary_iterator(sys.argv[1]):
for v in e.summary.value:
if v.tag == my_tag:
MSEs.append((e.step, v.simple_value))
print(MSEs)
|
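As the docstring above says, the exported (step, value) pairs are intended for plotting; a sketch of that downstream step (matplotlib is an assumption, the script itself does not depend on it):
import matplotlib.pyplot as plt

steps, values = zip(*MSEs)
plt.plot(steps, values, label='CAE/mean_squared_error')
plt.xlabel('step')
plt.ylabel('value')
plt.legend()
plt.show()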
|
54ad132d9abe545d61b1af34ffe9d7f5c2822a57
|
strings/string_permutation/python/spicyyboi_permute_string.py
|
strings/string_permutation/python/spicyyboi_permute_string.py
|
"""
Gives all non-empty subsequences of a string given as user input.
Example:
Input: Please enter the string: test
Output:
t
e
te
s
ts
es
tes
t
tt
et
tet
st
tst
est
test
"""
def permute(string):
"""Iterable yielding all permutations of a string"""
length = len(string)
powers = [2**i for i in range(length)]
for i in range(2**length):
rval = ""
for p, st in zip(powers, string):
if i & p:
rval += st
if rval != "":
yield rval
inp = input("Please enter the string: ")
for s in permute(inp):
print(s)
|
Add string permutation in python
|
Add string permutation in python
Add a second implementation of string permutation in python.
The implementation doesn't use library helpers such as itertools.
|
Python
|
cc0-1.0
|
ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms,ZoranPandovski/al-go-rithms
|
Add string permutation in python
Add a second implementation of string permutation in python.
The implementation doesn't use library helpers such as itertools.
|
"""
Gives all non-empty subsequences of a string given as user input.
Example:
Input: Please enter the string: test
Output:
t
e
te
s
ts
es
tes
t
tt
et
tet
st
tst
est
test
"""
def permute(string):
"""Iterable yielding all permutations of a string"""
length = len(string)
powers = [2**i for i in range(length)]
for i in range(2**length):
rval = ""
for p, st in zip(powers, string):
if i & p:
rval += st
if rval != "":
yield rval
inp = input("Please enter the string: ")
for s in permute(inp):
print(s)
|
<commit_before><commit_msg>Add string permutation in python
Add a second implementation of string permutation in python.
The implementation doesn't use library helpers such as itertools.<commit_after>
|
"""
Gives all non-empty subsequences of a string given as user input.
Example:
Input: Please enter the string: test
Output:
t
e
te
s
ts
es
tes
t
tt
et
tet
st
tst
est
test
"""
def permute(string):
"""Iterable yielding all permutations of a string"""
length = len(string)
powers = [2**i for i in range(length)]
for i in range(2**length):
rval = ""
for p, st in zip(powers, string):
if i & p:
rval += st
if rval != "":
yield rval
inp = input("Please enter the string: ")
for s in permute(inp):
print(s)
|
Add string permutation in python
Add a second implementation of string permutation in python.
The implementation doesn't use library helpers such as itertools."""
Gives all non-empty subsequences of a string given as user input.
Example:
Input: Please enter the string: test
Output:
t
e
te
s
ts
es
tes
t
tt
et
tet
st
tst
est
test
"""
def permute(string):
"""Iterable yielding all permutations of a string"""
length = len(string)
powers = [2**i for i in range(length)]
for i in range(2**length):
rval = ""
for p, st in zip(powers, string):
if i & p:
rval += st
if rval != "":
yield rval
inp = input("Please enter the string: ")
for s in permute(inp):
print(s)
|
<commit_before><commit_msg>Add string permutation in python
Add a second implementation of string permutation in python.
The implementation doesn't use library helpers such as itertools.<commit_after>"""
Gives all non-empty subsequences of a string given as user input.
Example:
Input: Please enter the string: test
Output:
t
e
te
s
ts
es
tes
t
tt
et
tet
st
tst
est
test
"""
def permute(string):
"""Iterable yielding all permutations of a string"""
length = len(string)
powers = [2**i for i in range(length)]
for i in range(2**length):
rval = ""
for p, st in zip(powers, string):
if i & p:
rval += st
if rval != "":
yield rval
inp = input("Please enter the string: ")
for s in permute(inp):
print(s)
|
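A note on the bitmask loop above: it enumerates every non-empty subset of character positions (2**n - 1 strings), i.e. subsequences rather than reorderings. The standard-library helper the commit message deliberately avoids would produce the same strings, in a different order; shown for comparison:
from itertools import combinations

def subsequences(string):
    # All non-empty subsequences, shortest first, character order preserved.
    for r in range(1, len(string) + 1):
        for combo in combinations(string, r):
            yield ''.join(combo)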
|
c275c2611643d80d275af0430c5df2a94594caf0
|
cms_genome_browser/cms_app.py
|
cms_genome_browser/cms_app.py
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class BrowserApp(CMSApp):
name = _("Genome Browser App")
urls = ["cms_genome_browser.urls"]
app_name = "cms_genome_browser"
apphook_pool.register(BrowserApp)
|
Create BrowserApp app hook for CMS
|
Create BrowserApp app hook for CMS
|
Python
|
bsd-3-clause
|
mfcovington/djangocms-genome-browser,mfcovington/djangocms-genome-browser,mfcovington/djangocms-genome-browser
|
Create BrowserApp app hook for CMS
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class BrowserApp(CMSApp):
name = _("Genome Browser App")
urls = ["cms_genome_browser.urls"]
app_name = "cms_genome_browser"
apphook_pool.register(BrowserApp)
|
<commit_before><commit_msg>Create BrowserApp app hook for CMS<commit_after>
|
from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class BrowserApp(CMSApp):
name = _("Genome Browser App")
urls = ["cms_genome_browser.urls"]
app_name = "cms_genome_browser"
apphook_pool.register(BrowserApp)
|
Create BrowserApp app hook for CMSfrom cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class BrowserApp(CMSApp):
name = _("Genome Browser App")
urls = ["cms_genome_browser.urls"]
app_name = "cms_genome_browser"
apphook_pool.register(BrowserApp)
|
<commit_before><commit_msg>Create BrowserApp app hook for CMS<commit_after>from cms.app_base import CMSApp
from cms.apphook_pool import apphook_pool
from django.utils.translation import ugettext_lazy as _
class BrowserApp(CMSApp):
name = _("Genome Browser App")
urls = ["cms_genome_browser.urls"]
app_name = "cms_genome_browser"
apphook_pool.register(BrowserApp)
|
|
1015225a00b37f6b2322a6ad0450079178c03d17
|
examples/fdroid_clean_repos.py
|
examples/fdroid_clean_repos.py
|
#!/usr/bin/env python3
#
# an fdroid plugin for resetting app VCSs to the latest version for the metadata
import argparse
import logging
from fdroidserver import _, common, metadata
from fdroidserver.exception import VCSException
fdroid_summary = 'reset app VCSs to the latest version'
def main():
parser = argparse.ArgumentParser(usage="%(prog)s [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
common.setup_global_opts(parser)
parser.add_argument("appid", nargs='*', help=_("applicationId with optional versionCode in the form APPID[:VERCODE]"))
metadata.add_metadata_arguments(parser)
options = parser.parse_args()
common.options = options
pkgs = common.read_pkg_args(options.appid, True)
allapps = metadata.read_metadata(pkgs)
apps = common.read_app_args(options.appid, allapps, True)
common.read_config(options)
for appid, app in apps.items():
if "Builds" in app and len(app["Builds"]) > 0:
logging.info(_("Cleaning up '{appid}' VCS").format(appid=appid))
try:
vcs, build_dir = common.setup_vcs(app)
vcs.gotorevision(app["Builds"][-1].commit)
except VCSException:
pass
if __name__ == "__main__":
main()
|
Add plugin to cleanup app VCSs
|
Add plugin to cleanup app VCSs
|
Python
|
agpl-3.0
|
f-droid/fdroidserver,f-droid/fdroidserver,f-droid/fdroidserver,f-droid/fdroidserver,f-droid/fdroidserver
|
Add plugin to cleanup app VCSs
|
#!/usr/bin/env python3
#
# an fdroid plugin for resetting app VCSs to the latest version for the metadata
import argparse
import logging
from fdroidserver import _, common, metadata
from fdroidserver.exception import VCSException
fdroid_summary = 'reset app VCSs to the latest version'
def main():
parser = argparse.ArgumentParser(usage="%(prog)s [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
common.setup_global_opts(parser)
parser.add_argument("appid", nargs='*', help=_("applicationId with optional versionCode in the form APPID[:VERCODE]"))
metadata.add_metadata_arguments(parser)
options = parser.parse_args()
common.options = options
pkgs = common.read_pkg_args(options.appid, True)
allapps = metadata.read_metadata(pkgs)
apps = common.read_app_args(options.appid, allapps, True)
common.read_config(options)
for appid, app in apps.items():
if "Builds" in app and len(app["Builds"]) > 0:
logging.info(_("Cleaning up '{appid}' VCS").format(appid=appid))
try:
vcs, build_dir = common.setup_vcs(app)
vcs.gotorevision(app["Builds"][-1].commit)
except VCSException:
pass
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add plugin to cleanup app VCSs<commit_after>
|
#!/usr/bin/env python3
#
# an fdroid plugin for resetting app VCSs to the latest version for the metadata
import argparse
import logging
from fdroidserver import _, common, metadata
from fdroidserver.exception import VCSException
fdroid_summary = 'reset app VCSs to the latest version'
def main():
parser = argparse.ArgumentParser(usage="%(prog)s [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
common.setup_global_opts(parser)
parser.add_argument("appid", nargs='*', help=_("applicationId with optional versionCode in the form APPID[:VERCODE]"))
metadata.add_metadata_arguments(parser)
options = parser.parse_args()
common.options = options
pkgs = common.read_pkg_args(options.appid, True)
allapps = metadata.read_metadata(pkgs)
apps = common.read_app_args(options.appid, allapps, True)
common.read_config(options)
for appid, app in apps.items():
if "Builds" in app and len(app["Builds"]) > 0:
logging.info(_("Cleaning up '{appid}' VCS").format(appid=appid))
try:
vcs, build_dir = common.setup_vcs(app)
vcs.gotorevision(app["Builds"][-1].commit)
except VCSException:
pass
if __name__ == "__main__":
main()
|
Add plugin to cleanup app VCSs#!/usr/bin/env python3
#
# an fdroid plugin for resetting app VCSs to the latest version for the metadata
import argparse
import logging
from fdroidserver import _, common, metadata
from fdroidserver.exception import VCSException
fdroid_summary = 'reset app VCSs to the latest version'
def main():
parser = argparse.ArgumentParser(usage="%(prog)s [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
common.setup_global_opts(parser)
parser.add_argument("appid", nargs='*', help=_("applicationId with optional versionCode in the form APPID[:VERCODE]"))
metadata.add_metadata_arguments(parser)
options = parser.parse_args()
common.options = options
pkgs = common.read_pkg_args(options.appid, True)
allapps = metadata.read_metadata(pkgs)
apps = common.read_app_args(options.appid, allapps, True)
common.read_config(options)
for appid, app in apps.items():
if "Builds" in app and len(app["Builds"]) > 0:
logging.info(_("Cleaning up '{appid}' VCS").format(appid=appid))
try:
vcs, build_dir = common.setup_vcs(app)
vcs.gotorevision(app["Builds"][-1].commit)
except VCSException:
pass
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add plugin to cleanup app VCSs<commit_after>#!/usr/bin/env python3
#
# an fdroid plugin for resetting app VCSs to the latest version for the metadata
import argparse
import logging
from fdroidserver import _, common, metadata
from fdroidserver.exception import VCSException
fdroid_summary = 'reset app VCSs to the latest version'
def main():
parser = argparse.ArgumentParser(usage="%(prog)s [options] [APPID[:VERCODE] [APPID[:VERCODE] ...]]")
common.setup_global_opts(parser)
parser.add_argument("appid", nargs='*', help=_("applicationId with optional versionCode in the form APPID[:VERCODE]"))
metadata.add_metadata_arguments(parser)
options = parser.parse_args()
common.options = options
pkgs = common.read_pkg_args(options.appid, True)
allapps = metadata.read_metadata(pkgs)
apps = common.read_app_args(options.appid, allapps, True)
common.read_config(options)
for appid, app in apps.items():
if "Builds" in app and len(app["Builds"]) > 0:
logging.info(_("Cleaning up '{appid}' VCS").format(appid=appid))
try:
vcs, build_dir = common.setup_vcs(app)
vcs.gotorevision(app["Builds"][-1].commit)
except VCSException:
pass
if __name__ == "__main__":
main()
|
|
74305170ee64a0129d5c8eec8d908e4a8696d038
|
jobs/migrations/0012_auto_20170809_1849.py
|
jobs/migrations/0012_auto_20170809_1849.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps as global_apps
from django.contrib.contenttypes.management import update_contenttypes
from django.db import models, migrations
from django.utils.timezone import now
MARKER = '.. Migrated from django_comments_xtd.Comment model.\n\n'
comments_app_name = 'django_comments_xtd'
content_type = 'job'
def migrate_old_content(apps, schema_editor):
try:
Comment = apps.get_model(comments_app_name, 'XtdComment')
except LookupError:
# django_comments_xtd isn't installed.
return
update_contenttypes(apps.app_configs['contenttypes'])
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
Job = apps.get_model('jobs', 'Job')
ContentType = apps.get_model('contenttypes', 'ContentType')
db_alias = schema_editor.connection.alias
try:
job_contenttype = ContentType.objects.using(db_alias).get(name=content_type)
except ContentType.DoesNotExist:
return
old_comments = Comment.objects.using(db_alias).filter(
content_type=job_contenttype.pk, is_public=True, is_removed=False,
)
found_jobs = {}
comments = []
for comment in old_comments:
try:
job = found_jobs[comment.object_pk]
except KeyError:
try:
job = Job.objects.using(db_alias).get(pk=comment.object_pk)
found_jobs[comment.object_pk] = job
except Job.DoesNotExist:
continue
review_comment = JobReviewComment(
job=job,
comment=MARKER + comment.comment,
creator=comment.user,
created=comment.submit_date,
updated=now(),
)
comments.append(review_comment)
JobReviewComment.objects.using(db_alias).bulk_create(comments)
def delete_migrated_content(apps, schema_editor):
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
db_alias = schema_editor.connection.alias
JobReviewComment.objects.using(db_alias).filter(comment__startswith=MARKER).delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('jobs', '0011_jobreviewcomment'),
]
if global_apps.is_installed(comments_app_name):
dependencies.append((comments_app_name, '0001_initial'))
operations = [
migrations.RunPython(migrate_old_content, delete_migrated_content),
]
|
Migrate old comments over jobs.JobReviewComment
|
Migrate old comments over jobs.JobReviewComment
Fixes #591
|
Python
|
apache-2.0
|
manhhomienbienthuy/pythondotorg,python/pythondotorg,manhhomienbienthuy/pythondotorg,manhhomienbienthuy/pythondotorg,proevo/pythondotorg,Mariatta/pythondotorg,Mariatta/pythondotorg,python/pythondotorg,python/pythondotorg,Mariatta/pythondotorg,Mariatta/pythondotorg,proevo/pythondotorg,proevo/pythondotorg,python/pythondotorg,manhhomienbienthuy/pythondotorg,proevo/pythondotorg
|
Migrate old comments over jobs.JobReviewComment
Fixes #591
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps as global_apps
from django.contrib.contenttypes.management import update_contenttypes
from django.db import models, migrations
from django.utils.timezone import now
MARKER = '.. Migrated from django_comments_xtd.Comment model.\n\n'
comments_app_name = 'django_comments_xtd'
content_type = 'job'
def migrate_old_content(apps, schema_editor):
try:
Comment = apps.get_model(comments_app_name, 'XtdComment')
except LookupError:
# django_comments_xtd isn't installed.
return
update_contenttypes(apps.app_configs['contenttypes'])
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
Job = apps.get_model('jobs', 'Job')
ContentType = apps.get_model('contenttypes', 'ContentType')
db_alias = schema_editor.connection.alias
try:
job_contenttype = ContentType.objects.using(db_alias).get(name=content_type)
except ContentType.DoesNotExist:
return
old_comments = Comment.objects.using(db_alias).filter(
content_type=job_contenttype.pk, is_public=True, is_removed=False,
)
found_jobs = {}
comments = []
for comment in old_comments:
try:
job = found_jobs[comment.object_pk]
except KeyError:
try:
job = Job.objects.using(db_alias).get(pk=comment.object_pk)
found_jobs[comment.object_pk] = job
except Job.DoesNotExist:
continue
review_comment = JobReviewComment(
job=job,
comment=MARKER + comment.comment,
creator=comment.user,
created=comment.submit_date,
updated=now(),
)
comments.append(review_comment)
JobReviewComment.objects.using(db_alias).bulk_create(comments)
def delete_migrated_content(apps, schema_editor):
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
db_alias = schema_editor.connection.alias
JobReviewComment.objects.using(db_alias).filter(comment__startswith=MARKER).delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('jobs', '0011_jobreviewcomment'),
]
if global_apps.is_installed(comments_app_name):
dependencies.append((comments_app_name, '0001_initial'))
operations = [
migrations.RunPython(migrate_old_content, delete_migrated_content),
]
|
<commit_before><commit_msg>Migrate old comments over jobs.JobReviewComment
Fixes #591<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps as global_apps
from django.contrib.contenttypes.management import update_contenttypes
from django.db import models, migrations
from django.utils.timezone import now
MARKER = '.. Migrated from django_comments_xtd.Comment model.\n\n'
comments_app_name = 'django_comments_xtd'
content_type = 'job'
def migrate_old_content(apps, schema_editor):
try:
Comment = apps.get_model(comments_app_name, 'XtdComment')
except LookupError:
# django_comments_xtd isn't installed.
return
update_contenttypes(apps.app_configs['contenttypes'])
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
Job = apps.get_model('jobs', 'Job')
ContentType = apps.get_model('contenttypes', 'ContentType')
db_alias = schema_editor.connection.alias
try:
job_contenttype = ContentType.objects.using(db_alias).get(name=content_type)
except ContentType.DoesNotExist:
return
old_comments = Comment.objects.using(db_alias).filter(
content_type=job_contenttype.pk, is_public=True, is_removed=False,
)
found_jobs = {}
comments = []
for comment in old_comments:
try:
job = found_jobs[comment.object_pk]
except KeyError:
try:
job = Job.objects.using(db_alias).get(pk=comment.object_pk)
found_jobs[comment.object_pk] = job
except Job.DoesNotExist:
continue
review_comment = JobReviewComment(
job=job,
comment=MARKER + comment.comment,
creator=comment.user,
created=comment.submit_date,
updated=now(),
)
comments.append(review_comment)
JobReviewComment.objects.using(db_alias).bulk_create(comments)
def delete_migrated_content(apps, schema_editor):
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
db_alias = schema_editor.connection.alias
JobReviewComment.objects.using(db_alias).filter(comment__startswith=MARKER).delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('jobs', '0011_jobreviewcomment'),
]
if global_apps.is_installed(comments_app_name):
dependencies.append((comments_app_name, '0001_initial'))
operations = [
migrations.RunPython(migrate_old_content, delete_migrated_content),
]
|
Migrate old comments over jobs.JobReviewComment
Fixes #591# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps as global_apps
from django.contrib.contenttypes.management import update_contenttypes
from django.db import models, migrations
from django.utils.timezone import now
MARKER = '.. Migrated from django_comments_xtd.Comment model.\n\n'
comments_app_name = 'django_comments_xtd'
content_type = 'job'
def migrate_old_content(apps, schema_editor):
try:
Comment = apps.get_model(comments_app_name, 'XtdComment')
except LookupError:
# django_comments_xtd isn't installed.
return
update_contenttypes(apps.app_configs['contenttypes'])
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
Job = apps.get_model('jobs', 'Job')
ContentType = apps.get_model('contenttypes', 'ContentType')
db_alias = schema_editor.connection.alias
try:
job_contenttype = ContentType.objects.using(db_alias).get(name=content_type)
except ContentType.DoesNotExist:
return
old_comments = Comment.objects.using(db_alias).filter(
content_type=job_contenttype.pk, is_public=True, is_removed=False,
)
found_jobs = {}
comments = []
for comment in old_comments:
try:
job = found_jobs[comment.object_pk]
except KeyError:
try:
job = Job.objects.using(db_alias).get(pk=comment.object_pk)
found_jobs[comment.object_pk] = job
except Job.DoesNotExist:
continue
review_comment = JobReviewComment(
job=job,
comment=MARKER + comment.comment,
creator=comment.user,
created=comment.submit_date,
updated=now(),
)
comments.append(review_comment)
JobReviewComment.objects.using(db_alias).bulk_create(comments)
def delete_migrated_content(apps, schema_editor):
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
db_alias = schema_editor.connection.alias
JobReviewComment.objects.using(db_alias).filter(comment__startswith=MARKER).delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('jobs', '0011_jobreviewcomment'),
]
if global_apps.is_installed(comments_app_name):
dependencies.append((comments_app_name, '0001_initial'))
operations = [
migrations.RunPython(migrate_old_content, delete_migrated_content),
]
|
<commit_before><commit_msg>Migrate old comments over jobs.JobReviewComment
Fixes #591<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import apps as global_apps
from django.contrib.contenttypes.management import update_contenttypes
from django.db import models, migrations
from django.utils.timezone import now
MARKER = '.. Migrated from django_comments_xtd.Comment model.\n\n'
comments_app_name = 'django_comments_xtd'
content_type = 'job'
def migrate_old_content(apps, schema_editor):
try:
Comment = apps.get_model(comments_app_name, 'XtdComment')
except LookupError:
# django_comments_xtd isn't installed.
return
update_contenttypes(apps.app_configs['contenttypes'])
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
Job = apps.get_model('jobs', 'Job')
ContentType = apps.get_model('contenttypes', 'ContentType')
db_alias = schema_editor.connection.alias
try:
job_contenttype = ContentType.objects.using(db_alias).get(name=content_type)
except ContentType.DoesNotExist:
return
old_comments = Comment.objects.using(db_alias).filter(
content_type=job_contenttype.pk, is_public=True, is_removed=False,
)
found_jobs = {}
comments = []
for comment in old_comments:
try:
job = found_jobs[comment.object_pk]
except KeyError:
try:
job = Job.objects.using(db_alias).get(pk=comment.object_pk)
found_jobs[comment.object_pk] = job
except Job.DoesNotExist:
continue
review_comment = JobReviewComment(
job=job,
comment=MARKER + comment.comment,
creator=comment.user,
created=comment.submit_date,
updated=now(),
)
comments.append(review_comment)
JobReviewComment.objects.using(db_alias).bulk_create(comments)
def delete_migrated_content(apps, schema_editor):
JobReviewComment = apps.get_model('jobs', 'JobReviewComment')
db_alias = schema_editor.connection.alias
JobReviewComment.objects.using(db_alias).filter(comment__startswith=MARKER).delete()
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('jobs', '0011_jobreviewcomment'),
]
if global_apps.is_installed(comments_app_name):
dependencies.append((comments_app_name, '0001_initial'))
operations = [
migrations.RunPython(migrate_old_content, delete_migrated_content),
]
|
|
1921c82e1a8f0a60ed665184c49647b38ea4978c
|
scripts/proc_sleep_timer.py
|
scripts/proc_sleep_timer.py
|
# Kills process by image name after a specified timeout
# Useful as a sleep timer for some apps that don't support it natively, like VLC
# Works only on Windows!
# Example usage: python proc_sleep_timer vlc.exe 3600
import sched, time, subprocess, sys
if (len(sys.argv) != 3):
print("Usage: python proc_sleep_timer.py IMAGE_NAME TIMEOUT")
sys.exit(1)
image_name = sys.argv[1]
timeout = int(sys.argv[2])
def taskkill_img(image_name):
p = subprocess.Popen(['taskkill', '/IM', image_name])
s = sched.scheduler(time.monotonic, time.sleep)
s.enter(timeout, 1, taskkill_img, argument=(image_name,))
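# run() blocks until the scheduled taskkill event has fired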
a=s.run()
|
Add simple sleep timer script for Windows
|
Add simple sleep timer script for Windows
|
Python
|
mit
|
iluxonchik/python-general-repo
|
Add simple sleep timer script for Windows
|
# Kills process by image name after a specified timeout
# Useful as a sleep timer for some apps that don't support it natively, like VLC
# Works only on Windows!
# Example usage: python proc_sleep_timer vlc.exe 3600
import sched, time, subprocess, sys
if (len(sys.argv) != 3):
print("Usage: python proc_sleep_timer.py IMAGE_NAME TIMEOUT")
sys.exit(1)
image_name = sys.argv[1]
timeout = int(sys.argv[2])
def taskkill_img(image_name):
p = subprocess.Popen(['taskkill', '/IM', image_name])
s = sched.scheduler(time.monotonic, time.sleep)
s.enter(timeout, 1, taskkill_img, argument=(image_name,))
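# run() blocks until the scheduled taskkill event has fired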
a=s.run()
|
<commit_before><commit_msg>Add simple sleep timer script for Windows<commit_after>
|
# Kills process by image name after a specified timeout
# Useful as a sleep timer for some apps that don't support it natively, like VLC
# Works only on Windows!
# Example usage: python proc_sleep_timer vlc.exe 3600
import sched, time, subprocess, sys
if (len(sys.argv) != 3):
print("Usage: python proc_sleep_timer.py IMAGE_NAME TIMEOUT")
sys.exit(1)
image_name = sys.argv[1]
timeout = int(sys.argv[2])
def taskkill_img(image_name):
p = subprocess.Popen(['taskkill', '/IM', image_name])
s = sched.scheduler(time.monotonic, time.sleep)
s.enter(timeout, 1, taskkill_img, argument=(image_name,))
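# run() blocks until the scheduled taskkill event has fired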
a=s.run()
|
Add simple sleep timer script for Windows# Kills process by image name after a specified timeout
# Useful as a sleep timer for some apps that don't support it natively, like VLC
# Works only on Windows!
# Example usage: python proc_sleep_timer vlc.exe 3600
import sched, time, subprocess, sys
if (len(sys.argv) != 3):
print("Usage: python proc_sleep_timer.py IMAGE_NAME TIMEOUT")
sys.exit(1)
image_name = sys.argv[1]
timeout = int(sys.argv[2])
def taskkill_img(image_name):
p = subprocess.Popen(['taskkill', '/IM', image_name])
s = sched.scheduler(time.monotonic, time.sleep)
s.enter(timeout, 1, taskkill_img, argument=(image_name,))
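# run() blocks until the scheduled taskkill event has fired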
a=s.run()
|
<commit_before><commit_msg>Add simple sleep timer script for Windows<commit_after># Kills process by image name after a specified timeout
# Useful as a sleep timer for some apps that don't support it natively, like VLC
# Works only on Windows!
# Example usage: python proc_sleep_timer vlc.exe 3600
import sched, time, subprocess, sys
if (len(sys.argv) != 3):
print("Usage: python proc_sleep_timer.py IMAGE_NAME TIMEOUT")
sys.exit(1)
image_name = sys.argv[1]
timeout = int(sys.argv[2])
def taskkill_img(image_name):
p = subprocess.Popen(['taskkill', '/IM', image_name])
s = sched.scheduler(time.monotonic, time.sleep)
s.enter(timeout, 1, taskkill_img, argument=(image_name,))
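# run() blocks until the scheduled taskkill event has fired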
a=s.run()
|
|
4a9d402b5917b052cae60f4d9d15cbf906dadf7d
|
split_data.py
|
split_data.py
|
import glob
import os
import tqdm
import numpy as np
import shutil
from bird import loader as l
source_dir = "./datasets/birdClef2016Whole/"
classes = os.listdir(os.path.join(source_dir, "train"))
percentage_validation_sampels = 0.10
progress = tqdm.tqdm(range(len(classes)))
class_segmentss = [(c, glob.glob(os.path.join(source_dir, "train", c, "*.wav"))) for c
in classes]
unique_sampels = [(c, l.group_segments(class_segments)) for (c, class_segments) in
class_segmentss]
print("Found ", sum([len(segs) for (c, segs) in unique_sampels]), " unique sampels")
for ((c, segments), p) in zip(unique_sampels, progress):
nb_samples = len(segments)
nb_validation_samples = int(np.ceil(nb_samples * percentage_validation_sampels))
valid_class_path = os.path.join(source_dir, "valid", c)
if not os.path.exists(valid_class_path):
print("os.makedirs("+valid_class_path+")")
#os.makedirs(valid_class_path)
i_valid_samples = np.random.choice(range(len(segments)),
nb_validation_samples, replace=False)
valid_samples = [segments[i] for i in i_valid_samples]
for sample in valid_samples:
print(c, "validation")
for segment in sample:
print("shutil.move("+segment+","+valid_class_path+")")
#shutil.move(segment, valid_class_path)
|
Add script to split dataset into validation and training data
|
Add script to split dataset into validation and training data
|
Python
|
mit
|
johnmartinsson/bird-species-classification,johnmartinsson/bird-species-classification
|
Add script to split dataset into validation and training data
|
import glob
import os
import tqdm
import numpy as np
import shutil
from bird import loader as l
source_dir = "./datasets/birdClef2016Whole/"
classes = os.listdir(os.path.join(source_dir, "train"))
percentage_validation_sampels = 0.10
progress = tqdm.tqdm(range(len(classes)))
class_segmentss = [(c, glob.glob(os.path.join(source_dir, "train", c, "*.wav"))) for c
in classes]
unique_sampels = [(c, l.group_segments(class_segments)) for (c, class_segments) in
class_segmentss]
print("Found ", sum([len(segs) for (c, segs) in unique_sampels]), " unique sampels")
for ((c, segments), p) in zip(unique_sampels, progress):
nb_samples = len(segments)
nb_validation_samples = int(np.ceil(nb_samples * percentage_validation_sampels))
valid_class_path = os.path.join(source_dir, "valid", c)
if not os.path.exists(valid_class_path):
print("os.makedirs("+valid_class_path+")")
#os.makedirs(valid_class_path)
i_valid_samples = np.random.choice(range(len(segments)),
nb_validation_samples, replace=False)
valid_samples = [segments[i] for i in i_valid_samples]
for sample in valid_samples:
print(c, "validation")
for segment in sample:
print("shutil.move("+segment+","+valid_class_path+")")
#shutil.move(segment, valid_class_path)
|
<commit_before><commit_msg>Add script to split dataset into validation and training data<commit_after>
|
import glob
import os
import tqdm
import numpy as np
import shutil
from bird import loader as l
source_dir = "./datasets/birdClef2016Whole/"
classes = os.listdir(os.path.join(source_dir, "train"))
percentage_validation_sampels = 0.10
progress = tqdm.tqdm(range(len(classes)))
class_segmentss = [(c, glob.glob(os.path.join(source_dir, "train", c, "*.wav"))) for c
in classes]
unique_sampels = [(c, l.group_segments(class_segments)) for (c, class_segments) in
class_segmentss]
print("Found ", sum([len(segs) for (c, segs) in unique_sampels]), " unique sampels")
for ((c, segments), p) in zip(unique_sampels, progress):
nb_samples = len(segments)
nb_validation_samples = int(np.ceil(nb_samples * percentage_validation_sampels))
valid_class_path = os.path.join(source_dir, "valid", c)
if not os.path.exists(valid_class_path):
print("os.makedirs("+valid_class_path+")")
#os.makedirs(valid_class_path)
i_valid_samples = np.random.choice(range(len(segments)),
nb_validation_samples, replace=False)
valid_samples = [segments[i] for i in i_valid_samples]
for sample in valid_samples:
print(c, "validation")
for segment in sample:
print("shutil.move("+segment+","+valid_class_path+")")
#shutil.move(segment, valid_class_path)
|
Add script to split dataset into validation and training dataimport glob
import os
import tqdm
import numpy as np
import shutil
from bird import loader as l
source_dir = "./datasets/birdClef2016Whole/"
classes = os.listdir(os.path.join(source_dir, "train"))
percentage_validation_sampels = 0.10
progress = tqdm.tqdm(range(len(classes)))
class_segmentss = [(c, glob.glob(os.path.join(source_dir, "train", c, "*.wav"))) for c
in classes]
unique_sampels = [(c, l.group_segments(class_segments)) for (c, class_segments) in
class_segmentss]
print("Found ", sum([len(segs) for (c, segs) in unique_sampels]), " unique sampels")
for ((c, segments), p) in zip(unique_sampels, progress):
nb_samples = len(segments)
nb_validation_samples = int(np.ceil(nb_samples * percentage_validation_sampels))
valid_class_path = os.path.join(source_dir, "valid", c)
if not os.path.exists(valid_class_path):
print("os.makedirs("+valid_class_path+")")
#os.makedirs(valid_class_path)
i_valid_samples = np.random.choice(range(len(segments)),
nb_validation_samples, replace=False)
valid_samples = [segments[i] for i in i_valid_samples]
for sample in valid_samples:
print(c, "validation")
for segment in sample:
print("shutil.move("+segment+","+valid_class_path+")")
#shutil.move(segment, valid_class_path)
|
<commit_before><commit_msg>Add script to split dataset into validation and training data<commit_after>import glob
import os
import tqdm
import numpy as np
import shutil
from bird import loader as l
source_dir = "./datasets/birdClef2016Whole/"
classes = os.listdir(os.path.join(source_dir, "train"))
percentage_validation_sampels = 0.10
progress = tqdm.tqdm(range(len(classes)))
class_segmentss = [(c, glob.glob(os.path.join(source_dir, "train", c, "*.wav"))) for c
in classes]
unique_sampels = [(c, l.group_segments(class_segments)) for (c, class_segments) in
class_segmentss]
print("Found ", sum([len(segs) for (c, segs) in unique_sampels]), " unique sampels")
for ((c, segments), p) in zip(unique_sampels, progress):
nb_samples = len(segments)
nb_validation_samples = int(np.ceil(nb_samples * percentage_validation_sampels))
valid_class_path = os.path.join(source_dir, "valid", c)
if not os.path.exists(valid_class_path):
print("os.makedirs("+valid_class_path+")")
#os.makedirs(valid_class_path)
i_valid_samples = np.random.choice(range(len(segments)),
nb_validation_samples, replace=False)
valid_samples = [segments[i] for i in i_valid_samples]
for sample in valid_samples:
print(c, "validation")
for segment in sample:
print("shutil.move("+segment+","+valid_class_path+")")
#shutil.move(segment, valid_class_path)
|
|
50117a2576ab8d002d95e36b5dccfb4e16122622
|
move_forvo.py
|
move_forvo.py
|
"""Move and rename pronunciations downloaded from forvo for easier Anki creation."""
import os
import shutil
import time
FROM_DIR = ''
TO_DIR = ''
while True:
for f in os.listdir(FROM_DIR):
if f[:17] == 'pronunciation_de_':
print('moving ' + f)
shutil.move(os.path.join(FROM_DIR, f), os.path.join(TO_DIR, f[17:]))
time.sleep(1)
|
Move forvo downloads for easier Anki creation.
|
Move forvo downloads for easier Anki creation.
|
Python
|
mit
|
AndrzejR/Scripts
|
Move forvo downloads for easier Anki creation.
|
"""Move and rename pronunciations downloaded from forvo for easier Anki creation."""
import os
import shutil
import time
FROM_DIR = ''
TO_DIR = ''
while True:
for f in os.listdir(FROM_DIR):
if f[:17] == 'pronunciation_de_':
print('moving ' + f)
shutil.move(os.path.join(FROM_DIR, f), os.path.join(TO_DIR, f[17:]))
time.sleep(1)
|
<commit_before><commit_msg>Move forvo downloads for easier Anki creation.<commit_after>
|
"""Move and rename pronunciations downloaded from forvo for easier Anki creation."""
import os
import shutil
import time
FROM_DIR = ''
TO_DIR = ''
while True:
for f in os.listdir(FROM_DIR):
if f[:17] == 'pronunciation_de_':
print('moving ' + f)
shutil.move(os.path.join(FROM_DIR, f), os.path.join(TO_DIR, f[17:]))
time.sleep(1)
|
Move forvo downloads for easier Anki creation."""Move and rename pronunciations downloaded from forvo for easier Anki creation."""
import os
import shutil
import time
FROM_DIR = ''
TO_DIR = ''
while True:
for f in os.listdir(FROM_DIR):
if f[:17] == 'pronunciation_de_':
print('moving ' + f)
shutil.move(os.path.join(FROM_DIR, f), os.path.join(TO_DIR, f[17:]))
time.sleep(1)
|
<commit_before><commit_msg>Move forvo downloads for easier Anki creation.<commit_after>"""Move and rename pronunciations downloaded from forvo for easier Anki creation."""
import os
import shutil
import time
FROM_DIR = ''
TO_DIR = ''
while True:
for f in os.listdir(FROM_DIR):
if f[:17] == 'pronunciation_de_':
print('moving ' + f)
shutil.move(os.path.join(FROM_DIR, f), os.path.join(TO_DIR, f[17:]))
time.sleep(1)
|
|
eca911a1b1623368f991dbf47002c0b59abc15db
|
script/lib/config.py
|
script/lib/config.py
|
#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '1df8e7cdac8aa74c91c19ae0691ce512d560ab3e'
|
#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa4874a6bcc51fdd87ca7ae0928514ce83645988'
|
Update libchromiumcontent: Suppress CFAllocator warning.
|
Update libchromiumcontent: Suppress CFAllocator warning.
|
Python
|
mit
|
jjz/electron,Ivshti/electron,howmuchcomputer/electron,simonfork/electron,takashi/electron,SufianHassan/electron,micalan/electron,tylergibson/electron,Gerhut/electron,mattdesl/electron,edulan/electron,edulan/electron,d-salas/electron,gerhardberger/electron,micalan/electron,brenca/electron,stevekinney/electron,pandoraui/electron,rreimann/electron,Jacobichou/electron,SufianHassan/electron,bright-sparks/electron,sshiting/electron,dongjoon-hyun/electron,dahal/electron,DivyaKMenon/electron,dongjoon-hyun/electron,roadev/electron,aichingm/electron,kokdemo/electron,kikong/electron,natgolov/electron,darwin/electron,LadyNaggaga/electron,iftekeriba/electron,d-salas/electron,Jonekee/electron,faizalpribadi/electron,pirafrank/electron,Zagorakiss/electron,jjz/electron,JussMee15/electron,Jonekee/electron,wan-qy/electron,thingsinjars/electron,takashi/electron,vHanda/electron,cqqccqc/electron,kcrt/electron,DivyaKMenon/electron,aliib/electron,jtburke/electron,cos2004/electron,Neron-X5/electron,shaundunne/electron,systembugtj/electron,brave/muon,kazupon/electron,jacksondc/electron,coderhaoxin/electron,micalan/electron,kenmozi/electron,arusakov/electron,abhishekgahlot/electron,simonfork/electron,jjz/electron,deed02392/electron,natgolov/electron,mrwizard82d1/electron,kenmozi/electron,gbn972/electron,medixdev/electron,Jacobichou/electron,tonyganch/electron,vHanda/electron,bright-sparks/electron,gerhardberger/electron,bbondy/electron,timruffles/electron,Gerhut/electron,tylergibson/electron,subblue/electron,nicobot/electron,gamedevsam/electron,astoilkov/electron,fffej/electron,seanchas116/electron,Floato/electron,thompsonemerson/electron,etiktin/electron,shennushi/electron,chrisswk/electron,adamjgray/electron,jjz/electron,iftekeriba/electron,thingsinjars/electron,simongregory/electron,christian-bromann/electron,seanchas116/electron,MaxWhere/electron,gabrielPeart/electron,MaxGraey/electron,trigrass2/electron,soulteary/electron,fabien-d/electron,bobwol/electron,vHanda/electron,neutrous/electron,beni55/electron,voidbridge/electron,miniak/electron,John-Lin/electron,saronwei/electron,brave/electron,jhen0409/electron,tomashanacek/electron,BionicClick/electron,matiasinsaurralde/electron,LadyNaggaga/electron,systembugtj/electron,webmechanicx/electron,micalan/electron,subblue/electron,rhencke/electron,abhishekgahlot/electron,leethomas/electron,Ivshti/electron,jonatasfreitasv/electron,JussMee15/electron,iftekeriba/electron,ervinb/electron,neutrous/electron,takashi/electron,gabrielPeart/electron,preco21/electron,jtburke/electron,cqqccqc/electron,shiftkey/electron,edulan/electron,jlord/electron,deed02392/electron,John-Lin/electron,GoooIce/electron,noikiy/electron,digideskio/electron,timruffles/electron,MaxWhere/electron,aliib/electron,dongjoon-hyun/electron,Faiz7412/electron,wan-qy/electron,mirrh/electron,subblue/electron,shockone/electron,bobwol/electron,electron/electron,roadev/electron,cqqccqc/electron,shiftkey/electron,edulan/electron,jlord/electron,deed02392/electron,John-Lin/electron,GoooIce/electron,noikiy/electron,digideskio/electron,lzpfmh/electron,pandoraui/electron,nicholasess/electron,evgenyzinoviev/electron,pirafrank/electron,mirrh/electron,vaginessa/electron,kcrt/electron,pandoraui/electron,timruffles/electron,aaron-goshine/electron,beni55/electron,GoooIce/electron,RobertJGabriel/electron,kazupon/electron,robinvandernoord/electron,eric-seekas/electron,eriser/electron,MaxWhere/electron,yalexx/electron,lrlna/electron,jannishuebl/electron,Evercoder/electron,natgolov/electron,cqqccqc/electron,digideskio/electron,kenmozi/electron,renaesop/electron,lrlna/electron,jobs/electron,stevekinney/electron,pirafrank/electron,icattlecoder/electron,sky7sea/electron,gamedevsam/electron,fomojola/electron,fritx/electron,electron/electron,shockone/electron,bruce/electron,zhakui/electron,gerhardberger/electron,shennushi/electron,systembugtj/electron,adcentury/electron,arturts/electron,LadyNaggaga/electron,icattlecoder/electron,gamedevsam/electron,astoilkov/electron,gbn972/electron,bruce/electron,mattdesl/electron,mrwizard82d1/electron,brave/electron,mattotodd/electron,deepak1556/atom-shell,kcrt/electron,jlord/electron,rajatsingla28/electron,IonicaBizauKitchen/electron,bpasero/electron,tylergibson/electron,wolfflow/electron,pombredanne/electron,baiwyc119/electron,howmuchcomputer/electron,leolujuyi/electron,brenca/electron,Ivshti/electron,jonatasfreitasv/electron,jannishuebl/electron,jlhbaseball15/electron,synaptek/electron,dkfiresky/electron,greyhwndz/electron,bwiggs/electron,Andrey-Pavlov/electron,miniak/electron,iftekeriba/electron,medixdev/electron,bright-sparks/electron,nicobot/electron,noikiy/electron,Evercoder/electron,mubassirhayat/electron,bwiggs/electron,renaesop/electron,gabriel/electron,trankmichael/electron,soulteary/electron,felixrieseberg/electron,oiledCode/electron,mattotodd/electron,the-ress/electron,thingsinjars/electron,felixrieseberg/electron,tylergibson/electron,nicholasess/electron,minggo/electron,icattlecoder/electron,lrlna/electron,fireball-x/atom-shell,synaptek/electron,jsutcodes/electron,jjz/electron,shockone/electron,smczk/electron,fomojola/electron,yalexx/electron,tinydew4/electron,egoist/electron,mirrh/electron,meowlab/electron,wolfflow/electron,dahal/electron,sircharleswatson/electron,tincan24/electron,Gerhut/electron,voidbridge/electron,jlhbaseball15/electron,cos2004/electron,stevekinney/electron,neutrous/electron,preco21/electron,stevemao/electron,meowlab/electron,gabrielPeart/electron,kikong/electron,faizalpribadi/electron,arusakov/electron,Zagorakiss/electron,mattdesl/electron,tylergibson/electron,SufianHassan/electron,yan-foto/electron,dkfiresky/electron,mirrh/electron,LadyNaggaga/electron,cos2004/electron,kenmozi/electron,stevekinney/electron,RIAEvangelist/electron,brenca/electron,fritx/electron,astoilkov/electron,ianscrivener/electron,carsonmcdonald/electron,biblerule/UMCTelnetHub,Faiz7412/electron,farmisen/electron,arturts/electron,leolujuyi/electron,Neron-X5/electron,digideskio/electron,jtburke/electron,deed02392/electron,Rokt33r/electron,JesselJohn/electron,thompsonemerson/electron,neutrous/electron,yalexx/electron,coderhaoxin/electron,xiruibing/electron,bbondy/electron,the-ress/electron,jacksondc/electron,IonicaBizauKitchen/electron,renaesop/electron,BionicClick/electron,ianscrivener/electron,bpasero/electron,baiwyc119/electron,jannishuebl/electron,joneit/electron,thomsonreuters/electron,trankmichael/electron,eriser/electron,gerhardberger/electron,webmechanicx/electron,leftstick/electron,jaanus/electron,maxogden/atom-shell,Rokt33r/electron,MaxGraey/electron,rajatsingla28/electron,joaomoreno/atom-shell,eriser/electron,leolujuyi/electron,JesselJohn/electron,gamedevsam/electron,digideskio/electron,Jacobichou/electron,etiktin/electron,matiasinsaurralde/electron,robinvandernoord/electron,JussMee15/electron,greyhwndz/electron,rhencke/electron,minggo/electron,anko/electron,jsutcodes/electron,fffej/electron,jtburke/electron,deepak1556/atom-shell,jannishuebl/electron,bwiggs/electron,evgenyzinoviev/electron,tomashanacek/electron,noikiy/electron,vaginessa/electron,faizalpribadi/electron,DivyaKMenon/electron,davazp/electron,dongjoon-hyun/electron,davazp/electron,mattotodd/electron,jtburke/electron,deed02392/electron,fireball-x/atom-shell,RobertJGabriel/electron,Andrey-Pavlov/electron,stevekinney/electron,anko/electron,egoist/electron,takashi/electron,astoilkov/electron,sircharleswatson/electron,dahal/electron,mhkeller/electron,nekuz0r/electron,minggo/electron,coderhaoxin/electron,adamjgray/electron,gabrielPeart/electron,JussMee15/electron,aaron-goshine/electron,tincan24/electron,the-ress/electron,aaron-goshine/electron,jaanus/electron,LadyNaggaga/electron,jsutcodes/electron,stevekinney/electron,abhishekgahlot/electron,mattotodd/electron,Gerhut/electron,adamjgray/electron,tincan24/electron,preco21/electron,saronwei/electron,baiwyc119/electron,ankitaggarwal011/electron,wan-qy/electron,JesselJohn/electron,jsutcodes/electron,posix4e/electron,John-Lin/electron,wan-qy/electron,Andrey-Pavlov/electron,posix4e/electron,gstack/infinium-shell,pirafrank/electron,John-Lin/electron,aaron-goshine/electron,RobertJGabriel/electron,ankitaggarwal011/electron,synaptek/electron,electron/electron,arusakov/electron,leethomas/electron,shiftkey/electron,roadev/electron,miniak/electron,chriskdon/electron,shockone/electron,xfstudio/electron,seanchas116/electron,medixdev/electron,cos2004/electron,Jonekee/electron,rajatsingla28/electron,electron/electron,rsvip/electron,RIAEvangelist/electron,rreimann/electron,Evercoder/electron,gabriel/electron,carsonmcdonald/electron,fireball-x/atom-shell,posix4e/electron,Jacobichou/electron,pombredanne/electron,bruce/electron,DivyaKMenon/electron,brave/electron,smczk/electron,minggo/electron,shaundunne/electron,stevemao/electron,jtburke/electron,subblue/electron,jhen0409/electron,jonatasfreitasv/electron,jiaz/electron,fritx/electron,systembugtj/electron,oiledCode/electron,greyhwndz/electron,baiwyc119/electron,cos2004/electron,rsvip/electron,Floato/electron,mjaniszew/electron,jiaz/electron,Neron-X5/electron,hokein/atom-shell,IonicaBizauKitchen/electron,digideskio/electron,tinydew4/electron,adcentury/electron,fireball-x/atom-shell,sky7sea/electron,nicobot/electron,vipulroxx/electron,biblerule/UMCTelnetHub,nicholasess/electron,LadyNaggaga/electron,ervinb/electron,ankitaggarwal011/electron,biblerule/UMCTelnetHub,aliib/electron,darwin/electron,etiktin/electron,chriskdon/electron,webmechanicx/electron,michaelchiche/electron,MaxGraey/electron,lzpfmh/electron,bitemyapp/electron,iftekeriba/electron,MaxGraey/electron,edulan/electron,brenca/electron,baiwyc119/electron,gabriel/electron,Ivshti/electron,bruce/electron,tinydew4/electron,tinydew4/electron,IonicaBizauKitchen/electron,rhencke/electron,eriser/electron,kazupon/electron,kokdemo/electron,fritx/electron,joaomoreno/atom-shell,benweissmann/electron,bruce/electron,thompsonemerson/electron,fireball-x/atom-shell,fomojola/electron,lrlna/electron,voidbridge/electron,kostia/electron,leftstick/electron,mhkeller/electron,pandoraui/electron,ervinb/electron,meowlab/electron,fffej/electron,aecca/electron,webmechanicx/electron,Gerhut/electron,aaron-goshine/electron,nicobot/electron,sky7sea/electron,trankmichael/electron,tylergibson/electron,JussMee15/electron,mubassirhayat/electron,Floato/electron,rajatsingla28/electron,brave/muon,shennushi/electron,ianscrivener/electron,xfstudio/electron,shockone/electron,Gerhut/electron,coderhaoxin/electron,eric-seekas/electron,medixdev/electron,fritx/electron,meowlab/electron,lzpfmh/electron,davazp/electron,cqqccqc/electron,lzpfmh/electron,GoooIce/electron,sircharleswatson/electron,sshiting/electron,faizalpribadi/electron,egoist/electron,bright-sparks/electron,rreimann/electron,christian-bromann/electron,synaptek/electron,voidbridge/electron,stevemao/electron,stevemao/electron,yan-foto/electron,gabrielPeart/electron,timruffles/electron,michaelchiche/electron,vaginessa/electron,Zagorakiss/electron,trigrass2/electron,tincan24/electron,shennushi/electron,thomsonreuters/electron,icattlecoder/electron,tinydew4/electron,Rokt33r/electron,destan/electron,roadev/electron,rsvip/electron,xfstudio/electron,gbn972/electron,egoist/electron,jlhbaseball15/electron,systembugtj/electron,anko/electron,d-salas/electron,leolujuyi/electron,Evercoder/electron,kostia/electron,bobwol/electron,aecca/electron,deed02392/electron,GoooIce/electron,jlord/electron,systembugtj/electron,d-salas/electron,jiaz/electron,mjaniszew/electron,lzpfmh/electron,zhakui/electron,Jonekee/electron,rhencke/electron,howmuchcomputer/electron,vHanda/electron,jlhbaseball15/electron,electron/electron,seanchas116/electron,kikong/electron,kazupon/electron,MaxGraey/electron,shiftkey/electron,michaelchiche/electron,simonfork/electron,micalan/electron,leftstick/electron,voidbridge/electron,jiaz/electron,bobwol/electron,kostia/electron,bbondy/electron,GoooIce/electron,tinydew4/electron,cqqccqc/electron,chrisswk/electron,hokein/atom-shell,robinvandernoord/electron,robinvandernoord/electron,maxogden/atom-shell,John-Lin/electron,jaanus/electron,tincan24/electron,shiftkey/electron,natgolov/electron,astoilkov/electron,pandoraui/electron,mirrh/electron,michaelchiche/electron,rreimann/electron,oiledCode/electron,deed02392/electron,mhkeller/electron,thingsinjars/electron,mattdesl/electron,RobertJGabriel/electron,noikiy/electron,brave/electron,BionicClick/electron,beni55/electron,thomsonreuters/electron,renaesop/electron,matiasinsaurralde/electron,gabrielPeart/electron,simongregory/electron,greyhwndz/electron,dahal/electron,nicobot/electron,tomashanacek/electron,jacksondc/electron,Evercoder/electron,bitemyapp/electron,saronwei/electron,jhen0409/electron,brave/electron,vHanda/electron,benweissmann/electron,brenca/electron,wolfflow/electron,tonyganch/electron,Floato/electron,Ivshti/electron,smczk/electron,deepak1556/atom-shell,shiftkey/electron,hokein/atom-shell,mhkeller/electron,ervinb/electron,zhakui/electron,christian-bromann/electron,aecca/electron,jonatasfreitasv/electron,beni55/electron,yan-foto/electron,joneit/electron,adamjgray/electron,twolfson/electron,zhakui/electron,bruce/electron,digideskio/electron,trankmichael/electron,oiledCode/electron,saronwei/electron,Zagorakiss/electron,rajatsingla28/electron,rprichard/electron,seanchas116/electron,Rokt33r/electron,jacksondc/electron,arusakov/electron,nicholasess/electron,mubassirhayat/electron,nekuz0r/electron,shaundunne/electron,icattlecoder/electron,gstack/infinium-shell,fritx/electron,howmuchcomputer/electron,anko/electron,aliib/electron,leftstick/electron,bitemyapp/electron,wolfflow/electron,rprichard/electron,mrwizard82d1/electron,trigrass2/electron,twolfson/electron,dkfiresky/electron,nagyistoce/electron-atom-shell,shennushi/electron,nicholasess/electron,kikong/electron,kostia/electron,maxogden/atom-shell,natgolov/electron,brave/muon,gerhardberger/electron,arusakov/electron,rsvip/electron,benweissmann/electron,Jacobichou/electron,oiledCode/electron,bwiggs/electron,sshiting/electron,fabien-d/electron,soulteary/electron,the-ress/electron,egoist/electron,kcrt/electron,jhen0409/electron,rprichard/electron,d-salas/electron,fffej/electron,adamjgray/electron,meowlab/electron,darwin/electron,bobwol/electron,renaesop/electron,kostia/electron,adcentury/electron,jcblw/electron,etiktin/electron,saronwei/electron,Zagorakiss/electron,joaomoreno/atom-shell,davazp/electron,thomsonreuters/electron,lrlna/electron,posix4e/electron,joneit/electron,simongregory/electron,thompsonemerson/electron,faizalpribadi/electron,dkfiresky/electron,gstack/infinium-shell,jannishuebl/electron,bobwol/electron,pombredanne/electron,jcblw/electron,howmuchcomputer/electron,Jacobichou/electron,gbn972/electron,Neron-X5/electron,oiledCode/electron,wolfflow/electron,brave/muon,destan/electron,Floato/electron,benweissmann/electron,tincan24/electron,soulteary/electron,kcrt/electron,bitemyapp/electron,simonfork/electron,evgenyzinoviev/electron,trigrass2/electron,biblerule/UMCTelnetHub,coderhaoxin/electron,shockone/electron,iftekeriba/electron,thompsonemerson/electron,jaanus/electron,stevemao/electron,Jonekee/electron,farmisen/electron,medixdev/electron,mrwizard82d1/electron,BionicClick/electron,jaanus/electron,adamjgray/electron,baiwyc119/electron,Andrey-Pavlov/electron,matiasinsaurralde/electron,maxogden/atom-shell,setzer777/electron,benweissmann/electron,nicholasess/electron,electron/electron,MaxWhere/electron,twolfson/electron,biblerule/UMCTelnetHub,gabriel/electron,vaginessa/electron,DivyaKMenon/electron,robinvandernoord/electron,jhen0409/electron,dkfiresky/electron,setzer777/electron,leftstick/electron,vaginessa/electron,rajatsingla28/electron,deepak1556/atom-shell,vipulroxx/electron,bitemyapp/electron,aaron-goshine/electron,davazp/electron,jcblw/electron,mattdesl/electron,wan-qy/electron,trankmichael/electron,trankmichael/electron,farmisen/electron,rsvip/electron,vipulroxx/electron,aichingm/electron,eric-seekas/electron,jsutcodes/electron,micalan/electron,aecca/electron,thomsonreuters/electron,davazp/electron,ankitaggarwal011/electron,fffej/electron,mrwizard82d1/electron,webmechanicx/electron,xiruibing/electron,adcentury/electron,synaptek/electron,matiasinsaurralde/electron,lzpfmh/electron,DivyaKMenon/electron,soulteary/electron,kazupon/electron,RobertJGabriel/electron,sky7sea/electron,chrisswk/electron,IonicaBizauKitchen/electron,Neron-X5/electron,nekuz0r/electron,jonatasfreitasv/electron,michaelchiche/electron,gstack/infinium-shell,etiktin/electron,xfstudio/electron,mubassirhayat/electron,Andrey-Pavlov/electron,chrisswk/electron,nagyistoce/electron-atom-shell,fomojola/electron,smczk/electron,stevemao/electron,kenmozi/electron,carsonmcdonald/electron,eric-seekas/electron,neutrous/electron,thomsonreuters/electron,evgenyzinoviev/electron,simonfork/electron,leethomas/electron,vipulroxx/electron,takashi/electron,leethomas/electron,SufianHassan/electron,dahal/electron,JesselJohn/electron,joneit/electron,sky7sea/electron,jaanus/electron,darwin/electron,simonfork/electron,bpasero/electron,xfstudio/electron,greyhwndz/electron,twolfson/electron,leftstick/electron,aecca/electron,arturts/electron,jiaz/electron,chriskdon/electron,mhkeller/electron,roadev/electron,egoist/electron,sircharleswatson/electron,kazupon/electron,noikiy/electron,bbondy/electron,brenca/electron,joneit/electron,fomojola/electron,thingsinjars/electron,dongjoon-hyun/electron,minggo/electron,tomashanacek/electron,Neron-X5/electron,aecca/electron,dongjoon-hyun/electron,pirafrank/electron,zhakui/electron,greyhwndz/electron,yan-foto/electron,Rokt33r/electron,xiruibing/electron,xiruibing/electron,roadev/electron,soulteary/electron,mhkeller/electron,John-Lin/electron,leolujuyi/electron,gstack/infinium-shell,smczk/electron,destan/electron,setzer777/electron,Zagorakiss/electron,bbondy/electron,gerhardberger/electron,preco21/electron,chriskdon/electron,jcblw/electron,bright-sparks/electron,eriser/electron,Rokt33r/electron,gabriel/electron,leolujuyi/electron,fffej/electron,renaesop/electron,tonyganch/electron,meowlab/electron,aichingm/electron,sky7sea/electron,robinvandernoord/electron,sshiting/electron,vipulroxx/electron,leethomas/electron,benweissmann/electron,darwin/electron,thingsinjars/electron,preco21/electron,abhishekgahlot/electron,joaomoreno/atom-shell,mubassirhayat/electron,kokdemo/electron,farmisen/electron,bwiggs/electron,cos2004/electron,joaomoreno/atom-shell,shaundunne/electron,kcrt/electron,hokein/atom-shell,SufianHassan/electron,felixrieseberg/electron,Faiz7412/electron,eriser/electron,aichingm/electron,mattdesl/electron,RIAEvangelist/electron,gbn972/electron,dkfiresky/electron,takashi/electron,noikiy/electron,IonicaBizauKitchen/electron,jlord/electron,shaundunne/electron,JesselJohn/electron,pirafrank/electron,BionicClick/electron,anko/electron,pombredanne/electron,abhishekgahlot/electron,faizalpribadi/electron,simongregory/electron,rhencke/electron,arturts/electron,hokein/atom-shell,saronwei/electron,cqqccqc/electron,brave/muon,kenmozi/electron,etiktin/electron,pombredanne/electron,jlhbaseball15/electron,BionicClick/electron
|
#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '1df8e7cdac8aa74c91c19ae0691ce512d560ab3e'
Update libchromiumcontent: Suppress CFAllocator warning.
|
#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa4874a6bcc51fdd87ca7ae0928514ce83645988'
|
<commit_before>#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '1df8e7cdac8aa74c91c19ae0691ce512d560ab3e'
<commit_msg>Update libchromiumcontent: Suppress CFAllocator warning.<commit_after>
|
#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa4874a6bcc51fdd87ca7ae0928514ce83645988'
|
#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '1df8e7cdac8aa74c91c19ae0691ce512d560ab3e'
Update libchromiumcontent: Suppress CFAllocator warning.#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa4874a6bcc51fdd87ca7ae0928514ce83645988'
|
<commit_before>#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = '1df8e7cdac8aa74c91c19ae0691ce512d560ab3e'
<commit_msg>Update libchromiumcontent: Suppress CFAllocator warning.<commit_after>#!/usr/bin/env python
NODE_VERSION = 'v0.11.10'
BASE_URL = 'https://gh-contractor-zcbenz.s3.amazonaws.com/libchromiumcontent'
LIBCHROMIUMCONTENT_COMMIT = 'aa4874a6bcc51fdd87ca7ae0928514ce83645988'
|
c4f1e95860febcb738d9b9c0cca5ad3c6365024b
|
control/calibrate_esc.py
|
control/calibrate_esc.py
|
"""Programs the ESC."""
from control import driver as driver_module
from control.driver import Driver
from control.test.dummy_logger import DummyLogger
from control.test.dummy_telemetry import DummyTelemetry
def main():
"""Main function."""
# First, shut the damn car up
throttle_percentage = 0.0
# And reset the steering
steering_percentage = 0.0
logger = DummyLogger()
telemetry = DummyTelemetry(logger, (40.0182663, -105.2761267))
driver = Driver(telemetry, logger)
# driver limits the reverse throttle to 25% to prevent motor damage
driver._get_throttle = lambda percentage: \
int(
driver_module.THROTTLE_NEUTRAL_US
+ driver_module.THROTTLE_DIFF
* percentage
) // 10 * 10
driver.drive(0.0, 0.0)
input('''
Disconnect the motor cables. While holding down the setup button on the ESC,
switch on the power. The LED should start changing colors from red -> green ->
orange. Red is for calibrating the throttle high and low points for forward and
reverse. Press setup when the LED is red; the LED will start to single flash
red.
Press enter to continue.
''')
driver.drive(1.0, 0.25)
input('''
Press the set button. The LED should start to double flash red
Press enter to continue.
''')
driver.drive(-1.0, -0.25)
input('''
Press the set button. The LED should turn off. That's it!
Press enter to exit.
''')
driver.drive(0.0, 0.0)
if __name__ == '__main__':
main()
|
Add script to calibrate the ESC
|
Add script to calibrate the ESC
|
Python
|
mit
|
bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc,bskari/sparkfun-avc
|
Add script to calibrate the ESC
|
"""Programs the ESC."""
from control import driver as driver_module
from control.driver import Driver
from control.test.dummy_logger import DummyLogger
from control.test.dummy_telemetry import DummyTelemetry
def main():
"""Main function."""
# First, shut the damn car up
throttle_percentage = 0.0
# And reset the steering
steering_percentage = 0.0
logger = DummyLogger()
telemetry = DummyTelemetry(logger, (40.0182663, -105.2761267))
driver = Driver(telemetry, logger)
# driver limits the reverse throttle to 25% to prevent motor damage
driver._get_throttle = lambda percentage: \
int(
driver_module.THROTTLE_NEUTRAL_US
+ driver_module.THROTTLE_DIFF
* percentage
) // 10 * 10
driver.drive(0.0, 0.0)
input('''
Disconnect the motor cables. While holding down the setup button on the ESC,
switch on the power. The LED should start changing colors from red -> green ->
orange. Red is for calibrating the throttle high and low points for forward and
reverse. Press setup when the LED is red; the LED will start to single flash
red.
Press enter to continue.
''')
driver.drive(1.0, 0.25)
input('''
Press the set button. The LED should start to double flash red
Press enter to continue.
''')
driver.drive(-1.0, -0.25)
input('''
Press the set button. The LED should turn off. That's it!
Press enter to exit.
''')
driver.drive(0.0, 0.0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to calibrate the ESC<commit_after>
|
"""Programs the ESC."""
from control import driver as driver_module
from control.driver import Driver
from control.test.dummy_logger import DummyLogger
from control.test.dummy_telemetry import DummyTelemetry
def main():
"""Main function."""
# First, shut the damn car up
throttle_percentage = 0.0
# And reset the steering
steering_percentage = 0.0
logger = DummyLogger()
telemetry = DummyTelemetry(logger, (40.0182663, -105.2761267))
driver = Driver(telemetry, logger)
# driver limits the reverse throttle to 25% to prevent motor damage
driver._get_throttle = lambda percentage: \
int(
driver_module.THROTTLE_NEUTRAL_US
+ driver_module.THROTTLE_DIFF
* percentage
) // 10 * 10
driver.drive(0.0, 0.0)
input('''
Disconnect the motor cables. While holding down the setup button on the ESC,
switch on the power. The LED should start changing colors from red -> green ->
orange. Red is for calibrating the throttle high and low points for forward and
reverse. Press setup when the LED is red; the LED will start to single flash
red.
Press enter to continue.
''')
driver.drive(1.0, 0.25)
input('''
Press the set button. The LED should start to double flash red
Press enter to continue.
''')
driver.drive(-1.0, -0.25)
input('''
Press the set button. The LED should turn off. That's it!
Press enter to exit.
''')
driver.drive(0.0, 0.0)
if __name__ == '__main__':
main()
|
Add script to calibrate the ESC"""Programs the ESC."""
from control import driver as driver_module
from control.driver import Driver
from control.test.dummy_logger import DummyLogger
from control.test.dummy_telemetry import DummyTelemetry
def main():
"""Main function."""
# First, shut the damn car up
throttle_percentage = 0.0
# And reset the steering
steering_percentage = 0.0
logger = DummyLogger()
telemetry = DummyTelemetry(logger, (40.0182663, -105.2761267))
driver = Driver(telemetry, logger)
# driver limits the reverse throttle to 25% to prevent motor damage
driver._get_throttle = lambda percentage: \
int(
driver_module.THROTTLE_NEUTRAL_US
+ driver_module.THROTTLE_DIFF
* percentage
) // 10 * 10
driver.drive(0.0, 0.0)
input('''
Disconnect the motor cables. While holding down the setup button on the ESC,
switch on the power. The LED should start changing colors from red -> green ->
orange. Red is for calibrating the throttle high and low points for forward and
reverse. Press setup when the LED is red; the LED will start to single flash
red.
Press enter to continue.
''')
driver.drive(1.0, 0.25)
input('''
Press the set button. The LED should start to double flash red
Press enter to continue.
''')
driver.drive(-1.0, -0.25)
input('''
Press the set button. The LED should turn off. That's it!
Press enter to exit.
''')
driver.drive(0.0, 0.0)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to calibrate the ESC<commit_after>"""Programs the ESC."""
from control import driver as driver_module
from control.driver import Driver
from control.test.dummy_logger import DummyLogger
from control.test.dummy_telemetry import DummyTelemetry
def main():
"""Main function."""
# First, shut the damn car up
throttle_percentage = 0.0
# And reset the steering
steering_percentage = 0.0
logger = DummyLogger()
telemetry = DummyTelemetry(logger, (40.0182663, -105.2761267))
driver = Driver(telemetry, logger)
# driver limits the reverse throttle to 25% to prevent motor damage
driver._get_throttle = lambda percentage: \
int(
driver_module.THROTTLE_NEUTRAL_US
+ driver_module.THROTTLE_DIFF
* percentage
) // 10 * 10
driver.drive(0.0, 0.0)
input('''
Disconnect the motor cables. While holding down the setup button on the ESC,
switch on the power. The LED should start changing colors from red -> green ->
orange. Red is for calibrating the throttle high and low points for forward and
reverse. Press setup when the LED is red; the LED will start to single flash
red.
Press enter to continue.
''')
driver.drive(1.0, 0.25)
input('''
Press the set button. The LED should start to double flash red
Press enter to continue.
''')
driver.drive(-1.0, -0.25)
input('''
Press the set button. The LED should turn off. That's it!
Press enter to exit.
''')
driver.drive(0.0, 0.0)
if __name__ == '__main__':
main()
|
|
2daf2087583b57c3339a923889c9bb4132f1db34
|
benchexec/tools/coveritest.py
|
benchexec/tools/coveritest.py
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.cpachecker as cpachecker
class Tool(cpachecker.Tool):
"""Tool info for CoVeriTest."""
REQUIRED_PATHS = [
"lib/java/runtime",
"lib/*.jar",
"lib/native/x86_64-linux",
"scripts",
"cpachecker.jar",
"config",
"resources",
]
def name(self):
return "CoVeriTest"
|
Add tool info for CoVeriTest
|
Add tool info for CoVeriTest
|
Python
|
apache-2.0
|
sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,sosy-lab/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec
|
Add tool info for CoVeriTest
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.cpachecker as cpachecker
class Tool(cpachecker.Tool):
"""Tool info for CoVeriTest."""
REQUIRED_PATHS = [
"lib/java/runtime",
"lib/*.jar",
"lib/native/x86_64-linux",
"scripts",
"cpachecker.jar",
"config",
"resources",
]
def name(self):
return "CoVeriTest"
|
<commit_before><commit_msg>Add tool info for CoVeriTest<commit_after>
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.cpachecker as cpachecker
class Tool(cpachecker.Tool):
"""Tool info for CoVeriTest."""
REQUIRED_PATHS = [
"lib/java/runtime",
"lib/*.jar",
"lib/native/x86_64-linux",
"scripts",
"cpachecker.jar",
"config",
"resources",
]
def name(self):
return "CoVeriTest"
|
Add tool info for CoVeriTest# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.cpachecker as cpachecker
class Tool(cpachecker.Tool):
"""Tool info for CoVeriTest."""
REQUIRED_PATHS = [
"lib/java/runtime",
"lib/*.jar",
"lib/native/x86_64-linux",
"scripts",
"cpachecker.jar",
"config",
"resources",
]
def name(self):
return "CoVeriTest"
|
<commit_before><commit_msg>Add tool info for CoVeriTest<commit_after># This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.tools.cpachecker as cpachecker
class Tool(cpachecker.Tool):
"""Tool info for CoVeriTest."""
REQUIRED_PATHS = [
"lib/java/runtime",
"lib/*.jar",
"lib/native/x86_64-linux",
"scripts",
"cpachecker.jar",
"config",
"resources",
]
def name(self):
return "CoVeriTest"
|
|
cab9ef8d58adeceaf94d8855aa6d598d7fa01f33
|
src/tests/ggrc/converters/test_export_csv.py
|
src/tests/ggrc/converters/test_export_csv.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from os.path import abspath, dirname, join
from flask.json import dumps
from tests.ggrc import TestCase
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'example_csvs/')
class TestExportEmptyTemplate(TestCase):
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC"
}
def test_basic_policy_template(self):
data = {"policy": []}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
def test_multiple_empty_objects(self):
data = {
"policy": [],
"regulation": [],
"contract": [],
"clause": [],
"org group": [],
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
self.assertIn("Regulation", response.data)
self.assertIn("Contract", response.data)
self.assertIn("Clause", response.data)
self.assertIn("Org Group", response.data)
|
Add export csv template tests
|
Add export csv template tests
|
Python
|
apache-2.0
|
edofic/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,NejcZupec/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,hyperNURb/ggrc-core,edofic/ggrc-core,prasannav7/ggrc-core,hasanalom/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,hyperNURb/ggrc-core,plamut/ggrc-core,hasanalom/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core
|
Add export csv template tests
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from os.path import abspath, dirname, join
from flask.json import dumps
from tests.ggrc import TestCase
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'example_csvs/')
class TestExportEmptyTemplate(TestCase):
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC"
}
def test_basic_policy_template(self):
data = {"policy": []}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
def test_multiple_empty_objects(self):
data = {
"policy": [],
"regulation": [],
"contract": [],
"clause": [],
"org group": [],
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
self.assertIn("Regulation", response.data)
self.assertIn("Contract", response.data)
self.assertIn("Clause", response.data)
self.assertIn("Org Group", response.data)
|
<commit_before><commit_msg>Add export csv template tests<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from os.path import abspath, dirname, join
from flask.json import dumps
from tests.ggrc import TestCase
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'example_csvs/')
class TestExportEmptyTemplate(TestCase):
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC"
}
def test_basic_policy_template(self):
data = {"policy": []}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
def test_multiple_empty_objects(self):
data = {
"policy": [],
"regulation": [],
"contract": [],
"clause": [],
"org group": [],
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
self.assertIn("Regulation", response.data)
self.assertIn("Contract", response.data)
self.assertIn("Clause", response.data)
self.assertIn("Org Group", response.data)
|
Add export csv template tests# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from os.path import abspath, dirname, join
from flask.json import dumps
from tests.ggrc import TestCase
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'example_csvs/')
class TestExportEmptyTemplate(TestCase):
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC"
}
def test_basic_policy_template(self):
data = {"policy": []}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
def test_multiple_empty_objects(self):
data = {
"policy": [],
"regulation": [],
"contract": [],
"clause": [],
"org group": [],
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
self.assertIn("Regulation", response.data)
self.assertIn("Contract", response.data)
self.assertIn("Clause", response.data)
self.assertIn("Org Group", response.data)
|
<commit_before><commit_msg>Add export csv template tests<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from os.path import abspath, dirname, join
from flask.json import dumps
from tests.ggrc import TestCase
THIS_ABS_PATH = abspath(dirname(__file__))
CSV_DIR = join(THIS_ABS_PATH, 'example_csvs/')
class TestExportEmptyTemplate(TestCase):
def setUp(self):
self.client.get("/login")
self.headers = {
'Content-Type': 'application/json',
"X-Requested-By": "gGRC"
}
def test_basic_policy_template(self):
data = {"policy": []}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
def test_multiple_empty_objects(self):
data = {
"policy": [],
"regulation": [],
"contract": [],
"clause": [],
"org group": [],
}
response = self.client.post("/_service/export_csv",
data=dumps(data), headers=self.headers)
self.assertEqual(response.status_code, 200)
self.assertIn("Title*", response.data)
self.assertIn("Policy", response.data)
self.assertIn("Regulation", response.data)
self.assertIn("Contract", response.data)
self.assertIn("Clause", response.data)
self.assertIn("Org Group", response.data)
|
|
8da9a8ffe4f5fc8773c96cc6aef96bf4f0b7c01c
|
examples/find-bt.py
|
examples/find-bt.py
|
#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./find-bt.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '1.0')
from gi.repository import Ldm, GObject
class BluezPlugin(Ldm.Plugin):
""" Quick and dirty plugin to force bluetooth detection.
"""
# Not really needed but good practice
__gtype_name__ = "BluezPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
if not device.has_type(Ldm.DeviceType.BLUETOOTH):
return None
# Construct new LdmProvider for this plugin + device,
        # setting the package/bundle name (here "bluez-bundle")
return Ldm.Provider.new(self, device, "bluez-bundle")
def main():
manager = Ldm.Manager()
manager.add_plugin(BluezPlugin())
# An alternative is just to see if the len(devices) is not empty.
devices = manager.get_devices(Ldm.DeviceType.BLUETOOTH)
providerset = [manager.get_providers(x) for x in devices if x.has_attribute(Ldm.DeviceAttribute.HOST)]
for providers in providerset:
device = providers[0].get_device()
for provider in providers:
print("Provider for {} ({} {}): {}".format(
device.get_path(),
device.get_vendor(),
device.get_name(),
provider.get_package()))
if __name__ == "__main__":
main()
|
Add demo on finding bluetooth host controllers
|
examples: Add demo on finding bluetooth host controllers
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com>
|
Python
|
lgpl-2.1
|
solus-project/linux-driver-management,solus-project/linux-driver-management
|
examples: Add demo on finding bluetooth host controllers
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com>
|
#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./find-bt.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '1.0')
from gi.repository import Ldm, GObject
class BluezPlugin(Ldm.Plugin):
""" Quick and dirty plugin to force bluetooth detection.
"""
# Not really needed but good practice
__gtype_name__ = "BluezPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
if not device.has_type(Ldm.DeviceType.BLUETOOTH):
return None
# Construct new LdmProvider for this plugin + device,
        # setting the package/bundle name (here "bluez-bundle")
return Ldm.Provider.new(self, device, "bluez-bundle")
def main():
manager = Ldm.Manager()
manager.add_plugin(BluezPlugin())
# An alternative is just to see if the len(devices) is not empty.
devices = manager.get_devices(Ldm.DeviceType.BLUETOOTH)
providerset = [manager.get_providers(x) for x in devices if x.has_attribute(Ldm.DeviceAttribute.HOST)]
for providers in providerset:
device = providers[0].get_device()
for provider in providers:
print("Provider for {} ({} {}): {}".format(
device.get_path(),
device.get_vendor(),
device.get_name(),
provider.get_package()))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>examples: Add demo on finding bluetooth host controllers
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com><commit_after>
|
#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./find-bt.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '1.0')
from gi.repository import Ldm, GObject
class BluezPlugin(Ldm.Plugin):
""" Quick and dirty plugin to force bluetooth detection.
"""
# Not really needed but good practice
__gtype_name__ = "BluezPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
if not device.has_type(Ldm.DeviceType.BLUETOOTH):
return None
# Construct new LdmProvider for this plugin + device,
        # setting the package/bundle name (here "bluez-bundle")
return Ldm.Provider.new(self, device, "bluez-bundle")
def main():
manager = Ldm.Manager()
manager.add_plugin(BluezPlugin())
# An alternative is just to see if the len(devices) is not empty.
devices = manager.get_devices(Ldm.DeviceType.BLUETOOTH)
providerset = [manager.get_providers(x) for x in devices if x.has_attribute(Ldm.DeviceAttribute.HOST)]
for providers in providerset:
device = providers[0].get_device()
for provider in providers:
print("Provider for {} ({} {}): {}".format(
device.get_path(),
device.get_vendor(),
device.get_name(),
provider.get_package()))
if __name__ == "__main__":
main()
|
examples: Add demo on finding bluetooth host controllers
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com>#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./find-bt.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '1.0')
from gi.repository import Ldm, GObject
class BluezPlugin(Ldm.Plugin):
""" Quick and dirty plugin to force bluetooth detection.
"""
# Not really needed but good practice
__gtype_name__ = "BluezPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
if not device.has_type(Ldm.DeviceType.BLUETOOTH):
return None
# Construct new LdmProvider for this plugin + device,
        # setting the package/bundle name (here "bluez-bundle")
return Ldm.Provider.new(self, device, "bluez-bundle")
def main():
manager = Ldm.Manager()
manager.add_plugin(BluezPlugin())
# An alternative is just to see if the len(devices) is not empty.
devices = manager.get_devices(Ldm.DeviceType.BLUETOOTH)
providerset = [manager.get_providers(x) for x in devices if x.has_attribute(Ldm.DeviceAttribute.HOST)]
for providers in providerset:
device = providers[0].get_device()
for provider in providers:
print("Provider for {} ({} {}): {}".format(
device.get_path(),
device.get_vendor(),
device.get_name(),
provider.get_package()))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>examples: Add demo on finding bluetooth host controllers
Signed-off-by: Ikey Doherty <d8d992cf0016e35c2a8339d5e7d44bebd12a2d77@solus-project.com><commit_after>#!/usr/bin/env python2
#
# This file is Public Domain and provided only for documentation purposes.
#
# Run : python2 ./find-bt.py
#
# Note: This will happily run with Python3 too, I just picked a common baseline
#
import gi
gi.require_version('Ldm', '1.0')
from gi.repository import Ldm, GObject
class BluezPlugin(Ldm.Plugin):
""" Quick and dirty plugin to force bluetooth detection.
"""
# Not really needed but good practice
__gtype_name__ = "BluezPlugin"
def __init__(self):
Ldm.Plugin.__init__(self)
def do_get_provider(self, device):
if not device.has_type(Ldm.DeviceType.BLUETOOTH):
return None
# Construct new LdmProvider for this plugin + device,
        # setting the package/bundle name (here "bluez-bundle")
return Ldm.Provider.new(self, device, "bluez-bundle")
def main():
manager = Ldm.Manager()
manager.add_plugin(BluezPlugin())
# An alternative is just to see if the len(devices) is not empty.
devices = manager.get_devices(Ldm.DeviceType.BLUETOOTH)
providerset = [manager.get_providers(x) for x in devices if x.has_attribute(Ldm.DeviceAttribute.HOST)]
for providers in providerset:
device = providers[0].get_device()
for provider in providers:
print("Provider for {} ({} {}): {}".format(
device.get_path(),
device.get_vendor(),
device.get_name(),
provider.get_package()))
if __name__ == "__main__":
main()
|
|
b3ad1a33715f3db4b06e1196958b4248f9a8039d
|
django_generic_counter/south_migrations/0003_auto__chg_field_counter_count.py
|
django_generic_counter/south_migrations/0003_auto__chg_field_counter_count.py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.BigIntegerField')())
def backwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.IntegerField')())
models = {
u'django_generic_counter.counter': {
'Meta': {'object_name': 'Counter'},
'count': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}
}
complete_apps = ['django_generic_counter']
|
Add south migration for new field type.
|
Add south migration for new field type.
|
Python
|
unlicense
|
0x07Ltd/django-generic-counter,0x07Ltd/django-generic-counter
|
Add south migration for new field type.
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.BigIntegerField')())
def backwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.IntegerField')())
models = {
u'django_generic_counter.counter': {
'Meta': {'object_name': 'Counter'},
'count': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}
}
complete_apps = ['django_generic_counter']
|
<commit_before><commit_msg>Add south migration for new field type.<commit_after>
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.BigIntegerField')())
def backwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.IntegerField')())
models = {
u'django_generic_counter.counter': {
'Meta': {'object_name': 'Counter'},
'count': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}
}
complete_apps = ['django_generic_counter']
|
Add south migration for new field type.# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.BigIntegerField')())
def backwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.IntegerField')())
models = {
u'django_generic_counter.counter': {
'Meta': {'object_name': 'Counter'},
'count': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}
}
complete_apps = ['django_generic_counter']
|
<commit_before><commit_msg>Add south migration for new field type.<commit_after># -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.BigIntegerField')())
def backwards(self, orm):
# Changing field 'Counter.count'
db.alter_column(u'django_generic_counter_counter', 'count', self.gf('django.db.models.fields.IntegerField')())
models = {
u'django_generic_counter.counter': {
'Meta': {'object_name': 'Counter'},
'count': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}
}
complete_apps = ['django_generic_counter']
|
|
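For comparison, the same schema change expressed as a built-in (post-South) Django migration would look roughly like the sketch below; the dependency name is hypothetical.

# Hypothetical modern-Django equivalent of the South migration above.
from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [
        ("django_generic_counter", "0001_initial"),  # hypothetical predecessor
    ]
    operations = [
        migrations.AlterField(
            model_name="counter",
            name="count",
            field=models.BigIntegerField(default=0),
        ),
    ]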
3189b82167955e713fcc6e92d34b220feeb122a7
|
tests/test_verbosity.py
|
tests/test_verbosity.py
|
import unittest
import numpy as np
import cpnest.model
class GaussianModel(cpnest.model.Model):
"""
A simple gaussian model with parameters mean and sigma
"""
names=['mean','sigma']
bounds=[[-5,5],[0.05,1]]
data = np.array([x for x in np.random.normal(0.5,0.5,size=10)])
analyticZ = np.log(0.05)
@classmethod
def log_likelihood(cls,x):
return -0.5*x['mean']**2/x['sigma']**2 - np.log(x['sigma']) - 0.5*np.log(2.0*np.pi)
def log_prior(self,p):
if not self.in_bounds(p): return -np.inf
return -np.log(p['sigma']) - np.log(10) - np.log(0.95)
def force(self,x):
return np.zeros(1, dtype = {'names':x.names, 'formats':['f8' for _ in x.names]})
class GaussianTestCase(unittest.TestCase):
"""
Test the gaussian model with different verbose levels
"""
def setUp(self):
self.model = GaussianModel()
self.runs=[]
for v in range(4):
self.runs.append(cpnest.CPNest(self.model,verbose=v,nthreads=8,nlive=100,maxmcmc=100))
def test_run(self):
for r in self.runs:
r.run()
print('Analytic evidence: {0}'.format(self.model.analyticZ))
def test_all():
unittest.main(verbosity=2)
if __name__=='__main__':
unittest.main(verbosity=2)
|
Add unit tests for different verbosity levels
|
Add unit tests for different verbosity levels
|
Python
|
mit
|
johnveitch/cpnest
|
Add unit tests for different verbosity levels
|
import unittest
import numpy as np
import cpnest.model
class GaussianModel(cpnest.model.Model):
"""
A simple gaussian model with parameters mean and sigma
"""
names=['mean','sigma']
bounds=[[-5,5],[0.05,1]]
data = np.array([x for x in np.random.normal(0.5,0.5,size=10)])
analyticZ = np.log(0.05)
@classmethod
def log_likelihood(cls,x):
return -0.5*x['mean']**2/x['sigma']**2 - np.log(x['sigma']) - 0.5*np.log(2.0*np.pi)
def log_prior(self,p):
if not self.in_bounds(p): return -np.inf
return -np.log(p['sigma']) - np.log(10) - np.log(0.95)
def force(self,x):
return np.zeros(1, dtype = {'names':x.names, 'formats':['f8' for _ in x.names]})
class GaussianTestCase(unittest.TestCase):
"""
Test the gaussian model with different verbose levels
"""
def setUp(self):
self.model = GaussianModel()
self.runs=[]
for v in range(4):
self.runs.append(cpnest.CPNest(self.model,verbose=v,nthreads=8,nlive=100,maxmcmc=100))
def test_run(self):
for r in self.runs:
r.run()
print('Analytic evidence: {0}'.format(self.model.analyticZ))
def test_all():
unittest.main(verbosity=2)
if __name__=='__main__':
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for different verbosity levels<commit_after>
|
import unittest
import numpy as np
import cpnest.model
class GaussianModel(cpnest.model.Model):
"""
A simple gaussian model with parameters mean and sigma
"""
names=['mean','sigma']
bounds=[[-5,5],[0.05,1]]
data = np.array([x for x in np.random.normal(0.5,0.5,size=10)])
analyticZ = np.log(0.05)
@classmethod
def log_likelihood(cls,x):
return -0.5*x['mean']**2/x['sigma']**2 - np.log(x['sigma']) - 0.5*np.log(2.0*np.pi)
def log_prior(self,p):
if not self.in_bounds(p): return -np.inf
return -np.log(p['sigma']) - np.log(10) - np.log(0.95)
def force(self,x):
return np.zeros(1, dtype = {'names':x.names, 'formats':['f8' for _ in x.names]})
class GaussianTestCase(unittest.TestCase):
"""
Test the gaussian model with different verbose levels
"""
def setUp(self):
self.model = GaussianModel()
self.runs=[]
for v in range(4):
self.runs.append(cpnest.CPNest(self.model,verbose=v,nthreads=8,nlive=100,maxmcmc=100))
def test_run(self):
for r in self.runs:
r.run()
print('Analytic evidence: {0}'.format(self.model.analyticZ))
def test_all():
unittest.main(verbosity=2)
if __name__=='__main__':
unittest.main(verbosity=2)
|
Add unit tests for different verbosity levelsimport unittest
import numpy as np
import cpnest.model
class GaussianModel(cpnest.model.Model):
"""
A simple gaussian model with parameters mean and sigma
"""
names=['mean','sigma']
bounds=[[-5,5],[0.05,1]]
data = np.array([x for x in np.random.normal(0.5,0.5,size=10)])
analyticZ = np.log(0.05)
@classmethod
def log_likelihood(cls,x):
return -0.5*x['mean']**2/x['sigma']**2 - np.log(x['sigma']) - 0.5*np.log(2.0*np.pi)
def log_prior(self,p):
if not self.in_bounds(p): return -np.inf
return -np.log(p['sigma']) - np.log(10) - np.log(0.95)
def force(self,x):
return np.zeros(1, dtype = {'names':x.names, 'formats':['f8' for _ in x.names]})
class GaussianTestCase(unittest.TestCase):
"""
Test the gaussian model with different verbose levels
"""
def setUp(self):
self.model = GaussianModel()
self.runs=[]
for v in range(4):
self.runs.append(cpnest.CPNest(self.model,verbose=v,nthreads=8,nlive=100,maxmcmc=100))
def test_run(self):
for r in self.runs:
r.run()
print('Analytic evidence: {0}'.format(self.model.analyticZ))
def test_all():
unittest.main(verbosity=2)
if __name__=='__main__':
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for different verbosity levels<commit_after>import unittest
import numpy as np
import cpnest.model
class GaussianModel(cpnest.model.Model):
"""
A simple gaussian model with parameters mean and sigma
"""
names=['mean','sigma']
bounds=[[-5,5],[0.05,1]]
data = np.array([x for x in np.random.normal(0.5,0.5,size=10)])
analyticZ = np.log(0.05)
@classmethod
def log_likelihood(cls,x):
return -0.5*x['mean']**2/x['sigma']**2 - np.log(x['sigma']) - 0.5*np.log(2.0*np.pi)
def log_prior(self,p):
if not self.in_bounds(p): return -np.inf
return -np.log(p['sigma']) - np.log(10) - np.log(0.95)
def force(self,x):
return np.zeros(1, dtype = {'names':x.names, 'formats':['f8' for _ in x.names]})
class GaussianTestCase(unittest.TestCase):
"""
Test the gaussian model with different verbose levels
"""
def setUp(self):
self.model = GaussianModel()
self.runs=[]
for v in range(4):
self.runs.append(cpnest.CPNest(self.model,verbose=v,nthreads=8,nlive=100,maxmcmc=100))
def test_run(self):
for r in self.runs:
r.run()
print('Analytic evidence: {0}'.format(self.model.analyticZ))
def test_all():
unittest.main(verbosity=2)
if __name__=='__main__':
unittest.main(verbosity=2)
|
|
c88243305254cab3a504a6cfb6990173da0e478b
|
nucleus/rna/migrations/0004_auto_20211005_1522.py
|
nucleus/rna/migrations/0004_auto_20211005_1522.py
|
# Generated by Django 2.2.13 on 2021-10-05 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rna', '0003_auto_20210930_1841'),
]
operations = [
migrations.AlterField(
model_name='release',
name='product',
field=models.CharField(choices=[('Firefox', 'Firefox'), ('Firefox for Android', 'Firefox for Android'), ('Firefox for iOS', 'Firefox for iOS'), ('Firefox Extended Support Release', 'Firefox Extended Support Release'), ('Thunderbird', 'Thunderbird')], max_length=255),
),
]
|
Add migration for new release products list
|
Add migration for new release products list
|
Python
|
mpl-2.0
|
mozilla/nucleus,mozilla/nucleus,mozilla/nucleus,mozilla/nucleus
|
Add migration for new release products list
|
# Generated by Django 2.2.13 on 2021-10-05 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rna', '0003_auto_20210930_1841'),
]
operations = [
migrations.AlterField(
model_name='release',
name='product',
field=models.CharField(choices=[('Firefox', 'Firefox'), ('Firefox for Android', 'Firefox for Android'), ('Firefox for iOS', 'Firefox for iOS'), ('Firefox Extended Support Release', 'Firefox Extended Support Release'), ('Thunderbird', 'Thunderbird')], max_length=255),
),
]
|
<commit_before><commit_msg>Add migration for new release products list<commit_after>
|
# Generated by Django 2.2.13 on 2021-10-05 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rna', '0003_auto_20210930_1841'),
]
operations = [
migrations.AlterField(
model_name='release',
name='product',
field=models.CharField(choices=[('Firefox', 'Firefox'), ('Firefox for Android', 'Firefox for Android'), ('Firefox for iOS', 'Firefox for iOS'), ('Firefox Extended Support Release', 'Firefox Extended Support Release'), ('Thunderbird', 'Thunderbird')], max_length=255),
),
]
|
Add migration for new release products list# Generated by Django 2.2.13 on 2021-10-05 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rna', '0003_auto_20210930_1841'),
]
operations = [
migrations.AlterField(
model_name='release',
name='product',
field=models.CharField(choices=[('Firefox', 'Firefox'), ('Firefox for Android', 'Firefox for Android'), ('Firefox for iOS', 'Firefox for iOS'), ('Firefox Extended Support Release', 'Firefox Extended Support Release'), ('Thunderbird', 'Thunderbird')], max_length=255),
),
]
|
<commit_before><commit_msg>Add migration for new release products list<commit_after># Generated by Django 2.2.13 on 2021-10-05 15:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('rna', '0003_auto_20210930_1841'),
]
operations = [
migrations.AlterField(
model_name='release',
name='product',
field=models.CharField(choices=[('Firefox', 'Firefox'), ('Firefox for Android', 'Firefox for Android'), ('Firefox for iOS', 'Firefox for iOS'), ('Firefox Extended Support Release', 'Firefox Extended Support Release'), ('Thunderbird', 'Thunderbird')], max_length=255),
),
]
|
|
8f7769a0122fb0d9479209ed2239dd0687f301a1
|
plugin/core/test_url.py
|
plugin/core/test_url.py
|
from .url import (filename_to_uri, uri_to_filename)
import sys
import unittest
class WindowsTests(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_path_to_uri(self):
self.assertEqual("file:///C:/dir%20ectory/file.txt", filename_to_uri("c:\\dir ectory\\file.txt"))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_uri_to_path(self):
self.assertEqual("C:\\dir ectory\\file.txt", uri_to_filename("file:///c:/dir ectory/file.txt"))
|
Add test for url handling in Windows
|
Add test for url handling in Windows
|
Python
|
mit
|
tomv564/LSP
|
Add test for url handling in Windows
|
from .url import (filename_to_uri, uri_to_filename)
import sys
import unittest
class WindowsTests(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_path_to_uri(self):
self.assertEqual("file:///C:/dir%20ectory/file.txt", filename_to_uri("c:\\dir ectory\\file.txt"))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_uri_to_path(self):
self.assertEqual("C:\\dir ectory\\file.txt", uri_to_filename("file:///c:/dir ectory/file.txt"))
|
<commit_before><commit_msg>Add test for url handling in Windows<commit_after>
|
from .url import (filename_to_uri, uri_to_filename)
import sys
import unittest
class WindowsTests(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_path_to_uri(self):
self.assertEqual("file:///C:/dir%20ectory/file.txt", filename_to_uri("c:\\dir ectory\\file.txt"))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_uri_to_path(self):
self.assertEqual("C:\\dir ectory\\file.txt", uri_to_filename("file:///c:/dir ectory/file.txt"))
|
Add test for url handling in Windowsfrom .url import (filename_to_uri, uri_to_filename)
import sys
import unittest
class WindowsTests(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_path_to_uri(self):
self.assertEqual("file:///C:/dir%20ectory/file.txt", filename_to_uri("c:\\dir ectory\\file.txt"))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_uri_to_path(self):
self.assertEqual("C:\\dir ectory\\file.txt", uri_to_filename("file:///c:/dir ectory/file.txt"))
|
<commit_before><commit_msg>Add test for url handling in Windows<commit_after>from .url import (filename_to_uri, uri_to_filename)
import sys
import unittest
class WindowsTests(unittest.TestCase):
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_path_to_uri(self):
self.assertEqual("file:///C:/dir%20ectory/file.txt", filename_to_uri("c:\\dir ectory\\file.txt"))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
def test_converts_uri_to_path(self):
self.assertEqual("C:\\dir ectory\\file.txt", uri_to_filename("file:///c:/dir ectory/file.txt"))
|
|
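The conversions exercised by these tests can be sketched with the standard library alone. This is not the plugin's actual plugin/core/url.py implementation, just an approximation that satisfies the two assertions above (including the drive-letter normalization they imply).

# Rough standard-library sketch of the conversions the tests above exercise.
import urllib.parse

def filename_to_uri_sketch(path):
    p = path.replace("\\", "/")
    if len(p) > 1 and p[1] == ":":
        p = p[0].upper() + p[1:]          # normalize the drive letter
    return "file:///" + urllib.parse.quote(p, safe="/:")

def uri_to_filename_sketch(uri):
    p = urllib.parse.unquote(uri[len("file:///"):])
    if len(p) > 1 and p[1] == ":":
        p = p[0].upper() + p[1:]
    return p.replace("/", "\\")

assert filename_to_uri_sketch("c:\\dir ectory\\file.txt") == "file:///C:/dir%20ectory/file.txt"
assert uri_to_filename_sketch("file:///c:/dir ectory/file.txt") == "C:\\dir ectory\\file.txt"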
b4048b9a9ba9f7a9d1ac03e4a0d57c5c6e1b4471
|
editorconfig/versiontools.py
|
editorconfig/versiontools.py
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += ".%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
|
Fix version string format for development versions
|
Fix version string format for development versions
|
Python
|
bsd-2-clause
|
benjifisher/editorconfig-vim,benjifisher/editorconfig-vim,VictorBjelkholm/editorconfig-vim,pocke/editorconfig-vim,johnfraney/editorconfig-vim,pocke/editorconfig-vim,johnfraney/editorconfig-vim,pocke/editorconfig-vim,benjifisher/editorconfig-vim,VictorBjelkholm/editorconfig-vim,VictorBjelkholm/editorconfig-vim,johnfraney/editorconfig-vim
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += ".%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
Fix version string format for development versions
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
|
<commit_before>"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += ".%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
<commit_msg>Fix version string format for development versions<commit_after>
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
|
"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += ".%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
Fix version string format for development versions"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
|
<commit_before>"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += ".%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
<commit_msg>Fix version string format for development versions<commit_after>"""EditorConfig version tools
Provides ``join_version`` and ``split_version`` classes for converting
__version__ strings to VERSION tuples and vice versa.
"""
import re
__all__ = ['join_version', 'split_version']
_version_re = re.compile(r'^(\d+)\.(\d+)\.(\d+)(\..*)?$', re.VERBOSE)
def join_version(version_tuple):
"""Return a string representation of version from given VERSION tuple"""
version = "%s.%s.%s" % version_tuple[:3]
if version_tuple[3] != "final":
version += "-%s" % version_tuple[3]
return version
def split_version(version):
"""Return VERSION tuple for given string representation of version"""
match = _version_re.search(version)
if not match:
return None
else:
split_version = list(match.groups())
if split_version[3] is None:
split_version[3] = "final"
split_version = map(int, split_version[:3]) + split_version[3:]
return tuple(split_version)
|
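A quick behavior sketch of the fixed join_version against the unchanged split_version, assuming the Python 2 semantics the module targets (the map()+list concatenation in split_version would raise a TypeError on Python 3). Note the asymmetry the fix leaves behind: join_version now emits a "-" suffix, while split_version's regex still only accepts a "."-prefixed one.

# Behavior sketch (Python 2); version tuples are illustrative inputs.
from editorconfig.versiontools import join_version, split_version

assert join_version((0, 9, 4, "final")) == "0.9.4"        # "final" adds no suffix
assert join_version((0, 9, 4, "dev")) == "0.9.4-dev"      # dev builds now use "-"
assert split_version("0.9.4") == (0, 9, 4, "final")
assert split_version("0.9.4.dev") == (0, 9, 4, ".dev")    # "."-suffixed form still parses
assert split_version("0.9.4-dev") is None                 # the new "-" form does not round-trip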