| column | type |
|---|---|
| commit | string (lengths 40-40) |
| old_file | string (lengths 4-118) |
| new_file | string (lengths 4-118) |
| old_contents | string (lengths 0-2.94k) |
| new_contents | string (lengths 1-4.43k) |
| subject | string (lengths 15-444) |
| message | string (lengths 16-3.45k) |
| lang | string (1 class) |
| license | string (13 classes) |
| repos | string (lengths 5-43.2k) |
| prompt | string (lengths 17-4.58k) |
| response | string (lengths 1-4.43k) |
| prompt_tagged | string (lengths 58-4.62k) |
| response_tagged | string (lengths 1-4.43k) |
| text | string (lengths 132-7.29k) |
| text_tagged | string (lengths 173-7.33k) |
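Each data row below lists its cells in the column order above, one cell per block, with `|` lines as separators. As a minimal, hedged sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library (the dataset identifier here is a placeholder, not the actual repository name):

```python
# Minimal sketch: load a commit dataset with the schema above and inspect one row.
# "user/python-commit-dataset" is a placeholder identifier, not the real dataset name.
from datasets import load_dataset

ds = load_dataset("user/python-commit-dataset", split="train")

row = ds[0]
print(row["commit"])    # 40-character commit hash
print(row["new_file"])  # path of the changed file
print(row["subject"])   # first line of the commit message
print(row["text_tagged"][:200])  # tagged training text, truncated for display
```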
ee62d6a972e5af72fc9a5e2e36d1a7822a1703af
|
samples/remote_veh_segv.py
|
samples/remote_veh_segv.py
|
import windows
import windows.test
from windows.generated_def.winstructs import *
#c = windows.test.pop_calc_64()
c = windows.test.pop_calc_64(dwCreationFlags=CREATE_SUSPENDED)
python_code = """
import windows
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
windows.utils.create_console()
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
#return windef.EXCEPTION_CONTINUE_EXECUTION
return windef.EXCEPTION_CONTINUE_SEARCH
windows.winproxy.AddVectoredExceptionHandler(0, handler)
print("YOLO<3")
print(ctypes.c_uint.from_address(0x42424242).value)
"""
x = c.execute_python(python_code)
|
Add sample on veh handle setup in remote process
|
Add sample on veh handle setup in remote process
|
Python
|
bsd-3-clause
|
hakril/PythonForWindows
|
Add sample on veh handle setup in remote process
|
import windows
import windows.test
from windows.generated_def.winstructs import *
#c = windows.test.pop_calc_64()
c = windows.test.pop_calc_64(dwCreationFlags=CREATE_SUSPENDED)
python_code = """
import windows
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
windows.utils.create_console()
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
#return windef.EXCEPTION_CONTINUE_EXECUTION
return windef.EXCEPTION_CONTINUE_SEARCH
windows.winproxy.AddVectoredExceptionHandler(0, handler)
print("YOLO<3")
print(ctypes.c_uint.from_address(0x42424242).value)
"""
x = c.execute_python(python_code)
|
<commit_before><commit_msg>Add sample on veh handle setup in remote process<commit_after>
|
import windows
import windows.test
from windows.generated_def.winstructs import *
#c = windows.test.pop_calc_64()
c = windows.test.pop_calc_64(dwCreationFlags=CREATE_SUSPENDED)
python_code = """
import windows
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
windows.utils.create_console()
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
#return windef.EXCEPTION_CONTINUE_EXECUTION
return windef.EXCEPTION_CONTINUE_SEARCH
windows.winproxy.AddVectoredExceptionHandler(0, handler)
print("YOLO<3")
print(ctypes.c_uint.from_address(0x42424242).value)
"""
x = c.execute_python(python_code)
|
Add sample on veh handle setup in remote processimport windows
import windows.test
from windows.generated_def.winstructs import *
#c = windows.test.pop_calc_64()
c = windows.test.pop_calc_64(dwCreationFlags=CREATE_SUSPENDED)
python_code = """
import windows
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
windows.utils.create_console()
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
#return windef.EXCEPTION_CONTINUE_EXECUTION
return windef.EXCEPTION_CONTINUE_SEARCH
windows.winproxy.AddVectoredExceptionHandler(0, handler)
print("YOLO<3")
print(ctypes.c_uint.from_address(0x42424242).value)
"""
x = c.execute_python(python_code)
|
<commit_before><commit_msg>Add sample on veh handle setup in remote process<commit_after>import windows
import windows.test
from windows.generated_def.winstructs import *
#c = windows.test.pop_calc_64()
c = windows.test.pop_calc_64(dwCreationFlags=CREATE_SUSPENDED)
python_code = """
import windows
import ctypes
import windows
from windows.vectored_exception import VectoredException
import windows.generated_def.windef as windef
from windows.generated_def.winstructs import *
windows.utils.create_console()
@VectoredException
def handler(exc):
print("POUET")
if exc[0].ExceptionRecord[0].ExceptionCode == EXCEPTION_ACCESS_VIOLATION:
target_addr = ctypes.cast(exc[0].ExceptionRecord[0].ExceptionInformation[1], ctypes.c_void_p).value
print("Instr at {0} accessed to addr {1}".format(hex(exc[0].ExceptionRecord[0].ExceptionAddress), hex(target_addr)))
#return windef.EXCEPTION_CONTINUE_EXECUTION
return windef.EXCEPTION_CONTINUE_SEARCH
windows.winproxy.AddVectoredExceptionHandler(0, handler)
print("YOLO<3")
print(ctypes.c_uint.from_address(0x42424242).value)
"""
x = c.execute_python(python_code)
|
|
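Judging from the rows shown here, the derived columns appear to be plain concatenations of the base fields: `prompt` is `old_contents` followed by `message`, `response` is `new_contents`, and the tagged variants wrap the same pieces in `<commit_before>`, `<commit_msg>` and `<commit_after>` markers. A hedged sketch of that template, inferred from the visible rows rather than from the dataset's actual generation script:

```python
# Hedged reconstruction of the derived columns, inferred from the sample rows
# above; the real generation script for this dataset is not shown here.
def build_columns(old_contents: str, message: str, new_contents: str) -> dict:
    prompt = old_contents + message
    response = new_contents
    prompt_tagged = (
        "<commit_before>" + old_contents
        + "<commit_msg>" + message
        + "<commit_after>"
    )
    return {
        "prompt": prompt,
        "response": response,
        "prompt_tagged": prompt_tagged,
        "response_tagged": new_contents,
        "text": prompt + response,                # message and code run together
        "text_tagged": prompt_tagged + response,  # tagged prompt + new contents
    }
```

On the first row above, where `old_contents` is empty, this reproduces the `text` and `text_tagged` cells shown, e.g. `text_tagged` beginning with `<commit_before><commit_msg>Add sample on veh handle setup in remote process<commit_after>import windows`.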
52a8a1cd093f8bdbaf0abfc85eff2d3682e24b12
|
scripts/check-questions.py
|
scripts/check-questions.py
|
#!/usr/bin/env python3
import os
import sys
import json
import collections
import unicodedata
TEXT_FIELD = "t"
OPTIONS_FIELD = "o"
KIND_FIELD = "k"
CORRECT_FIELD = "c"
MANDATORY_FIELDS = {TEXT_FIELD, OPTIONS_FIELD, CORRECT_FIELD}
def norm(s):
return unicodedata.normalize("NFD", s)
def error(message, *, n):
raise ValueError(" ".join((message, "({})".format(n))))
def check(questions):
text_occurences = collections.defaultdict(list)
for n, question in enumerate(questions, start=1):
# Contains mandatory fields.
missing = MANDATORY_FIELDS - set(question.keys())
if missing:
error("missing {}".format(", ".join(missing)), n=n)
text_occurences[norm(question[TEXT_FIELD])].append(n)
# Kind, if present, is "tr".
if KIND_FIELD in question and question[KIND_FIELD] != "tr":
error("{} != tr".format(KIND_FIELD), n=n)
# There are at least four options & they are unique.
options = tuple(map(norm, question[OPTIONS_FIELD]))
options_count = len(options)
if len(set(options)) != options_count or options_count < 4:
error(">= 4 unique options are required", n=n)
# There is at least one correct index.
correct = question[CORRECT_FIELD]
if len(correct) < 1:
error(">= 1 correct index is required", n=n)
for index in correct:
try:
options[index]
except IndexError:
error("index {} is not adressable".format(index), n=n)
# Text is not repeated.
for text, ns in text_occurences.items():
if len(ns) > 1:
error(
"t {} is repeated at {}".format(
text, ", ".join(map(str, ns[1:]))), n=ns[0])
def main():
questions_path = os.path.normpath(
os.path.join(
os.path.dirname(__file__), "..", "src", "questions.json"))
with open(questions_path, "r", encoding="utf-8") as file:
questions = json.load(file)
try:
check(questions)
except ValueError as e:
print(e, file=sys.stderr)
exit(1)
if __name__ == "__main__":
main()
|
Add Python script for questions linting
|
Add Python script for questions linting
|
Python
|
bsd-3-clause
|
PavloKapyshin/rusk,PavloKapyshin/rusk,PavloKapyshin/rusk
|
Add Python script for questions linting
|
#!/usr/bin/env python3
import os
import sys
import json
import collections
import unicodedata
TEXT_FIELD = "t"
OPTIONS_FIELD = "o"
KIND_FIELD = "k"
CORRECT_FIELD = "c"
MANDATORY_FIELDS = {TEXT_FIELD, OPTIONS_FIELD, CORRECT_FIELD}
def norm(s):
return unicodedata.normalize("NFD", s)
def error(message, *, n):
raise ValueError(" ".join((message, "({})".format(n))))
def check(questions):
text_occurences = collections.defaultdict(list)
for n, question in enumerate(questions, start=1):
# Contains mandatory fields.
missing = MANDATORY_FIELDS - set(question.keys())
if missing:
error("missing {}".format(", ".join(missing)), n=n)
text_occurences[norm(question[TEXT_FIELD])].append(n)
# Kind, if present, is "tr".
if KIND_FIELD in question and question[KIND_FIELD] != "tr":
error("{} != tr".format(KIND_FIELD), n=n)
# There are at least four options & they are unique.
options = tuple(map(norm, question[OPTIONS_FIELD]))
options_count = len(options)
if len(set(options)) != options_count or options_count < 4:
error(">= 4 unique options are required", n=n)
# There is at least one correct index.
correct = question[CORRECT_FIELD]
if len(correct) < 1:
error(">= 1 correct index is required", n=n)
for index in correct:
try:
options[index]
except IndexError:
error("index {} is not adressable".format(index), n=n)
# Text is not repeated.
for text, ns in text_occurences.items():
if len(ns) > 1:
error(
"t {} is repeated at {}".format(
text, ", ".join(map(str, ns[1:]))), n=ns[0])
def main():
questions_path = os.path.normpath(
os.path.join(
os.path.dirname(__file__), "..", "src", "questions.json"))
with open(questions_path, "r", encoding="utf-8") as file:
questions = json.load(file)
try:
check(questions)
except ValueError as e:
print(e, file=sys.stderr)
exit(1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Python script for questions linting<commit_after>
|
#!/usr/bin/env python3
import os
import sys
import json
import collections
import unicodedata
TEXT_FIELD = "t"
OPTIONS_FIELD = "o"
KIND_FIELD = "k"
CORRECT_FIELD = "c"
MANDATORY_FIELDS = {TEXT_FIELD, OPTIONS_FIELD, CORRECT_FIELD}
def norm(s):
return unicodedata.normalize("NFD", s)
def error(message, *, n):
raise ValueError(" ".join((message, "({})".format(n))))
def check(questions):
text_occurences = collections.defaultdict(list)
for n, question in enumerate(questions, start=1):
# Contains mandatory fields.
missing = MANDATORY_FIELDS - set(question.keys())
if missing:
error("missing {}".format(", ".join(missing)), n=n)
text_occurences[norm(question[TEXT_FIELD])].append(n)
# Kind, if present, is "tr".
if KIND_FIELD in question and question[KIND_FIELD] != "tr":
error("{} != tr".format(KIND_FIELD), n=n)
# There are at least four options & they are unique.
options = tuple(map(norm, question[OPTIONS_FIELD]))
options_count = len(options)
if len(set(options)) != options_count or options_count < 4:
error(">= 4 unique options are required", n=n)
# There is at least one correct index.
correct = question[CORRECT_FIELD]
if len(correct) < 1:
error(">= 1 correct index is required", n=n)
for index in correct:
try:
options[index]
except IndexError:
error("index {} is not adressable".format(index), n=n)
# Text is not repeated.
for text, ns in text_occurences.items():
if len(ns) > 1:
error(
"t {} is repeated at {}".format(
text, ", ".join(map(str, ns[1:]))), n=ns[0])
def main():
questions_path = os.path.normpath(
os.path.join(
os.path.dirname(__file__), "..", "src", "questions.json"))
with open(questions_path, "r", encoding="utf-8") as file:
questions = json.load(file)
try:
check(questions)
except ValueError as e:
print(e, file=sys.stderr)
exit(1)
if __name__ == "__main__":
main()
|
Add Python script for questions linting#!/usr/bin/env python3
import os
import sys
import json
import collections
import unicodedata
TEXT_FIELD = "t"
OPTIONS_FIELD = "o"
KIND_FIELD = "k"
CORRECT_FIELD = "c"
MANDATORY_FIELDS = {TEXT_FIELD, OPTIONS_FIELD, CORRECT_FIELD}
def norm(s):
return unicodedata.normalize("NFD", s)
def error(message, *, n):
raise ValueError(" ".join((message, "({})".format(n))))
def check(questions):
text_occurences = collections.defaultdict(list)
for n, question in enumerate(questions, start=1):
# Contains mandatory fields.
missing = MANDATORY_FIELDS - set(question.keys())
if missing:
error("missing {}".format(", ".join(missing)), n=n)
text_occurences[norm(question[TEXT_FIELD])].append(n)
# Kind, if present, is "tr".
if KIND_FIELD in question and question[KIND_FIELD] != "tr":
error("{} != tr".format(KIND_FIELD), n=n)
# There are at least four options & they are unique.
options = tuple(map(norm, question[OPTIONS_FIELD]))
options_count = len(options)
if len(set(options)) != options_count or options_count < 4:
error(">= 4 unique options are required", n=n)
# There is at least one correct index.
correct = question[CORRECT_FIELD]
if len(correct) < 1:
error(">= 1 correct index is required", n=n)
for index in correct:
try:
options[index]
except IndexError:
error("index {} is not adressable".format(index), n=n)
# Text is not repeated.
for text, ns in text_occurences.items():
if len(ns) > 1:
error(
"t {} is repeated at {}".format(
text, ", ".join(map(str, ns[1:]))), n=ns[0])
def main():
questions_path = os.path.normpath(
os.path.join(
os.path.dirname(__file__), "..", "src", "questions.json"))
with open(questions_path, "r", encoding="utf-8") as file:
questions = json.load(file)
try:
check(questions)
except ValueError as e:
print(e, file=sys.stderr)
exit(1)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add Python script for questions linting<commit_after>#!/usr/bin/env python3
import os
import sys
import json
import collections
import unicodedata
TEXT_FIELD = "t"
OPTIONS_FIELD = "o"
KIND_FIELD = "k"
CORRECT_FIELD = "c"
MANDATORY_FIELDS = {TEXT_FIELD, OPTIONS_FIELD, CORRECT_FIELD}
def norm(s):
return unicodedata.normalize("NFD", s)
def error(message, *, n):
raise ValueError(" ".join((message, "({})".format(n))))
def check(questions):
text_occurences = collections.defaultdict(list)
for n, question in enumerate(questions, start=1):
# Contains mandatory fields.
missing = MANDATORY_FIELDS - set(question.keys())
if missing:
error("missing {}".format(", ".join(missing)), n=n)
text_occurences[norm(question[TEXT_FIELD])].append(n)
# Kind, if present, is "tr".
if KIND_FIELD in question and question[KIND_FIELD] != "tr":
error("{} != tr".format(KIND_FIELD), n=n)
# There are at least four options & they are unique.
options = tuple(map(norm, question[OPTIONS_FIELD]))
options_count = len(options)
if len(set(options)) != options_count or options_count < 4:
error(">= 4 unique options are required", n=n)
# There is at least one correct index.
correct = question[CORRECT_FIELD]
if len(correct) < 1:
error(">= 1 correct index is required", n=n)
for index in correct:
try:
options[index]
except IndexError:
error("index {} is not adressable".format(index), n=n)
# Text is not repeated.
for text, ns in text_occurences.items():
if len(ns) > 1:
error(
"t {} is repeated at {}".format(
text, ", ".join(map(str, ns[1:]))), n=ns[0])
def main():
questions_path = os.path.normpath(
os.path.join(
os.path.dirname(__file__), "..", "src", "questions.json"))
with open(questions_path, "r", encoding="utf-8") as file:
questions = json.load(file)
try:
check(questions)
except ValueError as e:
print(e, file=sys.stderr)
exit(1)
if __name__ == "__main__":
main()
|
|
3a662b5820ea90c0cd63116a610ede25558c5562
|
sourceterm/tests/test_srcequations.py
|
sourceterm/tests/test_srcequations.py
|
'''
Created on 25 Aug 2010
@author: ith
'''
import unittest
class Test(unittest.TestCase):
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add tests directory to sourceterm package and start test module.
|
Add tests directory to sourceterm package and start test module.
|
Python
|
bsd-3-clause
|
ihuston/pyflation,ihuston/pyflation
|
Add tests directory to sourceterm package and start test module.
|
'''
Created on 25 Aug 2010
@author: ith
'''
import unittest
class Test(unittest.TestCase):
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add tests directory to sourceterm package and start test module.<commit_after>
|
'''
Created on 25 Aug 2010
@author: ith
'''
import unittest
class Test(unittest.TestCase):
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Add tests directory to sourceterm package and start test module.'''
Created on 25 Aug 2010
@author: ith
'''
import unittest
class Test(unittest.TestCase):
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
<commit_before><commit_msg>Add tests directory to sourceterm package and start test module.<commit_after>'''
Created on 25 Aug 2010
@author: ith
'''
import unittest
class Test(unittest.TestCase):
def testName(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
|
3dc9204c80f2f7be5f82200c059a6a62f02bf6c1
|
www/pelicanconf.py
|
www/pelicanconf.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('NumPy', 'http://www.numpy.org/'),
('SciPy', 'http://www.scipy.org'),
('IPython', 'http://ipython.org/'),
('Enthought', 'http://www.enthought.com/'),
)
# Social widget
SOCIAL = (('github', 'https://github.com/enthought/distarray'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
Update blogroll and social links.
|
Update blogroll and social links.
|
Python
|
bsd-3-clause
|
enthought/distarray,enthought/distarray,RaoUmer/distarray,RaoUmer/distarray
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
Update blogroll and social links.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('NumPy', 'http://www.numpy.org/'),
('SciPy', 'http://www.scipy.org'),
('IPython', 'http://ipython.org/'),
('Enthought', 'http://www.enthought.com/'),
)
# Social widget
SOCIAL = (('github', 'https://github.com/enthought/distarray'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
<commit_msg>Update blogroll and social links.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('NumPy', 'http://www.numpy.org/'),
('SciPy', 'http://www.scipy.org'),
('IPython', 'http://ipython.org/'),
('Enthought', 'http://www.enthought.com/'),
)
# Social widget
SOCIAL = (('github', 'https://github.com/enthought/distarray'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
Update blogroll and social links.#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('NumPy', 'http://www.numpy.org/'),
('SciPy', 'http://www.scipy.org'),
('IPython', 'http://ipython.org/'),
('Enthought', 'http://www.enthought.com/'),
)
# Social widget
SOCIAL = (('github', 'https://github.com/enthought/distarray'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
<commit_before>#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('Pelican', 'http://getpelican.com/'),
('Python.org', 'http://python.org/'),
('Jinja2', 'http://jinja.pocoo.org/'),
('You can modify those links in your config file', '#'),)
# Social widget
SOCIAL = (('You can add links in your config file', '#'),
('Another social link', '#'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
<commit_msg>Update blogroll and social links.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from __future__ import unicode_literals
AUTHOR = u'IPython development team and Enthought, Inc.'
SITENAME = u'DistArray'
SITEURL = ''
PATH = 'content'
TIMEZONE = 'America/Chicago'
DEFAULT_LANG = u'en'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
# Blogroll
LINKS = (('NumPy', 'http://www.numpy.org/'),
('SciPy', 'http://www.scipy.org'),
('IPython', 'http://ipython.org/'),
('Enthought', 'http://www.enthought.com/'),
)
# Social widget
SOCIAL = (('github', 'https://github.com/enthought/distarray'),)
DEFAULT_PAGINATION = False
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
c4015ed868b65ce5c7ed660c84e252a950294642
|
r1.py
|
r1.py
|
from datetime import date
import bs4
import itertools as it
import re
import requests
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def extract_name(bsitem):
return bsitem.find('span').text
def extract_price(bsitem):
reg = re.compile(r'CHF ([\d\.]+)')
return float(reg.findall(bsitem.text)[0])
def extract_table(response):
items = bs4.BeautifulSoup(response.text).find(
'table',
class_='menuRestaurant').findAll('table',
class_='HauteurMenu')
return [(extract_name(i), extract_price(i)) for i in items[1::2]]
def create_payload(page):
return {'fa_afficheSemaine_menurestaurant': 'Page {}'.format(page),
'fn_changeType': 2,
'fn_jourSemaine': '{}'.format(date.today()),
'fn_limite': 2 * page - 1,
'fn_refresh': 1,
'fn_numpage': page}
def split_days(items):
xs = [grouper(i, n) for i, n in zip(items, (3, 2, 2))]
return [list(it.chain(*i)) for i in zip(*xs)]
def get_menu():
URL1 = 'http://extranet.novae-restauration.ch/index.php?frame=1&x=d894ddae3c17b40b4fe7e16519f950f0&y=c7b3f79848b99a8e562a1df1d6285365&z=33'
URL2 = 'http://extranet.novae-restauration.ch/novae/traiteur/restauration/restaurant-cern.html?frame=1'
s = requests.Session()
return split_days([extract_table(s.get(URL1)), extract_table(
s.post(URL2,
data=create_payload(2))), extract_table(
s.post(URL2,
data=create_payload(3)))])
|
Add basic functionality to query the (horrible) website.
|
Add basic functionality to query the (horrible) website.
|
Python
|
mit
|
kdungs/R1D2
|
Add basic functionality to query the (horrible) website.
|
from datetime import date
import bs4
import itertools as it
import re
import requests
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def extract_name(bsitem):
return bsitem.find('span').text
def extract_price(bsitem):
reg = re.compile(r'CHF ([\d\.]+)')
return float(reg.findall(bsitem.text)[0])
def extract_table(response):
items = bs4.BeautifulSoup(response.text).find(
'table',
class_='menuRestaurant').findAll('table',
class_='HauteurMenu')
return [(extract_name(i), extract_price(i)) for i in items[1::2]]
def create_payload(page):
return {'fa_afficheSemaine_menurestaurant': 'Page {}'.format(page),
'fn_changeType': 2,
'fn_jourSemaine': '{}'.format(date.today()),
'fn_limite': 2 * page - 1,
'fn_refresh': 1,
'fn_numpage': page}
def split_days(items):
xs = [grouper(i, n) for i, n in zip(items, (3, 2, 2))]
return [list(it.chain(*i)) for i in zip(*xs)]
def get_menu():
URL1 = 'http://extranet.novae-restauration.ch/index.php?frame=1&x=d894ddae3c17b40b4fe7e16519f950f0&y=c7b3f79848b99a8e562a1df1d6285365&z=33'
URL2 = 'http://extranet.novae-restauration.ch/novae/traiteur/restauration/restaurant-cern.html?frame=1'
s = requests.Session()
return split_days([extract_table(s.get(URL1)), extract_table(
s.post(URL2,
data=create_payload(2))), extract_table(
s.post(URL2,
data=create_payload(3)))])
|
<commit_before><commit_msg>Add basic functionality to query the (horrible) website.<commit_after>
|
from datetime import date
import bs4
import itertools as it
import re
import requests
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def extract_name(bsitem):
return bsitem.find('span').text
def extract_price(bsitem):
reg = re.compile(r'CHF ([\d\.]+)')
return float(reg.findall(bsitem.text)[0])
def extract_table(response):
items = bs4.BeautifulSoup(response.text).find(
'table',
class_='menuRestaurant').findAll('table',
class_='HauteurMenu')
return [(extract_name(i), extract_price(i)) for i in items[1::2]]
def create_payload(page):
return {'fa_afficheSemaine_menurestaurant': 'Page {}'.format(page),
'fn_changeType': 2,
'fn_jourSemaine': '{}'.format(date.today()),
'fn_limite': 2 * page - 1,
'fn_refresh': 1,
'fn_numpage': page}
def split_days(items):
xs = [grouper(i, n) for i, n in zip(items, (3, 2, 2))]
return [list(it.chain(*i)) for i in zip(*xs)]
def get_menu():
URL1 = 'http://extranet.novae-restauration.ch/index.php?frame=1&x=d894ddae3c17b40b4fe7e16519f950f0&y=c7b3f79848b99a8e562a1df1d6285365&z=33'
URL2 = 'http://extranet.novae-restauration.ch/novae/traiteur/restauration/restaurant-cern.html?frame=1'
s = requests.Session()
return split_days([extract_table(s.get(URL1)), extract_table(
s.post(URL2,
data=create_payload(2))), extract_table(
s.post(URL2,
data=create_payload(3)))])
|
Add basic functionality to query the (horrible) website.from datetime import date
import bs4
import itertools as it
import re
import requests
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def extract_name(bsitem):
return bsitem.find('span').text
def extract_price(bsitem):
reg = re.compile(r'CHF ([\d\.]+)')
return float(reg.findall(bsitem.text)[0])
def extract_table(response):
items = bs4.BeautifulSoup(response.text).find(
'table',
class_='menuRestaurant').findAll('table',
class_='HauteurMenu')
return [(extract_name(i), extract_price(i)) for i in items[1::2]]
def create_payload(page):
return {'fa_afficheSemaine_menurestaurant': 'Page {}'.format(page),
'fn_changeType': 2,
'fn_jourSemaine': '{}'.format(date.today()),
'fn_limite': 2 * page - 1,
'fn_refresh': 1,
'fn_numpage': page}
def split_days(items):
xs = [grouper(i, n) for i, n in zip(items, (3, 2, 2))]
return [list(it.chain(*i)) for i in zip(*xs)]
def get_menu():
URL1 = 'http://extranet.novae-restauration.ch/index.php?frame=1&x=d894ddae3c17b40b4fe7e16519f950f0&y=c7b3f79848b99a8e562a1df1d6285365&z=33'
URL2 = 'http://extranet.novae-restauration.ch/novae/traiteur/restauration/restaurant-cern.html?frame=1'
s = requests.Session()
return split_days([extract_table(s.get(URL1)), extract_table(
s.post(URL2,
data=create_payload(2))), extract_table(
s.post(URL2,
data=create_payload(3)))])
|
<commit_before><commit_msg>Add basic functionality to query the (horrible) website.<commit_after>from datetime import date
import bs4
import itertools as it
import re
import requests
def grouper(iterable, n, fillvalue=None):
args = [iter(iterable)] * n
return it.izip_longest(fillvalue=fillvalue, *args)
def extract_name(bsitem):
return bsitem.find('span').text
def extract_price(bsitem):
reg = re.compile(r'CHF ([\d\.]+)')
return float(reg.findall(bsitem.text)[0])
def extract_table(response):
items = bs4.BeautifulSoup(response.text).find(
'table',
class_='menuRestaurant').findAll('table',
class_='HauteurMenu')
return [(extract_name(i), extract_price(i)) for i in items[1::2]]
def create_payload(page):
return {'fa_afficheSemaine_menurestaurant': 'Page {}'.format(page),
'fn_changeType': 2,
'fn_jourSemaine': '{}'.format(date.today()),
'fn_limite': 2 * page - 1,
'fn_refresh': 1,
'fn_numpage': page}
def split_days(items):
xs = [grouper(i, n) for i, n in zip(items, (3, 2, 2))]
return [list(it.chain(*i)) for i in zip(*xs)]
def get_menu():
URL1 = 'http://extranet.novae-restauration.ch/index.php?frame=1&x=d894ddae3c17b40b4fe7e16519f950f0&y=c7b3f79848b99a8e562a1df1d6285365&z=33'
URL2 = 'http://extranet.novae-restauration.ch/novae/traiteur/restauration/restaurant-cern.html?frame=1'
s = requests.Session()
return split_days([extract_table(s.get(URL1)), extract_table(
s.post(URL2,
data=create_payload(2))), extract_table(
s.post(URL2,
data=create_payload(3)))])
|
|
265bedb193f8615f99daa63c921b572408921605
|
test_quick_sort.py
|
test_quick_sort.py
|
# -*- coding: utf-8 -*-
from quick_sort import quick_sort
def test_sorted():
my_list = list(range(100))
quick_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
quick_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
quick_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
quick_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
quick_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
def test_combo():
my_list = [42, 1, 'a', 500]
quick_sort(my_list)
assert my_list == [1, 42, 500, 'a']
my_list = [42, '1', 'a', '500']
quick_sort(my_list)
assert my_list == [42, '1', '500', 'a']
def test_function():
my_list = []
new_list = [quick_sort(my_list)]
assert new_list == [None]
|
Add tests for quick sort
|
Add tests for quick sort
|
Python
|
mit
|
nbeck90/data_structures_2
|
Add tests for quick sort
|
# -*- coding: utf-8 -*-
from quick_sort import quick_sort
def test_sorted():
my_list = list(range(100))
quick_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
quick_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
quick_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
quick_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
quick_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
def test_combo():
my_list = [42, 1, 'a', 500]
quick_sort(my_list)
assert my_list == [1, 42, 500, 'a']
my_list = [42, '1', 'a', '500']
quick_sort(my_list)
assert my_list == [42, '1', '500', 'a']
def test_function():
my_list = []
new_list = [quick_sort(my_list)]
assert new_list == [None]
|
<commit_before><commit_msg>Add tests for quick sort<commit_after>
|
# -*- coding: utf-8 -*-
from quick_sort import quick_sort
def test_sorted():
my_list = list(range(100))
quick_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
quick_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
quick_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
quick_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
quick_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
def test_combo():
my_list = [42, 1, 'a', 500]
quick_sort(my_list)
assert my_list == [1, 42, 500, 'a']
my_list = [42, '1', 'a', '500']
quick_sort(my_list)
assert my_list == [42, '1', '500', 'a']
def test_function():
my_list = []
new_list = [quick_sort(my_list)]
assert new_list == [None]
|
Add tests for quick sort# -*- coding: utf-8 -*-
from quick_sort import quick_sort
def test_sorted():
my_list = list(range(100))
quick_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
quick_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
quick_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
quick_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
quick_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
def test_combo():
my_list = [42, 1, 'a', 500]
quick_sort(my_list)
assert my_list == [1, 42, 500, 'a']
my_list = [42, '1', 'a', '500']
quick_sort(my_list)
assert my_list == [42, '1', '500', 'a']
def test_function():
my_list = []
new_list = [quick_sort(my_list)]
assert new_list == [None]
|
<commit_before><commit_msg>Add tests for quick sort<commit_after># -*- coding: utf-8 -*-
from quick_sort import quick_sort
def test_sorted():
my_list = list(range(100))
quick_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
quick_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
quick_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
quick_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
quick_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
quick_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
def test_combo():
my_list = [42, 1, 'a', 500]
quick_sort(my_list)
assert my_list == [1, 42, 500, 'a']
my_list = [42, '1', 'a', '500']
quick_sort(my_list)
assert my_list == [42, '1', '500', 'a']
def test_function():
my_list = []
new_list = [quick_sort(my_list)]
assert new_list == [None]
|
|
d4c7869d62635eca3108d743c2bc12c9f394d68a
|
tests/test_item.py
|
tests/test_item.py
|
import os, sys
inc_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, inc_path)
import archive
def test_item():
item = archive.Item('stairs')
assert item.metadata['metadata']['identifier'] == 'stairs'
def test_file():
item = archive.Item('stairs')
filename = 'glogo.png'
file = item.file(filename)
assert not os.path.exists(filename)
file.download()
assert os.stat(filename).st_size == file.size
os.unlink(filename)
|
Add archive.File class, which allows downloading from archive.org
|
Add archive.File class, which allows downloading from archive.org
|
Python
|
agpl-3.0
|
brycedrennan/internetarchive,JesseWeinstein/internetarchive,dattasaurabh82/internetarchive,jjjake/internetarchive,wumpus/internetarchive
|
Add archive.File class, which allows downloading from archive.org
|
import os, sys
inc_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, inc_path)
import archive
def test_item():
item = archive.Item('stairs')
assert item.metadata['metadata']['identifier'] == 'stairs'
def test_file():
item = archive.Item('stairs')
filename = 'glogo.png'
file = item.file(filename)
assert not os.path.exists(filename)
file.download()
assert os.stat(filename).st_size == file.size
os.unlink(filename)
|
<commit_before><commit_msg>Add archive.File class, which allows downloading from archive.org<commit_after>
|
import os, sys
inc_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, inc_path)
import archive
def test_item():
item = archive.Item('stairs')
assert item.metadata['metadata']['identifier'] == 'stairs'
def test_file():
item = archive.Item('stairs')
filename = 'glogo.png'
file = item.file(filename)
assert not os.path.exists(filename)
file.download()
assert os.stat(filename).st_size == file.size
os.unlink(filename)
|
Add archive.File class, which allows downloading from archive.orgimport os, sys
inc_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, inc_path)
import archive
def test_item():
item = archive.Item('stairs')
assert item.metadata['metadata']['identifier'] == 'stairs'
def test_file():
item = archive.Item('stairs')
filename = 'glogo.png'
file = item.file(filename)
assert not os.path.exists(filename)
file.download()
assert os.stat(filename).st_size == file.size
os.unlink(filename)
|
<commit_before><commit_msg>Add archive.File class, which allows downloading from archive.org<commit_after>import os, sys
inc_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, inc_path)
import archive
def test_item():
item = archive.Item('stairs')
assert item.metadata['metadata']['identifier'] == 'stairs'
def test_file():
item = archive.Item('stairs')
filename = 'glogo.png'
file = item.file(filename)
assert not os.path.exists(filename)
file.download()
assert os.stat(filename).st_size == file.size
os.unlink(filename)
|
|
fa4b4de37b38f0ff800bbd2ac007ab6521720258
|
scripts/tests/test_box_migrate_to_external_account.py
|
scripts/tests/test_box_migrate_to_external_account.py
|
from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
Add test for box migration script
|
Add test for box migration script
|
Python
|
apache-2.0
|
ticklemepierce/osf.io,haoyuchen1992/osf.io,Nesiehr/osf.io,wearpants/osf.io,zachjanicki/osf.io,acshi/osf.io,chennan47/osf.io,ZobairAlijan/osf.io,cwisecarver/osf.io,njantrania/osf.io,TomHeatwole/osf.io,chrisseto/osf.io,felliott/osf.io,erinspace/osf.io,TomBaxter/osf.io,abought/osf.io,jnayak1/osf.io,rdhyee/osf.io,aaxelb/osf.io,billyhunt/osf.io,jnayak1/osf.io,binoculars/osf.io,emetsger/osf.io,wearpants/osf.io,saradbowman/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,caseyrollins/osf.io,binoculars/osf.io,Nesiehr/osf.io,mfraezz/osf.io,SSJohns/osf.io,hmoco/osf.io,DanielSBrown/osf.io,ticklemepierce/osf.io,KAsante95/osf.io,GageGaskins/osf.io,Johnetordoff/osf.io,kch8qx/osf.io,doublebits/osf.io,cslzchen/osf.io,caseyrygt/osf.io,kch8qx/osf.io,emetsger/osf.io,amyshi188/osf.io,danielneis/osf.io,RomanZWang/osf.io,RomanZWang/osf.io,icereval/osf.io,mattclark/osf.io,DanielSBrown/osf.io,amyshi188/osf.io,jnayak1/osf.io,billyhunt/osf.io,haoyuchen1992/osf.io,GageGaskins/osf.io,amyshi188/osf.io,monikagrabowska/osf.io,Johnetordoff/osf.io,mluke93/osf.io,wearpants/osf.io,alexschiller/osf.io,mluo613/osf.io,kch8qx/osf.io,haoyuchen1992/osf.io,caneruguz/osf.io,icereval/osf.io,brandonPurvis/osf.io,samanehsan/osf.io,chrisseto/osf.io,abought/osf.io,cwisecarver/osf.io,KAsante95/osf.io,chennan47/osf.io,GageGaskins/osf.io,mfraezz/osf.io,KAsante95/osf.io,leb2dg/osf.io,ticklemepierce/osf.io,cosenal/osf.io,adlius/osf.io,chennan47/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,aaxelb/osf.io,caneruguz/osf.io,haoyuchen1992/osf.io,CenterForOpenScience/osf.io,kwierman/osf.io,acshi/osf.io,Ghalko/osf.io,baylee-d/osf.io,mattclark/osf.io,ZobairAlijan/osf.io,amyshi188/osf.io,Ghalko/osf.io,cosenal/osf.io,billyhunt/osf.io,GageGaskins/osf.io,brianjgeiger/osf.io,felliott/osf.io,acshi/osf.io,cslzchen/osf.io,Nesiehr/osf.io,wearpants/osf.io,zachjanicki/osf.io,mluo613/osf.io,Ghalko/osf.io,sloria/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,doublebits/osf.io,mluke93/osf.io,zamattiac/osf.io,acshi/osf.io,TomBaxter/osf.io,alexschiller/osf.io,caneruguz/osf.io,crcresearch/osf.io,pattisdr/osf.io,billyhunt/osf.io,samanehsan/osf.io,monikagrabowska/osf.io,aaxelb/osf.io,monikagrabowska/osf.io,hmoco/osf.io,asanfilippo7/osf.io,Ghalko/osf.io,acshi/osf.io,HalcyonChimera/osf.io,adlius/osf.io,crcresearch/osf.io,njantrania/osf.io,caseyrollins/osf.io,zamattiac/osf.io,caseyrygt/osf.io,emetsger/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,hmoco/osf.io,kwierman/osf.io,erinspace/osf.io,GageGaskins/osf.io,CenterForOpenScience/osf.io,caseyrygt/osf.io,RomanZWang/osf.io,danielneis/osf.io,sloria/osf.io,billyhunt/osf.io,rdhyee/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,mluo613/osf.io,TomBaxter/osf.io,TomHeatwole/osf.io,ZobairAlijan/osf.io,erinspace/osf.io,doublebits/osf.io,leb2dg/osf.io,TomHeatwole/osf.io,felliott/osf.io,Johnetordoff/osf.io,monikagrabowska/osf.io,alexschiller/osf.io,zachjanicki/osf.io,mluo613/osf.io,samchrisinger/osf.io,brandonPurvis/osf.io,caseyrollins/osf.io,samanehsan/osf.io,kwierman/osf.io,rdhyee/osf.io,danielneis/osf.io,samanehsan/osf.io,alexschiller/osf.io,mattclark/osf.io,saradbowman/osf.io,DanielSBrown/osf.io,laurenrevere/osf.io,samchrisinger/osf.io,crcresearch/osf.io,njantrania/osf.io,zamattiac/osf.io,abought/osf.io,rdhyee/osf.io,jnayak1/osf.io,pattisdr/osf.io,cosenal/osf.io,asanfilippo7/osf.io,samchrisinger/osf.io,brianjgeiger/osf.io,doublebits/osf.io,baylee-d/osf.io,aaxelb/osf.io,monikagrabowska/osf.io,leb2dg/osf.io,SSJohns/osf.io,mluke93/osf.io,ticklemepierce/osf.io,brandonPurvis/osf.io,brianjgeiger/osf.io,
mfraezz/osf.io,mluo613/osf.io,caseyrygt/osf.io,doublebits/osf.io,icereval/osf.io,hmoco/osf.io,ZobairAlijan/osf.io,cosenal/osf.io,asanfilippo7/osf.io,adlius/osf.io,SSJohns/osf.io,cslzchen/osf.io,cwisecarver/osf.io,danielneis/osf.io,SSJohns/osf.io,kwierman/osf.io,zamattiac/osf.io,emetsger/osf.io,baylee-d/osf.io,Nesiehr/osf.io,adlius/osf.io,sloria/osf.io,leb2dg/osf.io,mluke93/osf.io,njantrania/osf.io,chrisseto/osf.io,kch8qx/osf.io,laurenrevere/osf.io,TomHeatwole/osf.io,brandonPurvis/osf.io,kch8qx/osf.io,DanielSBrown/osf.io,KAsante95/osf.io,RomanZWang/osf.io,RomanZWang/osf.io,pattisdr/osf.io,CenterForOpenScience/osf.io,mfraezz/osf.io,brandonPurvis/osf.io,samchrisinger/osf.io,abought/osf.io,CenterForOpenScience/osf.io,zachjanicki/osf.io,asanfilippo7/osf.io,felliott/osf.io,KAsante95/osf.io
|
Add test for box migration script
|
from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
<commit_before><commit_msg>Add test for box migration script<commit_after>
|
from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
Add test for box migration scriptfrom nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
<commit_before><commit_msg>Add test for box migration script<commit_after>from nose.tools import *
from scripts.box.migrate_to_external_account import do_migration, get_targets
from framework.auth import Auth
from tests.base import OsfTestCase
from tests.factories import ProjectFactory, UserFactory
from website.addons.box.model import BoxUserSettings
from website.addons.box.tests.factories import BoxOAuthSettingsFactory
class TestBoxMigration(OsfTestCase):
# Note: BoxUserSettings.user_settings has to be changed to foreign_user_settings (model and mongo). See migration instructions
def test_migration_no_project(self):
user = UserFactory()
user.add_addon('box')
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
do_migration([user_addon])
user_addon.reload()
assert_is_none(user_addon.oauth_settings)
assert_equal(len(user.external_accounts), 1)
account = user.external_accounts[0]
assert_equal(account.provider, 'box')
assert_equal(account.oauth_key, 'abcdef1')
def test_migration_removes_targets(self):
BoxUserSettings.remove()
user = UserFactory()
project = ProjectFactory(creator=user)
user.add_addon('box', auth=Auth(user))
user_addon = user.get_addon('box')
user_addon.oauth_settings = BoxOAuthSettingsFactory()
user_addon.save()
project.add_addon('box', auth=Auth(user))
node_addon = project.get_addon('box')
node_addon.foreign_user_settings = user_addon
node_addon.save()
assert_equal(get_targets().count(), 1)
do_migration([user_addon])
user_addon.reload()
assert_equal(get_targets().count(), 0)
def test_migration_multiple_users(self):
user1 = UserFactory()
user2 = UserFactory()
oauth_settings = BoxOAuthSettingsFactory()
user1.add_addon('box')
user1_addon = user1.get_addon('box')
user1_addon.oauth_settings = oauth_settings
user1_addon.save()
user2.add_addon('box')
user2_addon = user2.get_addon('box')
user2_addon.oauth_settings = oauth_settings
user2_addon.save()
do_migration([user1_addon, user2_addon])
user1_addon.reload()
user2_addon.reload()
assert_equal(
user1.external_accounts[0],
user2.external_accounts[0],
)
def test_get_targets(self):
BoxUserSettings.remove()
addons = [
BoxUserSettings(),
BoxUserSettings(oauth_settings=BoxOAuthSettingsFactory()),
]
for addon in addons:
addon.save()
targets = get_targets()
assert_equal(targets.count(), 1)
assert_equal(targets[0]._id, addons[-1]._id)
|
|
86cae13f7dde04f7031ae111e596f2d8c03d5420
|
tests/test_recorders.py
|
tests/test_recorders.py
|
import pytest
from plumbium.processresult import record, pipeline, call
from plumbium.recorders import CSVFile, StdOut
from collections import OrderedDict
@pytest.fixture
def simple_pipeline():
@record()
def recorded_function():
call(['echo', '6.35'])
def a_pipeline():
recorded_function()
return a_pipeline
def test_csvfile(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = CSVFile(
'test.csv',
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
with open('test.csv') as f:
assert f.readline().strip() == 'id,data'
assert f.readline().strip() == '1,6.35'
def test_stdout(simple_pipeline, tmpdir, capsys):
with tmpdir.as_cwd():
recorder = StdOut(
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
out, err = capsys.readouterr()
assert out == 'id: 1\ndata: 6.35\n'
|
Add tests of CSVFile and StdOut recorders
|
Add tests of CSVFile and StdOut recorders
|
Python
|
mit
|
jstutters/Plumbium
|
Add tests of CSVFile and StdOut recorders
|
import pytest
from plumbium.processresult import record, pipeline, call
from plumbium.recorders import CSVFile, StdOut
from collections import OrderedDict
@pytest.fixture
def simple_pipeline():
@record()
def recorded_function():
call(['echo', '6.35'])
def a_pipeline():
recorded_function()
return a_pipeline
def test_csvfile(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = CSVFile(
'test.csv',
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
with open('test.csv') as f:
assert f.readline().strip() == 'id,data'
assert f.readline().strip() == '1,6.35'
def test_stdout(simple_pipeline, tmpdir, capsys):
with tmpdir.as_cwd():
recorder = StdOut(
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
out, err = capsys.readouterr()
assert out == 'id: 1\ndata: 6.35\n'
|
<commit_before><commit_msg>Add tests of CSVFile and StdOut recorders<commit_after>
|
import pytest
from plumbium.processresult import record, pipeline, call
from plumbium.recorders import CSVFile, StdOut
from collections import OrderedDict
@pytest.fixture
def simple_pipeline():
@record()
def recorded_function():
call(['echo', '6.35'])
def a_pipeline():
recorded_function()
return a_pipeline
def test_csvfile(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = CSVFile(
'test.csv',
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
with open('test.csv') as f:
assert f.readline().strip() == 'id,data'
assert f.readline().strip() == '1,6.35'
def test_stdout(simple_pipeline, tmpdir, capsys):
with tmpdir.as_cwd():
recorder = StdOut(
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
out, err = capsys.readouterr()
assert out == 'id: 1\ndata: 6.35\n'
|
Add tests of CSVFile and StdOut recordersimport pytest
from plumbium.processresult import record, pipeline, call
from plumbium.recorders import CSVFile, StdOut
from collections import OrderedDict
@pytest.fixture
def simple_pipeline():
@record()
def recorded_function():
call(['echo', '6.35'])
def a_pipeline():
recorded_function()
return a_pipeline
def test_csvfile(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = CSVFile(
'test.csv',
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
with open('test.csv') as f:
assert f.readline().strip() == 'id,data'
assert f.readline().strip() == '1,6.35'
def test_stdout(simple_pipeline, tmpdir, capsys):
with tmpdir.as_cwd():
recorder = StdOut(
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
out, err = capsys.readouterr()
assert out == 'id: 1\ndata: 6.35\n'
|
<commit_before><commit_msg>Add tests of CSVFile and StdOut recorders<commit_after>import pytest
from plumbium.processresult import record, pipeline, call
from plumbium.recorders import CSVFile, StdOut
from collections import OrderedDict
@pytest.fixture
def simple_pipeline():
@record()
def recorded_function():
call(['echo', '6.35'])
def a_pipeline():
recorded_function()
return a_pipeline
def test_csvfile(simple_pipeline, tmpdir):
with tmpdir.as_cwd():
recorder = CSVFile(
'test.csv',
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
with open('test.csv') as f:
assert f.readline().strip() == 'id,data'
assert f.readline().strip() == '1,6.35'
def test_stdout(simple_pipeline, tmpdir, capsys):
with tmpdir.as_cwd():
recorder = StdOut(
OrderedDict([
('id', lambda x: x['metadata']['id']),
('data', lambda x: x['processes'][0]['printed_output'].strip())
])
)
pipeline.run(
'test',
simple_pipeline,
str(tmpdir),
metadata={'id': 1},
recorder=recorder
)
out, err = capsys.readouterr()
assert out == 'id: 1\ndata: 6.35\n'
|
|
4bd9e4db4af430ae34ed87f695d72ae99ba5bb70
|
solver.py
|
solver.py
|
from constraint import *
# Empty space is 0
# Brick is a 1
# Block is a 2
# West facing player - 3
# East facing player - 4
# Door - 5
level = [[1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,5,0,0,0,0,0,3,0,1],
[1,1,1,1,1,1,1,1,1,1]]
problem = Problem()
# moves
# move east - e
# move west - w
# move northwest - nw
# move northeast - ne
# pickup block - p
# drop block - d
# fall - f
problem.addVariable("e", [[4,0],[3,0],[3,1],[3,2]] )
problem.addVariable("w", [[0,3],[0,4], [1,4],[2,4]])
problem.addVariable("nw", [0])
problem.addVariable("ne", [0])
problem.addVariable("p", [0])
problem.addVariable("d", [0])
solutions = problem.getSolutions()
print(solutions)
|
Set up first test level, started to create constraints
|
Set up first test level, started to create constraints
|
Python
|
mit
|
joeYeager/BlockDudeSolver
|
Set up first test level, started to create constraints
|
from constraint import *
# Empty space is 0
# Brick is a 1
# Block is a 2
# West facing player - 3
# East facing player - 4
# Door - 5
level = [[1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,5,0,0,0,0,0,3,0,1],
[1,1,1,1,1,1,1,1,1,1]]
problem = Problem()
# moves
# move east - e
# move west - w
# move northwest - nw
# move northeast - ne
# pickup block - p
# drop block - d
# fall - f
problem.addVariable("e", [[4,0],[3,0],[3,1],[3,2]] )
problem.addVariable("w", [[0,3],[0,4], [1,4],[2,4]])
problem.addVariable("nw", [0])
problem.addVariable("ne", [0])
problem.addVariable("p", [0])
problem.addVariable("d", [0])
solutions = problem.getSolutions()
print(solutions)
|
<commit_before><commit_msg>Set up first test level, started to create constraints<commit_after>
|
from constraint import *
# Empty space is 0
# Brick is a 1
# Block is a 2
# West facing player - 3
# East facing player - 4
# Door - 5
level = [[1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,5,0,0,0,0,0,3,0,1],
[1,1,1,1,1,1,1,1,1,1]]
problem = Problem()
# moves
# move east - e
# move west - w
# move northwest - nw
# move northeast - ne
# pickup block - p
# drop block - d
# fall - f
problem.addVariable("e", [[4,0],[3,0],[3,1],[3,2]] )
problem.addVariable("w", [[0,3],[0,4], [1,4],[2,4]])
problem.addVariable("nw", [0])
problem.addVariable("ne", [0])
problem.addVariable("p", [0])
problem.addVariable("d", [0])
solutions = problem.getSolutions()
print(solutions)
|
Set up first test level, started to create constraintsfrom constraint import *
# Empty space is 0
# Brick is a 1
# Block is a 2
# West facing player - 3
# East facing player - 4
# Door - 5
level = [[1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,5,0,0,0,0,0,3,0,1],
[1,1,1,1,1,1,1,1,1,1]]
problem = Problem()
# moves
# move east - e
# move west - w
# move northwest - nw
# move northeast - ne
# pickup block - p
# drop block - d
# fall - f
problem.addVariable("e", [[4,0],[3,0],[3,1],[3,2]] )
problem.addVariable("w", [[0,3],[0,4], [1,4],[2,4]])
problem.addVariable("nw", [0])
problem.addVariable("ne", [0])
problem.addVariable("p", [0])
problem.addVariable("d", [0])
solutions = problem.getSolutions()
print(solutions)
|
<commit_before><commit_msg>Set up first test level, started to create constraints<commit_after>from constraint import *
# Empty space is 0
# Brick is a 1
# Block is a 2
# West facing player - 3
# East facing player - 4
# Door - 5
level = [[1,1,1,1,1,1,1,1,1,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,0,0,0,0,0,0,0,0,1],
[1,5,0,0,0,0,0,3,0,1],
[1,1,1,1,1,1,1,1,1,1]]
problem = Problem()
# moves
# move east - e
# move west - w
# move northwest - nw
# move northeast - ne
# pickup block - p
# drop block - d
# fall - f
problem.addVariable("e", [[4,0],[3,0],[3,1],[3,2]] )
problem.addVariable("w", [[0,3],[0,4], [1,4],[2,4]])
problem.addVariable("nw", [0])
problem.addVariable("ne", [0])
problem.addVariable("p", [0])
problem.addVariable("d", [0])
solutions = problem.getSolutions()
print(solutions)
|
|
2fb5557aed14d047d1ae120f0ff91c0e355d779f
|
ref.py
|
ref.py
|
#!/usr/bin/env python2
import sys
import subprocess
"""
Usage:
./ref.py ./main -B 1000000 -t 3 -T 31
"""
system = subprocess.check_output
githash = system("git rev-parse HEAD", shell=True).strip()
date = system("date -Ihours", shell=True).strip()
filename = "reference.%s.%s" % (githash, date)
benchargs = sys.argv[1:]
with open(filename, "wb") as fh:
fh.write(" ".join(benchargs) + "\n")
system(benchargs) # warm up
results = system(benchargs)
fh.write(results)
print "Wrote", filename
|
Add simple perf measuring tool
|
Add simple perf measuring tool
|
Python
|
mit
|
cemeyer/xkcd-skein-brute,cemeyer/xkcd-skein-brute,cemeyer/xkcd-skein-brute
|
Add simple perf measuring tool
|
#!/usr/bin/env python2
import sys
import subprocess
"""
Usage:
./ref.py ./main -B 1000000 -t 3 -T 31
"""
system = subprocess.check_output
githash = system("git rev-parse HEAD", shell=True).strip()
date = system("date -Ihours", shell=True).strip()
filename = "reference.%s.%s" % (githash, date)
benchargs = sys.argv[1:]
with open(filename, "wb") as fh:
fh.write(" ".join(benchargs) + "\n")
system(benchargs) # warm up
results = system(benchargs)
fh.write(results)
print "Wrote", filename
|
<commit_before><commit_msg>Add simple perf measuring tool<commit_after>
|
#!/usr/bin/env python2
import sys
import subprocess
"""
Usage:
./ref.py ./main -B 1000000 -t 3 -T 31
"""
system = subprocess.check_output
githash = system("git rev-parse HEAD", shell=True).strip()
date = system("date -Ihours", shell=True).strip()
filename = "reference.%s.%s" % (githash, date)
benchargs = sys.argv[1:]
with open(filename, "wb") as fh:
fh.write(" ".join(benchargs) + "\n")
system(benchargs) # warm up
results = system(benchargs)
fh.write(results)
print "Wrote", filename
|
Add simple perf measuring tool#!/usr/bin/env python2
import sys
import subprocess
"""
Usage:
./ref.py ./main -B 1000000 -t 3 -T 31
"""
system = subprocess.check_output
githash = system("git rev-parse HEAD", shell=True).strip()
date = system("date -Ihours", shell=True).strip()
filename = "reference.%s.%s" % (githash, date)
benchargs = sys.argv[1:]
with open(filename, "wb") as fh:
fh.write(" ".join(benchargs) + "\n")
system(benchargs) # warm up
results = system(benchargs)
fh.write(results)
print "Wrote", filename
|
<commit_before><commit_msg>Add simple perf measuring tool<commit_after>#!/usr/bin/env python2
import sys
import subprocess
"""
Usage:
./ref.py ./main -B 1000000 -t 3 -T 31
"""
system = subprocess.check_output
githash = system("git rev-parse HEAD", shell=True).strip()
date = system("date -Ihours", shell=True).strip()
filename = "reference.%s.%s" % (githash, date)
benchargs = sys.argv[1:]
with open(filename, "wb") as fh:
fh.write(" ".join(benchargs) + "\n")
system(benchargs) # warm up
results = system(benchargs)
fh.write(results)
print "Wrote", filename
|
|
e0786c5798b35b911193de1b4e3694b7ad8cad76
|
tests/test_generate_admin_metadata.py
|
tests/test_generate_admin_metadata.py
|
"""Test the generate_admin_metadata helper function."""
def test_generate_admin_metadata():
import dtoolcore
from dtoolcore import generate_admin_metadata
admin_metadata = generate_admin_metadata("ds-name", "creator-name")
assert len(admin_metadata["uuid"]) == 36
assert admin_metadata["dtoolcore_version"] == dtoolcore.__version__
assert admin_metadata["name"] == "ds-name"
assert admin_metadata["type"] == "protodataset"
assert admin_metadata["creator_username"] == "creator-name"
|
Add unit test for generate_admin_metadata helper function
|
Add unit test for generate_admin_metadata helper function
|
Python
|
mit
|
JIC-CSB/dtoolcore
|
Add unit test for generate_admin_metadata helper function
|
"""Test the generate_admin_metadata helper function."""
def test_generate_admin_metadata():
import dtoolcore
from dtoolcore import generate_admin_metadata
admin_metadata = generate_admin_metadata("ds-name", "creator-name")
assert len(admin_metadata["uuid"]) == 36
assert admin_metadata["dtoolcore_version"] == dtoolcore.__version__
assert admin_metadata["name"] == "ds-name"
assert admin_metadata["type"] == "protodataset"
assert admin_metadata["creator_username"] == "creator-name"
|
<commit_before><commit_msg>Add unit test for generate_admin_metadata helper function<commit_after>
|
"""Test the generate_admin_metadata helper function."""
def test_generate_admin_metadata():
import dtoolcore
from dtoolcore import generate_admin_metadata
admin_metadata = generate_admin_metadata("ds-name", "creator-name")
assert len(admin_metadata["uuid"]) == 36
assert admin_metadata["dtoolcore_version"] == dtoolcore.__version__
assert admin_metadata["name"] == "ds-name"
assert admin_metadata["type"] == "protodataset"
assert admin_metadata["creator_username"] == "creator-name"
|
Add unit test for generate_admin_metadata helper function"""Test the generate_admin_metadata helper function."""
def test_generate_admin_metadata():
import dtoolcore
from dtoolcore import generate_admin_metadata
admin_metadata = generate_admin_metadata("ds-name", "creator-name")
assert len(admin_metadata["uuid"]) == 36
assert admin_metadata["dtoolcore_version"] == dtoolcore.__version__
assert admin_metadata["name"] == "ds-name"
assert admin_metadata["type"] == "protodataset"
assert admin_metadata["creator_username"] == "creator-name"
|
<commit_before><commit_msg>Add unit test for generate_admin_metadata helper function<commit_after>"""Test the generate_admin_metadata helper function."""
def test_generate_admin_metadata():
import dtoolcore
from dtoolcore import generate_admin_metadata
admin_metadata = generate_admin_metadata("ds-name", "creator-name")
assert len(admin_metadata["uuid"]) == 36
assert admin_metadata["dtoolcore_version"] == dtoolcore.__version__
assert admin_metadata["name"] == "ds-name"
assert admin_metadata["type"] == "protodataset"
assert admin_metadata["creator_username"] == "creator-name"
|
|
724dc6ff77e9494e9519cb507cf43644034d5ca6
|
run_2.py
|
run_2.py
|
#!/usr/bin/python3
#
# start pytrain
#
import os
import sys
MYDIR = os.path.dirname(sys.argv[0])
os.system(MYDIR+"/run.sh")
if len(sys.argv)==1:
sys.argv.append("10.0.0.6")
os.system("chromium http://%s:8000/index.html"%(sys.argv[1]))
|
Integrate switch 10 and introduce couplings.
|
Integrate switch 10 and introduce couplings.
|
Python
|
apache-2.0
|
wglas85/pytrain,wglas85/pytrain,wglas85/pytrain,wglas85/pytrain
|
Integrate switch 10 and introduce couplings.
|
#!/usr/bin/python3
#
# start pytrain
#
import os
import sys
MYDIR = os.path.dirname(sys.argv[0])
os.system(MYDIR+"/run.sh")
if len(sys.argv)==1:
sys.argv.append("10.0.0.6")
os.system("chromium http://%s:8000/index.html"%(sys.argv[1]))
|
<commit_before><commit_msg>Integrate switch 10 and introduce couplings.<commit_after>
|
#!/usr/bin/python3
#
# start pytrain
#
import os
import sys
MYDIR = os.path.dirname(sys.argv[0])
os.system(MYDIR+"/run.sh")
if len(sys.argv)==1:
sys.argv.append("10.0.0.6")
os.system("chromium http://%s:8000/index.html"%(sys.argv[1]))
|
Integrate switch 10 and introduce couplings.#!/usr/bin/python3
#
# start pytrain
#
import os
import sys
MYDIR = os.path.dirname(sys.argv[0])
os.system(MYDIR+"/run.sh")
if len(sys.argv)==1:
sys.argv.append("10.0.0.6")
os.system("chromium http://%s:8000/index.html"%(sys.argv[1]))
|
<commit_before><commit_msg>Integrate switch 10 and introduce couplings.<commit_after>#!/usr/bin/python3
#
# start pytrain
#
import os
import sys
MYDIR = os.path.dirname(sys.argv[0])
os.system(MYDIR+"/run.sh")
if len(sys.argv)==1:
sys.argv.append("10.0.0.6")
os.system("chromium http://%s:8000/index.html"%(sys.argv[1]))
|
|
6953c04104eb4cc3eb908026f2420e3978371616
|
doc/viewcwl-json.py
|
doc/viewcwl-json.py
|
#!/usr/bin/env python
import fnmatch
import requests
import time
import os
import glob
# You can alternatively define these in travis.yml as env vars or arguments
BASE_URL = 'https://view.commonwl.org/workflows'
#get the cwl in l7g/cwl-version
matches = []
for root, dirnames, filenames in os.walk('cwl-version'):
for filename in fnmatch.filter(filenames, '*.cwl'):
matches.append(os.path.join(root, filename))
print matches
REPO_SLUG = 'curoverse/l7g/blob/master/'
# Headers
HEADERS = {
'user-agent': 'my-app/0.0.1',
'accept': 'application/json'
}
#Testing WORKFLOW_PATH
#WORKFLOW_PATH = 'cwl-version/clean/cwl/tiling_clean_gvcf.cwl'
#This will loop through matches, need to indent everything after to make work
for WORKFLOW_PATH in matches:
# Whole workflow URL on github
workflowURL = 'https://github.com/' + REPO_SLUG + WORKFLOW_PATH
print '\n',workflowURL,'\n'
# Add new workflow with the specific commit ID of this build
addResponse = requests.post(BASE_URL,
data={'url': workflowURL},
headers = HEADERS)
print BASE_URL,'\n',workflowURL,'\n\n'
print(addResponse)
print(addResponse.encoding)
print(addResponse.content)
print(addResponse.url)
print(addResponse.request)
print(addResponse.raw)
print(addResponse.headers)
print('\n\n End Sarah\'s code \n\n')
print('Sleep 1 second\n\n')
time.sleep(1)
|
Move working view.cwl script to doc folder
|
Move working view.cwl script to doc folder
Arvados-DCO-1.1-Signed-off-by: Benjamin Carr <ben@curii.com>
|
Python
|
agpl-3.0
|
curoverse/l7g,curoverse/l7g,curoverse/l7g,curoverse/l7g,curoverse/l7g,curoverse/l7g,curoverse/l7g
|
Move working view.cwl script to doc folder
Arvados-DCO-1.1-Signed-off-by: Benjamin Carr <ben@curii.com>
|
#!/usr/bin/env python
import fnmatch
import requests
import time
import os
import glob
# You can alternatively define these in travis.yml as env vars or arguments
BASE_URL = 'https://view.commonwl.org/workflows'
#get the cwl in l7g/cwl-version
matches = []
for root, dirnames, filenames in os.walk('cwl-version'):
for filename in fnmatch.filter(filenames, '*.cwl'):
matches.append(os.path.join(root, filename))
print matches
REPO_SLUG = 'curoverse/l7g/blob/master/'
# Headers
HEADERS = {
'user-agent': 'my-app/0.0.1',
'accept': 'application/json'
}
#Testing WORKFLOW_PATH
#WORKFLOW_PATH = 'cwl-version/clean/cwl/tiling_clean_gvcf.cwl'
#This will loop through matches, need to indent everything after to make work
for WORKFLOW_PATH in matches:
# Whole workflow URL on github
workflowURL = 'https://github.com/' + REPO_SLUG + WORKFLOW_PATH
print '\n',workflowURL,'\n'
# Add new workflow with the specific commit ID of this build
addResponse = requests.post(BASE_URL,
data={'url': workflowURL},
headers = HEADERS)
print BASE_URL,'\n',workflowURL,'\n\n'
print(addResponse)
print(addResponse.encoding)
print(addResponse.content)
print(addResponse.url)
print(addResponse.request)
print(addResponse.raw)
print(addResponse.headers)
print('\n\n End Sarah\'s code \n\n')
print('Sleep 1 second\n\n')
time.sleep(1)
|
<commit_before><commit_msg>Move working view.cwl script to doc folder
Arvados-DCO-1.1-Signed-off-by: Benjamin Carr <ben@curii.com><commit_after>
|
#!/usr/bin/env python
import fnmatch
import requests
import time
import os
import glob
# You can alternatively define these in travis.yml as env vars or arguments
BASE_URL = 'https://view.commonwl.org/workflows'
#get the cwl in l7g/cwl-version
matches = []
for root, dirnames, filenames in os.walk('cwl-version'):
for filename in fnmatch.filter(filenames, '*.cwl'):
matches.append(os.path.join(root, filename))
print matches
REPO_SLUG = 'curoverse/l7g/blob/master/'
# Headers
HEADERS = {
'user-agent': 'my-app/0.0.1',
'accept': 'application/json'
}
#Testing WORKFLOW_PATH
#WORKFLOW_PATH = 'cwl-version/clean/cwl/tiling_clean_gvcf.cwl'
#This will loop through matches, need to indent everything after to make work
for WORKFLOW_PATH in matches:
# Whole workflow URL on github
workflowURL = 'https://github.com/' + REPO_SLUG + WORKFLOW_PATH
print '\n',workflowURL,'\n'
# Add new workflow with the specific commit ID of this build
addResponse = requests.post(BASE_URL,
data={'url': workflowURL},
headers = HEADERS)
print BASE_URL,'\n',workflowURL,'\n\n'
print(addResponse)
print(addResponse.encoding)
print(addResponse.content)
print(addResponse.url)
print(addResponse.request)
print(addResponse.raw)
print(addResponse.headers)
print('\n\n End Sarah\'s code \n\n')
print('Sleep 1 second\n\n')
time.sleep(1)
|
Move working view.cwl script to doc folder
Arvados-DCO-1.1-Signed-off-by: Benjamin Carr <ben@curii.com>#!/usr/bin/env python
import fnmatch
import requests
import time
import os
import glob
# You can alternatively define these in travis.yml as env vars or arguments
BASE_URL = 'https://view.commonwl.org/workflows'
#get the cwl in l7g/cwl-version
matches = []
for root, dirnames, filenames in os.walk('cwl-version'):
for filename in fnmatch.filter(filenames, '*.cwl'):
matches.append(os.path.join(root, filename))
print matches
REPO_SLUG = 'curoverse/l7g/blob/master/'
# Headers
HEADERS = {
'user-agent': 'my-app/0.0.1',
'accept': 'application/json'
}
#Testing WORKFLOW_PATH
#WORKFLOW_PATH = 'cwl-version/clean/cwl/tiling_clean_gvcf.cwl'
#This will loop through matches, need to indent everything after to make work
for WORKFLOW_PATH in matches:
# Whole workflow URL on github
workflowURL = 'https://github.com/' + REPO_SLUG + WORKFLOW_PATH
print '\n',workflowURL,'\n'
# Add new workflow with the specific commit ID of this build
addResponse = requests.post(BASE_URL,
data={'url': workflowURL},
headers = HEADERS)
print BASE_URL,'\n',workflowURL,'\n\n'
print(addResponse)
print(addResponse.encoding)
print(addResponse.content)
print(addResponse.url)
print(addResponse.request)
print(addResponse.raw)
print(addResponse.headers)
print('\n\n End Sarah\'s code \n\n')
print('Sleep 1 second\n\n')
time.sleep(1)
|
<commit_before><commit_msg>Move working view.cwl script to doc folder
Arvados-DCO-1.1-Signed-off-by: Benjamin Carr <ben@curii.com><commit_after>#!/usr/bin/env python
import fnmatch
import requests
import time
import os
import glob
# You can alternatively define these in travis.yml as env vars or arguments
BASE_URL = 'https://view.commonwl.org/workflows'
#get the cwl in l7g/cwl-version
matches = []
for root, dirnames, filenames in os.walk('cwl-version'):
for filename in fnmatch.filter(filenames, '*.cwl'):
matches.append(os.path.join(root, filename))
print matches
REPO_SLUG = 'curoverse/l7g/blob/master/'
# Headers
HEADERS = {
'user-agent': 'my-app/0.0.1',
'accept': 'application/json'
}
#Testing WORKFLOW_PATH
#WORKFLOW_PATH = 'cwl-version/clean/cwl/tiling_clean_gvcf.cwl'
#This will loop through matches, need to indent everything after to make work
for WORKFLOW_PATH in matches:
# Whole workflow URL on github
workflowURL = 'https://github.com/' + REPO_SLUG + WORKFLOW_PATH
print '\n',workflowURL,'\n'
# Add new workflow with the specific commit ID of this build
addResponse = requests.post(BASE_URL,
data={'url': workflowURL},
headers = HEADERS)
print BASE_URL,'\n',workflowURL,'\n\n'
print(addResponse)
print(addResponse.encoding)
print(addResponse.content)
print(addResponse.url)
print(addResponse.request)
print(addResponse.raw)
print(addResponse.headers)
print('\n\n End Sarah\'s code \n\n')
print('Sleep 1 second\n\n')
time.sleep(1)
|
|
aee2363f6c6995a124b3c0ad358e83dc815ea808
|
alembic/versions/3fc4c97dc6bd_remove_redundant_user_subscription_.py
|
alembic/versions/3fc4c97dc6bd_remove_redundant_user_subscription_.py
|
"""remove redundant user subscription fields
Revision ID: 3fc4c97dc6bd
Revises: 3d723944025f
Create Date: 2015-01-27 18:11:15.822193
"""
# revision identifiers, used by Alembic.
revision = '3fc4c97dc6bd'
down_revision = '3d723944025f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', u'subscribe_bill')
op.drop_column('user', u'subscribe_call_for_comment')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column(u'subscribe_call_for_comment', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.add_column('user', sa.Column(u'subscribe_bill', sa.BOOLEAN(), autoincrement=False, nullable=True))
### end Alembic commands ###
|
Remove redundant subscription fields from user model.
|
Remove redundant subscription fields from user model.
|
Python
|
apache-2.0
|
Code4SA/pmg-cms-2,Code4SA/pmg-cms-2,Code4SA/pmg-cms-2
|
Remove redundant subscription fields from user model.
|
"""remove redundant user subscription fields
Revision ID: 3fc4c97dc6bd
Revises: 3d723944025f
Create Date: 2015-01-27 18:11:15.822193
"""
# revision identifiers, used by Alembic.
revision = '3fc4c97dc6bd'
down_revision = '3d723944025f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', u'subscribe_bill')
op.drop_column('user', u'subscribe_call_for_comment')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column(u'subscribe_call_for_comment', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.add_column('user', sa.Column(u'subscribe_bill', sa.BOOLEAN(), autoincrement=False, nullable=True))
### end Alembic commands ###
|
<commit_before><commit_msg>Remove redundant subscription fields from user model.<commit_after>
|
"""remove redundant user subscription fields
Revision ID: 3fc4c97dc6bd
Revises: 3d723944025f
Create Date: 2015-01-27 18:11:15.822193
"""
# revision identifiers, used by Alembic.
revision = '3fc4c97dc6bd'
down_revision = '3d723944025f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', u'subscribe_bill')
op.drop_column('user', u'subscribe_call_for_comment')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column(u'subscribe_call_for_comment', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.add_column('user', sa.Column(u'subscribe_bill', sa.BOOLEAN(), autoincrement=False, nullable=True))
### end Alembic commands ###
|
Remove redundant subscription fields from user model."""remove redundant user subscription fields
Revision ID: 3fc4c97dc6bd
Revises: 3d723944025f
Create Date: 2015-01-27 18:11:15.822193
"""
# revision identifiers, used by Alembic.
revision = '3fc4c97dc6bd'
down_revision = '3d723944025f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', u'subscribe_bill')
op.drop_column('user', u'subscribe_call_for_comment')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column(u'subscribe_call_for_comment', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.add_column('user', sa.Column(u'subscribe_bill', sa.BOOLEAN(), autoincrement=False, nullable=True))
### end Alembic commands ###
|
<commit_before><commit_msg>Remove redundant subscription fields from user model.<commit_after>"""remove redundant user subscription fields
Revision ID: 3fc4c97dc6bd
Revises: 3d723944025f
Create Date: 2015-01-27 18:11:15.822193
"""
# revision identifiers, used by Alembic.
revision = '3fc4c97dc6bd'
down_revision = '3d723944025f'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', u'subscribe_bill')
op.drop_column('user', u'subscribe_call_for_comment')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column(u'subscribe_call_for_comment', sa.BOOLEAN(), autoincrement=False, nullable=True))
op.add_column('user', sa.Column(u'subscribe_bill', sa.BOOLEAN(), autoincrement=False, nullable=True))
### end Alembic commands ###
|
|
76d2386bfa9e61ac17bca396384772ae70fb4563
|
gauss.py
|
gauss.py
|
#!/usr/bin/env python3
#Copyright 2015 BRendan Perrine
import random
random.seed()
print (random.gauss(0,1), "Is a normal distribution with mean zero and standard deviation and variance of one")
|
Add one liner to add ability to print a normal distribution with mean zero and variance one
|
Add one liner to add ability to print a normal distribution with mean zero and variance one
|
Python
|
mit
|
ianorlin/pyrandtoys
|
Add one liner to add ability to print a normal distribution with mean zero and variance one
|
#!/usr/bin/env python3
#Copyright 2015 BRendan Perrine
import random
random.seed()
print (random.gauss(0,1), "Is a normal distribution with mean zero and standard deviation and variance of one")
|
<commit_before><commit_msg>Add one liner to add ability to print a normal distribution with mean zero and variance one<commit_after>
|
#!/usr/bin/env python3
#Copyright 2015 BRendan Perrine
import random
random.seed()
print (random.gauss(0,1), "Is a normal distribution with mean zero and standard deviation and variance of one")
|
Add one liner to add ability to print a normal distribution with mean zero and variance one#!/usr/bin/env python3
#Copyright 2015 BRendan Perrine
import random
random.seed()
print (random.gauss(0,1), "Is a normal distribution with mean zero and standard deviation and variance of one")
|
<commit_before><commit_msg>Add one liner to add ability to print a normal distribution with mean zero and variance one<commit_after>#!/usr/bin/env python3
#Copyright 2015 BRendan Perrine
import random
random.seed()
print (random.gauss(0,1), "Is a normal distribution with mean zero and standard deviation and variance of one")
|
|
9fb6d0ea74aacc77f06d36805760270854e53eba
|
setup.py
|
setup.py
|
import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
|
import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django_libs',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
|
Add missing django_libs test requirement
|
Add missing django_libs test requirement
|
Python
|
mit
|
claudep/django-calendarium,claudep/django-calendarium,bitmazk/django-calendarium,bitmazk/django-calendarium,claudep/django-calendarium,bitmazk/django-calendarium,claudep/django-calendarium
|
import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
Add missing django_libs test requirement
|
import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django_libs',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
|
<commit_before>import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
<commit_msg>Add missing django_libs test requirement<commit_after>
|
import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django_libs',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
|
import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
Add missing django_libs test requirementimport os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django_libs',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
|
<commit_before>import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
<commit_msg>Add missing django_libs test requirement<commit_after>import os
from setuptools import setup, find_packages
import calendarium
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except IOError:
return ''
setup(
name="django-calendarium",
version=calendarium.__version__,
description=read('DESCRIPTION'),
long_description=read('README.rst'),
license='The MIT License',
platforms=['OS Independent'],
keywords='django, calendar, app, widget, events, schedule',
author='Daniel Kaufhold',
author_email='daniel.kaufhold@bitmazk.com',
url="https://github.com/bitmazk/django-calendarium",
packages=find_packages(),
include_package_data=True,
tests_require=[
'fabric',
'factory_boy<2.0.0',
'django_libs',
'django-nose',
'coverage',
'django-coverage',
'mock',
],
test_suite='calendarium.tests.runtests.runtests',
)
|
95fdd1f96ad4d54fb75ea134ea2195808d4c1116
|
bigO.py
|
bigO.py
|
import timeit
import random
for i in range (10000, 100000, 20000):
t = timeit.Timer("random.randrange(%d) in x"%i, "from __main__ import random, x")
x = list(range(i))
list_time = t.timeit(number = 1000)
x = {j:None for j in range(i)}
dict_time = t.timeit(number = 1000)
print "Counter: " + str(i) + " List: " + str(list_time) + " Dict: " + str(dict_time)
|
Add python script to check big-O notation
|
Add python script to check big-O notation
|
Python
|
mit
|
prabhugs/scripts,prabhugs/scripts
|
Add python script to check big-O notation
|
import timeit
import random
for i in range (10000, 100000, 20000):
t = timeit.Timer("random.randrange(%d) in x"%i, "from __main__ import random, x")
x = list(range(i))
list_time = t.timeit(number = 1000)
x = {j:None for j in range(i)}
dict_time = t.timeit(number = 1000)
print "Counter: " + str(i) + " List: " + str(list_time) + " Dict: " + str(dict_time)
|
<commit_before><commit_msg>Add python script to check big-O notation<commit_after>
|
import timeit
import random
for i in range (10000, 100000, 20000):
t = timeit.Timer("random.randrange(%d) in x"%i, "from __main__ import random, x")
x = list(range(i))
list_time = t.timeit(number = 1000)
x = {j:None for j in range(i)}
dict_time = t.timeit(number = 1000)
print "Counter: " + str(i) + " List: " + str(list_time) + " Dict: " + str(dict_time)
|
Add python script to check big-O notationimport timeit
import random
for i in range (10000, 100000, 20000):
t = timeit.Timer("random.randrange(%d) in x"%i, "from __main__ import random, x")
x = list(range(i))
list_time = t.timeit(number = 1000)
x = {j:None for j in range(i)}
dict_time = t.timeit(number = 1000)
print "Counter: " + str(i) + " List: " + str(list_time) + " Dict: " + str(dict_time)
|
<commit_before><commit_msg>Add python script to check big-O notation<commit_after>import timeit
import random
for i in range (10000, 100000, 20000):
t = timeit.Timer("random.randrange(%d) in x"%i, "from __main__ import random, x")
x = list(range(i))
list_time = t.timeit(number = 1000)
x = {j:None for j in range(i)}
dict_time = t.timeit(number = 1000)
print "Counter: " + str(i) + " List: " + str(list_time) + " Dict: " + str(dict_time)
|
|
a0ae12ddf581eb77af5ce5c6498c26745bd2cfcb
|
stats.py
|
stats.py
|
#!/usr/bin/python
# encoding: utf-8
from __future__ import with_statement
import argparse, re, sys
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.startswith("avr-size"):
# Find example name (everything after last /)
example = line[line.rfind("/") + 1:-1]
elif line.startswith("Program:"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Data:"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
Add script to extract code/data size after make examples.
|
Add script to extract code/data size after make examples.
|
Python
|
lgpl-2.1
|
jfpoilpret/fast-arduino-lib,jfpoilpret/fast-arduino-lib,jfpoilpret/fast-arduino-lib,jfpoilpret/fast-arduino-lib
|
Add script to extract code/data size after make examples.
|
#!/usr/bin/python
# encoding: utf-8
from __future__ import with_statement
import argparse, re, sys
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.startswith("avr-size"):
# Find example name (everything after last /)
example = line[line.rfind("/") + 1:-1]
elif line.startswith("Program:"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Data:"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
<commit_before><commit_msg>Add script to extract code/data size after make examples.<commit_after>
|
#!/usr/bin/python
# encoding: utf-8
from __future__ import with_statement
import argparse, re, sys
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.startswith("avr-size"):
# Find example name (everything after last /)
example = line[line.rfind("/") + 1:-1]
elif line.startswith("Program:"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Data:"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
Add script to extract code/data size after make examples.#!/usr/bin/python
# encoding: utf-8
from __future__ import with_statement
import argparse, re, sys
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.startswith("avr-size"):
# Find example name (everything after last /)
example = line[line.rfind("/") + 1:-1]
elif line.startswith("Program:"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Data:"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
<commit_before><commit_msg>Add script to extract code/data size after make examples.<commit_after>#!/usr/bin/python
# encoding: utf-8
from __future__ import with_statement
import argparse, re, sys
def filter(args):
bytes_extractor = re.compile(r"([0-9]+) bytes")
with args.output:
with args.input:
for line in args.input:
if line.startswith("avr-size"):
# Find example name (everything after last /)
example = line[line.rfind("/") + 1:-1]
elif line.startswith("Program:"):
# Find number of bytes of flash
matcher = bytes_extractor.search(line)
program = matcher.group(1)
elif line.startswith("Data:"):
# Find number of bytes of SRAM
matcher = bytes_extractor.search(line)
data = matcher.group(1)
# Write new line to output
args.output.write("%s\t%s\t%s\n" % (example, program, data))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'XXXXXXXX')
parser.add_argument('input', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('output', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
args = parser.parse_args()
filter(args)
|
|
7b0005eb7d2b2e05a9fd833a2771573aec69c199
|
tests/compare_test_data.py
|
tests/compare_test_data.py
|
import os
import sys
import glob
import h5py
from QGL import *
import QGL
BASE_AWG_DIR = QGL.config.AWGDir
BASE_TEST_DIR = './test_data/awg/'
def compare_sequences():
test_subdirs = ['TestAPS1', 'TestAPS2']
for subdir in test_subdirs:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, subdir, '*'))
for test in testdirs:
# build up subdirectory name
_,name = os.path.split(test)
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
_,subname = os.path.split(testfiles[0])
name = os.path.join(name, subname)
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
newpath = os.path.join(BASE_AWG_DIR, subdir, name)
print("{0} comparing to {1}".format(test, newpath))
newfiles = glob.glob(os.path.join(newpath, '*'))
PulseSequencePlotter.plot_pulse_files_compare(testfiles, newfiles)
c = input('Enter to continue (q to quit)')
if c == 'q':
break
def update_test_files():
for device in ['APS1', 'APS2']:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, 'Test'+device, '*'))
for test in testdirs:
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
for tfile in testfiles:
FID = h5py.File(tfile)
FID['/'].attrs['target hardware'] = device
FID.close()
if __name__ == '__main__':
# run the following line if you are comparing to older h5 files that don't
# have the 'target hardware' attribute
# update_test_files()
output_file()
compare_sequences()
|
Add script for comparing test vectors.
|
Add script for comparing test vectors.
Note: does not yet include the `plot_pulse_files_compare` method because that
needs some cleanup.
|
Python
|
apache-2.0
|
BBN-Q/QGL,BBN-Q/QGL
|
Add script for comparing test vectors.
Note: does not yet include the `plot_pulse_files_compare` method because that
needs some cleanup.
|
import os
import sys
import glob
import h5py
from QGL import *
import QGL
BASE_AWG_DIR = QGL.config.AWGDir
BASE_TEST_DIR = './test_data/awg/'
def compare_sequences():
test_subdirs = ['TestAPS1', 'TestAPS2']
for subdir in test_subdirs:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, subdir, '*'))
for test in testdirs:
# build up subdirectory name
_,name = os.path.split(test)
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
_,subname = os.path.split(testfiles[0])
name = os.path.join(name, subname)
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
newpath = os.path.join(BASE_AWG_DIR, subdir, name)
print("{0} comparing to {1}".format(test, newpath))
newfiles = glob.glob(os.path.join(newpath, '*'))
PulseSequencePlotter.plot_pulse_files_compare(testfiles, newfiles)
c = input('Enter to continue (q to quit)')
if c == 'q':
break
def update_test_files():
for device in ['APS1', 'APS2']:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, 'Test'+device, '*'))
for test in testdirs:
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
for tfile in testfiles:
FID = h5py.File(tfile)
FID['/'].attrs['target hardware'] = device
FID.close()
if __name__ == '__main__':
# run the following line if you are comparing to older h5 files that don't
# have the 'target hardware' attribute
# update_test_files()
output_file()
compare_sequences()
|
<commit_before><commit_msg>Add script for comparing test vectors.
Note: does not yet include the `plot_pulse_files_compare` method because that
needs some cleanup.<commit_after>
|
import os
import sys
import glob
import h5py
from QGL import *
import QGL
BASE_AWG_DIR = QGL.config.AWGDir
BASE_TEST_DIR = './test_data/awg/'
def compare_sequences():
test_subdirs = ['TestAPS1', 'TestAPS2']
for subdir in test_subdirs:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, subdir, '*'))
for test in testdirs:
# build up subdirectory name
_,name = os.path.split(test)
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
_,subname = os.path.split(testfiles[0])
name = os.path.join(name, subname)
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
newpath = os.path.join(BASE_AWG_DIR, subdir, name)
print("{0} comparing to {1}".format(test, newpath))
newfiles = glob.glob(os.path.join(newpath, '*'))
PulseSequencePlotter.plot_pulse_files_compare(testfiles, newfiles)
c = input('Enter to continue (q to quit)')
if c == 'q':
break
def update_test_files():
for device in ['APS1', 'APS2']:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, 'Test'+device, '*'))
for test in testdirs:
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
for tfile in testfiles:
FID = h5py.File(tfile)
FID['/'].attrs['target hardware'] = device
FID.close()
if __name__ == '__main__':
# run the following line if you are comparing to older h5 files that don't
# have the 'target hardware' attribute
# update_test_files()
output_file()
compare_sequences()
|
Add script for comparing test vectors.
Note: does not yet include the `plot_pulse_files_compare` method because that
needs some cleanup.import os
import sys
import glob
import h5py
from QGL import *
import QGL
BASE_AWG_DIR = QGL.config.AWGDir
BASE_TEST_DIR = './test_data/awg/'
def compare_sequences():
test_subdirs = ['TestAPS1', 'TestAPS2']
for subdir in test_subdirs:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, subdir, '*'))
for test in testdirs:
# build up subdirectory name
_,name = os.path.split(test)
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
_,subname = os.path.split(testfiles[0])
name = os.path.join(name, subname)
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
newpath = os.path.join(BASE_AWG_DIR, subdir, name)
print("{0} comparing to {1}".format(test, newpath))
newfiles = glob.glob(os.path.join(newpath, '*'))
PulseSequencePlotter.plot_pulse_files_compare(testfiles, newfiles)
c = input('Enter to continue (q to quit)')
if c == 'q':
break
def update_test_files():
for device in ['APS1', 'APS2']:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, 'Test'+device, '*'))
for test in testdirs:
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
for tfile in testfiles:
FID = h5py.File(tfile)
FID['/'].attrs['target hardware'] = device
FID.close()
if __name__ == '__main__':
# run the following line if you are comparing to older h5 files that don't
# have the 'target hardware' attribute
# update_test_files()
output_file()
compare_sequences()
|
<commit_before><commit_msg>Add script for comparing test vectors.
Note: does not yet include the `plot_pulse_files_compare` method because that
needs some cleanup.<commit_after>import os
import sys
import glob
import h5py
from QGL import *
import QGL
BASE_AWG_DIR = QGL.config.AWGDir
BASE_TEST_DIR = './test_data/awg/'
def compare_sequences():
test_subdirs = ['TestAPS1', 'TestAPS2']
for subdir in test_subdirs:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, subdir, '*'))
for test in testdirs:
# build up subdirectory name
_,name = os.path.split(test)
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
_,subname = os.path.split(testfiles[0])
name = os.path.join(name, subname)
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
newpath = os.path.join(BASE_AWG_DIR, subdir, name)
print("{0} comparing to {1}".format(test, newpath))
newfiles = glob.glob(os.path.join(newpath, '*'))
PulseSequencePlotter.plot_pulse_files_compare(testfiles, newfiles)
c = input('Enter to continue (q to quit)')
if c == 'q':
break
def update_test_files():
for device in ['APS1', 'APS2']:
testdirs = glob.glob(os.path.join(BASE_TEST_DIR, 'Test'+device, '*'))
for test in testdirs:
testfiles = glob.glob(os.path.join(test, '*'))
# recurse into subdirectories
while len(testfiles) == 1 and os.path.isdir(testfiles[0]):
testfiles = glob.glob(os.path.join(testfiles[0], '*'))
for tfile in testfiles:
FID = h5py.File(tfile)
FID['/'].attrs['target hardware'] = device
FID.close()
if __name__ == '__main__':
# run the following line if you are comparing to older h5 files that don't
# have the 'target hardware' attribute
# update_test_files()
output_file()
compare_sequences()
|
|
514f744bc39129a241e704e4ea282befcd31b1b7
|
tests/functional/test_about_page.py
|
tests/functional/test_about_page.py
|
from .base import FunctionalTest
class AboutPageTest(FunctionalTest):
def test_about_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
about_link = self.browser.find_element_by_link_text('ABOUT US')
about_link.click()
# Assert that the About Us link in the navbar works
self.assertIn("About Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'About Us')
|
Add about page functional test
|
Add about page functional test
Test the about page navigation and expected content
|
Python
|
bsd-3-clause
|
andela-kndungu/compshop,kevgathuku/compshop,andela-kndungu/compshop,andela-kndungu/compshop,kevgathuku/compshop,kevgathuku/compshop,kevgathuku/compshop,andela-kndungu/compshop
|
Add about page functional test
Test the about page navigation and expected content
|
from .base import FunctionalTest
class AboutPageTest(FunctionalTest):
def test_about_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
about_link = self.browser.find_element_by_link_text('ABOUT US')
about_link.click()
# Assert that the About Us link in the navbar works
self.assertIn("About Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'About Us')
|
<commit_before><commit_msg>Add about page functional test
Test the about page navigation and expected content<commit_after>
|
from .base import FunctionalTest
class AboutPageTest(FunctionalTest):
def test_about_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
about_link = self.browser.find_element_by_link_text('ABOUT US')
about_link.click()
# Assert that the About Us link in the navbar works
self.assertIn("About Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'About Us')
|
Add about page functional test
Test the about page navigation and expected content
from .base import FunctionalTest
class AboutPageTest(FunctionalTest):
def test_about_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
about_link = self.browser.find_element_by_link_text('ABOUT US')
about_link.click()
# Assert that the About Us link in the navbar works
self.assertIn("About Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'About Us')
|
<commit_before><commit_msg>Add about page functional test
Test the about page navigation and expected content<commit_after>from .base import FunctionalTest
class AboutPageTest(FunctionalTest):
def test_about_page_navigation(self):
self.browser.get(self.live_server_url)
self.browser.set_window_size(1024, 768)
about_link = self.browser.find_element_by_link_text('ABOUT US')
about_link.click()
# Assert that the About Us link in the navbar works
self.assertIn("About Us", self.browser.title)
self.assertEqual(self.browser.find_element_by_tag_name('h1').text, 'About Us')
|
|
a45b62ab76324db2ae4a0842b901fec8e463e2f0
|
tests/test_vector2_ctor.py
|
tests/test_vector2_ctor.py
|
import pytest # type: ignore
from hypothesis import given
from utils import floats, vectors, vector_likes
from ppb_vector import Vector2
class V(Vector2): pass
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=vectors())
def test_ctor_vector_like(cls, x: Vector2):
for x_like in vector_likes(x):
vector = cls(x_like)
assert vector == x == x_like
assert isinstance(vector, cls)
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=floats(), y=floats())
def test_ctor_coordinates(cls, x: float, y: float):
assert cls(x, y) == cls((x, y))
|
Add tests for the constructor
|
tests: Add tests for the constructor
|
Python
|
artistic-2.0
|
ppb/ppb-vector,ppb/ppb-vector
|
tests: Add tests for the constructor
|
import pytest # type: ignore
from hypothesis import given
from utils import floats, vectors, vector_likes
from ppb_vector import Vector2
class V(Vector2): pass
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=vectors())
def test_ctor_vector_like(cls, x: Vector2):
for x_like in vector_likes(x):
vector = cls(x_like)
assert vector == x == x_like
assert isinstance(vector, cls)
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=floats(), y=floats())
def test_ctor_coordinates(cls, x: float, y: float):
assert cls(x, y) == cls((x, y))
|
<commit_before><commit_msg>tests: Add tests for the constructor<commit_after>
|
import pytest # type: ignore
from hypothesis import given
from utils import floats, vectors, vector_likes
from ppb_vector import Vector2
class V(Vector2): pass
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=vectors())
def test_ctor_vector_like(cls, x: Vector2):
for x_like in vector_likes(x):
vector = cls(x_like)
assert vector == x == x_like
assert isinstance(vector, cls)
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=floats(), y=floats())
def test_ctor_coordinates(cls, x: float, y: float):
assert cls(x, y) == cls((x, y))
|
tests: Add tests for the constructor
import pytest  # type: ignore
from hypothesis import given
from utils import floats, vectors, vector_likes
from ppb_vector import Vector2
class V(Vector2): pass
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=vectors())
def test_ctor_vector_like(cls, x: Vector2):
for x_like in vector_likes(x):
vector = cls(x_like)
assert vector == x == x_like
assert isinstance(vector, cls)
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=floats(), y=floats())
def test_ctor_coordinates(cls, x: float, y: float):
assert cls(x, y) == cls((x, y))
|
<commit_before><commit_msg>tests: Add tests for the constructor<commit_after>import pytest # type: ignore
from hypothesis import given
from utils import floats, vectors, vector_likes
from ppb_vector import Vector2
class V(Vector2): pass
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=vectors())
def test_ctor_vector_like(cls, x: Vector2):
for x_like in vector_likes(x):
vector = cls(x_like)
assert vector == x == x_like
assert isinstance(vector, cls)
@pytest.mark.parametrize('cls', [Vector2, V])
@given(x=floats(), y=floats())
def test_ctor_coordinates(cls, x: float, y: float):
assert cls(x, y) == cls((x, y))
|
|
21bee0c5b92d03a4803baf237c460223308ebb9f
|
examples/fakecode.py
|
examples/fakecode.py
|
# Get the hash
# 01/07/2017
# Melissa Hoffman
# Get the current repo
import os
import subprocess
testdir='/Users/melissahoffman1/'
repo = testdir
# Check if the repo is a git repo and get githash
def get_git_hash(path):
os.chdir(path)
try:
sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip()
except subprocess.CalledProcessError as e:
print("ERROR, not a git repository")
return {}
return sha
githash = get_git_hash(repo)
#print(githash)
k = githash
#print(type(k))
v = 'git hash'
#print {k:v}
|
Add a fake source code so you can embed it in the example
|
Add a fake source code so you can embed it in the example
|
Python
|
mit
|
MetaPlot/MetaPlot
|
Add a fake source code so you can embed it in the example
|
# Get the hash
# 01/07/2017
# Melissa Hoffman
# Get the current repo
import os
import subprocess
testdir='/Users/melissahoffman1/'
repo = testdir
# Check if the repo is a git repo and get githash
def get_git_hash(path):
os.chdir(path)
try:
sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip()
except subprocess.CalledProcessError as e:
print("ERROR, not a git repository")
return {}
return sha
githash = get_git_hash(repo)
#print(githash)
k = githash
#print(type(k))
v = 'git hash'
#print {k:v}
|
<commit_before><commit_msg>Add a fake source code so you can embed it in the example<commit_after>
|
# Get the hash
# 01/07/2017
# Melissa Hoffman
# Get the current repo
import os
import subprocess
testdir='/Users/melissahoffman1/'
repo = testdir
# Check if the repo is a git repo and get githash
def get_git_hash(path):
os.chdir(path)
try:
sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip()
except subprocess.CalledProcessError as e:
print("ERROR, not a git repository")
return {}
return sha
githash = get_git_hash(repo)
#print(githash)
k = githash
#print(type(k))
v = 'git hash'
#print {k:v}
|
Add a fake source code so you can embed it in the example
# Get the hash
# 01/07/2017
# Melissa Hoffman
# Get the current repo
import os
import subprocess
testdir='/Users/melissahoffman1/'
repo = testdir
# Check if the repo is a git repo and get githash
def get_git_hash(path):
os.chdir(path)
try:
sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip()
except subprocess.CalledProcessError as e:
print("ERROR, not a git repository")
return {}
return sha
githash = get_git_hash(repo)
#print(githash)
k = githash
#print(type(k))
v = 'git hash'
#print {k:v}
|
<commit_before><commit_msg>Add a fake source code so you can embed it in the example<commit_after># Get the hash
# 01/07/2017
# Melissa Hoffman
# Get the current repo
import os
import subprocess
testdir='/Users/melissahoffman1/'
repo = testdir
# Check if the repo is a git repo and get githash
def get_git_hash(path):
os.chdir(path)
try:
sha = subprocess.check_output(['git','rev-parse','HEAD'],shell=False).strip()
except subprocess.CalledProcessError as e:
print("ERROR, not a git repository")
return {}
return sha
githash = get_git_hash(repo)
#print(githash)
k = githash
#print(type(k))
v = 'git hash'
#print {k:v}
|
|
210f9c6acefdf2f51d33baa1ed7a2c131729fb93
|
common/djangoapps/third_party_auth/migrations/0004_auto_20200919_0955.py
|
common/djangoapps/third_party_auth/migrations/0004_auto_20200919_0955.py
|
# Generated by Django 2.2.16 on 2020-09-19 09:55
from django.db import migrations, models
import openedx.core.lib.hash_utils
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0003_samlconfiguration_is_public'),
]
operations = [
migrations.AlterField(
model_name='ltiproviderconfig',
name='lti_consumer_secret',
field=models.CharField(blank=True, default=openedx.core.lib.hash_utils.create_hash256, help_text='The shared secret that the LTI Tool Consumer will use to authenticate requests. Only this edX instance and this tool consumer instance should know this value. For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} in your instance\'s Django setttigs (or lms.yml)', max_length=255),
),
migrations.AlterField(
model_name='oauth2providerconfig',
name='secret',
field=models.TextField(blank=True, help_text='For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} in your instance\'s Django settings (or lms.yml)', verbose_name='Client Secret'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='private_key',
field=models.TextField(blank=True, help_text='To generate a key pair as two files, run "openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". Paste the contents of saml.key here. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting in your instance\'s Django settings (or lms.yml).'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='public_key',
field=models.TextField(blank=True, help_text="Public key certificate. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting in your instance's Django settings (or lms.yml)."),
),
]
|
Update migrations to use lms.yml in the help text
|
refactor(lms): Update migrations to use lms.yml in the help text
|
Python
|
agpl-3.0
|
stvstnfrd/edx-platform,eduNEXT/edx-platform,angelapper/edx-platform,angelapper/edx-platform,EDUlib/edx-platform,eduNEXT/edx-platform,arbrandes/edx-platform,edx/edx-platform,arbrandes/edx-platform,eduNEXT/edx-platform,arbrandes/edx-platform,stvstnfrd/edx-platform,eduNEXT/edunext-platform,angelapper/edx-platform,EDUlib/edx-platform,arbrandes/edx-platform,angelapper/edx-platform,stvstnfrd/edx-platform,EDUlib/edx-platform,eduNEXT/edunext-platform,eduNEXT/edunext-platform,eduNEXT/edunext-platform,EDUlib/edx-platform,edx/edx-platform,edx/edx-platform,edx/edx-platform,stvstnfrd/edx-platform,eduNEXT/edx-platform
|
refactor(lms): Update migrations to use lms.yml in the help text
|
# Generated by Django 2.2.16 on 2020-09-19 09:55
from django.db import migrations, models
import openedx.core.lib.hash_utils
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0003_samlconfiguration_is_public'),
]
operations = [
migrations.AlterField(
model_name='ltiproviderconfig',
name='lti_consumer_secret',
field=models.CharField(blank=True, default=openedx.core.lib.hash_utils.create_hash256, help_text='The shared secret that the LTI Tool Consumer will use to authenticate requests. Only this edX instance and this tool consumer instance should know this value. For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} in your instance\'s Django setttigs (or lms.yml)', max_length=255),
),
migrations.AlterField(
model_name='oauth2providerconfig',
name='secret',
field=models.TextField(blank=True, help_text='For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} in your instance\'s Django settings (or lms.yml)', verbose_name='Client Secret'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='private_key',
field=models.TextField(blank=True, help_text='To generate a key pair as two files, run "openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". Paste the contents of saml.key here. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting in your instance\'s Django settings (or lms.yml).'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='public_key',
field=models.TextField(blank=True, help_text="Public key certificate. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting in your instance's Django settings (or lms.yml)."),
),
]
|
<commit_before><commit_msg>refactor(lms): Update migrations to use lms.yml in the help text<commit_after>
|
# Generated by Django 2.2.16 on 2020-09-19 09:55
from django.db import migrations, models
import openedx.core.lib.hash_utils
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0003_samlconfiguration_is_public'),
]
operations = [
migrations.AlterField(
model_name='ltiproviderconfig',
name='lti_consumer_secret',
field=models.CharField(blank=True, default=openedx.core.lib.hash_utils.create_hash256, help_text='The shared secret that the LTI Tool Consumer will use to authenticate requests. Only this edX instance and this tool consumer instance should know this value. For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} in your instance\'s Django setttigs (or lms.yml)', max_length=255),
),
migrations.AlterField(
model_name='oauth2providerconfig',
name='secret',
field=models.TextField(blank=True, help_text='For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} in your instance\'s Django settings (or lms.yml)', verbose_name='Client Secret'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='private_key',
field=models.TextField(blank=True, help_text='To generate a key pair as two files, run "openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". Paste the contents of saml.key here. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting in your instance\'s Django settings (or lms.yml).'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='public_key',
field=models.TextField(blank=True, help_text="Public key certificate. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting in your instance's Django settings (or lms.yml)."),
),
]
|
refactor(lms): Update migrations to use lms.yml in the help text
# Generated by Django 2.2.16 on 2020-09-19 09:55
from django.db import migrations, models
import openedx.core.lib.hash_utils
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0003_samlconfiguration_is_public'),
]
operations = [
migrations.AlterField(
model_name='ltiproviderconfig',
name='lti_consumer_secret',
field=models.CharField(blank=True, default=openedx.core.lib.hash_utils.create_hash256, help_text='The shared secret that the LTI Tool Consumer will use to authenticate requests. Only this edX instance and this tool consumer instance should know this value. For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} in your instance\'s Django setttigs (or lms.yml)', max_length=255),
),
migrations.AlterField(
model_name='oauth2providerconfig',
name='secret',
field=models.TextField(blank=True, help_text='For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} in your instance\'s Django settings (or lms.yml)', verbose_name='Client Secret'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='private_key',
field=models.TextField(blank=True, help_text='To generate a key pair as two files, run "openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". Paste the contents of saml.key here. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting in your instance\'s Django settings (or lms.yml).'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='public_key',
field=models.TextField(blank=True, help_text="Public key certificate. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting in your instance's Django settings (or lms.yml)."),
),
]
|
<commit_before><commit_msg>refactor(lms): Update migrations to use lms.yml in the help text<commit_after># Generated by Django 2.2.16 on 2020-09-19 09:55
from django.db import migrations, models
import openedx.core.lib.hash_utils
class Migration(migrations.Migration):
dependencies = [
('third_party_auth', '0003_samlconfiguration_is_public'),
]
operations = [
migrations.AlterField(
model_name='ltiproviderconfig',
name='lti_consumer_secret',
field=models.CharField(blank=True, default=openedx.core.lib.hash_utils.create_hash256, help_text='The shared secret that the LTI Tool Consumer will use to authenticate requests. Only this edX instance and this tool consumer instance should know this value. For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_LTI_CONSUMER_SECRETS = {"consumer key": "secret", ...} in your instance\'s Django setttigs (or lms.yml)', max_length=255),
),
migrations.AlterField(
model_name='oauth2providerconfig',
name='secret',
field=models.TextField(blank=True, help_text='For increased security, you can avoid storing this in your database by leaving this field blank and setting SOCIAL_AUTH_OAUTH_SECRETS = {"(backend name)": "secret", ...} in your instance\'s Django settings (or lms.yml)', verbose_name='Client Secret'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='private_key',
field=models.TextField(blank=True, help_text='To generate a key pair as two files, run "openssl req -new -x509 -days 3652 -nodes -out saml.crt -keyout saml.key". Paste the contents of saml.key here. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PRIVATE_KEY setting in your instance\'s Django settings (or lms.yml).'),
),
migrations.AlterField(
model_name='samlconfiguration',
name='public_key',
field=models.TextField(blank=True, help_text="Public key certificate. For increased security, you can avoid storing this in your database by leaving this field blank and setting it via the SOCIAL_AUTH_SAML_SP_PUBLIC_CERT setting in your instance's Django settings (or lms.yml)."),
),
]
|
|
b044ba312b126cb17bf906b1984e7b407509fcc6
|
Geneagrapher/makedist.py
|
Geneagrapher/makedist.py
|
"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
Add script to assist in packaging.
|
Add script to assist in packaging.
|
Python
|
mit
|
davidalber/Geneagrapher,davidalber/Geneagrapher
|
Add script to assist in packaging.
|
"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
<commit_before><commit_msg>Add script to assist in packaging.<commit_after>
|
"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
Add script to assist in packaging.
"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
<commit_before><commit_msg>Add script to assist in packaging.<commit_after>"""This tool sets up a distribution of the software by automating
several tasks that need to be done.
The directory should be in pristine condition when this is run (i.e.,
devoid of files that need to be removed before packaging begins). It
is best to run this on a fresh check out of the repository."""
import os
import licensify
if __name__ == '__main__':
# "Licensify" the source files.
files = ['geneagrapher/GGraph.py', 'geneagrapher/geneagrapher.py',
'geneagrapher/grab.py', 'geneagrapher/ggrapher.py']
license = 'COPYING'
for file in files:
res = licensify.prependLicense(file, license)
fout = open(file, "w")
fout.write(res)
fout.close()
# Remove files (including this one) that are not to be in the
# distribution.
os.system('svn rm licensify.py')
os.system('rm -f licensify.pyc')
os.system('svn rm makedist.py')
os.system('rm -f makedist.pyc')
# Make the distribution.
os.system('python setup.py sdist --format gztar,zip')
# Compute digests and signatures.
os.chdir('dist')
dirl = os.listdir('.')
for file in dirl:
comm = 'sha1sum %s > %s.sha1' % (file, file)
os.system(comm)
comm = 'gpg -abs %s' % (file)
os.system(comm)
os.chdir('..')
# Add files to repository.
os.system('svn add Geneagrapher.egg-info')
os.system('svn add dist')
|
|
410b354cb0e72ba741439a337aba4ef4c3cda8b1
|
src/ossa.py
|
src/ossa.py
|
#!/usr/bin/python
import re
import sys
""" Taken from http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python"""
def sorted_nicely(l):
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
print "Hello World"
# Usage
if len(sys.argv) == 1:
print "Usage: %s <path>+" % sys.argv[0]
sys.exit(-1)
functionsFound = set()
filenames = sys.argv[1:]
for filename in filenames:
lsl = file(filename).readlines()
scriptFuncRe = re.compile("\s+((?:(?:ll)|(?:os)|(?:mod)|(?:Json)|(?:ls))\w+)\(");
for line in lsl:
# print "Analyzing %s" % line
match = scriptFuncRe.search(line)
if match != None:
# print "Found match %s: %s" % (fn, match.group(1))
functionsFound.add(match.group(1))
for fn in sorted_nicely(functionsFound):
print "Found %s" % fn
print "%s functions used" % len(functionsFound)
print "Fin"
|
Add existing python file for performing a very crude analysis on a set of lsl files (as taken from an untarred OAR, for example)
|
Add existing python file for performing a very crude analysis on a set of lsl files (as taken from an untarred OAR, for example)
|
Python
|
bsd-3-clause
|
justinccdev/opensimulator-tools,justinccdev/opensimulator-tools,justinccdev/opensimulator-tools,justinccdev/opensimulator-tools
|
Add existing python file for performing a very crude analysis on a set of lsl files (as taken from an untarred OAR, for example)
|
#!/usr/bin/python
import re
import sys
""" Taken from http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python"""
def sorted_nicely(l):
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
print "Hello World"
# Usage
if len(sys.argv) == 1:
print "Usage: %s <path>+" % sys.argv[0]
sys.exit(-1)
functionsFound = set()
filenames = sys.argv[1:]
for filename in filenames:
lsl = file(filename).readlines()
scriptFuncRe = re.compile("\s+((?:(?:ll)|(?:os)|(?:mod)|(?:Json)|(?:ls))\w+)\(");
for line in lsl:
# print "Analyzing %s" % line
match = scriptFuncRe.search(line)
if match != None:
# print "Found match %s: %s" % (fn, match.group(1))
functionsFound.add(match.group(1))
for fn in sorted_nicely(functionsFound):
print "Found %s" % fn
print "%s functions used" % len(functionsFound)
print "Fin"
|
<commit_before><commit_msg>Add existing python file for performing a very crude analysis on a set of lsl files (as taken from an untarred OAR, for example)<commit_after>
|
#!/usr/bin/python
import re
import sys
""" Taken from http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python"""
def sorted_nicely(l):
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
print "Hello World"
# Usage
if len(sys.argv) == 1:
print "Usage: %s <path>+" % sys.argv[0]
sys.exit(-1)
functionsFound = set()
filenames = sys.argv[1:]
for filename in filenames:
lsl = file(filename).readlines()
scriptFuncRe = re.compile("\s+((?:(?:ll)|(?:os)|(?:mod)|(?:Json)|(?:ls))\w+)\(");
for line in lsl:
# print "Analyzing %s" % line
match = scriptFuncRe.search(line)
if match != None:
# print "Found match %s: %s" % (fn, match.group(1))
functionsFound.add(match.group(1))
for fn in sorted_nicely(functionsFound):
print "Found %s" % fn
print "%s functions used" % len(functionsFound)
print "Fin"
|
Add existing python file for performing a very crude analysis on a set of lsl files (as taken from an untarred OAR, for example)
#!/usr/bin/python
import re
import sys
""" Taken from http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python"""
def sorted_nicely(l):
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
print "Hello World"
# Usage
if len(sys.argv) == 1:
print "Usage: %s <path>+" % sys.argv[0]
sys.exit(-1)
functionsFound = set()
filenames = sys.argv[1:]
for filename in filenames:
lsl = file(filename).readlines()
scriptFuncRe = re.compile("\s+((?:(?:ll)|(?:os)|(?:mod)|(?:Json)|(?:ls))\w+)\(");
for line in lsl:
# print "Analyzing %s" % line
match = scriptFuncRe.search(line)
if match != None:
# print "Found match %s: %s" % (fn, match.group(1))
functionsFound.add(match.group(1))
for fn in sorted_nicely(functionsFound):
print "Found %s" % fn
print "%s functions used" % len(functionsFound)
print "Fin"
|
<commit_before><commit_msg>Add existing python file for performing a very crude analysis on a set of lsl files (as taken from an untarred OAR, for example)<commit_after>#!/usr/bin/python
import re
import sys
""" Taken from http://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python"""
def sorted_nicely(l):
""" Sort the given iterable in the way that humans expect."""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key = alphanum_key)
print "Hello World"
# Usage
if len(sys.argv) == 1:
print "Usage: %s <path>+" % sys.argv[0]
sys.exit(-1)
functionsFound = set()
filenames = sys.argv[1:]
for filename in filenames:
lsl = file(filename).readlines()
scriptFuncRe = re.compile("\s+((?:(?:ll)|(?:os)|(?:mod)|(?:Json)|(?:ls))\w+)\(");
for line in lsl:
# print "Analyzing %s" % line
match = scriptFuncRe.search(line)
if match != None:
# print "Found match %s: %s" % (fn, match.group(1))
functionsFound.add(match.group(1))
for fn in sorted_nicely(functionsFound):
print "Found %s" % fn
print "%s functions used" % len(functionsFound)
print "Fin"
|
|
71d8ef8a872656df8a2319032855cb2b5ea5ed4b
|
examples/bench/rlserver.py
|
examples/bench/rlserver.py
|
import argparse
import asyncio
import gc
import uvloop
import os.path
import socket as socket_module
from socket import *
PRINT = 0
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.readline()
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5, loop=loop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('readline performance test')
print('serving on: {}'.format(addr))
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr, loop=loop, limit=256000)
else:
coro = asyncio.start_server(echo_client_streams,
*addr, loop=loop, limit=256000)
srv = loop.run_until_complete(coro)
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
|
Add a new benchmark - readline server
|
examples/bench: Add a new benchmark - readline server
|
Python
|
apache-2.0
|
MagicStack/uvloop,1st1/uvloop,MagicStack/uvloop
|
examples/bench: Add a new benchmark - readline server
|
import argparse
import asyncio
import gc
import uvloop
import os.path
import socket as socket_module
from socket import *
PRINT = 0
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.readline()
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5, loop=loop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('readline performance test')
print('serving on: {}'.format(addr))
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr, loop=loop, limit=256000)
else:
coro = asyncio.start_server(echo_client_streams,
*addr, loop=loop, limit=256000)
srv = loop.run_until_complete(coro)
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
|
<commit_before><commit_msg>examples/bench: Add a new benchmark - readline server<commit_after>
|
import argparse
import asyncio
import gc
import uvloop
import os.path
import socket as socket_module
from socket import *
PRINT = 0
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.readline()
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5, loop=loop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('readline performance test')
print('serving on: {}'.format(addr))
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr, loop=loop, limit=256000)
else:
coro = asyncio.start_server(echo_client_streams,
*addr, loop=loop, limit=256000)
srv = loop.run_until_complete(coro)
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
|
examples/bench: Add a new benchmark - readline server
import argparse
import asyncio
import gc
import uvloop
import os.path
import socket as socket_module
from socket import *
PRINT = 0
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.readline()
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5, loop=loop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('readline performance test')
print('serving on: {}'.format(addr))
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr, loop=loop, limit=256000)
else:
coro = asyncio.start_server(echo_client_streams,
*addr, loop=loop, limit=256000)
srv = loop.run_until_complete(coro)
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
|
<commit_before><commit_msg>examples/bench: Add a new benchmark - readline server<commit_after>import argparse
import asyncio
import gc
import uvloop
import os.path
import socket as socket_module
from socket import *
PRINT = 0
async def echo_client_streams(reader, writer):
sock = writer.get_extra_info('socket')
try:
sock.setsockopt(IPPROTO_TCP, TCP_NODELAY, 1)
except (OSError, NameError):
pass
if PRINT:
print('Connection from', sock.getpeername())
while True:
data = await reader.readline()
if not data:
break
writer.write(data)
if PRINT:
print('Connection closed')
writer.close()
async def print_debug(loop):
while True:
print(chr(27) + "[2J") # clear screen
loop.print_debug_info()
await asyncio.sleep(0.5, loop=loop)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--uvloop', default=False, action='store_true')
parser.add_argument('--addr', default='127.0.0.1:25000', type=str)
parser.add_argument('--print', default=False, action='store_true')
args = parser.parse_args()
if args.uvloop:
loop = uvloop.new_event_loop()
print('using UVLoop')
else:
loop = asyncio.new_event_loop()
print('using asyncio loop')
asyncio.set_event_loop(loop)
loop.set_debug(False)
if args.print:
PRINT = 1
if hasattr(loop, 'print_debug_info'):
loop.create_task(print_debug(loop))
PRINT = 0
unix = False
if args.addr.startswith('file:'):
unix = True
addr = args.addr[5:]
if os.path.exists(addr):
os.remove(addr)
else:
addr = args.addr.split(':')
addr[1] = int(addr[1])
addr = tuple(addr)
print('readline performance test')
print('serving on: {}'.format(addr))
print('using asyncio/streams')
if unix:
coro = asyncio.start_unix_server(echo_client_streams,
addr, loop=loop, limit=256000)
else:
coro = asyncio.start_server(echo_client_streams,
*addr, loop=loop, limit=256000)
srv = loop.run_until_complete(coro)
try:
loop.run_forever()
finally:
if hasattr(loop, 'print_debug_info'):
gc.collect()
print(chr(27) + "[2J")
loop.print_debug_info()
loop.close()
|
|
c1fb3eb548b15ab8049841696b7ae74604c8ed89
|
tests/conftest.py
|
tests/conftest.py
|
"""
Config instructions and test fixtures
"""
import pytest
import os
import sys
# # these are just some fun dividers to make the output pretty
# # completely unnecessary, I was just playing with autouse fixtures
# @pytest.fixture(scope="function", autouse=True)
# def divider_function(request):
# print('\n --- function %s() start ---' % request.function.__name__)
# def fin():
# print(' --- function %s() done ---' % request.function.__name__)
# request.addfinalizer(fin)
@pytest.fixture(scope="session", autouse=True)
def set_up_ini(request):
print("in set_up_ini")
try:
# need to back up a directory
path = os.path.dirname(os.path.abspath("conftest.py"))
print(path)
if not os.path.isfile(path + "/pytest.ini"):
raise FileNotFoundError("Pytest.ini not found.")
except FileNotFoundError as args:
print(args)
try:
import shutil
print("Creating pytest.ini")
shutil.copyfile(path + "/example-pytest.ini", path + "/pytest.ini")
except OSError as args:
print("Error creating pytest.ini. ", args)
else:
print("Don't forget to add node admin credentials to pytest.ini!")
|
Test for pytest.ini as session-scoped fixture
|
Test for pytest.ini as session-scoped fixture
|
Python
|
agpl-3.0
|
opentechinstitute/commotion-router-test-suite
|
Test for pytest.ini as session-scoped fixture
|
"""
Config instructions and test fixtures
"""
import pytest
import os
import sys
# # these are just some fun dividers to make the output pretty
# # completely unnecessary, I was just playing with autouse fixtures
# @pytest.fixture(scope="function", autouse=True)
# def divider_function(request):
# print('\n --- function %s() start ---' % request.function.__name__)
# def fin():
# print(' --- function %s() done ---' % request.function.__name__)
# request.addfinalizer(fin)
@pytest.fixture(scope="session", autouse=True)
def set_up_ini(request):
print("in set_up_ini")
try:
# need to back up a directory
path = os.path.dirname(os.path.abspath("conftest.py"))
print(path)
if not os.path.isfile(path + "/pytest.ini"):
raise FileNotFoundError("Pytest.ini not found.")
except FileNotFoundError as args:
print(args)
try:
import shutil
print("Creating pytest.ini")
shutil.copyfile(path + "/example-pytest.ini", path + "/pytest.ini")
except OSError as args:
print("Error creating pytest.ini. ", args)
else:
print("Don't forget to add node admin credentials to pytest.ini!")
|
<commit_before><commit_msg>Test for pytest.ini as session-scoped fixture<commit_after>
|
"""
Config instructions and test fixtures
"""
import pytest
import os
import sys
# # these are just some fun dividers to make the output pretty
# # completely unnecessary, I was just playing with autouse fixtures
# @pytest.fixture(scope="function", autouse=True)
# def divider_function(request):
# print('\n --- function %s() start ---' % request.function.__name__)
# def fin():
# print(' --- function %s() done ---' % request.function.__name__)
# request.addfinalizer(fin)
@pytest.fixture(scope="session", autouse=True)
def set_up_ini(request):
print("in set_up_ini")
try:
# need to back up a directory
path = os.path.dirname(os.path.abspath("conftest.py"))
print(path)
if not os.path.isfile(path + "/pytest.ini"):
raise FileNotFoundError("Pytest.ini not found.")
except FileNotFoundError as args:
print(args)
try:
import shutil
print("Creating pytest.ini")
shutil.copyfile(path + "/example-pytest.ini", path + "/pytest.ini")
except OSError as args:
print("Error creating pytest.ini. ", args)
else:
print("Don't forget to add node admin credentials to pytest.ini!")
|
Test for pytest.ini as session-scoped fixture
"""
Config instructions and test fixtures
"""
import pytest
import os
import sys
# # these are just some fun dividers to make the output pretty
# # completely unnecessary, I was just playing with autouse fixtures
# @pytest.fixture(scope="function", autouse=True)
# def divider_function(request):
# print('\n --- function %s() start ---' % request.function.__name__)
# def fin():
# print(' --- function %s() done ---' % request.function.__name__)
# request.addfinalizer(fin)
@pytest.fixture(scope="session", autouse=True)
def set_up_ini(request):
print("in set_up_ini")
try:
# need to back up a directory
path = os.path.dirname(os.path.abspath("conftest.py"))
print(path)
if not os.path.isfile(path + "/pytest.ini"):
raise FileNotFoundError("Pytest.ini not found.")
except FileNotFoundError as args:
print(args)
try:
import shutil
print("Creating pytest.ini")
shutil.copyfile(path + "/example-pytest.ini", path + "/pytest.ini")
except OSError as args:
print("Error creating pytest.ini. ", args)
else:
print("Don't forget to add node admin credentials to pytest.ini!")
|
<commit_before><commit_msg>Test for pytest.ini as session-scoped fixture<commit_after>"""
Config instructions and test fixtures
"""
import pytest
import os
import sys
# # these are just some fun dividers to make the output pretty
# # completely unnecessary, I was just playing with autouse fixtures
# @pytest.fixture(scope="function", autouse=True)
# def divider_function(request):
# print('\n --- function %s() start ---' % request.function.__name__)
# def fin():
# print(' --- function %s() done ---' % request.function.__name__)
# request.addfinalizer(fin)
@pytest.fixture(scope="session", autouse=True)
def set_up_ini(request):
print("in set_up_ini")
try:
# need to back up a directory
path = os.path.dirname(os.path.abspath("conftest.py"))
print(path)
if not os.path.isfile(path + "/pytest.ini"):
raise FileNotFoundError("Pytest.ini not found.")
except FileNotFoundError as args:
print(args)
try:
import shutil
print("Creating pytest.ini")
shutil.copyfile(path + "/example-pytest.ini", path + "/pytest.ini")
except OSError as args:
print("Error creating pytest.ini. ", args)
else:
print("Don't forget to add node admin credentials to pytest.ini!")
|
|
002e903c978a30f27ed24316bb85958e5c69a259
|
CodeFights/countVisitors.py
|
CodeFights/countVisitors.py
|
#!/usr/local/bin/python
# Code Fights Count Visitors Problem
class Counter(object):
def __init__(self, value):
self.value = value
def inc(self):
self.value += 1
def get(self):
return self.value
def countVisitors(beta, k, visitors):
counter = Counter(beta)
for visitor in visitors:
if visitor >= k:
counter.inc()
return counter.get()
def main():
tests = [
[22, 5, [4, 6, 6, 5, 2, 2, 5], 26],
[1, 5, [], 1],
[34, 8, [1, 2, 3, 4, 5, 6, 7], 34],
[4, 5, [3, 4, 65, 3, 2, 4, 5, 3, 5], 7],
[38, 20, [20], 39]
]
for t in tests:
res = countVisitors(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: countVisitors({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: countVisitors({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights count visitors problem
|
Solve Code Fights count visitors problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights count visitors problem
|
#!/usr/local/bin/python
# Code Fights Count Visitors Problem
class Counter(object):
def __init__(self, value):
self.value = value
def inc(self):
self.value += 1
def get(self):
return self.value
def countVisitors(beta, k, visitors):
counter = Counter(beta)
for visitor in visitors:
if visitor >= k:
counter.inc()
return counter.get()
def main():
tests = [
[22, 5, [4, 6, 6, 5, 2, 2, 5], 26],
[1, 5, [], 1],
[34, 8, [1, 2, 3, 4, 5, 6, 7], 34],
[4, 5, [3, 4, 65, 3, 2, 4, 5, 3, 5], 7],
[38, 20, [20], 39]
]
for t in tests:
res = countVisitors(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: countVisitors({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: countVisitors({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights count visitors problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Count Visitors Problem
class Counter(object):
def __init__(self, value):
self.value = value
def inc(self):
self.value += 1
def get(self):
return self.value
def countVisitors(beta, k, visitors):
counter = Counter(beta)
for visitor in visitors:
if visitor >= k:
counter.inc()
return counter.get()
def main():
tests = [
[22, 5, [4, 6, 6, 5, 2, 2, 5], 26],
[1, 5, [], 1],
[34, 8, [1, 2, 3, 4, 5, 6, 7], 34],
[4, 5, [3, 4, 65, 3, 2, 4, 5, 3, 5], 7],
[38, 20, [20], 39]
]
for t in tests:
res = countVisitors(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: countVisitors({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: countVisitors({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights count visitors problem
#!/usr/local/bin/python
# Code Fights Count Visitors Problem
class Counter(object):
def __init__(self, value):
self.value = value
def inc(self):
self.value += 1
def get(self):
return self.value
def countVisitors(beta, k, visitors):
counter = Counter(beta)
for visitor in visitors:
if visitor >= k:
counter.inc()
return counter.get()
def main():
tests = [
[22, 5, [4, 6, 6, 5, 2, 2, 5], 26],
[1, 5, [], 1],
[34, 8, [1, 2, 3, 4, 5, 6, 7], 34],
[4, 5, [3, 4, 65, 3, 2, 4, 5, 3, 5], 7],
[38, 20, [20], 39]
]
for t in tests:
res = countVisitors(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: countVisitors({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: countVisitors({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights count visitors problem<commit_after>#!/usr/local/bin/python
# Code Fights Count Visitors Problem
class Counter(object):
def __init__(self, value):
self.value = value
def inc(self):
self.value += 1
def get(self):
return self.value
def countVisitors(beta, k, visitors):
counter = Counter(beta)
for visitor in visitors:
if visitor >= k:
counter.inc()
return counter.get()
def main():
tests = [
[22, 5, [4, 6, 6, 5, 2, 2, 5], 26],
[1, 5, [], 1],
[34, 8, [1, 2, 3, 4, 5, 6, 7], 34],
[4, 5, [3, 4, 65, 3, 2, 4, 5, 3, 5], 7],
[38, 20, [20], 39]
]
for t in tests:
res = countVisitors(t[0], t[1], t[2])
ans = t[3]
if ans == res:
print("PASSED: countVisitors({}, {}, {}) returned {}"
.format(t[0], t[1], t[2], res))
else:
print(("FAILED: countVisitors({}, {}, {}) returned {},"
"answer: {}").format(t[0], t[1], t[2], res, ans))
if __name__ == '__main__':
main()
|
|
f53aef9fdcd01fdb8607984e38b4fb8c5813aacf
|
CodeFights/fibonacciList.py
|
CodeFights/fibonacciList.py
|
#!/usr/local/bin/python
# Code Fights Fibonacci List Problem
from functools import reduce
def fibonacciList(n):
return [[0] * x for x in reduce(lambda x, n: x + [sum(x[-2:])],
range(n - 2), [0, 1])]
def main():
tests = [
[
6,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0]]
],
[
2,
[[],
[0]]
],
[
3,
[[],
[0],
[0]]
],
[
8,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
5,
[[],
[0],
[0],
[0, 0],
[0, 0, 0]]
]
]
for t in tests:
res = fibonacciList(t[0])
ans = t[1]
if ans == res:
print("PASSED: fibonacciList({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: fibonacciList({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights fibonacci list problem
|
Solve Code Fights fibonacci list problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights fibonacci list problem
|
#!/usr/local/bin/python
# Code Fights Fibonacci List Problem
from functools import reduce
def fibonacciList(n):
return [[0] * x for x in reduce(lambda x, n: x + [sum(x[-2:])],
range(n - 2), [0, 1])]
def main():
tests = [
[
6,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0]]
],
[
2,
[[],
[0]]
],
[
3,
[[],
[0],
[0]]
],
[
8,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
5,
[[],
[0],
[0],
[0, 0],
[0, 0, 0]]
]
]
for t in tests:
res = fibonacciList(t[0])
ans = t[1]
if ans == res:
print("PASSED: fibonacciList({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: fibonacciList({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights fibonacci list problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Fibonacci List Problem
from functools import reduce
def fibonacciList(n):
return [[0] * x for x in reduce(lambda x, n: x + [sum(x[-2:])],
range(n - 2), [0, 1])]
def main():
tests = [
[
6,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0]]
],
[
2,
[[],
[0]]
],
[
3,
[[],
[0],
[0]]
],
[
8,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
5,
[[],
[0],
[0],
[0, 0],
[0, 0, 0]]
]
]
for t in tests:
res = fibonacciList(t[0])
ans = t[1]
if ans == res:
print("PASSED: fibonacciList({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: fibonacciList({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights fibonacci list problem#!/usr/local/bin/python
# Code Fights Fibonacci List Problem
from functools import reduce
def fibonacciList(n):
return [[0] * x for x in reduce(lambda x, n: x + [sum(x[-2:])],
range(n - 2), [0, 1])]
def main():
tests = [
[
6,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0]]
],
[
2,
[[],
[0]]
],
[
3,
[[],
[0],
[0]]
],
[
8,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
5,
[[],
[0],
[0],
[0, 0],
[0, 0, 0]]
]
]
for t in tests:
res = fibonacciList(t[0])
ans = t[1]
if ans == res:
print("PASSED: fibonacciList({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: fibonacciList({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights fibonacci list problem<commit_after>#!/usr/local/bin/python
# Code Fights Fibonacci List Problem
from functools import reduce
def fibonacciList(n):
return [[0] * x for x in reduce(lambda x, n: x + [sum(x[-2:])],
range(n - 2), [0, 1])]
def main():
tests = [
[
6,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0]]
],
[
2,
[[],
[0]]
],
[
3,
[[],
[0],
[0]]
],
[
8,
[[],
[0],
[0],
[0, 0],
[0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]
],
[
5,
[[],
[0],
[0],
[0, 0],
[0, 0, 0]]
]
]
for t in tests:
res = fibonacciList(t[0])
ans = t[1]
if ans == res:
print("PASSED: fibonacciList({}) returned {}"
.format(t[0], res))
else:
print(("FAILED: fibonacciList({}) returned {}, answer: {}")
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
|
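The reduce expression in the fibonacci-list record above is terse; the sketch below unrolls it for n = 6 to show the intermediate accumulator values (illustrative only, not part of the recorded commit):

from functools import reduce

# Accumulator growth for n = 6:
#   [0, 1] -> [0, 1, 1] -> [0, 1, 1, 2] -> [0, 1, 1, 2, 3] -> [0, 1, 1, 2, 3, 5]
lengths = reduce(lambda acc, _: acc + [sum(acc[-2:])], range(6 - 2), [0, 1])
assert lengths == [0, 1, 1, 2, 3, 5]
# Each length then becomes a list of that many zeros, as in the expected test output.
assert [[0] * x for x in lengths] == [[], [0], [0], [0, 0], [0, 0, 0], [0, 0, 0, 0, 0]]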
11296e24228ee10be009b04a9909504a8e8d5ace
|
tests/models/character/test_saver.py
|
tests/models/character/test_saver.py
|
import unittest
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from classes import Paladin
from models.characters.saved_character import SavedCharacterSchema
from models.items.item_template import ItemTemplateSchema
from tests.models.character.character_mock import character, char_equipment, entry
from models.characters.saver import save_character
class SavedCharacterSaverTests(unittest.TestCase):
"""
Get the Mock character, change his name and try to save him in the DB
"""
def setUp(self):
self.expected_character = character
self.expected_character.name = 'Tester'
def test_save_character(self):
save_character(self.expected_character)
received_character = session.query(SavedCharacterSchema).filter_by(name=self.expected_character.name).first()
self.assertIsNotNone(received_character)
received_character = received_character.convert_to_character_object()
# assert they're the same
self.assertEqual(vars(received_character), vars(self.expected_character))
def tearDownModule():
import tests.delete_test_db # module that deletes the DB :)
if __name__ == '__main__':
unittest.main()
|
Test for the save_character() function
|
Test for the save_character() function
|
Python
|
mit
|
Enether/python_wow
|
Test for the save_character() function
|
import unittest
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from classes import Paladin
from models.characters.saved_character import SavedCharacterSchema
from models.items.item_template import ItemTemplateSchema
from tests.models.character.character_mock import character, char_equipment, entry
from models.characters.saver import save_character
class SavedCharacterSaverTests(unittest.TestCase):
"""
Get the Mock character, change his name and try to save him in the DB
"""
def setUp(self):
self.expected_character = character
self.expected_character.name = 'Tester'
def test_save_character(self):
save_character(self.expected_character)
received_character = session.query(SavedCharacterSchema).filter_by(name=self.expected_character.name).first()
self.assertIsNotNone(received_character)
received_character = received_character.convert_to_character_object()
# assert they're the same
self.assertEqual(vars(received_character), vars(self.expected_character))
def tearDownModule():
import tests.delete_test_db # module that deletes the DB :)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the save_character() function<commit_after>
|
import unittest
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from classes import Paladin
from models.characters.saved_character import SavedCharacterSchema
from models.items.item_template import ItemTemplateSchema
from tests.models.character.character_mock import character, char_equipment, entry
from models.characters.saver import save_character
class SavedCharacterSaverTests(unittest.TestCase):
"""
Get the Mock character, change his name and try to save him in the DB
"""
def setUp(self):
self.expected_character = character
self.expected_character.name = 'Tester'
def test_save_character(self):
save_character(self.expected_character)
received_character = session.query(SavedCharacterSchema).filter_by(name=self.expected_character.name).first()
self.assertIsNotNone(received_character)
received_character = received_character.convert_to_character_object()
# assert they're the same
self.assertEqual(vars(received_character), vars(self.expected_character))
def tearDownModule():
import tests.delete_test_db # module that deletes the DB :)
if __name__ == '__main__':
unittest.main()
|
Test for the save_character() functionimport unittest
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from classes import Paladin
from models.characters.saved_character import SavedCharacterSchema
from models.items.item_template import ItemTemplateSchema
from tests.models.character.character_mock import character, char_equipment, entry
from models.characters.saver import save_character
class SavedCharacterSaverTests(unittest.TestCase):
"""
Get the Mock character, change his name and try to save him in the DB
"""
def setUp(self):
self.expected_character = character
self.expected_character.name = 'Tester'
def test_save_character(self):
save_character(self.expected_character)
received_character = session.query(SavedCharacterSchema).filter_by(name=self.expected_character.name).first()
self.assertIsNotNone(received_character)
received_character = received_character.convert_to_character_object()
# assert they're the same
self.assertEqual(vars(received_character), vars(self.expected_character))
def tearDownModule():
import tests.delete_test_db # module that deletes the DB :)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test for the save_character() function<commit_after>import unittest
import database.main
from tests.create_test_db import engine, session, Base
database.main.engine = engine
database.main.session = session
database.main.Base = Base
import models.main
from classes import Paladin
from models.characters.saved_character import SavedCharacterSchema
from models.items.item_template import ItemTemplateSchema
from tests.models.character.character_mock import character, char_equipment, entry
from models.characters.saver import save_character
class SavedCharacterSaverTests(unittest.TestCase):
"""
Get the Mock character, change his name and try to save him in the DB
"""
def setUp(self):
self.expected_character = character
self.expected_character.name = 'Tester'
def test_save_character(self):
save_character(self.expected_character)
received_character = session.query(SavedCharacterSchema).filter_by(name=self.expected_character.name).first()
self.assertIsNotNone(received_character)
received_character = received_character.convert_to_character_object()
# assert they're the same
self.assertEqual(vars(received_character), vars(self.expected_character))
def tearDownModule():
import tests.delete_test_db # module that deletes the DB :)
if __name__ == '__main__':
unittest.main()
|
|
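One detail worth calling out in the saver-test record above: the test engine/session/Base are patched into database.main before the model modules are imported, because code that reads those names at import time would otherwise bind to the real database. A minimal sketch of the pattern (module names follow the record; the comments are illustrative):

import database.main
from tests.create_test_db import engine, session, Base

# Override first: anything imported later that does `from database.main import session`
# (or reads these attributes at import time) now sees the test objects.
database.main.engine = engine
database.main.session = session
database.main.Base = Base

import models.main  # imported only after the override, so it uses the test DB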
a78d93dbc23d832ca5eaae6535a45bfa478e4e56
|
altair/vegalite/v2/examples/us_state_capitals.py
|
altair/vegalite/v2/examples/us_state_capitals.py
|
"""
U.S. state capitals overlaid on a map of the U.S.
=================================================
This is a geographic visualization that shows US capitals
overlaid on a map.
"""
import altair as alt
from vega_datasets import data
states = alt.UrlData(data.us_10m.url,
format=alt.TopoDataFormat(type='topojson',
feature='states'))
capitals = data.us_state_capitals.url
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
projection={'type': 'albersUsa'},
width=800,
height=500
)
# State capitals labeled on background
points = alt.Chart(capitals).mark_text().encode(
alt.Text('city', type='nominal'),
alt.X('lon', type='longitude'),
alt.Y('lat', type='latitude'),
)
chart = background + points
|
Add US state capitals from vega-lite.
|
Add US state capitals from vega-lite.
|
Python
|
bsd-3-clause
|
altair-viz/altair,ellisonbg/altair,jakevdp/altair
|
Add US state capitals from vega-lite.
|
"""
U.S. state capitals overlaid on a map of the U.S.
=================================================
This is a geographic visualization that shows US capitals
overlaid on a map.
"""
import altair as alt
from vega_datasets import data
states = alt.UrlData(data.us_10m.url,
format=alt.TopoDataFormat(type='topojson',
feature='states'))
capitals = data.us_state_capitals.url
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
projection={'type': 'albersUsa'},
width=800,
height=500
)
# State capitals labeled on background
points = alt.Chart(capitals).mark_text().encode(
alt.Text('city', type='nominal'),
alt.X('lon', type='longitude'),
alt.Y('lat', type='latitude'),
)
chart = background + points
|
<commit_before><commit_msg>Add US state capitals from vega-lite.<commit_after>
|
"""
U.S. state capitals overlaid on a map of the U.S.
=================================================
This is a geographic visualization that shows US capitals
overlaid on a map.
"""
import altair as alt
from vega_datasets import data
states = alt.UrlData(data.us_10m.url,
format=alt.TopoDataFormat(type='topojson',
feature='states'))
capitals = data.us_state_capitals.url
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
projection={'type': 'albersUsa'},
width=800,
height=500
)
# State capitals labeled on background
points = alt.Chart(capitals).mark_text().encode(
alt.Text('city', type='nominal'),
alt.X('lon', type='longitude'),
alt.Y('lat', type='latitude'),
)
chart = background + points
|
Add US state capitals from vega-lite."""
U.S. state capitals overlaid on a map of the U.S.
=================================================
This is a geographic visualization that shows US capitals
overlaid on a map.
"""
import altair as alt
from vega_datasets import data
states = alt.UrlData(data.us_10m.url,
format=alt.TopoDataFormat(type='topojson',
feature='states'))
capitals = data.us_state_capitals.url
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
projection={'type': 'albersUsa'},
width=800,
height=500
)
# State capitals labeled on background
points = alt.Chart(capitals).mark_text().encode(
alt.Text('city', type='nominal'),
alt.X('lon', type='longitude'),
alt.Y('lat', type='latitude'),
)
chart = background + points
|
<commit_before><commit_msg>Add US state capitals from vega-lite.<commit_after>"""
U.S. state capitals overlaid on a map of the U.S.
=================================================
This is a geographic visualization that shows US capitals
overlaid on a map.
"""
import altair as alt
from vega_datasets import data
states = alt.UrlData(data.us_10m.url,
format=alt.TopoDataFormat(type='topojson',
feature='states'))
capitals = data.us_state_capitals.url
# US states background
background = alt.Chart(states).mark_geoshape(
fill='lightgray',
stroke='white'
).properties(
projection={'type': 'albersUsa'},
width=800,
height=500
)
# State capitals labeled on background
points = alt.Chart(capitals).mark_text().encode(
alt.Text('city', type='nominal'),
alt.X('lon', type='longitude'),
alt.Y('lat', type='latitude'),
)
chart = background + points
|
|
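For the Altair record above, the `+` operator layers the two charts (labels drawn over the basemap). A hedged usage sketch — the output calls below are standard Altair API to the best of my knowledge, but they are not part of the recorded example:

chart = background + points   # LayerChart: text labels rendered over the state shapes
spec = chart.to_json()        # Vega-Lite JSON spec, e.g. for embedding elsewhere
# chart.save('us_state_capitals.html')  # optional: write a standalone HTML page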
f22abf2b8a31d9621a891191db84364edb167390
|
zephyr/management/commands/knight.py
|
zephyr/management/commands/knight.py
|
from __future__ import absolute_import
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.core import validators
from guardian.shortcuts import assign_perm
from zephyr.models import Realm, UserProfile
class Command(BaseCommand):
help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""
option_list = BaseCommand.option_list + (
def handle(self, *args, **options):
try:
email = args[0]
except ValueError:
raise CommandError("""Please specify a user.""")
try:
profile = UserProfile.objects.get(email=email)
except ValidationError:
raise CommandError("No such user.")
if profile.has_perm('administer', profile.realm):
raise CommandError("User already has permission for this realm.")
else:
assign_perm('administer', profile, profile.realm)
print "Done!"
|
Add a management command to create realm administrators.
|
Add a management command to create realm administrators.
(imported from commit ab2dd580a206f29086c0d5a4e717c1bfd65a7435)
|
Python
|
apache-2.0
|
wdaher/zulip,xuxiao/zulip,showell/zulip,jonesgithub/zulip,levixie/zulip,Gabriel0402/zulip,ryansnowboarder/zulip,zacps/zulip,rht/zulip,suxinde2009/zulip,hustlzp/zulip,bowlofstew/zulip,sharmaeklavya2/zulip,seapasulli/zulip,SmartPeople/zulip,dxq-git/zulip,sup95/zulip,dhcrzf/zulip,timabbott/zulip,tommyip/zulip,zacps/zulip,aps-sids/zulip,esander91/zulip,tbutter/zulip,JanzTam/zulip,swinghu/zulip,adnanh/zulip,aps-sids/zulip,jphilipsen05/zulip,showell/zulip,hustlzp/zulip,JanzTam/zulip,aakash-cr7/zulip,hj3938/zulip,PaulPetring/zulip,hackerkid/zulip,ryanbackman/zulip,souravbadami/zulip,shubhamdhama/zulip,cosmicAsymmetry/zulip,Juanvulcano/zulip,AZtheAsian/zulip,amanharitsh123/zulip,j831/zulip,proliming/zulip,nicholasbs/zulip,shaunstanislaus/zulip,sonali0901/zulip,jerryge/zulip,ashwinirudrappa/zulip,karamcnair/zulip,synicalsyntax/zulip,AZtheAsian/zulip,wangdeshui/zulip,sharmaeklavya2/zulip,adnanh/zulip,zulip/zulip,ashwinirudrappa/zulip,alliejones/zulip,developerfm/zulip,johnny9/zulip,rht/zulip,so0k/zulip,ufosky-server/zulip,tommyip/zulip,zachallaun/zulip,itnihao/zulip,vakila/zulip,JPJPJPOPOP/zulip,swinghu/zulip,johnny9/zulip,yocome/zulip,mohsenSy/zulip,ikasumiwt/zulip,Qgap/zulip,LAndreas/zulip,ikasumiwt/zulip,karamcnair/zulip,aps-sids/zulip,timabbott/zulip,reyha/zulip,Diptanshu8/zulip,ericzhou2008/zulip,hafeez3000/zulip,tdr130/zulip,niftynei/zulip,ipernet/zulip,PaulPetring/zulip,punchagan/zulip,mdavid/zulip,blaze225/zulip,cosmicAsymmetry/zulip,joyhchen/zulip,m1ssou/zulip,EasonYi/zulip,dattatreya303/zulip,luyifan/zulip,dhcrzf/zulip,atomic-labs/zulip,vakila/zulip,susansls/zulip,avastu/zulip,suxinde2009/zulip,zorojean/zulip,luyifan/zulip,xuxiao/zulip,niftynei/zulip,babbage/zulip,saitodisse/zulip,hafeez3000/zulip,he15his/zulip,huangkebo/zulip,tdr130/zulip,susansls/zulip,seapasulli/zulip,dxq-git/zulip,kokoar/zulip,Frouk/zulip,sonali0901/zulip,mohsenSy/zulip,wweiradio/zulip,zwily/zulip,hustlzp/zulip,joshisa/zulip,jessedhillon/zulip,LeeRisk/zulip,hj3938/zulip,esander91/zulip,praveenaki/zulip,atomic-labs/zulip,christi3k/zulip,huangkebo/zulip,wangdeshui/zulip,ryanbackman/zulip,schatt/zulip,swinghu/zulip,calvinleenyc/zulip,Drooids/zulip,amyliu345/zulip,guiquanz/zulip,he15his/zulip,rishig/zulip,zofuthan/zulip,ApsOps/zulip,eastlhu/zulip,dnmfarrell/zulip,brainwane/zulip,sup95/zulip,calvinleenyc/zulip,arpitpanwar/zulip,qq1012803704/zulip,zhaoweigg/zulip,gigawhitlocks/zulip,yuvipanda/zulip,tommyip/zulip,brockwhittaker/zulip,LeeRisk/zulip,sharmaeklavya2/zulip,hustlzp/zulip,fw1121/zulip,willingc/zulip,mdavid/zulip,MayB/zulip,susansls/zulip,joyhchen/zulip,LeeRisk/zulip,bitemyapp/zulip,pradiptad/zulip,Gabriel0402/zulip,jonesgithub/zulip,atomic-labs/zulip,guiquanz/zulip,thomasboyt/zulip,arpitpanwar/zulip,akuseru/zulip,sonali0901/zulip,krtkmj/zulip,amanharitsh123/zulip,jimmy54/zulip,hackerkid/zulip,dotcool/zulip,arpitpanwar/zulip,ikasumiwt/zulip,yocome/zulip,Suninus/zulip,synicalsyntax/zulip,joyhchen/zulip,grave-w-grave/zulip,zofuthan/zulip,brockwhittaker/zulip,peiwei/zulip,bitemyapp/zulip,ipernet/zulip,jphilipsen05/zulip,proliming/zulip,deer-hope/zulip,noroot/zulip,zofuthan/zulip,thomasboyt/zulip,ApsOps/zulip,yuvipanda/zulip,jonesgithub/zulip,levixie/zulip,Frouk/zulip,arpith/zulip,mahim97/zulip,Batterfii/zulip,bitemyapp/zulip,udxxabp/zulip,levixie/zulip,Jianchun1/zulip,zulip/zulip,aliceriot/zulip,alliejones/zulip,Galexrt/zulip,stamhe/zulip,stamhe/zulip,aakash-cr7/zulip,tiansiyuan/zulip,ipernet/zulip,Frouk/zulip,yocome/zulip,jackrzhang/zulip,bluesea/zulip,swinghu/zulip,ahmadassaf/zulip,TigorC/zulip,easyfmxu/zulip,blaze225/
zulip,rht/zulip,calvinleenyc/zulip,levixie/zulip,so0k/zulip,Gabriel0402/zulip,KingxBanana/zulip,schatt/zulip,atomic-labs/zulip,synicalsyntax/zulip,dxq-git/zulip,easyfmxu/zulip,voidException/zulip,shubhamdhama/zulip,calvinleenyc/zulip,xuanhan863/zulip,xuanhan863/zulip,technicalpickles/zulip,calvinleenyc/zulip,Jianchun1/zulip,xuanhan863/zulip,hustlzp/zulip,saitodisse/zulip,ApsOps/zulip,moria/zulip,hengqujushi/zulip,karamcnair/zulip,yuvipanda/zulip,nicholasbs/zulip,showell/zulip,jerryge/zulip,andersk/zulip,PaulPetring/zulip,glovebx/zulip,timabbott/zulip,hayderimran7/zulip,hj3938/zulip,itnihao/zulip,EasonYi/zulip,ikasumiwt/zulip,themass/zulip,armooo/zulip,developerfm/zulip,babbage/zulip,zacps/zulip,littledogboy/zulip,ryanbackman/zulip,vikas-parashar/zulip,andersk/zulip,so0k/zulip,RobotCaleb/zulip,technicalpickles/zulip,brockwhittaker/zulip,AZtheAsian/zulip,yuvipanda/zulip,susansls/zulip,MayB/zulip,littledogboy/zulip,qq1012803704/zulip,glovebx/zulip,aps-sids/zulip,wavelets/zulip,littledogboy/zulip,johnnygaddarr/zulip,Cheppers/zulip,timabbott/zulip,swinghu/zulip,babbage/zulip,firstblade/zulip,zorojean/zulip,hayderimran7/zulip,Gabriel0402/zulip,karamcnair/zulip,ashwinirudrappa/zulip,PhilSk/zulip,grave-w-grave/zulip,kou/zulip,itnihao/zulip,levixie/zulip,xuanhan863/zulip,levixie/zulip,vikas-parashar/zulip,mdavid/zulip,jackrzhang/zulip,akuseru/zulip,Vallher/zulip,souravbadami/zulip,Suninus/zulip,pradiptad/zulip,hj3938/zulip,eastlhu/zulip,lfranchi/zulip,sonali0901/zulip,LAndreas/zulip,jainayush975/zulip,gigawhitlocks/zulip,Gabriel0402/zulip,vabs22/zulip,kaiyuanheshang/zulip,codeKonami/zulip,punchagan/zulip,qq1012803704/zulip,huangkebo/zulip,alliejones/zulip,rishig/zulip,jonesgithub/zulip,so0k/zulip,dhcrzf/zulip,vabs22/zulip,thomasboyt/zulip,vabs22/zulip,esander91/zulip,lfranchi/zulip,deer-hope/zulip,ApsOps/zulip,RobotCaleb/zulip,wdaher/zulip,akuseru/zulip,vakila/zulip,ipernet/zulip,bluesea/zulip,kokoar/zulip,stamhe/zulip,dxq-git/zulip,bastianh/zulip,ryansnowboarder/zulip,natanovia/zulip,DazWorrall/zulip,dhcrzf/zulip,zulip/zulip,JanzTam/zulip,zwily/zulip,udxxabp/zulip,rishig/zulip,samatdav/zulip,reyha/zulip,vakila/zulip,zacps/zulip,babbage/zulip,dwrpayne/zulip,saitodisse/zulip,itnihao/zulip,bastianh/zulip,natanovia/zulip,ericzhou2008/zulip,tommyip/zulip,kou/zulip,he15his/zulip,m1ssou/zulip,arpitpanwar/zulip,krtkmj/zulip,willingc/zulip,brockwhittaker/zulip,sharmaeklavya2/zulip,karamcnair/zulip,luyifan/zulip,dawran6/zulip,mahim97/zulip,bastianh/zulip,joyhchen/zulip,alliejones/zulip,nicholasbs/zulip,PhilSk/zulip,MariaFaBella85/zulip,bowlofstew/zulip,andersk/zulip,shrikrishnaholla/zulip,proliming/zulip,gkotian/zulip,nicholasbs/zulip,showell/zulip,timabbott/zulip,so0k/zulip,eastlhu/zulip,EasonYi/zulip,glovebx/zulip,zachallaun/zulip,eeshangarg/zulip,m1ssou/zulip,jackrzhang/zulip,Jianchun1/zulip,johnny9/zulip,nicholasbs/zulip,Vallher/zulip,moria/zulip,wdaher/zulip,wangdeshui/zulip,suxinde2009/zulip,moria/zulip,rht/zulip,zhaoweigg/zulip,dotcool/zulip,MayB/zulip,udxxabp/zulip,KingxBanana/zulip,mohsenSy/zulip,rht/zulip,dwrpayne/zulip,gigawhitlocks/zulip,Jianchun1/zulip,guiquanz/zulip,pradiptad/zulip,mansilladev/zulip,LeeRisk/zulip,peguin40/zulip,noroot/zulip,technicalpickles/zulip,zhaoweigg/zulip,easyfmxu/zulip,shrikrishnaholla/zulip,wavelets/zulip,bowlofstew/zulip,easyfmxu/zulip,rht/zulip,tbutter/zulip,tdr130/zulip,KingxBanana/zulip,hustlzp/zulip,nicholasbs/zulip,shaunstanislaus/zulip,he15his/zulip,TigorC/zulip,ashwinirudrappa/zulip,gkotian/zulip,kaiyuanheshang/zulip,ahmadassaf/zulip,bssrdf/zulip,Suninus/zulip,
natanovia/zulip,fw1121/zulip,sup95/zulip,MayB/zulip,wdaher/zulip,wdaher/zulip,stamhe/zulip,vaidap/zulip,qq1012803704/zulip,armooo/zulip,jimmy54/zulip,joshisa/zulip,arpith/zulip,firstblade/zulip,KingxBanana/zulip,jeffcao/zulip,PaulPetring/zulip,tbutter/zulip,eastlhu/zulip,dxq-git/zulip,bastianh/zulip,PhilSk/zulip,isht3/zulip,esander91/zulip,amyliu345/zulip,Qgap/zulip,Galexrt/zulip,dxq-git/zulip,ryanbackman/zulip,wangdeshui/zulip,Qgap/zulip,mohsenSy/zulip,MayB/zulip,avastu/zulip,lfranchi/zulip,ahmadassaf/zulip,vaidap/zulip,ryansnowboarder/zulip,krtkmj/zulip,tbutter/zulip,Frouk/zulip,eastlhu/zulip,ashwinirudrappa/zulip,bssrdf/zulip,qq1012803704/zulip,EasonYi/zulip,proliming/zulip,amallia/zulip,vabs22/zulip,deer-hope/zulip,hengqujushi/zulip,KJin99/zulip,themass/zulip,jessedhillon/zulip,hafeez3000/zulip,hayderimran7/zulip,wweiradio/zulip,Qgap/zulip,punchagan/zulip,aliceriot/zulip,swinghu/zulip,timabbott/zulip,jrowan/zulip,jimmy54/zulip,esander91/zulip,atomic-labs/zulip,gigawhitlocks/zulip,verma-varsha/zulip,andersk/zulip,kokoar/zulip,firstblade/zulip,gkotian/zulip,pradiptad/zulip,zwily/zulip,jrowan/zulip,m1ssou/zulip,shrikrishnaholla/zulip,mdavid/zulip,stamhe/zulip,peguin40/zulip,samatdav/zulip,TigorC/zulip,DazWorrall/zulip,yocome/zulip,jphilipsen05/zulip,zofuthan/zulip,natanovia/zulip,amyliu345/zulip,Batterfii/zulip,hengqujushi/zulip,Cheppers/zulip,amyliu345/zulip,MayB/zulip,shrikrishnaholla/zulip,seapasulli/zulip,littledogboy/zulip,bitemyapp/zulip,verma-varsha/zulip,lfranchi/zulip,jrowan/zulip,joshisa/zulip,hafeez3000/zulip,krtkmj/zulip,jphilipsen05/zulip,wweiradio/zulip,codeKonami/zulip,aliceriot/zulip,johnnygaddarr/zulip,RobotCaleb/zulip,babbage/zulip,hackerkid/zulip,RobotCaleb/zulip,zhaoweigg/zulip,JanzTam/zulip,Drooids/zulip,KingxBanana/zulip,brainwane/zulip,littledogboy/zulip,he15his/zulip,voidException/zulip,christi3k/zulip,zorojean/zulip,luyifan/zulip,suxinde2009/zulip,vaidap/zulip,MariaFaBella85/zulip,sup95/zulip,mansilladev/zulip,brainwane/zulip,brainwane/zulip,bssrdf/zulip,kou/zulip,ericzhou2008/zulip,rht/zulip,ipernet/zulip,zwily/zulip,dnmfarrell/zulip,j831/zulip,jimmy54/zulip,jessedhillon/zulip,mansilladev/zulip,thomasboyt/zulip,seapasulli/zulip,huangkebo/zulip,wweiradio/zulip,itnihao/zulip,swinghu/zulip,ikasumiwt/zulip,eastlhu/zulip,niftynei/zulip,natanovia/zulip,Juanvulcano/zulip,verma-varsha/zulip,alliejones/zulip,EasonYi/zulip,zwily/zulip,JPJPJPOPOP/zulip,noroot/zulip,voidException/zulip,voidException/zulip,wavelets/zulip,umkay/zulip,Qgap/zulip,codeKonami/zulip,johnny9/zulip,so0k/zulip,Vallher/zulip,jphilipsen05/zulip,mansilladev/zulip,glovebx/zulip,zulip/zulip,JanzTam/zulip,JPJPJPOPOP/zulip,Vallher/zulip,Batterfii/zulip,RobotCaleb/zulip,j831/zulip,sonali0901/zulip,samatdav/zulip,udxxabp/zulip,christi3k/zulip,blaze225/zulip,verma-varsha/zulip,Diptanshu8/zulip,isht3/zulip,dwrpayne/zulip,jonesgithub/zulip,paxapy/zulip,willingc/zulip,littledogboy/zulip,KJin99/zulip,developerfm/zulip,mdavid/zulip,technicalpickles/zulip,punchagan/zulip,zachallaun/zulip,Frouk/zulip,ufosky-server/zulip,hengqujushi/zulip,huangkebo/zulip,ericzhou2008/zulip,punchagan/zulip,Diptanshu8/zulip,jeffcao/zulip,Juanvulcano/zulip,suxinde2009/zulip,bluesea/zulip,firstblade/zulip,tommyip/zulip,Batterfii/zulip,praveenaki/zulip,vikas-parashar/zulip,willingc/zulip,lfranchi/zulip,stamhe/zulip,KJin99/zulip,ufosky-server/zulip,codeKonami/zulip,shaunstanislaus/zulip,wweiradio/zulip,LeeRisk/zulip,ufosky-server/zulip,LAndreas/zulip,Juanvulcano/zulip,RobotCaleb/zulip,voidException/zulip,Drooids/zulip,wangdeshui/zulip,hengquj
ushi/zulip,dwrpayne/zulip,deer-hope/zulip,umkay/zulip,dwrpayne/zulip,MariaFaBella85/zulip,ericzhou2008/zulip,eeshangarg/zulip,peiwei/zulip,thomasboyt/zulip,vaidap/zulip,themass/zulip,akuseru/zulip,LAndreas/zulip,technicalpickles/zulip,amyliu345/zulip,sup95/zulip,alliejones/zulip,shaunstanislaus/zulip,mohsenSy/zulip,bssrdf/zulip,voidException/zulip,tiansiyuan/zulip,bluesea/zulip,he15his/zulip,Cheppers/zulip,johnny9/zulip,synicalsyntax/zulip,bluesea/zulip,Galexrt/zulip,tbutter/zulip,EasonYi/zulip,brainwane/zulip,noroot/zulip,hengqujushi/zulip,aliceriot/zulip,jessedhillon/zulip,shaunstanislaus/zulip,developerfm/zulip,zorojean/zulip,hackerkid/zulip,yuvipanda/zulip,ahmadassaf/zulip,reyha/zulip,huangkebo/zulip,Batterfii/zulip,eeshangarg/zulip,souravbadami/zulip,mansilladev/zulip,christi3k/zulip,tbutter/zulip,dwrpayne/zulip,amallia/zulip,hj3938/zulip,Cheppers/zulip,kaiyuanheshang/zulip,zorojean/zulip,verma-varsha/zulip,vakila/zulip,saitodisse/zulip,AZtheAsian/zulip,zwily/zulip,niftynei/zulip,blaze225/zulip,tiansiyuan/zulip,joyhchen/zulip,wavelets/zulip,mahim97/zulip,bowlofstew/zulip,synicalsyntax/zulip,aliceriot/zulip,zachallaun/zulip,esander91/zulip,pradiptad/zulip,amanharitsh123/zulip,armooo/zulip,seapasulli/zulip,hengqujushi/zulip,wavelets/zulip,jrowan/zulip,Juanvulcano/zulip,zorojean/zulip,ryansnowboarder/zulip,armooo/zulip,Qgap/zulip,Vallher/zulip,PaulPetring/zulip,Suninus/zulip,bssrdf/zulip,joshisa/zulip,Galexrt/zulip,ryanbackman/zulip,glovebx/zulip,developerfm/zulip,jeffcao/zulip,SmartPeople/zulip,schatt/zulip,Drooids/zulip,jessedhillon/zulip,ikasumiwt/zulip,joshisa/zulip,tiansiyuan/zulip,arpith/zulip,TigorC/zulip,Cheppers/zulip,codeKonami/zulip,firstblade/zulip,zachallaun/zulip,punchagan/zulip,hayderimran7/zulip,christi3k/zulip,kokoar/zulip,moria/zulip,bssrdf/zulip,brainwane/zulip,zhaoweigg/zulip,moria/zulip,reyha/zulip,vikas-parashar/zulip,brainwane/zulip,m1ssou/zulip,MariaFaBella85/zulip,jerryge/zulip,krtkmj/zulip,rishig/zulip,DazWorrall/zulip,jeffcao/zulip,Diptanshu8/zulip,gigawhitlocks/zulip,tbutter/zulip,blaze225/zulip,ahmadassaf/zulip,Gabriel0402/zulip,jainayush975/zulip,isht3/zulip,qq1012803704/zulip,guiquanz/zulip,xuxiao/zulip,codeKonami/zulip,bitemyapp/zulip,yocome/zulip,cosmicAsymmetry/zulip,amallia/zulip,PaulPetring/zulip,jonesgithub/zulip,hayderimran7/zulip,kou/zulip,luyifan/zulip,deer-hope/zulip,johnnygaddarr/zulip,thomasboyt/zulip,natanovia/zulip,ashwinirudrappa/zulip,gkotian/zulip,SmartPeople/zulip,eeshangarg/zulip,reyha/zulip,avastu/zulip,mahim97/zulip,praveenaki/zulip,Batterfii/zulip,peguin40/zulip,noroot/zulip,MariaFaBella85/zulip,udxxabp/zulip,armooo/zulip,developerfm/zulip,arpitpanwar/zulip,vaidap/zulip,zacps/zulip,timabbott/zulip,eeshangarg/zulip,kokoar/zulip,jackrzhang/zulip,wdaher/zulip,dhcrzf/zulip,umkay/zulip,jerryge/zulip,so0k/zulip,zacps/zulip,calvinleenyc/zulip,he15his/zulip,bowlofstew/zulip,mansilladev/zulip,peguin40/zulip,synicalsyntax/zulip,peguin40/zulip,KJin99/zulip,ahmadassaf/zulip,dattatreya303/zulip,seapasulli/zulip,bitemyapp/zulip,sharmaeklavya2/zulip,aliceriot/zulip,Qgap/zulip,shrikrishnaholla/zulip,qq1012803704/zulip,Suninus/zulip,johnnygaddarr/zulip,krtkmj/zulip,peiwei/zulip,jainayush975/zulip,willingc/zulip,ryansnowboarder/zulip,showell/zulip,JanzTam/zulip,jainayush975/zulip,jeffcao/zulip,tdr130/zulip,TigorC/zulip,arpith/zulip,ashwinirudrappa/zulip,SmartPeople/zulip,grave-w-grave/zulip,umkay/zulip,Batterfii/zulip,themass/zulip,guiquanz/zulip,wavelets/zulip,esander91/zulip,andersk/zulip,aakash-cr7/zulip,schatt/zulip,grave-w-grave/zulip,joshisa/zulip,
tiansiyuan/zulip,luyifan/zulip,avastu/zulip,peiwei/zulip,dawran6/zulip,ufosky-server/zulip,deer-hope/zulip,natanovia/zulip,bowlofstew/zulip,shubhamdhama/zulip,paxapy/zulip,dattatreya303/zulip,guiquanz/zulip,themass/zulip,dotcool/zulip,andersk/zulip,vakila/zulip,jerryge/zulip,amanharitsh123/zulip,xuanhan863/zulip,samatdav/zulip,samatdav/zulip,RobotCaleb/zulip,dnmfarrell/zulip,yuvipanda/zulip,souravbadami/zulip,paxapy/zulip,noroot/zulip,isht3/zulip,aliceriot/zulip,armooo/zulip,brockwhittaker/zulip,samatdav/zulip,glovebx/zulip,easyfmxu/zulip,bowlofstew/zulip,arpith/zulip,bluesea/zulip,Drooids/zulip,dnmfarrell/zulip,Diptanshu8/zulip,Frouk/zulip,DazWorrall/zulip,ryansnowboarder/zulip,ApsOps/zulip,codeKonami/zulip,jrowan/zulip,praveenaki/zulip,atomic-labs/zulip,dotcool/zulip,JPJPJPOPOP/zulip,amyliu345/zulip,suxinde2009/zulip,hustlzp/zulip,zorojean/zulip,tommyip/zulip,jackrzhang/zulip,j831/zulip,themass/zulip,gkotian/zulip,zhaoweigg/zulip,reyha/zulip,mdavid/zulip,adnanh/zulip,showell/zulip,Gabriel0402/zulip,hayderimran7/zulip,easyfmxu/zulip,gigawhitlocks/zulip,bssrdf/zulip,ufosky-server/zulip,arpitpanwar/zulip,Vallher/zulip,jeffcao/zulip,amallia/zulip,jimmy54/zulip,moria/zulip,babbage/zulip,dhcrzf/zulip,PhilSk/zulip,pradiptad/zulip,vikas-parashar/zulip,vaidap/zulip,jeffcao/zulip,ApsOps/zulip,isht3/zulip,kaiyuanheshang/zulip,sharmaeklavya2/zulip,DazWorrall/zulip,hj3938/zulip,sonali0901/zulip,aps-sids/zulip,mahim97/zulip,Drooids/zulip,KingxBanana/zulip,blaze225/zulip,jonesgithub/zulip,Suninus/zulip,hafeez3000/zulip,johnny9/zulip,Galexrt/zulip,dhcrzf/zulip,andersk/zulip,ikasumiwt/zulip,levixie/zulip,ipernet/zulip,souravbadami/zulip,DazWorrall/zulip,jerryge/zulip,avastu/zulip,zulip/zulip,bastianh/zulip,glovebx/zulip,johnny9/zulip,dnmfarrell/zulip,souravbadami/zulip,rishig/zulip,wdaher/zulip,xuxiao/zulip,fw1121/zulip,PhilSk/zulip,johnnygaddarr/zulip,arpitpanwar/zulip,dattatreya303/zulip,johnnygaddarr/zulip,firstblade/zulip,zachallaun/zulip,jphilipsen05/zulip,seapasulli/zulip,ufosky-server/zulip,hackerkid/zulip,schatt/zulip,praveenaki/zulip,hafeez3000/zulip,praveenaki/zulip,shubhamdhama/zulip,kaiyuanheshang/zulip,udxxabp/zulip,Cheppers/zulip,firstblade/zulip,stamhe/zulip,dawran6/zulip,LeeRisk/zulip,saitodisse/zulip,akuseru/zulip,atomic-labs/zulip,suxinde2009/zulip,xuxiao/zulip,littledogboy/zulip,ericzhou2008/zulip,hayderimran7/zulip,ipernet/zulip,eastlhu/zulip,bastianh/zulip,LAndreas/zulip,TigorC/zulip,shrikrishnaholla/zulip,praveenaki/zulip,arpith/zulip,cosmicAsymmetry/zulip,aakash-cr7/zulip,Cheppers/zulip,karamcnair/zulip,saitodisse/zulip,avastu/zulip,KJin99/zulip,themass/zulip,gkotian/zulip,Galexrt/zulip,LAndreas/zulip,sup95/zulip,j831/zulip,karamcnair/zulip,fw1121/zulip,avastu/zulip,armooo/zulip,johnnygaddarr/zulip,j831/zulip,tiansiyuan/zulip,deer-hope/zulip,JanzTam/zulip,dotcool/zulip,eeshangarg/zulip,hackerkid/zulip,hackerkid/zulip,Frouk/zulip,paxapy/zulip,christi3k/zulip,zofuthan/zulip,moria/zulip,technicalpickles/zulip,xuanhan863/zulip,gigawhitlocks/zulip,nicholasbs/zulip,amallia/zulip,SmartPeople/zulip,cosmicAsymmetry/zulip,m1ssou/zulip,zofuthan/zulip,easyfmxu/zulip,PaulPetring/zulip,mansilladev/zulip,niftynei/zulip,Diptanshu8/zulip,dattatreya303/zulip,kou/zulip,jerryge/zulip,jimmy54/zulip,umkay/zulip,adnanh/zulip,developerfm/zulip,fw1121/zulip,adnanh/zulip,bluesea/zulip,shaunstanislaus/zulip,jainayush975/zulip,zachallaun/zulip,tiansiyuan/zulip,zwily/zulip,AZtheAsian/zulip,susansls/zulip,KJin99/zulip,grave-w-grave/zulip,dawran6/zulip,joshisa/zulip,huangkebo/zulip,wangdeshui/zulip,aakash-cr7/zul
ip,wavelets/zulip,vabs22/zulip,MariaFaBella85/zulip,EasonYi/zulip,jackrzhang/zulip,schatt/zulip,dotcool/zulip,technicalpickles/zulip,jessedhillon/zulip,jainayush975/zulip,synicalsyntax/zulip,itnihao/zulip,tdr130/zulip,xuanhan863/zulip,shubhamdhama/zulip,peguin40/zulip,wweiradio/zulip,xuxiao/zulip,rishig/zulip,adnanh/zulip,vikas-parashar/zulip,jessedhillon/zulip,jackrzhang/zulip,ApsOps/zulip,vabs22/zulip,cosmicAsymmetry/zulip,kokoar/zulip,kou/zulip,zulip/zulip,verma-varsha/zulip,noroot/zulip,shubhamdhama/zulip,amallia/zulip,proliming/zulip,udxxabp/zulip,amallia/zulip,dotcool/zulip,jrowan/zulip,Juanvulcano/zulip,dwrpayne/zulip,aps-sids/zulip,dnmfarrell/zulip,bitemyapp/zulip,zulip/zulip,yocome/zulip,xuxiao/zulip,kokoar/zulip,yocome/zulip,dawran6/zulip,akuseru/zulip,proliming/zulip,jimmy54/zulip,schatt/zulip,Jianchun1/zulip,lfranchi/zulip,wangdeshui/zulip,eeshangarg/zulip,aps-sids/zulip,shaunstanislaus/zulip,peiwei/zulip,bastianh/zulip,dxq-git/zulip,ahmadassaf/zulip,umkay/zulip,wweiradio/zulip,Jianchun1/zulip,zofuthan/zulip,paxapy/zulip,luyifan/zulip,PhilSk/zulip,m1ssou/zulip,punchagan/zulip,hafeez3000/zulip,mahim97/zulip,amanharitsh123/zulip,niftynei/zulip,susansls/zulip,pradiptad/zulip,AZtheAsian/zulip,umkay/zulip,fw1121/zulip,dawran6/zulip,ryansnowboarder/zulip,isht3/zulip,krtkmj/zulip,dnmfarrell/zulip,zhaoweigg/zulip,ryanbackman/zulip,hj3938/zulip,yuvipanda/zulip,shrikrishnaholla/zulip,Suninus/zulip,KJin99/zulip,gkotian/zulip,amanharitsh123/zulip,joyhchen/zulip,fw1121/zulip,MayB/zulip,kaiyuanheshang/zulip,dattatreya303/zulip,ericzhou2008/zulip,rishig/zulip,willingc/zulip,itnihao/zulip,mdavid/zulip,MariaFaBella85/zulip,tdr130/zulip,tommyip/zulip,LAndreas/zulip,Galexrt/zulip,JPJPJPOPOP/zulip,shubhamdhama/zulip,aakash-cr7/zulip,adnanh/zulip,SmartPeople/zulip,voidException/zulip,DazWorrall/zulip,LeeRisk/zulip,Drooids/zulip,babbage/zulip,alliejones/zulip,lfranchi/zulip,proliming/zulip,paxapy/zulip,guiquanz/zulip,willingc/zulip,peiwei/zulip,thomasboyt/zulip,Vallher/zulip,kou/zulip,grave-w-grave/zulip,akuseru/zulip,vakila/zulip,saitodisse/zulip,kaiyuanheshang/zulip,mohsenSy/zulip,peiwei/zulip,showell/zulip,brockwhittaker/zulip,JPJPJPOPOP/zulip,tdr130/zulip
|
Add a management command to create realm administrators.
(imported from commit ab2dd580a206f29086c0d5a4e717c1bfd65a7435)
|
from __future__ import absolute_import
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.core import validators
from guardian.shortcuts import assign_perm
from zephyr.models import Realm, UserProfile
class Command(BaseCommand):
help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""
option_list = BaseCommand.option_list + (
def handle(self, *args, **options):
try:
email = args[0]
except ValueError:
raise CommandError("""Please specify a user.""")
try:
profile = UserProfile.objects.get(email=email)
except ValidationError:
raise CommandError("No such user.")
if profile.has_perm('administer', profile.realm):
raise CommandError("User already has permission for this realm.")
else:
assign_perm('administer', profile, profile.realm)
print "Done!"
|
<commit_before><commit_msg>Add a management command to create realm administrators.
(imported from commit ab2dd580a206f29086c0d5a4e717c1bfd65a7435)<commit_after>
|
from __future__ import absolute_import
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.core import validators
from guardian.shortcuts import assign_perm
from zephyr.models import Realm, UserProfile
class Command(BaseCommand):
help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""
option_list = BaseCommand.option_list + (
def handle(self, *args, **options):
try:
email = args[0]
except ValueError:
raise CommandError("""Please specify a user.""")
try:
profile = UserProfile.objects.get(email=email)
except ValidationError:
raise CommandError("No such user.")
if profile.has_perm('administer', profile.realm):
raise CommandError("User already has permission for this realm.")
else:
assign_perm('administer', profile, profile.realm)
print "Done!"
|
Add a management command to create realm administrators.
(imported from commit ab2dd580a206f29086c0d5a4e717c1bfd65a7435)from __future__ import absolute_import
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.core import validators
from guardian.shortcuts import assign_perm
from zephyr.models import Realm, UserProfile
class Command(BaseCommand):
help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""
option_list = BaseCommand.option_list + (
def handle(self, *args, **options):
try:
email = args[0]
except ValueError:
raise CommandError("""Please specify a user.""")
try:
profile = UserProfile.objects.get(email=email)
except ValidationError:
raise CommandError("No such user.")
if profile.has_perm('administer', profile.realm):
raise CommandError("User already has permission for this realm.")
else:
assign_perm('administer', profile, profile.realm)
print "Done!"
|
<commit_before><commit_msg>Add a management command to create realm administrators.
(imported from commit ab2dd580a206f29086c0d5a4e717c1bfd65a7435)<commit_after>from __future__ import absolute_import
import sys
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.db.utils import IntegrityError
from django.core import validators
from guardian.shortcuts import assign_perm
from zephyr.models import Realm, UserProfile
class Command(BaseCommand):
help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""
option_list = BaseCommand.option_list + (
def handle(self, *args, **options):
try:
email = args[0]
except ValueError:
raise CommandError("""Please specify a user.""")
try:
profile = UserProfile.objects.get(email=email)
except ValidationError:
raise CommandError("No such user.")
if profile.has_perm('administer', profile.realm):
raise CommandError("User already has permission for this realm.")
else:
assign_perm('administer', profile, profile.realm)
print "Done!"
|
|
4f1e1874f3ed9af8922aa26eb20230dbee5e6d73
|
examples/partitioning.py
|
examples/partitioning.py
|
import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create two disk image files on which to create new devices
disk1_file = create_sparse_file(b, "disk1", 100000)
b.config.diskImages["disk1"] = disk1_file
disk2_file = create_sparse_file(b, "disk2", 100000)
b.config.diskImages["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
disk2 = b.devicetree.getDeviceByName("disk2")
b.initializeDisk(disk1)
b.initializeDisk(disk2)
# new partition on either disk1 or disk2 with base size 10000 MiB and growth
# up to a maximum size of 50000 MiB
dev = b.newPartition(size=10000, grow=True, maxsize=50000,
parents=[disk1, disk2])
b.createDevice(dev)
# new partition on disk1 with base size 5000 MiB and unbounded growth and an
# ext4 filesystem
dev = b.newPartition(fmt_type="ext4", size=5000, grow=True, parents=[disk1])
b.createDevice(dev)
# new partition on any suitable disk with a fixed size of 2000 MiB formatted
# as swap space
dev = b.newPartition(fmt_type="swap", size=2000)
b.createDevice(dev)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
os.unlink(disk2_file)
|
Add some example code for creation of disk partitions.
|
Add some example code for creation of disk partitions.
|
Python
|
lgpl-2.1
|
vpodzime/blivet,dwlehman/blivet,rhinstaller/blivet,vojtechtrefny/blivet,jkonecny12/blivet,vojtechtrefny/blivet,AdamWill/blivet,AdamWill/blivet,rvykydal/blivet,rvykydal/blivet,rhinstaller/blivet,jkonecny12/blivet,dwlehman/blivet,vpodzime/blivet
|
Add some example code for creation of disk partitions.
|
import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create two disk image files on which to create new devices
disk1_file = create_sparse_file(b, "disk1", 100000)
b.config.diskImages["disk1"] = disk1_file
disk2_file = create_sparse_file(b, "disk2", 100000)
b.config.diskImages["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
disk2 = b.devicetree.getDeviceByName("disk2")
b.initializeDisk(disk1)
b.initializeDisk(disk2)
# new partition on either disk1 or disk2 with base size 10000 MiB and growth
# up to a maximum size of 50000 MiB
dev = b.newPartition(size=10000, grow=True, maxsize=50000,
parents=[disk1, disk2])
b.createDevice(dev)
# new partition on disk1 with base size 5000 MiB and unbounded growth and an
# ext4 filesystem
dev = b.newPartition(fmt_type="ext4", size=5000, grow=True, parents=[disk1])
b.createDevice(dev)
# new partition on any suitable disk with a fixed size of 2000 MiB formatted
# as swap space
dev = b.newPartition(fmt_type="swap", size=2000)
b.createDevice(dev)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>Add some example code for creation of disk partitions.<commit_after>
|
import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create two disk image files on which to create new devices
disk1_file = create_sparse_file(b, "disk1", 100000)
b.config.diskImages["disk1"] = disk1_file
disk2_file = create_sparse_file(b, "disk2", 100000)
b.config.diskImages["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
disk2 = b.devicetree.getDeviceByName("disk2")
b.initializeDisk(disk1)
b.initializeDisk(disk2)
# new partition on either disk1 or disk2 with base size 10000 MiB and growth
# up to a maximum size of 50000 MiB
dev = b.newPartition(size=10000, grow=True, maxsize=50000,
parents=[disk1, disk2])
b.createDevice(dev)
# new partition on disk1 with base size 5000 MiB and unbounded growth and an
# ext4 filesystem
dev = b.newPartition(fmt_type="ext4", size=5000, grow=True, parents=[disk1])
b.createDevice(dev)
# new partition on any suitable disk with a fixed size of 2000 MiB formatted
# as swap space
dev = b.newPartition(fmt_type="swap", size=2000)
b.createDevice(dev)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
os.unlink(disk2_file)
|
Add some example code for creation of disk partitions.import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create two disk image files on which to create new devices
disk1_file = create_sparse_file(b, "disk1", 100000)
b.config.diskImages["disk1"] = disk1_file
disk2_file = create_sparse_file(b, "disk2", 100000)
b.config.diskImages["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
disk2 = b.devicetree.getDeviceByName("disk2")
b.initializeDisk(disk1)
b.initializeDisk(disk2)
# new partition on either disk1 or disk2 with base size 10000 MiB and growth
# up to a maximum size of 50000 MiB
dev = b.newPartition(size=10000, grow=True, maxsize=50000,
parents=[disk1, disk2])
b.createDevice(dev)
# new partition on disk1 with base size 5000 MiB and unbounded growth and an
# ext4 filesystem
dev = b.newPartition(fmt_type="ext4", size=5000, grow=True, parents=[disk1])
b.createDevice(dev)
# new partition on any suitable disk with a fixed size of 2000 MiB formatted
# as swap space
dev = b.newPartition(fmt_type="swap", size=2000)
b.createDevice(dev)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
os.unlink(disk2_file)
|
<commit_before><commit_msg>Add some example code for creation of disk partitions.<commit_after>import logging
import sys
import os
from common import set_up_logging
from common import create_sparse_file
from common import tear_down_disk_images
from common import print_devices
# doing this before importing blivet gets the logging from format class
# registrations and other stuff triggered by the import
set_up_logging()
blivet_log = logging.getLogger("blivet")
blivet_log.info(sys.argv[0])
import blivet
b = blivet.Blivet() # create an instance of Blivet (don't add system devices)
# create two disk image files on which to create new devices
disk1_file = create_sparse_file(b, "disk1", 100000)
b.config.diskImages["disk1"] = disk1_file
disk2_file = create_sparse_file(b, "disk2", 100000)
b.config.diskImages["disk2"] = disk2_file
b.reset()
try:
disk1 = b.devicetree.getDeviceByName("disk1")
disk2 = b.devicetree.getDeviceByName("disk2")
b.initializeDisk(disk1)
b.initializeDisk(disk2)
# new partition on either disk1 or disk2 with base size 10000 MiB and growth
# up to a maximum size of 50000 MiB
dev = b.newPartition(size=10000, grow=True, maxsize=50000,
parents=[disk1, disk2])
b.createDevice(dev)
# new partition on disk1 with base size 5000 MiB and unbounded growth and an
# ext4 filesystem
dev = b.newPartition(fmt_type="ext4", size=5000, grow=True, parents=[disk1])
b.createDevice(dev)
# new partition on any suitable disk with a fixed size of 2000 MiB formatted
# as swap space
dev = b.newPartition(fmt_type="swap", size=2000)
b.createDevice(dev)
# allocate the partitions (decide where and on which disks they'll reside)
blivet.partitioning.doPartitioning(b)
print_devices(b)
# write the new partitions to disk and format them as specified
b.doIt()
print_devices(b)
finally:
tear_down_disk_images(b)
os.unlink(disk1_file)
os.unlink(disk2_file)
|
|
a0c23d3fc448f916ffdd668a2daf56408dd9c0c0
|
mothermayi/pre_commit.py
|
mothermayi/pre_commit.py
|
import mothermayi.entryway
import mothermayi.git
def handle_plugins(entries):
for entry in entries:
result = entry()
def run():
with mothermayi.git.stash():
entries = mothermayi.entryway.get_entries('pre-commit')
handle_plugins(entries)
|
Add simple implementation for a pre-commit hook
|
Add simple implementation for a pre-commit hook
This will be routed via the main entryway, mothermayi
|
Python
|
mit
|
EliRibble/mothermayi
|
Add simple implementation for a pre-commit hook
This will be routed via the main entryway, mothermayi
|
import mothermayi.entryway
import mothermayi.git
def handle_plugins(entries):
for entry in entries:
result = entry()
def run():
with mothermayi.git.stash():
entries = mothermayi.entryway.get_entries('pre-commit')
handle_plugins(entries)
|
<commit_before><commit_msg>Add simple implementation for a pre-commit hook
This will be routed via the main entryway, mothermayi<commit_after>
|
import mothermayi.entryway
import mothermayi.git
def handle_plugins(entries):
for entry in entries:
result = entry()
def run():
with mothermayi.git.stash():
entries = mothermayi.entryway.get_entries('pre-commit')
handle_plugins(entries)
|
Add simple implementation for a pre-commit hook
This will be routed via the main entryway, mothermayiimport mothermayi.entryway
import mothermayi.git
def handle_plugins(entries):
for entry in entries:
result = entry()
def run():
with mothermayi.git.stash():
entries = mothermayi.entryway.get_entries('pre-commit')
handle_plugins(entries)
|
<commit_before><commit_msg>Add simple implementation for a pre-commit hook
This will be routed via the main entryway, mothermayi<commit_after>import mothermayi.entryway
import mothermayi.git
def handle_plugins(entries):
for entry in entries:
result = entry()
def run():
with mothermayi.git.stash():
entries = mothermayi.entryway.get_entries('pre-commit')
handle_plugins(entries)
|
|
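The pre-commit hook record above discovers plugins through get_entries('pre-commit'). Assuming that helper wraps setuptools entry points registered under the group name it is given (an assumption — the entryway module is not shown here), a plugin package would register itself roughly as follows; all names below are hypothetical:

# setup.py of a hypothetical plugin package
from setuptools import setup

setup(
    name='mothermayi-example-plugin',        # hypothetical package name
    py_modules=['example_hook'],
    entry_points={
        'pre-commit': [                      # group name assumed from get_entries('pre-commit')
            'example = example_hook:run',    # handle_plugins() calls each entry with no arguments
        ],
    },
)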
16615d7794b127e9752b1a2b0bd8e70adfb0954c
|
anndata/tests/test_inplace_subset.py
|
anndata/tests/test_inplace_subset.py
|
import numpy as np
import pytest
from sklearn.utils.testing import (
assert_array_equal
)
from scipy import sparse
from anndata.tests.helpers import (
gen_adata,
subset_func,
asarray
)
@pytest.fixture(
params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
ids=["np_array", "scipy_csr", "scipy_csc"]
)
def matrix_type(request):
return request.param
# TODO: Test values of .uns
def test_inplace_subset_var(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.var_names)
modified = orig.copy()
from_view = orig[:, subset_idx].copy()
modified._inplace_subset_var(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
assert_array_equal(asarray(orig.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
def test_inplace_subset_obs(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.obs_names)
modified = orig.copy()
from_view = orig[subset_idx, :].copy()
modified._inplace_subset_obs(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
assert_array_equal(asarray(orig.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
|
Add tests for inplace subset
|
Add tests for inplace subset
|
Python
|
bsd-3-clause
|
theislab/anndata
|
Add tests for inplace subset
|
import numpy as np
import pytest
from sklearn.utils.testing import (
assert_array_equal
)
from scipy import sparse
from anndata.tests.helpers import (
gen_adata,
subset_func,
asarray
)
@pytest.fixture(
params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
ids=["np_array", "scipy_csr", "scipy_csc"]
)
def matrix_type(request):
return request.param
# TODO: Test values of .uns
def test_inplace_subset_var(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.var_names)
modified = orig.copy()
from_view = orig[:, subset_idx].copy()
modified._inplace_subset_var(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
assert_array_equal(asarray(orig.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
def test_inplace_subset_obs(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.obs_names)
modified = orig.copy()
from_view = orig[subset_idx, :].copy()
modified._inplace_subset_obs(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
assert_array_equal(asarray(orig.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
|
<commit_before><commit_msg>Add tests for inplace subset<commit_after>
|
import numpy as np
import pytest
from sklearn.utils.testing import (
assert_array_equal
)
from scipy import sparse
from anndata.tests.helpers import (
gen_adata,
subset_func,
asarray
)
@pytest.fixture(
params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
ids=["np_array", "scipy_csr", "scipy_csc"]
)
def matrix_type(request):
return request.param
# TODO: Test values of .uns
def test_inplace_subset_var(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.var_names)
modified = orig.copy()
from_view = orig[:, subset_idx].copy()
modified._inplace_subset_var(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
assert_array_equal(asarray(orig.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
def test_inplace_subset_obs(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.obs_names)
modified = orig.copy()
from_view = orig[subset_idx, :].copy()
modified._inplace_subset_obs(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
assert_array_equal(asarray(orig.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
|
Add tests for inplace subsetimport numpy as np
import pytest
from sklearn.utils.testing import (
assert_array_equal
)
from scipy import sparse
from anndata.tests.helpers import (
gen_adata,
subset_func,
asarray
)
@pytest.fixture(
params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
ids=["np_array", "scipy_csr", "scipy_csc"]
)
def matrix_type(request):
return request.param
# TODO: Test values of .uns
def test_inplace_subset_var(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.var_names)
modified = orig.copy()
from_view = orig[:, subset_idx].copy()
modified._inplace_subset_var(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
assert_array_equal(asarray(orig.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
def test_inplace_subset_obs(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.obs_names)
modified = orig.copy()
from_view = orig[subset_idx, :].copy()
modified._inplace_subset_obs(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
assert_array_equal(asarray(orig.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
|
<commit_before><commit_msg>Add tests for inplace subset<commit_after>import numpy as np
import pytest
from sklearn.utils.testing import (
assert_array_equal
)
from scipy import sparse
from anndata.tests.helpers import (
gen_adata,
subset_func,
asarray
)
@pytest.fixture(
params=[np.array, sparse.csr_matrix, sparse.csc_matrix],
ids=["np_array", "scipy_csr", "scipy_csc"]
)
def matrix_type(request):
return request.param
# TODO: Test values of .uns
def test_inplace_subset_var(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.var_names)
modified = orig.copy()
from_view = orig[:, subset_idx].copy()
modified._inplace_subset_var(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
assert_array_equal(asarray(orig.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
def test_inplace_subset_obs(matrix_type, subset_func):
orig = gen_adata((30, 30), X_type=matrix_type)
subset_idx = subset_func(orig.obs_names)
modified = orig.copy()
from_view = orig[subset_idx, :].copy()
modified._inplace_subset_obs(subset_idx)
assert_array_equal(asarray(from_view.X), asarray(modified.X))
assert_array_equal(from_view.obs, modified.obs)
assert_array_equal(from_view.var, modified.var)
for k in from_view.obsm:
assert_array_equal(asarray(from_view.obsm[k]), asarray(modified.obsm[k]))
for k in from_view.varm:
assert_array_equal(asarray(from_view.varm[k]), asarray(modified.varm[k]))
assert_array_equal(asarray(orig.varm[k]), asarray(modified.varm[k]))
for k in from_view.layers:
assert_array_equal(asarray(from_view.layers[k]), asarray(modified.layers[k]))
|
|
eb87d38a65620c7e4a716dca8a8b9488b3a338d3
|
src/collectors/numa/test/testnuma.py
|
src/collectors/numa/test/testnuma.py
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42,
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
Fix syntax error in collector test
|
Fix syntax error in collector test
|
Python
|
mit
|
tellapart/Diamond,socialwareinc/Diamond,python-diamond/Diamond,works-mobile/Diamond,tuenti/Diamond,Ensighten/Diamond,TinLe/Diamond,sebbrandt87/Diamond,actmd/Diamond,Nihn/Diamond-1,EzyInsights/Diamond,bmhatfield/Diamond,zoidbergwill/Diamond,jaingaurav/Diamond,krbaker/Diamond,TAKEALOT/Diamond,thardie/Diamond,thardie/Diamond,cannium/Diamond,cannium/Diamond,tusharmakkar08/Diamond,acquia/Diamond,acquia/Diamond,signalfx/Diamond,timchenxiaoyu/Diamond,eMerzh/Diamond-1,signalfx/Diamond,anandbhoraskar/Diamond,szibis/Diamond,szibis/Diamond,skbkontur/Diamond,Slach/Diamond,stuartbfox/Diamond,russss/Diamond,Ormod/Diamond,saucelabs/Diamond,socialwareinc/Diamond,Slach/Diamond,sebbrandt87/Diamond,eMerzh/Diamond-1,ramjothikumar/Diamond,russss/Diamond,tusharmakkar08/Diamond,thardie/Diamond,russss/Diamond,Ensighten/Diamond,Precis/Diamond,dcsquared13/Diamond,Ssawa/Diamond,gg7/diamond,h00dy/Diamond,hvnsweeting/Diamond,jaingaurav/Diamond,ramjothikumar/Diamond,Nihn/Diamond-1,mzupan/Diamond,Netuitive/Diamond,mfriedenhagen/Diamond,stuartbfox/Diamond,cannium/Diamond,Ormod/Diamond,janisz/Diamond-1,disqus/Diamond,EzyInsights/Diamond,TAKEALOT/Diamond,tellapart/Diamond,python-diamond/Diamond,krbaker/Diamond,EzyInsights/Diamond,works-mobile/Diamond,hamelg/Diamond,MichaelDoyle/Diamond,Ensighten/Diamond,tuenti/Diamond,anandbhoraskar/Diamond,CYBERBUGJR/Diamond,jumping/Diamond,tuenti/Diamond,eMerzh/Diamond-1,codepython/Diamond,Netuitive/netuitive-diamond,TinLe/Diamond,hvnsweeting/Diamond,tusharmakkar08/Diamond,joel-airspring/Diamond,skbkontur/Diamond,actmd/Diamond,szibis/Diamond,Basis/Diamond,jaingaurav/Diamond,stuartbfox/Diamond,Precis/Diamond,Basis/Diamond,Netuitive/Diamond,Netuitive/netuitive-diamond,TinLe/Diamond,hvnsweeting/Diamond,tusharmakkar08/Diamond,joel-airspring/Diamond,skbkontur/Diamond,krbaker/Diamond,actmd/Diamond,janisz/Diamond-1,disqus/Diamond,h00dy/Diamond,mfriedenhagen/Diamond,Netuitive/Diamond,saucelabs/Diamond,gg7/diamond,saucelabs/Diamond,Nihn/Diamond-1,Clever/Diamond,h00dy/Diamond,TAKEALOT/Diamond,ramjothikumar/Diamond,jumping/Diamond,works-mobile/Diamond,Slach/Diamond,codepython/Diamond,dcsquared13/Diamond,zoidbergwill/Diamond,jriguera/Diamond,bmhatfield/Diamond,acquia/Diamond,Netuitive/netuitive-diamond,Ssawa/Diamond,eMerzh/Diamond-1,bmhatfield/Diamond,CYBERBUGJR/Diamond,TinLe/Diamond,Precis/Diamond,mzupan/Diamond,krbaker/Diamond,sebbrandt87/Diamond,timchenxiaoyu/Diamond,TAKEALOT/Diamond,CYBERBUGJR/Diamond,signalfx/Diamond,dcsquared13/Diamond,jriguera/Diamond,socialwareinc/Diamond,szibis/Diamond,CYBERBUGJR/Diamond,Ensighten/Diamond,timchenxiaoyu/Diamond,tellapart/Diamond,Ormod/Diamond,disqus/Diamond,hamelg/Diamond,anandbhoraskar/Diamond,janisz/Diamond-1,Slach/Diamond,socialwareinc/Diamond,jumping/Diamond,tellapart/Diamond,hamelg/Diamond,MichaelDoyle/Diamond,Clever/Diamond,Ssawa/Diamond,Precis/Diamond,works-mobile/Diamond,cannium/Diamond,saucelabs/Diamond,dcsquared13/Diamond,mfriedenhagen/Diamond,Basis/Diamond,MichaelDoyle/Diamond,bmhatfield/Diamond,joel-airspring/Diamond,jriguera/Diamond,jumping/Diamond,mzupan/Diamond,Ormod/Diamond,Nihn/Diamond-1,jaingaurav/Diamond,h00dy/Diamond,janisz/Diamond-1,acquia/Diamond,gg7/diamond,sebbrandt87/Diamond
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
Fix syntax error in collector test
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42,
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
<commit_before>#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
<commit_msg>Fix syntax error in collector test<commit_after>
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42,
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
Fix syntax error in collector test#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42,
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
<commit_before>#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
<commit_msg>Fix syntax error in collector test<commit_after>#!/usr/bin/python
# coding=utf-8
################################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from numa import NumaCollector
################################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NumaCollector', {
'interval': 10
})
self.collector = NumaCollector(config, None)
def test_import(self):
self.assertTrue(NumaCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'node_0_free_MB': 42,
'node_0_size_MB': 402
}
self.setDocNuma(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
412828bea81f5aad917188881c1e7e4d6ce52400
|
usingnamespace/tests/test_views_management.py
|
usingnamespace/tests/test_views_management.py
|
import unittest
from pyramid import testing
class ManagementViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import Management
return Management(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_verify_context_request(self):
view_class = self.makeOne("1", "2")
self.assertEqual(view_class.context, "1")
self.assertEqual(view_class.request, "2")
def test_management_home(self):
view_class = self.makeWithInfo()
self.assertEqual(view_class.home(), {})
class ManagementNotAuthorizedViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.request = None
self.context = None
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import ManagementNotAuthorized
return ManagementNotAuthorized(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_view_forbidden(self):
from pyramid.httpexceptions import HTTPForbidden
view_class = self.makeWithInfo()
self.assertRaises(HTTPForbidden, view_class.management_not_authed)
def test_view_not_found(self):
view_class = self.makeWithInfo()
view_class.management_not_found()
self.assertEqual(self.request.response.status_int, 404)
|
Add tests for the management views
|
Add tests for the management views
|
Python
|
isc
|
usingnamespace/usingnamespace
|
Add tests for the management views
|
import unittest
from pyramid import testing
class ManagementViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import Management
return Management(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_verify_context_request(self):
view_class = self.makeOne("1", "2")
self.assertEqual(view_class.context, "1")
self.assertEqual(view_class.request, "2")
def test_management_home(self):
view_class = self.makeWithInfo()
self.assertEqual(view_class.home(), {})
class ManagementNotAuthorizedViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.request = None
self.context = None
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import ManagementNotAuthorized
return ManagementNotAuthorized(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_view_forbidden(self):
from pyramid.httpexceptions import HTTPForbidden
view_class = self.makeWithInfo()
self.assertRaises(HTTPForbidden, view_class.management_not_authed)
def test_view_not_found(self):
view_class = self.makeWithInfo()
view_class.management_not_found()
self.assertEqual(self.request.response.status_int, 404)
|
<commit_before><commit_msg>Add tests for the management views<commit_after>
|
import unittest
from pyramid import testing
class ManagementViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import Management
return Management(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_verify_context_request(self):
view_class = self.makeOne("1", "2")
self.assertEqual(view_class.context, "1")
self.assertEqual(view_class.request, "2")
def test_management_home(self):
view_class = self.makeWithInfo()
self.assertEqual(view_class.home(), {})
class ManagementNotAuthorizedViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.request = None
self.context = None
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import ManagementNotAuthorized
return ManagementNotAuthorized(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_view_forbidden(self):
from pyramid.httpexceptions import HTTPForbidden
view_class = self.makeWithInfo()
self.assertRaises(HTTPForbidden, view_class.management_not_authed)
def test_view_not_found(self):
view_class = self.makeWithInfo()
view_class.management_not_found()
self.assertEqual(self.request.response.status_int, 404)
|
Add tests for the management viewsimport unittest
from pyramid import testing
class ManagementViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import Management
return Management(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_verify_context_request(self):
view_class = self.makeOne("1", "2")
self.assertEqual(view_class.context, "1")
self.assertEqual(view_class.request, "2")
def test_management_home(self):
view_class = self.makeWithInfo()
self.assertEqual(view_class.home(), {})
class ManagementNotAuthorizedViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.request = None
self.context = None
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import ManagementNotAuthorized
return ManagementNotAuthorized(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_view_forbidden(self):
from pyramid.httpexceptions import HTTPForbidden
view_class = self.makeWithInfo()
self.assertRaises(HTTPForbidden, view_class.management_not_authed)
def test_view_not_found(self):
view_class = self.makeWithInfo()
view_class.management_not_found()
self.assertEqual(self.request.response.status_int, 404)
|
<commit_before><commit_msg>Add tests for the management views<commit_after>import unittest
from pyramid import testing
class ManagementViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import Management
return Management(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_verify_context_request(self):
view_class = self.makeOne("1", "2")
self.assertEqual(view_class.context, "1")
self.assertEqual(view_class.request, "2")
def test_management_home(self):
view_class = self.makeWithInfo()
self.assertEqual(view_class.home(), {})
class ManagementNotAuthorizedViewsTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
self.request = None
self.context = None
def tearDown(self):
testing.tearDown()
def makeOne(self, context, request):
from usingnamespace.views.management import ManagementNotAuthorized
return ManagementNotAuthorized(context, request)
def makeWithInfo(self):
self.request = testing.DummyRequest()
self.context = testing.DummyResource()
self.request.context = self.context
return self.makeOne(self.context, self.request)
def test_view_forbidden(self):
from pyramid.httpexceptions import HTTPForbidden
view_class = self.makeWithInfo()
self.assertRaises(HTTPForbidden, view_class.management_not_authed)
def test_view_not_found(self):
view_class = self.makeWithInfo()
view_class.management_not_found()
self.assertEqual(self.request.response.status_int, 404)
|
|
430b5daebbd5385551203c2a0cf23bb355a2c027
|
doc/source/scripts/06-cat-of-cats.py
|
doc/source/scripts/06-cat-of-cats.py
|
import os
import photomosaic as pm
import photomosaic.flickr
import matplotlib.pyplot as plt
# For these published examples we use os.environ to keep our API key private.
# Just set your own Flickr API key here.
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
# Get a pool of cat photos from Flickr.
pm.set_options(flickr_api_key=FLICKR_API_KEY)
photomosaic.flickr.from_search('cats', 'cats/', 1000)
pool = pm.make_pool('cats/*.jpg')
pm.export_pool(pool, 'cats/pool.json') # save color analysis for future reuse
# Build mosaic.
mosaic = pm.basic_mosaic(img, pool, (30, 30), depth=4)
plt.plot(mosaic)
plt.show()
|
Add a script that uses the Flickr API.
|
DOC: Add a script that uses the Flickr API.
|
Python
|
bsd-3-clause
|
danielballan/photomosaic
|
DOC: Add a script that uses the Flickr API.
|
import os
import photomosaic as pm
import photomosaic.flickr
import matplotlib.pyplot as plt
# For these published examples we use os.environ to keep our API key private.
# Just set your own Flickr API key here.
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
# Get a pool of cat photos from Flickr.
pm.set_options(flickr_api_key=FLICKR_API_KEY)
photomosaic.flickr.from_search('cats', 'cats/', 1000)
pool = pm.make_pool('cats/*.jpg')
pm.export_pool(pool, 'cats/pool.json') # save color analysis for future reuse
# Build mosaic.
mosaic = pm.basic_mosaic(img, pool, (30, 30), depth=4)
plt.plot(mosaic)
plt.show()
|
<commit_before><commit_msg>DOC: Add a script that uses the Flickr API.<commit_after>
|
import os
import photomosaic as pm
import photomosaic.flickr
import matplotlib.pyplot as plt
# For these published examples we use os.environ to keep our API key private.
# Just set your own Flickr API key here.
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
# Get a pool of cat photos from Flickr.
pm.set_options(flickr_api_key=FLICKR_API_KEY)
photomosaic.flickr.from_search('cats', 'cats/', 1000)
pool = pm.make_pool('cats/*.jpg')
pm.export_pool(pool, 'cats/pool.json') # save color analysis for future reuse
# Build mosaic.
mosaic = pm.basic_mosaic(img, pool, (30, 30), depth=4)
plt.plot(mosaic)
plt.show()
|
DOC: Add a script that uses the Flickr API.import os
import photomosaic as pm
import photomosaic.flickr
import matplotlib.pyplot as plt
# For these published examples we use os.environ to keep our API key private.
# Just set your own Flickr API key here.
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
# Get a pool of cat photos from Flickr.
pm.set_options(flickr_api_key=FLICKR_API_KEY)
photomosaic.flickr.from_search('cats', 'cats/', 1000)
pool = pm.make_pool('cats/*.jpg')
pm.export_pool(pool, 'cats/pool.json') # save color analysis for future reuse
# Build mosaic.
mosaic = pm.basic_mosaic(img, pool, (30, 30), depth=4)
plt.plot(mosaic)
plt.show()
|
<commit_before><commit_msg>DOC: Add a script that uses the Flickr API.<commit_after>import os
import photomosaic as pm
import photomosaic.flickr
import matplotlib.pyplot as plt
# For these published examples we use os.environ to keep our API key private.
# Just set your own Flickr API key here.
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
# Get a pool of cat photos from Flickr.
pm.set_options(flickr_api_key=FLICKR_API_KEY)
photomosaic.flickr.from_search('cats', 'cats/', 1000)
pool = pm.make_pool('cats/*.jpg')
pm.export_pool(pool, 'cats/pool.json') # save color analysis for future reuse
# Build mosaic.
mosaic = pm.basic_mosaic(img, pool, (30, 30), depth=4)
plt.plot(mosaic)
plt.show()
|
|
b66b02be95e7b0c36a9ced53b07d91298190ca4a
|
test/test_dl.py
|
test/test_dl.py
|
from mpi4py import dl
import mpiunittest as unittest
import sys
import os
class TestDL(unittest.TestCase):
def testDL1(self):
if sys.platform == 'darwin':
libm = 'libm.dylib'
else:
libm = 'libm.so'
handle = dl.dlopen(libm, dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'sqrt')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'xxxxx')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL2(self):
handle = dl.dlopen(None, dl.RTLD_GLOBAL|dl.RTLD_NOW)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'malloc')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, '!@#$%^&*()')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL3(self):
handle = dl.dlopen('xxxxx', dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle == 0)
self.assertTrue(dl.dlerror() is not None)
if os.name != 'posix':
del TestDL
if __name__ == '__main__':
unittest.main()
|
Add tests for mpi4py.dl module
|
test: Add tests for mpi4py.dl module
|
Python
|
bsd-2-clause
|
mpi4py/mpi4py,mpi4py/mpi4py,pressel/mpi4py,pressel/mpi4py,mpi4py/mpi4py,pressel/mpi4py,pressel/mpi4py
|
test: Add tests for mpi4py.dl module
|
from mpi4py import dl
import mpiunittest as unittest
import sys
import os
class TestDL(unittest.TestCase):
def testDL1(self):
if sys.platform == 'darwin':
libm = 'libm.dylib'
else:
libm = 'libm.so'
handle = dl.dlopen(libm, dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'sqrt')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'xxxxx')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL2(self):
handle = dl.dlopen(None, dl.RTLD_GLOBAL|dl.RTLD_NOW)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'malloc')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, '!@#$%^&*()')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL3(self):
handle = dl.dlopen('xxxxx', dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle == 0)
self.assertTrue(dl.dlerror() is not None)
if os.name != 'posix':
del TestDL
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>test: Add tests for mpi4py.dl module<commit_after>
|
from mpi4py import dl
import mpiunittest as unittest
import sys
import os
class TestDL(unittest.TestCase):
def testDL1(self):
if sys.platform == 'darwin':
libm = 'libm.dylib'
else:
libm = 'libm.so'
handle = dl.dlopen(libm, dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'sqrt')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'xxxxx')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL2(self):
handle = dl.dlopen(None, dl.RTLD_GLOBAL|dl.RTLD_NOW)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'malloc')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, '!@#$%^&*()')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL3(self):
handle = dl.dlopen('xxxxx', dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle == 0)
self.assertTrue(dl.dlerror() is not None)
if os.name != 'posix':
del TestDL
if __name__ == '__main__':
unittest.main()
|
test: Add tests for mpi4py.dl modulefrom mpi4py import dl
import mpiunittest as unittest
import sys
import os
class TestDL(unittest.TestCase):
def testDL1(self):
if sys.platform == 'darwin':
libm = 'libm.dylib'
else:
libm = 'libm.so'
handle = dl.dlopen(libm, dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'sqrt')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'xxxxx')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL2(self):
handle = dl.dlopen(None, dl.RTLD_GLOBAL|dl.RTLD_NOW)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'malloc')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, '!@#$%^&*()')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL3(self):
handle = dl.dlopen('xxxxx', dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle == 0)
self.assertTrue(dl.dlerror() is not None)
if os.name != 'posix':
del TestDL
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>test: Add tests for mpi4py.dl module<commit_after>from mpi4py import dl
import mpiunittest as unittest
import sys
import os
class TestDL(unittest.TestCase):
def testDL1(self):
if sys.platform == 'darwin':
libm = 'libm.dylib'
else:
libm = 'libm.so'
handle = dl.dlopen(libm, dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'sqrt')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'xxxxx')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL2(self):
handle = dl.dlopen(None, dl.RTLD_GLOBAL|dl.RTLD_NOW)
self.assertTrue(handle != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, 'malloc')
self.assertTrue(symbol != 0)
self.assertTrue(dl.dlerror() is None)
symbol = dl.dlsym(handle, '!@#$%^&*()')
self.assertTrue(symbol == 0)
self.assertTrue(dl.dlerror() is not None)
ierr = dl.dlclose(handle)
self.assertTrue(ierr == 0)
self.assertTrue(dl.dlerror() is None)
def testDL3(self):
handle = dl.dlopen('xxxxx', dl.RTLD_LOCAL|dl.RTLD_LAZY)
self.assertTrue(handle == 0)
self.assertTrue(dl.dlerror() is not None)
if os.name != 'posix':
del TestDL
if __name__ == '__main__':
unittest.main()
|
|
cf3cae6493a369173244e05d190cceae41b9abbd
|
bluesky/tests/test_olog_cb.py
|
bluesky/tests/test_olog_cb.py
|
from bluesky import Msg
from bluesky.callbacks.olog import logbook_cb_factory
text = []
def f(**kwargs):
text.append(kwargs['text'])
def test_default_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert len(text[0]) > 0
def test_trivial_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert text[0] == 'hello'
# smoke test the long_template
fresh_RE.subscribe('start', logbook_cb_factory(f, long_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
def test_template_dispatch(fresh_RE):
disp = {'a': 'A', 'b': 'B'}
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
assert text[0] == 'A'
assert text[1] == 'B'
# smoke test the long_dispatch
fresh_RE.subscribe('start', logbook_cb_factory(f, long_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
|
Add some coverage for olog callback.
|
TST: Add some coverage for olog callback.
|
Python
|
bsd-3-clause
|
ericdill/bluesky,ericdill/bluesky
|
TST: Add some coverage for olog callback.
|
from bluesky import Msg
from bluesky.callbacks.olog import logbook_cb_factory
text = []
def f(**kwargs):
text.append(kwargs['text'])
def test_default_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert len(text[0]) > 0
def test_trivial_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert text[0] == 'hello'
# smoke test the long_template
fresh_RE.subscribe('start', logbook_cb_factory(f, long_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
def test_template_dispatch(fresh_RE):
disp = {'a': 'A', 'b': 'B'}
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
assert text[0] == 'A'
assert text[1] == 'B'
# smoke test the long_dispatch
fresh_RE.subscribe('start', logbook_cb_factory(f, long_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
|
<commit_before><commit_msg>TST: Add some coverage for olog callback.<commit_after>
|
from bluesky import Msg
from bluesky.callbacks.olog import logbook_cb_factory
text = []
def f(**kwargs):
text.append(kwargs['text'])
def test_default_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert len(text[0]) > 0
def test_trivial_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert text[0] == 'hello'
# smoke test the long_template
fresh_RE.subscribe('start', logbook_cb_factory(f, long_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
def test_template_dispatch(fresh_RE):
disp = {'a': 'A', 'b': 'B'}
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
assert text[0] == 'A'
assert text[1] == 'B'
# smoke test the long_dispatch
fresh_RE.subscribe('start', logbook_cb_factory(f, long_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
|
TST: Add some coverage for olog callback.from bluesky import Msg
from bluesky.callbacks.olog import logbook_cb_factory
text = []
def f(**kwargs):
text.append(kwargs['text'])
def test_default_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert len(text[0]) > 0
def test_trivial_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert text[0] == 'hello'
# smoke test the long_template
fresh_RE.subscribe('start', logbook_cb_factory(f, long_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
def test_template_dispatch(fresh_RE):
disp = {'a': 'A', 'b': 'B'}
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
assert text[0] == 'A'
assert text[1] == 'B'
# smoke test the long_dispatch
fresh_RE.subscribe('start', logbook_cb_factory(f, long_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
|
<commit_before><commit_msg>TST: Add some coverage for olog callback.<commit_after>from bluesky import Msg
from bluesky.callbacks.olog import logbook_cb_factory
text = []
def f(**kwargs):
text.append(kwargs['text'])
def test_default_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert len(text[0]) > 0
def test_trivial_template(fresh_RE):
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
assert text[0] == 'hello'
# smoke test the long_template
fresh_RE.subscribe('start', logbook_cb_factory(f, long_template='hello'))
fresh_RE([Msg('open_run', plan_args={}), Msg('close_run')])
def test_template_dispatch(fresh_RE):
disp = {'a': 'A', 'b': 'B'}
text.clear()
fresh_RE.subscribe('start', logbook_cb_factory(f, desc_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
assert text[0] == 'A'
assert text[1] == 'B'
# smoke test the long_dispatch
fresh_RE.subscribe('start', logbook_cb_factory(f, long_dispatch=disp))
fresh_RE([Msg('open_run', plan_name='a', plan_args={}),
Msg('close_run')])
fresh_RE([Msg('open_run', plan_name='b', plan_args={}),
Msg('close_run')])
|
|
e3d92ce2cd17a967ac19aecad2998c4094f2ae11
|
run.py
|
run.py
|
#!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
import gizeh as gz
import numpy as np
import matplotlib.pyplot as plt
def gen_naca_points(naca="0020", c=100, npoints=100):
"""Generate points for a NACA foil."""
x = np.linspace(0, 1, npoints)*c
t = float(naca[2:])/100.0
y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \
+ 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
y = np.append(y, -y[::-1])
x = np.append(x, x[::-1])
points = [(x0, y0) for x0, y0 in zip(x, y)]
return points
def test_gen_naca_points():
points = gen_naca_points()
x = []
y = []
for p in points:
x.append(p[0])
y.append(p[1])
fig, ax = plt.subplots()
ax.plot(x, y, "o")
ax.set_aspect(1)
plt.show()
def draw_foil(naca="0020", c=100):
"""Draw NACA 0020 foil."""
points = gen_naca_points(naca, c)
line = gz.polyline(points, close_path=False, stroke_width=2, xy=(300, 300))
return line
def main():
canvas = gz.Surface(width=700, height=700)
foil = draw_foil()
foil.draw(canvas)
canvas.write_to_png("cft-vectors.png")
if __name__ == "__main__":
main()
|
Add script to draw NACA foil
|
Add script to draw NACA foil
|
Python
|
mit
|
petebachant/CFT-vectors
|
Add script to draw NACA foil
|
#!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
import gizeh as gz
import numpy as np
import matplotlib.pyplot as plt
def gen_naca_points(naca="0020", c=100, npoints=100):
"""Generate points for a NACA foil."""
x = np.linspace(0, 1, npoints)*c
t = float(naca[2:])/100.0
y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \
+ 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
y = np.append(y, -y[::-1])
x = np.append(x, x[::-1])
points = [(x0, y0) for x0, y0 in zip(x, y)]
return points
def test_gen_naca_points():
points = gen_naca_points()
x = []
y = []
for p in points:
x.append(p[0])
y.append(p[1])
fig, ax = plt.subplots()
ax.plot(x, y, "o")
ax.set_aspect(1)
plt.show()
def draw_foil(naca="0020", c=100):
"""Draw NACA 0020 foil."""
points = gen_naca_points(naca, c)
line = gz.polyline(points, close_path=False, stroke_width=2, xy=(300, 300))
return line
def main():
canvas = gz.Surface(width=700, height=700)
foil = draw_foil()
foil.draw(canvas)
canvas.write_to_png("cft-vectors.png")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to draw NACA foil<commit_after>
|
#!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
import gizeh as gz
import numpy as np
import matplotlib.pyplot as plt
def gen_naca_points(naca="0020", c=100, npoints=100):
"""Generate points for a NACA foil."""
x = np.linspace(0, 1, npoints)*c
t = float(naca[2:])/100.0
y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \
+ 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
y = np.append(y, -y[::-1])
x = np.append(x, x[::-1])
points = [(x0, y0) for x0, y0 in zip(x, y)]
return points
def test_gen_naca_points():
points = gen_naca_points()
x = []
y = []
for p in points:
x.append(p[0])
y.append(p[1])
fig, ax = plt.subplots()
ax.plot(x, y, "o")
ax.set_aspect(1)
plt.show()
def draw_foil(naca="0020", c=100):
"""Draw NACA 0020 foil."""
points = gen_naca_points(naca, c)
line = gz.polyline(points, close_path=False, stroke_width=2, xy=(300, 300))
return line
def main():
canvas = gz.Surface(width=700, height=700)
foil = draw_foil()
foil.draw(canvas)
canvas.write_to_png("cft-vectors.png")
if __name__ == "__main__":
main()
|
Add script to draw NACA foil#!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
import gizeh as gz
import numpy as np
import matplotlib.pyplot as plt
def gen_naca_points(naca="0020", c=100, npoints=100):
"""Generate points for a NACA foil."""
x = np.linspace(0, 1, npoints)*c
t = float(naca[2:])/100.0
y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \
+ 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
y = np.append(y, -y[::-1])
x = np.append(x, x[::-1])
points = [(x0, y0) for x0, y0 in zip(x, y)]
return points
def test_gen_naca_points():
points = gen_naca_points()
x = []
y = []
for p in points:
x.append(p[0])
y.append(p[1])
fig, ax = plt.subplots()
ax.plot(x, y, "o")
ax.set_aspect(1)
plt.show()
def draw_foil(naca="0020", c=100):
"""Draw NACA 0020 foil."""
points = gen_naca_points(naca, c)
line = gz.polyline(points, close_path=False, stroke_width=2, xy=(300, 300))
return line
def main():
canvas = gz.Surface(width=700, height=700)
foil = draw_foil()
foil.draw(canvas)
canvas.write_to_png("cft-vectors.png")
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to draw NACA foil<commit_after>#!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
import gizeh as gz
import numpy as np
import matplotlib.pyplot as plt
def gen_naca_points(naca="0020", c=100, npoints=100):
"""Generate points for a NACA foil."""
x = np.linspace(0, 1, npoints)*c
t = float(naca[2:])/100.0
y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \
+ 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
y = np.append(y, -y[::-1])
x = np.append(x, x[::-1])
points = [(x0, y0) for x0, y0 in zip(x, y)]
return points
def test_gen_naca_points():
points = gen_naca_points()
x = []
y = []
for p in points:
x.append(p[0])
y.append(p[1])
fig, ax = plt.subplots()
ax.plot(x, y, "o")
ax.set_aspect(1)
plt.show()
def draw_foil(naca="0020", c=100):
"""Draw NACA 0020 foil."""
points = gen_naca_points(naca, c)
line = gz.polyline(points, close_path=False, stroke_width=2, xy=(300, 300))
return line
def main():
canvas = gz.Surface(width=700, height=700)
foil = draw_foil()
foil.draw(canvas)
canvas.write_to_png("cft-vectors.png")
if __name__ == "__main__":
main()
|
|
d6120537ec982f50d08fa188e91c68c023809db3
|
txircd/modules/rfc/response_error.py
|
txircd/modules/rfc/response_error.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ErrorResponse(ModuleData):
implements(IPlugin, IModuleData)
name = "errorResponse"
core = True
def actions(self):
return [("quit", 10, self.sendError)]
def sendError(self, user, reason):
user.sendMessage("ERROR", ":Closing Link: {}@{} [{}]".format(user.ident, user.host, reason), to=None, prefix=None)
errorResponse = ErrorResponse()
|
Send ERROR when the user disconnects
|
Send ERROR when the user disconnects
|
Python
|
bsd-3-clause
|
Heufneutje/txircd,ElementalAlchemist/txircd
|
Send ERROR when the user disconnects
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ErrorResponse(ModuleData):
implements(IPlugin, IModuleData)
name = "errorResponse"
core = True
def actions(self):
return [("quit", 10, self.sendError)]
def sendError(self, user, reason):
user.sendMessage("ERROR", ":Closing Link: {}@{} [{}]".format(user.ident, user.host, reason), to=None, prefix=None)
errorResponse = ErrorResponse()
|
<commit_before><commit_msg>Send ERROR when the user disconnects<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ErrorResponse(ModuleData):
implements(IPlugin, IModuleData)
name = "errorResponse"
core = True
def actions(self):
return [("quit", 10, self.sendError)]
def sendError(self, user, reason):
user.sendMessage("ERROR", ":Closing Link: {}@{} [{}]".format(user.ident, user.host, reason), to=None, prefix=None)
errorResponse = ErrorResponse()
|
Send ERROR when the user disconnectsfrom twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ErrorResponse(ModuleData):
implements(IPlugin, IModuleData)
name = "errorResponse"
core = True
def actions(self):
return [("quit", 10, self.sendError)]
def sendError(self, user, reason):
user.sendMessage("ERROR", ":Closing Link: {}@{} [{}]".format(user.ident, user.host, reason), to=None, prefix=None)
errorResponse = ErrorResponse()
|
<commit_before><commit_msg>Send ERROR when the user disconnects<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ErrorResponse(ModuleData):
implements(IPlugin, IModuleData)
name = "errorResponse"
core = True
def actions(self):
return [("quit", 10, self.sendError)]
def sendError(self, user, reason):
user.sendMessage("ERROR", ":Closing Link: {}@{} [{}]".format(user.ident, user.host, reason), to=None, prefix=None)
errorResponse = ErrorResponse()
|
|
ac44a041e3e7808305b025e1087f48b7d4a9234a
|
tools/bitly/delete_bitly_blobs.py
|
tools/bitly/delete_bitly_blobs.py
|
#!/usr/bin/env python3
import argparse
import boto3
import os
from typing import List
from mediawords.util.log import create_logger
l = create_logger(__name__)
def delete_bitly_blobs(story_ids: List[int]):
session = boto3.Session(profile_name='mediacloud')
s3 = session.resource('s3')
bucket = s3.Bucket('mediacloud-bitly-processing-results')
chunk_size = 999 # up to 1000 objects to be deleted at once
story_ids_chunks = [story_ids[x:x + chunk_size] for x in range(0, len(story_ids), chunk_size)]
l.info('Deleting %d Bit.ly blobs, split into %d chunks...' % (len(story_ids), len(story_ids_chunks)))
for chunk in story_ids_chunks:
objects_to_delete = []
for stories_id in chunk:
objects_to_delete.append({'Key': 'json_blobs/%d' % stories_id})
bucket.delete_objects(
Delete={
'Objects': objects_to_delete,
}
)
l.info('Done deleting %d Bit.ly blobs.' % len(story_ids))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Delete Bit.ly raw results from S3.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input_file', type=str, required=True, help='Input file with Bit.ly story IDs.')
args = parser.parse_args()
if not os.path.isfile(args.input_file):
raise Exception('Input file "%s" does not exist.' % args.input_file)
bitly_story_ids = []
with open(args.input_file, 'r') as fh:
for line in fh:
line = line.rstrip("\n")
if line:
line = int(line)
bitly_story_ids.append(line)
delete_bitly_blobs(story_ids=bitly_story_ids)
|
Add script to delete Bit.ly raw results from S3
|
Add script to delete Bit.ly raw results from S3
|
Python
|
agpl-3.0
|
berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud
|
Add script to delete Bit.ly raw results from S3
|
#!/usr/bin/env python3
import argparse
import boto3
import os
from typing import List
from mediawords.util.log import create_logger
l = create_logger(__name__)
def delete_bitly_blobs(story_ids: List[int]):
session = boto3.Session(profile_name='mediacloud')
s3 = session.resource('s3')
bucket = s3.Bucket('mediacloud-bitly-processing-results')
chunk_size = 999 # up to 1000 objects to be deleted at once
story_ids_chunks = [story_ids[x:x + chunk_size] for x in range(0, len(story_ids), chunk_size)]
l.info('Deleting %d Bit.ly blobs, split into %d chunks...' % (len(story_ids), len(story_ids_chunks)))
for chunk in story_ids_chunks:
objects_to_delete = []
for stories_id in chunk:
objects_to_delete.append({'Key': 'json_blobs/%d' % stories_id})
bucket.delete_objects(
Delete={
'Objects': objects_to_delete,
}
)
l.info('Done deleting %d Bit.ly blobs.' % len(story_ids))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Delete Bit.ly raw results from S3.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input_file', type=str, required=True, help='Input file with Bit.ly story IDs.')
args = parser.parse_args()
if not os.path.isfile(args.input_file):
raise Exception('Input file "%s" does not exist.' % args.input_file)
bitly_story_ids = []
with open(args.input_file, 'r') as fh:
for line in fh:
line = line.rstrip("\n")
if line:
line = int(line)
bitly_story_ids.append(line)
delete_bitly_blobs(story_ids=bitly_story_ids)
|
<commit_before><commit_msg>Add script to delete Bit.ly raw results from S3<commit_after>
|
#!/usr/bin/env python3
import argparse
import boto3
import os
from typing import List
from mediawords.util.log import create_logger
l = create_logger(__name__)
def delete_bitly_blobs(story_ids: List[int]):
session = boto3.Session(profile_name='mediacloud')
s3 = session.resource('s3')
bucket = s3.Bucket('mediacloud-bitly-processing-results')
chunk_size = 999 # up to 1000 objects to be deleted at once
story_ids_chunks = [story_ids[x:x + chunk_size] for x in range(0, len(story_ids), chunk_size)]
l.info('Deleting %d Bit.ly blobs, split into %d chunks...' % (len(story_ids), len(story_ids_chunks)))
for chunk in story_ids_chunks:
objects_to_delete = []
for stories_id in chunk:
objects_to_delete.append({'Key': 'json_blobs/%d' % stories_id})
bucket.delete_objects(
Delete={
'Objects': objects_to_delete,
}
)
l.info('Done deleting %d Bit.ly blobs.' % len(story_ids))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Delete Bit.ly raw results from S3.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input_file', type=str, required=True, help='Input file with Bit.ly story IDs.')
args = parser.parse_args()
if not os.path.isfile(args.input_file):
raise Exception('Input file "%s" does not exist.' % args.input_file)
bitly_story_ids = []
with open(args.input_file, 'r') as fh:
for line in fh:
line = line.rstrip("\n")
if line:
line = int(line)
bitly_story_ids.append(line)
delete_bitly_blobs(story_ids=bitly_story_ids)
|
Add script to delete Bit.ly raw results from S3#!/usr/bin/env python3
import argparse
import boto3
import os
from typing import List
from mediawords.util.log import create_logger
l = create_logger(__name__)
def delete_bitly_blobs(story_ids: List[int]):
session = boto3.Session(profile_name='mediacloud')
s3 = session.resource('s3')
bucket = s3.Bucket('mediacloud-bitly-processing-results')
chunk_size = 999 # up to 1000 objects to be deleted at once
story_ids_chunks = [story_ids[x:x + chunk_size] for x in range(0, len(story_ids), chunk_size)]
l.info('Deleting %d Bit.ly blobs, split into %d chunks...' % (len(story_ids), len(story_ids_chunks)))
for chunk in story_ids_chunks:
objects_to_delete = []
for stories_id in chunk:
objects_to_delete.append({'Key': 'json_blobs/%d' % stories_id})
bucket.delete_objects(
Delete={
'Objects': objects_to_delete,
}
)
l.info('Done deleting %d Bit.ly blobs.' % len(story_ids))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Delete Bit.ly raw results from S3.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input_file', type=str, required=True, help='Input file with Bit.ly story IDs.')
args = parser.parse_args()
if not os.path.isfile(args.input_file):
raise Exception('Input file "%s" does not exist.' % args.input_file)
bitly_story_ids = []
with open(args.input_file, 'r') as fh:
for line in fh:
line = line.rstrip("\n")
if line:
line = int(line)
bitly_story_ids.append(line)
delete_bitly_blobs(story_ids=bitly_story_ids)
|
<commit_before><commit_msg>Add script to delete Bit.ly raw results from S3<commit_after>#!/usr/bin/env python3
import argparse
import boto3
import os
from typing import List
from mediawords.util.log import create_logger
l = create_logger(__name__)
def delete_bitly_blobs(story_ids: List[int]):
session = boto3.Session(profile_name='mediacloud')
s3 = session.resource('s3')
bucket = s3.Bucket('mediacloud-bitly-processing-results')
chunk_size = 999 # up to 1000 objects to be deleted at once
story_ids_chunks = [story_ids[x:x + chunk_size] for x in range(0, len(story_ids), chunk_size)]
l.info('Deleting %d Bit.ly blobs, split into %d chunks...' % (len(story_ids), len(story_ids_chunks)))
for chunk in story_ids_chunks:
objects_to_delete = []
for stories_id in chunk:
objects_to_delete.append({'Key': 'json_blobs/%d' % stories_id})
bucket.delete_objects(
Delete={
'Objects': objects_to_delete,
}
)
l.info('Done deleting %d Bit.ly blobs.' % len(story_ids))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Delete Bit.ly raw results from S3.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--input_file', type=str, required=True, help='Input file with Bit.ly story IDs.')
args = parser.parse_args()
if not os.path.isfile(args.input_file):
raise Exception('Input file "%s" does not exist.' % args.input_file)
bitly_story_ids = []
with open(args.input_file, 'r') as fh:
for line in fh:
line = line.rstrip("\n")
if line:
line = int(line)
bitly_story_ids.append(line)
delete_bitly_blobs(story_ids=bitly_story_ids)
|
|
37c0257fcc5e65b67fabfd17c2bf884ad8fe03e1
|
recipe-server/normandy/recipes/migrations/0038_remove_invalid_signatures.py
|
recipe-server/normandy/recipes/migrations/0038_remove_invalid_signatures.py
|
"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-27 00:03
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0037_auto_20170113_0627'),
]
operations = [
        # This function acts as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
Add migration to reset signatures
|
recipe-server: Add migration to reset signatures
Fixes #452
|
Python
|
mpl-2.0
|
Osmose/normandy,mozilla/normandy,mozilla/normandy,Osmose/normandy,Osmose/normandy,mozilla/normandy,mozilla/normandy,Osmose/normandy
|
recipe-server: Add migration to reset signatures
Fixes #452
|
"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-27 00:03
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0037_auto_20170113_0627'),
]
operations = [
        # This function acts as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
<commit_before><commit_msg>recipe-server: Add migration to reset signatures
Fixes #452<commit_after>
|
"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-27 00:03
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0037_auto_20170113_0627'),
]
operations = [
        # This function acts as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
recipe-server: Add migration to reset signatures
Fixes #452"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-27 00:03
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0037_auto_20170113_0627'),
]
operations = [
        # This function acts as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
<commit_before><commit_msg>recipe-server: Add migration to reset signatures
Fixes #452<commit_after>"""
Removes signatures, so they can be easily recreated during deployment.
This migration is intended to be used between "eras" of signatures. As
the serialization format of recipes changes, the signatures need to
also change. This could be handled automatically, but it is easier to
deploy if we just remove everything in a migration, and allow the
normal processes to regenerate the signatures.
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.9.11 on 2017-01-27 00:03
from __future__ import unicode_literals
from django.db import migrations
def remove_signatures(apps, schema_editor):
Recipe = apps.get_model('recipes', 'Recipe')
Signature = apps.get_model('recipes', 'Signature')
for recipe in Recipe.objects.exclude(signature=None):
sig = recipe.signature
recipe.signature = None
recipe.save()
sig.delete()
for sig in Signature.objects.all():
sig.delete()
class Migration(migrations.Migration):
dependencies = [
('recipes', '0037_auto_20170113_0627'),
]
operations = [
        # This function acts as both a forward and reverse migration
migrations.RunPython(remove_signatures, remove_signatures),
]
|
|
c87f42579826cf236953bc955d15a9cc98c67d05
|
applications/migrations/0029_application_proposed_development_description.py
|
applications/migrations/0029_application_proposed_development_description.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0028_auto_20170329_1445'),
]
operations = [
migrations.AddField(
model_name='application',
name='proposed_development_description',
field=models.TextField(blank=True, null=True),
),
]
|
Add Migration File this time.
|
Add Migration File this time.
|
Python
|
apache-2.0
|
ropable/statdev,xzzy/statdev,xzzy/statdev,brendanc-dpaw/statdev,parksandwildlife/statdev,ropable/statdev,parksandwildlife/statdev,xzzy/statdev,parksandwildlife/statdev,brendanc-dpaw/statdev
|
Add Migration File this time.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0028_auto_20170329_1445'),
]
operations = [
migrations.AddField(
model_name='application',
name='proposed_development_description',
field=models.TextField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add Migration File this time.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0028_auto_20170329_1445'),
]
operations = [
migrations.AddField(
model_name='application',
name='proposed_development_description',
field=models.TextField(blank=True, null=True),
),
]
|
Add Migration File this time.# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0028_auto_20170329_1445'),
]
operations = [
migrations.AddField(
model_name='application',
name='proposed_development_description',
field=models.TextField(blank=True, null=True),
),
]
|
<commit_before><commit_msg>Add Migration File this time.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-30 05:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('applications', '0028_auto_20170329_1445'),
]
operations = [
migrations.AddField(
model_name='application',
name='proposed_development_description',
field=models.TextField(blank=True, null=True),
),
]
|
|
0a610a44f0d20170ba9c3e6f9ec4eafaac937be1
|
test/unit/filterer/test_pattern.py
|
test/unit/filterer/test_pattern.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
from bark.filterer.pattern import Pattern
def test_missing_key_passes():
'''Test log record with missing key passes.'''
log = Log()
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is False
def test_non_string_key_fails():
'''Test log record with non-string key fails.'''
log = Log(name=None)
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is True
def test_include_mode():
'''Test only logs with matching value pass when mode is INCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.INCLUDE)
assert filterer.filter(log) is False
log = Log(name='bark.other.one')
assert filterer.filter(log) is True
def test_exclude_mode():
'''Test only logs with matching value fail when mode is EXCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
log = Log(name='bark.other.one')
assert filterer.filter(log) is False
def test_different_key():
'''Test using key other than name.'''
log = Log()
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='A message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='Another message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is True
log = Log(message='A message')
filterer = Pattern('A message', key='message', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
|
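To make the expected semantics explicit: in INCLUDE mode a record whose key matches the pattern is kept (filter() returns False) and everything else is dropped, while EXCLUDE mode inverts that. A rough standalone sketch of this rule using only the standard library follows; it illustrates the behaviour the tests assert and is not bark's actual implementation:

import re

def should_filter(value, pattern, include=True):
    # True means "drop the record", mirroring the asserts above (sketch only).
    if not isinstance(value, str):
        return True
    matched = re.search(pattern, value) is not None
    return (not matched) if include else matched

assert should_filter('bark.test.one', r'bark\.test\..*', include=True) is False
assert should_filter('bark.other.one', r'bark\.test\..*', include=True) is True
assert should_filter('bark.test.one', r'bark\.test\..*', include=False) is True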
Add unit test for Pattern filterer.
|
Add unit test for Pattern filterer.
|
Python
|
apache-2.0
|
4degrees/mill,4degrees/sawmill
|
Add unit test for Pattern filterer.
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
from bark.filterer.pattern import Pattern
def test_missing_key_passes():
'''Test log record with missing key passes.'''
log = Log()
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is False
def test_non_string_key_fails():
'''Test log record with non-string key fails.'''
log = Log(name=None)
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is True
def test_include_mode():
'''Test only logs with matching value pass when mode is INCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.INCLUDE)
assert filterer.filter(log) is False
log = Log(name='bark.other.one')
assert filterer.filter(log) is True
def test_exclude_mode():
'''Test only logs with matching value fail when mode is EXCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
log = Log(name='bark.other.one')
assert filterer.filter(log) is False
def test_different_key():
'''Test using key other than name.'''
log = Log()
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='A message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='Another message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is True
log = Log(message='A message')
filterer = Pattern('A message', key='message', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
|
<commit_before><commit_msg>Add unit test for Pattern filterer.<commit_after>
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
from bark.filterer.pattern import Pattern
def test_missing_key_passes():
'''Test log record with missing key passes.'''
log = Log()
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is False
def test_non_string_key_fails():
'''Test log record with non-string key fails.'''
log = Log(name=None)
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is True
def test_include_mode():
'''Test only logs with matching value pass when mode is INCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.INCLUDE)
assert filterer.filter(log) is False
log = Log(name='bark.other.one')
assert filterer.filter(log) is True
def test_exclude_mode():
'''Test only logs with matching value fail when mode is EXCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
log = Log(name='bark.other.one')
assert filterer.filter(log) is False
def test_different_key():
'''Test using key other than name.'''
log = Log()
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='A message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='Another message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is True
log = Log(message='A message')
filterer = Pattern('A message', key='message', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
|
Add unit test for Pattern filterer.# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
from bark.filterer.pattern import Pattern
def test_missing_key_passes():
'''Test log record with missing key passes.'''
log = Log()
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is False
def test_non_string_key_fails():
'''Test log record with non-string key fails.'''
log = Log(name=None)
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is True
def test_include_mode():
'''Test only logs with matching value pass when mode is INCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.INCLUDE)
assert filterer.filter(log) is False
log = Log(name='bark.other.one')
assert filterer.filter(log) is True
def test_exclude_mode():
'''Test only logs with matching value fail when mode is EXCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
log = Log(name='bark.other.one')
assert filterer.filter(log) is False
def test_different_key():
'''Test using key other than name.'''
log = Log()
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='A message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='Another message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is True
log = Log(message='A message')
filterer = Pattern('A message', key='message', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
|
<commit_before><commit_msg>Add unit test for Pattern filterer.<commit_after># :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from bark.log import Log
from bark.filterer.pattern import Pattern
def test_missing_key_passes():
'''Test log record with missing key passes.'''
log = Log()
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is False
def test_non_string_key_fails():
'''Test log record with non-string key fails.'''
log = Log(name=None)
filterer = Pattern('bark\.test\..*')
assert filterer.filter(log) is True
def test_include_mode():
'''Test only logs with matching value pass when mode is INCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.INCLUDE)
assert filterer.filter(log) is False
log = Log(name='bark.other.one')
assert filterer.filter(log) is True
def test_exclude_mode():
'''Test only logs with matching value fail when mode is EXCLUDE.'''
log = Log(name='bark.test.one')
filterer = Pattern('bark\.test\..*', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
log = Log(name='bark.other.one')
assert filterer.filter(log) is False
def test_different_key():
'''Test using key other than name.'''
log = Log()
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='A message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is False
log = Log(message='Another message')
filterer = Pattern('A message', key='message')
assert filterer.filter(log) is True
log = Log(message='A message')
filterer = Pattern('A message', key='message', mode=Pattern.EXCLUDE)
assert filterer.filter(log) is True
|
|
f09ee3772d6e15a104af284ed6864005cf8450ef
|
ch11/radix_sort8.py
|
ch11/radix_sort8.py
|
"""
Listing 11.4: An eight-element radix sort
"""
from io import open
import numpy as np
import pyopencl as cl
import utility
NUM_SHORTS = 8
kernel_src = '''
__kernel void radix_sort8(__global ushort8 *global_data) {
typedef union {
ushort8 vec;
ushort array[8];
} vec_array;
uint one_count, zero_count;
uint cmp_value = 1;
vec_array mask, ones, data;
data.vec = global_data[0];
/* Rearrange elements according to bits */
for(int i=0; i<3; i++) {
zero_count = 0;
one_count = 0;
/* Iterate through each element in the input vector */
for(int j = 0; j < 8; j++) {
if(data.array[j] & cmp_value)
/* Place element in ones vector */
ones.array[one_count++] = data.array[j];
else {
/* Increment number of elements with zero */
mask.array[zero_count++] = j;
}
}
/* Create sorted vector */
for(int j = zero_count; j < 8; j++)
mask.array[j] = 8 - zero_count + j;
data.vec = shuffle2(data.vec, ones.vec, mask.vec);
cmp_value <<= 1;
}
global_data[0] = data.vec;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data and device buffers
data = np.arange(start=0, stop=NUM_SHORTS, dtype=np.uint16)
np.random.shuffle(data)
print('Input: ' + str(data))
mf = cl.mem_flags
data_buffer = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data)
# Execute kernel
# radix_sort8(__global ushort8 *global_data)
kernel = prog.radix_sort8
kernel.set_arg(0, data_buffer)
cl.enqueue_task(queue, kernel)
cl.enqueue_copy(queue, dest=data, src=data_buffer, is_blocking=True)
print('Output: ' + str(data))
|
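As a plain-Python aside, the bit-by-bit partitioning the kernel performs with shuffle2 can be sketched on the host as follows; this illustrates the algorithm only and makes no use of the OpenCL API:

import numpy as np

# Host-side sketch of a 3-bit LSD radix sort over eight small integers.
data = np.arange(8, dtype=np.uint16)
np.random.shuffle(data)
values = [int(v) for v in data]
for bit in range(3):                      # 3 bits cover the values 0..7
    cmp_value = 1 << bit
    zeros = [v for v in values if not (v & cmp_value)]
    ones = [v for v in values if v & cmp_value]
    values = zeros + ones                 # stable partition on the current bit
print(values)                             # [0, 1, 2, 3, 4, 5, 6, 7]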
Add example from listing 11.4
|
Add example from listing 11.4
|
Python
|
mit
|
oysstu/pyopencl-in-action
|
Add example from listing 11.4
|
"""
Listing 11.4: An eight-element radix sort
"""
from io import open
import numpy as np
import pyopencl as cl
import utility
NUM_SHORTS = 8
kernel_src = '''
__kernel void radix_sort8(__global ushort8 *global_data) {
typedef union {
ushort8 vec;
ushort array[8];
} vec_array;
uint one_count, zero_count;
uint cmp_value = 1;
vec_array mask, ones, data;
data.vec = global_data[0];
/* Rearrange elements according to bits */
for(int i=0; i<3; i++) {
zero_count = 0;
one_count = 0;
/* Iterate through each element in the input vector */
for(int j = 0; j < 8; j++) {
if(data.array[j] & cmp_value)
/* Place element in ones vector */
ones.array[one_count++] = data.array[j];
else {
/* Increment number of elements with zero */
mask.array[zero_count++] = j;
}
}
/* Create sorted vector */
for(int j = zero_count; j < 8; j++)
mask.array[j] = 8 - zero_count + j;
data.vec = shuffle2(data.vec, ones.vec, mask.vec);
cmp_value <<= 1;
}
global_data[0] = data.vec;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data and device buffers
data = np.arange(start=0, stop=NUM_SHORTS, dtype=np.uint16)
np.random.shuffle(data)
print('Input: ' + str(data))
mf = cl.mem_flags
data_buffer = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data)
# Execute kernel
# radix_sort8(__global ushort8 *global_data)
kernel = prog.radix_sort8
kernel.set_arg(0, data_buffer)
cl.enqueue_task(queue, kernel)
cl.enqueue_copy(queue, dest=data, src=data_buffer, is_blocking=True)
print('Output: ' + str(data))
|
<commit_before><commit_msg>Add example from listing 11.4<commit_after>
|
"""
Listing 11.4: An eight-element radix sort
"""
from io import open
import numpy as np
import pyopencl as cl
import utility
NUM_SHORTS = 8
kernel_src = '''
__kernel void radix_sort8(__global ushort8 *global_data) {
typedef union {
ushort8 vec;
ushort array[8];
} vec_array;
uint one_count, zero_count;
uint cmp_value = 1;
vec_array mask, ones, data;
data.vec = global_data[0];
/* Rearrange elements according to bits */
for(int i=0; i<3; i++) {
zero_count = 0;
one_count = 0;
/* Iterate through each element in the input vector */
for(int j = 0; j < 8; j++) {
if(data.array[j] & cmp_value)
/* Place element in ones vector */
ones.array[one_count++] = data.array[j];
else {
/* Increment number of elements with zero */
mask.array[zero_count++] = j;
}
}
/* Create sorted vector */
for(int j = zero_count; j < 8; j++)
mask.array[j] = 8 - zero_count + j;
data.vec = shuffle2(data.vec, ones.vec, mask.vec);
cmp_value <<= 1;
}
global_data[0] = data.vec;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data and device buffers
data = np.arange(start=0, stop=NUM_SHORTS, dtype=np.uint16)
np.random.shuffle(data)
print('Input: ' + str(data))
mf = cl.mem_flags
data_buffer = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data)
# Execute kernel
# radix_sort8(__global ushort8 *global_data)
kernel = prog.radix_sort8
kernel.set_arg(0, data_buffer)
cl.enqueue_task(queue, kernel)
cl.enqueue_copy(queue, dest=data, src=data_buffer, is_blocking=True)
print('Output: ' + str(data))
|
Add example from listing 11.4"""
Listing 11.4: An eight-element radix sort
"""
from io import open
import numpy as np
import pyopencl as cl
import utility
NUM_SHORTS = 8
kernel_src = '''
__kernel void radix_sort8(__global ushort8 *global_data) {
typedef union {
ushort8 vec;
ushort array[8];
} vec_array;
uint one_count, zero_count;
uint cmp_value = 1;
vec_array mask, ones, data;
data.vec = global_data[0];
/* Rearrange elements according to bits */
for(int i=0; i<3; i++) {
zero_count = 0;
one_count = 0;
/* Iterate through each element in the input vector */
for(int j = 0; j < 8; j++) {
if(data.array[j] & cmp_value)
/* Place element in ones vector */
ones.array[one_count++] = data.array[j];
else {
/* Increment number of elements with zero */
mask.array[zero_count++] = j;
}
}
/* Create sorted vector */
for(int j = zero_count; j < 8; j++)
mask.array[j] = 8 - zero_count + j;
data.vec = shuffle2(data.vec, ones.vec, mask.vec);
cmp_value <<= 1;
}
global_data[0] = data.vec;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data and device buffers
data = np.arange(start=0, stop=NUM_SHORTS, dtype=np.uint16)
np.random.shuffle(data)
print('Input: ' + str(data))
mf = cl.mem_flags
data_buffer = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data)
# Execute kernel
# radix_sort8(__global ushort8 *global_data)
kernel = prog.radix_sort8
kernel.set_arg(0, data_buffer)
cl.enqueue_task(queue, kernel)
cl.enqueue_copy(queue, dest=data, src=data_buffer, is_blocking=True)
print('Output: ' + str(data))
|
<commit_before><commit_msg>Add example from listing 11.4<commit_after>"""
Listing 11.4: An eight-element radix sort
"""
from io import open
import numpy as np
import pyopencl as cl
import utility
NUM_SHORTS = 8
kernel_src = '''
__kernel void radix_sort8(__global ushort8 *global_data) {
typedef union {
ushort8 vec;
ushort array[8];
} vec_array;
uint one_count, zero_count;
uint cmp_value = 1;
vec_array mask, ones, data;
data.vec = global_data[0];
/* Rearrange elements according to bits */
for(int i=0; i<3; i++) {
zero_count = 0;
one_count = 0;
/* Iterate through each element in the input vector */
for(int j = 0; j < 8; j++) {
if(data.array[j] & cmp_value)
/* Place element in ones vector */
ones.array[one_count++] = data.array[j];
else {
/* Increment number of elements with zero */
mask.array[zero_count++] = j;
}
}
/* Create sorted vector */
for(int j = zero_count; j < 8; j++)
mask.array[j] = 8 - zero_count + j;
data.vec = shuffle2(data.vec, ones.vec, mask.vec);
cmp_value <<= 1;
}
global_data[0] = data.vec;
}
'''
# Get device and context, create command queue and program
dev = utility.get_default_device()
context = cl.Context(devices=[dev], properties=None, dev_type=None, cache_dir=None)
queue = cl.CommandQueue(context, dev, properties=None)
# Build program in the specified context using the kernel source code
prog = cl.Program(context, kernel_src)
try:
prog.build(options=['-Werror'], devices=[dev], cache_dir=None)
except:
print('Build log:')
print(prog.get_build_info(dev, cl.program_build_info.LOG))
raise
# Data and device buffers
data = np.arange(start=0, stop=NUM_SHORTS, dtype=np.uint16)
np.random.shuffle(data)
print('Input: ' + str(data))
mf = cl.mem_flags
data_buffer = cl.Buffer(context, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=data)
# Execute kernel
# radix_sort8(__global ushort8 *global_data)
kernel = prog.radix_sort8
kernel.set_arg(0, data_buffer)
cl.enqueue_task(queue, kernel)
cl.enqueue_copy(queue, dest=data, src=data_buffer, is_blocking=True)
print('Output: ' + str(data))
|
|
e7faa99d9816745338ada38d1a7d974bf3a739ae
|
s5v3.py
|
s5v3.py
|
from s5v2 import *
from prettytable import PrettyTable
def my_table(): # no arguments are passed in, which seems a bit weird. We're hard-coding a function that only does one thing.
    x = PrettyTable(['Style', 'Average Price']) # set up a new pretty table and give it two column headers
    x.add_row(['Print', pretty_average(print_ties)]) # add_row is a built-in function of prettytable. We're going to add a row and assign it the values of 'Print' and the average price of all print ties
x.add_row(['Solid', pretty_average(solid_ties)])
x.add_row(['Paisley', pretty_average(paisley_ties)])
x.add_row(['Striped', pretty_average(striped_ties)])
x.add_row(['Gucci', pretty_average(gucci_ties)])
print(x) # print the table
def pretty_average(my_number):
    pretty_avg = "${:03.2f}".format(find_average(my_number)) # format the average using the format specification mini-language: '03' zero-pads the number to a minimum width of 3 characters, and '.2f' displays it as a fixed-point number with exactly 2 digits after the decimal point. Note that format() returns a string, so pretty_avg is a string rather than a float.
return pretty_avg
# my_table() # run the function
|
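A quick standalone check of the format specification used in pretty_average (standard library only; the prices are arbitrary examples):

# '{:03.2f}' = fixed-point with 2 digits after the decimal point, zero-padded
# to a minimum total width of 3 (the width part rarely matters here, since
# '.2f' already yields at least four characters).
print("${:03.2f}".format(7.5))        # $7.50
print("${:03.2f}".format(12.3456))    # $12.35 (rounded to two decimals)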
Create pretty table of tie averages + function for pretty averages
|
Create pretty table of tie averages + function for pretty averages
|
Python
|
mit
|
alexmilesyounger/ds_basics
|
Create pretty table of tie averages + function for pretty averages
|
from s5v2 import *
from prettytable import PrettyTable
def my_table(): # no arguments are passed in, which seems a bit weird. We're hard-coding a function that only does one thing.
    x = PrettyTable(['Style', 'Average Price']) # set up a new pretty table and give it two column headers
    x.add_row(['Print', pretty_average(print_ties)]) # add_row is a built-in function of prettytable. We're going to add a row and assign it the values of 'Print' and the average price of all print ties
x.add_row(['Solid', pretty_average(solid_ties)])
x.add_row(['Paisley', pretty_average(paisley_ties)])
x.add_row(['Striped', pretty_average(striped_ties)])
x.add_row(['Gucci', pretty_average(gucci_ties)])
print(x) # print the table
def pretty_average(my_number):
    pretty_avg = "${:03.2f}".format(find_average(my_number)) # format the average using the format specification mini-language: '03' zero-pads the number to a minimum width of 3 characters, and '.2f' displays it as a fixed-point number with exactly 2 digits after the decimal point. Note that format() returns a string, so pretty_avg is a string rather than a float.
return pretty_avg
# my_table() # run the function
|
<commit_before><commit_msg>Create pretty table of tie averages + function for pretty averages<commit_after>
|
from s5v2 import *
from prettytable import PrettyTable
def my_table(): # no arguments are passed in, which seems a bit weird. We're hard-coding a function that only does one thing.
    x = PrettyTable(['Style', 'Average Price']) # set up a new pretty table and give it two column headers
    x.add_row(['Print', pretty_average(print_ties)]) # add_row is a built-in function of prettytable. We're going to add a row and assign it the values of 'Print' and the average price of all print ties
x.add_row(['Solid', pretty_average(solid_ties)])
x.add_row(['Paisley', pretty_average(paisley_ties)])
x.add_row(['Striped', pretty_average(striped_ties)])
x.add_row(['Gucci', pretty_average(gucci_ties)])
print(x) # print the table
def pretty_average(my_number):
    pretty_avg = "${:03.2f}".format(find_average(my_number)) # format the average using the format specification mini-language: '03' zero-pads the number to a minimum width of 3 characters, and '.2f' displays it as a fixed-point number with exactly 2 digits after the decimal point. Note that format() returns a string, so pretty_avg is a string rather than a float.
return pretty_avg
# my_table() # run the function
|
Create pretty table of tie averages + function for pretty averagesfrom s5v2 import *
from prettytable import PrettyTable
def my_table(): # no arguments are passed in, which seems a bit weird. We're hard-coding a function that only does one thing.
    x = PrettyTable(['Style', 'Average Price']) # set up a new pretty table and give it two column headers
    x.add_row(['Print', pretty_average(print_ties)]) # add_row is a built-in function of prettytable. We're going to add a row and assign it the values of 'Print' and the average price of all print ties
x.add_row(['Solid', pretty_average(solid_ties)])
x.add_row(['Paisley', pretty_average(paisley_ties)])
x.add_row(['Striped', pretty_average(striped_ties)])
x.add_row(['Gucci', pretty_average(gucci_ties)])
print(x) # print the table
def pretty_average(my_number):
    pretty_avg = "${:03.2f}".format(find_average(my_number)) # format the average using the format specification mini-language: '03' zero-pads the number to a minimum width of 3 characters, and '.2f' displays it as a fixed-point number with exactly 2 digits after the decimal point. Note that format() returns a string, so pretty_avg is a string rather than a float.
return pretty_avg
# my_table() # run the function
|
<commit_before><commit_msg>Create pretty table of tie averages + function for pretty averages<commit_after>from s5v2 import *
from prettytable import PrettyTable
def my_table(): # no arguments are passed in, which seems a bit weird. We're hard-coding a function that only does one thing.
    x = PrettyTable(['Style', 'Average Price']) # set up a new pretty table and give it two column headers
    x.add_row(['Print', pretty_average(print_ties)]) # add_row is a built-in function of prettytable. We're going to add a row and assign it the values of 'Print' and the average price of all print ties
x.add_row(['Solid', pretty_average(solid_ties)])
x.add_row(['Paisley', pretty_average(paisley_ties)])
x.add_row(['Striped', pretty_average(striped_ties)])
x.add_row(['Gucci', pretty_average(gucci_ties)])
print(x) # print the table
def pretty_average(my_number):
    pretty_avg = "${:03.2f}".format(find_average(my_number)) # format the average using the format specification mini-language: '03' zero-pads the number to a minimum width of 3 characters, and '.2f' displays it as a fixed-point number with exactly 2 digits after the decimal point. Note that format() returns a string, so pretty_avg is a string rather than a float.
return pretty_avg
# my_table() # run the function
|
|
fe5100f5d13ed7461619c8beff791d40306f83ff
|
addons/document/migrations/8.0.2.1/pre-migration.py
|
addons/document/migrations/8.0.2.1/pre-migration.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# Drop view that inhibits changing field types. It will be recreated BTW
cr.execute('drop view if exists report_document_user cascade')
|
Remove annoying SQL view that prevents some operations
|
[IMP] document: Remove annoying SQL view that prevents some operations
|
Python
|
agpl-3.0
|
blaggacao/OpenUpgrade,kirca/OpenUpgrade,csrocha/OpenUpgrade,bwrsandman/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,bwrsandman/OpenUpgrade,pedrobaeza/OpenUpgrade,sebalix/OpenUpgrade,mvaled/OpenUpgrade,pedrobaeza/OpenUpgrade,OpenUpgrade/OpenUpgrade,sebalix/OpenUpgrade,OpenUpgrade/OpenUpgrade,mvaled/OpenUpgrade,blaggacao/OpenUpgrade,mvaled/OpenUpgrade,sebalix/OpenUpgrade,kirca/OpenUpgrade,grap/OpenUpgrade,csrocha/OpenUpgrade,blaggacao/OpenUpgrade,csrocha/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,hifly/OpenUpgrade,Endika/OpenUpgrade,OpenUpgrade/OpenUpgrade,grap/OpenUpgrade,blaggacao/OpenUpgrade,OpenUpgrade/OpenUpgrade,OpenUpgrade/OpenUpgrade,pedrobaeza/OpenUpgrade,csrocha/OpenUpgrade,pedrobaeza/OpenUpgrade,kirca/OpenUpgrade,damdam-s/OpenUpgrade,hifly/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,damdam-s/OpenUpgrade,sebalix/OpenUpgrade,0k/OpenUpgrade,grap/OpenUpgrade,grap/OpenUpgrade,0k/OpenUpgrade,damdam-s/OpenUpgrade,kirca/OpenUpgrade,pedrobaeza/OpenUpgrade,pedrobaeza/OpenUpgrade,damdam-s/OpenUpgrade,blaggacao/OpenUpgrade,hifly/OpenUpgrade,OpenUpgrade/OpenUpgrade,hifly/OpenUpgrade,0k/OpenUpgrade,OpenUpgrade/OpenUpgrade,csrocha/OpenUpgrade,sebalix/OpenUpgrade,csrocha/OpenUpgrade,pedrobaeza/OpenUpgrade,Endika/OpenUpgrade,grap/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,Endika/OpenUpgrade,bwrsandman/OpenUpgrade,csrocha/OpenUpgrade,sebalix/OpenUpgrade,hifly/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,blaggacao/OpenUpgrade,mvaled/OpenUpgrade,sebalix/OpenUpgrade,Endika/OpenUpgrade,blaggacao/OpenUpgrade,damdam-s/OpenUpgrade,damdam-s/OpenUpgrade,hifly/OpenUpgrade,0k/OpenUpgrade,Endika/OpenUpgrade,kirca/OpenUpgrade,0k/OpenUpgrade,bwrsandman/OpenUpgrade,OpenUpgrade-dev/OpenUpgrade,0k/OpenUpgrade,damdam-s/OpenUpgrade,mvaled/OpenUpgrade,mvaled/OpenUpgrade,hifly/OpenUpgrade,bwrsandman/OpenUpgrade,kirca/OpenUpgrade,mvaled/OpenUpgrade,bwrsandman/OpenUpgrade,bwrsandman/OpenUpgrade,kirca/OpenUpgrade
|
[IMP] document: Remove annoying SQL view that prevents some operations
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# Drop view that inhibits changing field types. It will be recreated BTW
cr.execute('drop view if exists report_document_user cascade')
|
<commit_before><commit_msg>[IMP] document: Remove annoying SQL view that prevents some operations<commit_after>
|
# -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# Drop view that inhibits changing field types. It will be recreated BTW
cr.execute('drop view if exists report_document_user cascade')
|
[IMP] document: Remove annoying SQL view that prevents some operations # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# Drop view that inhibits changing field types. It will be recreated BTW
cr.execute('drop view if exists report_document_user cascade')
|
<commit_before><commit_msg>[IMP] document: Remove annoying SQL view that prevents some operations<commit_after> # -*- coding: utf-8 -*-
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.openupgrade import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
# Drop view that inhibits changing field types. It will be recreated BTW
cr.execute('drop view if exists report_document_user cascade')
|
|
935115215259ce011f3f0b46781655119413e720
|
pelican/rstdirectives.py
|
pelican/rstdirectives.py
|
# -*- coding: utf-8 -*-
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
INLINESTYLES = False
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
    """ Source code syntax highlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments)
|
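One way to exercise the directive end to end is to render a small reStructuredText string through docutils after importing the module; note that the pelican.rstdirectives import path is an assumption based on the file location, and importing it is what registers 'code-block':

from docutils.core import publish_parts
import pelican.rstdirectives  # noqa: F401  (assumed import path; the import registers the directive)

source = """\
.. code-block:: python

   print("hello")
"""
html = publish_parts(source, writer_name='html')['body']
print(html)  # the snippet comes back as Pygments-highlighted HTML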
Add a directives file for pygments support.
|
Add a directives file for pygments support.
|
Python
|
agpl-3.0
|
btnpushnmunky/pelican,koobs/pelican,garbas/pelican,JeremyMorgan/pelican,jo-tham/pelican,lazycoder-ru/pelican,crmackay/pelican,iurisilvio/pelican,btnpushnmunky/pelican,TC01/pelican,douglaskastle/pelican,Summonee/pelican,51itclub/pelican,number5/pelican,koobs/pelican,levanhien8/pelican,florianjacob/pelican,zackw/pelican,abrahamvarricatt/pelican,farseerfc/pelican,garbas/pelican,Rogdham/pelican,avaris/pelican,lucasplus/pelican,sunzhongwei/pelican,catdog2/pelican,Polyconseil/pelican,ls2uper/pelican,51itclub/pelican,levanhien8/pelican,goerz/pelican,lucasplus/pelican,Scheirle/pelican,UdeskDeveloper/pelican,deanishe/pelican,sunzhongwei/pelican,joetboole/pelican,Summonee/pelican,janaurka/git-debug-presentiation,kennethlyn/pelican,florianjacob/pelican,kennethlyn/pelican,janaurka/git-debug-presentiation,Scheirle/pelican,crmackay/pelican,janaurka/git-debug-presentiation,gymglish/pelican,HyperGroups/pelican,justinmayer/pelican,HyperGroups/pelican,gymglish/pelican,rbarraud/pelican,deanishe/pelican,eevee/pelican,liyonghelpme/myBlog,treyhunner/pelican,eevee/pelican,goerz/pelican,Polyconseil/pelican,zackw/pelican,rbarraud/pelican,alexras/pelican,catdog2/pelican,GiovanniMoretti/pelican,11craft/pelican,jvehent/pelican,simonjj/pelican,catdog2/pelican,TC01/pelican,ls2uper/pelican,number5/pelican,treyhunner/pelican,getpelican/pelican,joetboole/pelican,jvehent/pelican,iurisilvio/pelican,alexras/pelican,liyonghelpme/myBlog,karlcow/pelican,douglaskastle/pelican,deved69/pelican-1,jimperio/pelican,TC01/pelican,koobs/pelican,51itclub/pelican,douglaskastle/pelican,joetboole/pelican,lazycoder-ru/pelican,0xMF/pelican,liyonghelpme/myBlog,UdeskDeveloper/pelican,karlcow/pelican,JeremyMorgan/pelican,simonjj/pelican,iurisilvio/pelican,iKevinY/pelican,fbs/pelican,11craft/pelican,Rogdham/pelican,jo-tham/pelican,alexras/pelican,ls2uper/pelican,Summonee/pelican,11craft/pelican,GiovanniMoretti/pelican,rbarraud/pelican,kernc/pelican,HyperGroups/pelican,JeremyMorgan/pelican,gymglish/pelican,simonjj/pelican,Scheirle/pelican,goerz/pelican,talha131/pelican,ehashman/pelican,karlcow/pelican,btnpushnmunky/pelican,deved69/pelican-1,ionelmc/pelican,eevee/pelican,crmackay/pelican,liyonghelpme/myBlog,kernc/pelican,farseerfc/pelican,number5/pelican,UdeskDeveloper/pelican,avaris/pelican,treyhunner/pelican,levanhien8/pelican,getpelican/pelican,florianjacob/pelican,ingwinlu/pelican,ehashman/pelican,kennethlyn/pelican,liyonghelpme/myBlog,iKevinY/pelican,talha131/pelican,ehashman/pelican,lazycoder-ru/pelican,ingwinlu/pelican,Rogdham/pelican,abrahamvarricatt/pelican,garbas/pelican,arty-name/pelican,jvehent/pelican,kernc/pelican,abrahamvarricatt/pelican,GiovanniMoretti/pelican,lucasplus/pelican,zackw/pelican,jimperio/pelican,Natim/pelican,deved69/pelican-1,deanishe/pelican,sunzhongwei/pelican,jimperio/pelican,sunzhongwei/pelican
|
Add a directives file for pygments support.
|
# -*- coding: utf-8 -*-
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
INLINESTYLES = False
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
    """ Source code syntax highlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments)
|
<commit_before><commit_msg>Add a directives file for pygments support.<commit_after>
|
# -*- coding: utf-8 -*-
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
INLINESTYLES = False
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
    """ Source code syntax highlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments)
|
Add a directives file for pygments support.# -*- coding: utf-8 -*-
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
INLINESTYLES = False
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
    """ Source code syntax highlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments)
|
<commit_before><commit_msg>Add a directives file for pygments support.<commit_after># -*- coding: utf-8 -*-
from docutils import nodes
from docutils.parsers.rst import directives, Directive
from pygments.formatters import HtmlFormatter
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
INLINESTYLES = False
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES)
VARIANTS = {
'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
class Pygments(Directive):
""" Source code syntax hightlighting.
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
try:
lexer = get_lexer_by_name(self.arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = self.options and VARIANTS[self.options.keys()[0]] \
or DEFAULT
parsed = highlight(u'\n'.join(self.content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
directives.register_directive('code-block', Pygments)
|
|
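A minimal standalone sketch (not part of the commit record above) of the pygments call that the Pygments directive wraps; it assumes only that the pygments package is installed:

from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name

# Render a small snippet with inline styles, mirroring the DEFAULT formatter above.
source = "print('hello')"
lexer = get_lexer_by_name('python')
print(highlight(source, lexer, HtmlFormatter(noclasses=True)))
# -> <div class="highlight"><pre>...</pre></div>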
312bbdefa256413b4891cd0e13e6ccf3c614541f
|
util.py
|
util.py
|
"""
util
===
Common utilities across the Crank system.
"""
from datetime import datetime
DATETIME_FORMAT = '%Y %b %d @ %H%M'
def get_timestamp_header():
return datetime.now().strftime(DATETIME_FORMAT)
|
Add datetime header to Crank
|
Add datetime header to Crank
|
Python
|
mit
|
jad-b/Crank
|
Add datetime header to Crank
|
"""
util
===
Common utilities across the Crank system.
"""
from datetime import datetime
DATETIME_FORMAT = '%Y %b %d @ %H%M'
def get_timestamp_header():
return datetime.now().strftime(DATETIME_FORMAT)
|
<commit_before><commit_msg>Add datetime header to Crank<commit_after>
|
"""
util
===
Common utilities across the Crank system.
"""
from datetime import datetime
DATETIME_FORMAT = '%Y %b %d @ %H%M'
def get_timestamp_header():
return datetime.now().strftime(DATETIME_FORMAT)
|
Add datetime header to Crank"""
util
===
Common utilities across the Crank system.
"""
from datetime import datetime
DATETIME_FORMAT = '%Y %b %d @ %H%M'
def get_timestamp_header():
return datetime.now().strftime(DATETIME_FORMAT)
|
<commit_before><commit_msg>Add datetime header to Crank<commit_after>"""
util
===
Common utilities across the Crank system.
"""
from datetime import datetime
DATETIME_FORMAT = '%Y %b %d @ %H%M'
def get_timestamp_header():
return datetime.now().strftime(DATETIME_FORMAT)
|
|
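A quick reproducible check (not part of the commit record above) of the header format used by get_timestamp_header, with a fixed datetime so the expected output is known in an English locale:

from datetime import datetime

# Same format string as DATETIME_FORMAT in util.py
print(datetime(2016, 7, 4, 21, 30).strftime('%Y %b %d @ %H%M'))  # 2016 Jul 04 @ 2130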
10c83fbc01dee9d95290466338f262abffc12a3e
|
samples/create_folder_in_datacenter.py
|
samples/create_folder_in_datacenter.py
|
#!/usr/bin/env python
"""
Written by Chinmaya Bharadwaj
Github: https://github.com/chinmayb/
Email: acbharadwaj@gmail.com
Create a folder in a datacenter
"""
from __future__ import print_function
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import argparse
import atexit
import getpass
def GetArgs():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
        description='Process args for creating a folder in a datacenter')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--datacenter', required=True,
help='name of the datacenter'),
parser.add_argument('-f', '--folder', required=True,
help='name of the folder')
args = parser.parse_args()
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def create_folder(content, host_folder, folder_name):
host_folder.CreateFolder(folder_name)
def main():
"""
    Simple command-line program for creating a folder in a datacenter.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
dc = get_obj(content, [vim.Datacenter], args.datacenter)
if (get_obj(content, [vim.Folder], args.folder)):
print("Folder '%s' already exists" % args.folder)
return 0
create_folder(content, dc.hostFolder, args.folder)
print("Successfully created the folder '%s'" % args.folder)
return 0
# Start program
if __name__ == "__main__":
main()
|
Create a folder in a datacenter if not exists
|
Example: Create a folder in a datacenter if not exists
|
Python
|
apache-2.0
|
vmware/pyvmomi-community-samples,pfitzer/pyvmomi-community-samples,jm66/pyvmomi-community-samples,ddcrjlalumiere/pyvmomi-community-samples,prziborowski/pyvmomi-community-samples,pathcl/pyvmomi-community-samples
|
Example: Create a folder in a datacenter if not exists
|
#!/usr/bin/env python
"""
Written by Chinmaya Bharadwaj
Github: https://github.com/chinmayb/
Email: acbharadwaj@gmail.com
Create a folder in a datacenter
"""
from __future__ import print_function
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import argparse
import atexit
import getpass
def GetArgs():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
        description='Process args for creating a folder in a datacenter')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--datacenter', required=True,
help='name of the datacenter'),
parser.add_argument('-f', '--folder', required=True,
help='name of the folder')
args = parser.parse_args()
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def create_folder(content, host_folder, folder_name):
host_folder.CreateFolder(folder_name)
def main():
"""
    Simple command-line program for creating a folder in a datacenter.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
dc = get_obj(content, [vim.Datacenter], args.datacenter)
if (get_obj(content, [vim.Folder], args.folder)):
print("Folder '%s' already exists" % args.folder)
return 0
create_folder(content, dc.hostFolder, args.folder)
print("Successfully created the folder '%s'" % args.folder)
return 0
# Start program
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Example: Create a folder in a datacenter if not exists<commit_after>
|
#!/usr/bin/env python
"""
Written by Chinmaya Bharadwaj
Github: https://github.com/chinmayb/
Email: acbharadwaj@gmail.com
Create a folder in a datacenter
"""
from __future__ import print_function
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import argparse
import atexit
import getpass
def GetArgs():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
        description='Process args for creating a folder in a datacenter')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--datacenter', required=True,
help='name of the datacenter'),
parser.add_argument('-f', '--folder', required=True,
help='name of the folder')
args = parser.parse_args()
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def create_folder(content, host_folder, folder_name):
host_folder.CreateFolder(folder_name)
def main():
"""
    Simple command-line program for creating a folder in a datacenter.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
dc = get_obj(content, [vim.Datacenter], args.datacenter)
if (get_obj(content, [vim.Folder], args.folder)):
print("Folder '%s' already exists" % args.folder)
return 0
create_folder(content, dc.hostFolder, args.folder)
print("Successfully created the folder '%s'" % args.folder)
return 0
# Start program
if __name__ == "__main__":
main()
|
Example: Create a folder in a datacenter if not exists#!/usr/bin/env python
"""
Written by Chinmaya Bharadwaj
Github: https://github.com/chinmayb/
Email: acbharadwaj@gmail.com
Create a folder in a datacenter
"""
from __future__ import print_function
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import argparse
import atexit
import getpass
def GetArgs():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
        description='Process args for creating a folder in a datacenter')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--datacenter', required=True,
help='name of the datacenter'),
parser.add_argument('-f', '--folder', required=True,
help='name of the folder')
args = parser.parse_args()
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def create_folder(content, host_folder, folder_name):
host_folder.CreateFolder(folder_name)
def main():
"""
    Simple command-line program for creating a folder in a datacenter.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
dc = get_obj(content, [vim.Datacenter], args.datacenter)
if (get_obj(content, [vim.Folder], args.folder)):
print("Folder '%s' already exists" % args.folder)
return 0
create_folder(content, dc.hostFolder, args.folder)
print("Successfully created the folder '%s'" % args.folder)
return 0
# Start program
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Example: Create a folder in a datacenter if not exists<commit_after>#!/usr/bin/env python
"""
Written by Chinmaya Bharadwaj
Github: https://github.com/chinmayb/
Email: acbharadwaj@gmail.com
Create a folder in a datacenter
"""
from __future__ import print_function
from pyVmomi import vim
from pyVim.connect import SmartConnect, Disconnect
import argparse
import atexit
import getpass
def GetArgs():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
        description='Process args for creating a folder in a datacenter')
parser.add_argument('-s', '--host', required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port', type=int, default=443, action='store',
help='Port to connect on')
parser.add_argument('-u', '--user', required=True, action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password', required=False, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--datacenter', required=True,
help='name of the datacenter'),
parser.add_argument('-f', '--folder', required=True,
help='name of the folder')
args = parser.parse_args()
return args
def get_obj(content, vimtype, name):
obj = None
container = content.viewManager.CreateContainerView(
content.rootFolder, vimtype, True)
for c in container.view:
if c.name == name:
obj = c
break
return obj
def create_folder(content, host_folder, folder_name):
host_folder.CreateFolder(folder_name)
def main():
"""
    Simple command-line program for creating a folder in a datacenter.
"""
args = GetArgs()
if args.password:
password = args.password
else:
password = getpass.getpass(prompt='Enter password for host %s and '
'user %s: ' % (args.host, args.user))
si = SmartConnect(host=args.host,
user=args.user,
pwd=password,
port=int(args.port))
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
dc = get_obj(content, [vim.Datacenter], args.datacenter)
if (get_obj(content, [vim.Folder], args.folder)):
print("Folder '%s' already exists" % args.folder)
return 0
create_folder(content, dc.hostFolder, args.folder)
print("Successfully created the folder '%s'" % args.folder)
return 0
# Start program
if __name__ == "__main__":
main()
|
|
f78a485000ef8dacb584db1f03b7157b79bd5fe7
|
d1_libclient_python/src/d1_client/tests/mock_get.py
|
d1_libclient_python/src/d1_client/tests/mock_get.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock MNRead.get() → OctetStream
GET /object/{id}
Will always return the same bytes for a given PID.
"""
# Stdlib
import datetime
import hashlib
import random
import re
import urlparse
# 3rd party
import responses # pip install responses
import requests
# D1
import d1_common.types.dataoneTypes_v2_0 as v2
# App
import d1_common.const
import d1_common.url
# Config
NUM_SCIOBJ_BYTES = 1024
GET_ENDPOINT_RX = r'v([123])/object/(.*)'
def init(base_url):
endpoint_rx_str = r'^' + d1_common.url.joinPathElements(base_url, GET_ENDPOINT_RX)
endpoint_rx = re.compile(endpoint_rx_str)
responses.add_callback(
responses.GET,
endpoint_rx,
callback=_request_callback,
content_type=d1_common.const.CONTENT_TYPE_OCTETSTREAM,
)
def _request_callback(request):
major_version, pid = _parse_url(request.url)
try:
status_int = int(pid)
except ValueError:
body_str = _generate_sciobj_bytes(pid, NUM_SCIOBJ_BYTES)
return 200, {}, body_str
else:
body_str = 'Return code: {}'.format(status_int)
return status_int, {}, body_str
def _parse_url(url):
url_obj = urlparse.urlparse(url)
url = url_obj._replace(query=None).geturl()
m = re.search(GET_ENDPOINT_RX, url)
assert m, 'Should always match since we\'re using the same regex as in add_callback()'
return m.group(1), m.group(2)
def _generate_sciobj_bytes(pid, n_count):
pid_hash_int = int(hashlib.md5(pid).hexdigest(), 16)
random.seed(pid_hash_int)
return bytearray(random.getrandbits(8) for _ in xrange(n_count))
|
Add module for mocking node.get() with Responses
|
Add module for mocking node.get() with Responses
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Add module for mocking node.get() with Responses
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock MNRead.get() → OctetStream
GET /object/{id}
Will always return the same bytes for a given PID.
"""
# Stdlib
import datetime
import hashlib
import random
import re
import urlparse
# 3rd party
import responses # pip install responses
import requests
# D1
import d1_common.types.dataoneTypes_v2_0 as v2
# App
import d1_common.const
import d1_common.url
# Config
NUM_SCIOBJ_BYTES = 1024
GET_ENDPOINT_RX = r'v([123])/object/(.*)'
def init(base_url):
endpoint_rx_str = r'^' + d1_common.url.joinPathElements(base_url, GET_ENDPOINT_RX)
endpoint_rx = re.compile(endpoint_rx_str)
responses.add_callback(
responses.GET,
endpoint_rx,
callback=_request_callback,
content_type=d1_common.const.CONTENT_TYPE_OCTETSTREAM,
)
def _request_callback(request):
major_version, pid = _parse_url(request.url)
try:
status_int = int(pid)
except ValueError:
body_str = _generate_sciobj_bytes(pid, NUM_SCIOBJ_BYTES)
return 200, {}, body_str
else:
body_str = 'Return code: {}'.format(status_int)
return status_int, {}, body_str
def _parse_url(url):
url_obj = urlparse.urlparse(url)
url = url_obj._replace(query=None).geturl()
m = re.search(GET_ENDPOINT_RX, url)
assert m, 'Should always match since we\'re using the same regex as in add_callback()'
return m.group(1), m.group(2)
def _generate_sciobj_bytes(pid, n_count):
pid_hash_int = int(hashlib.md5(pid).hexdigest(), 16)
random.seed(pid_hash_int)
return bytearray(random.getrandbits(8) for _ in xrange(n_count))
|
<commit_before><commit_msg>Add module for mocking node.get() with Responses<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock MNRead.get() → OctetStream
GET /object/{id}
Will always return the same bytes for a given PID.
"""
# Stdlib
import datetime
import hashlib
import random
import re
import urlparse
# 3rd party
import responses # pip install responses
import requests
# D1
import d1_common.types.dataoneTypes_v2_0 as v2
# App
import d1_common.const
import d1_common.url
# Config
NUM_SCIOBJ_BYTES = 1024
GET_ENDPOINT_RX = r'v([123])/object/(.*)'
def init(base_url):
endpoint_rx_str = r'^' + d1_common.url.joinPathElements(base_url, GET_ENDPOINT_RX)
endpoint_rx = re.compile(endpoint_rx_str)
responses.add_callback(
responses.GET,
endpoint_rx,
callback=_request_callback,
content_type=d1_common.const.CONTENT_TYPE_OCTETSTREAM,
)
def _request_callback(request):
major_version, pid = _parse_url(request.url)
try:
status_int = int(pid)
except ValueError:
body_str = _generate_sciobj_bytes(pid, NUM_SCIOBJ_BYTES)
return 200, {}, body_str
else:
body_str = 'Return code: {}'.format(status_int)
return status_int, {}, body_str
def _parse_url(url):
url_obj = urlparse.urlparse(url)
url = url_obj._replace(query=None).geturl()
m = re.search(GET_ENDPOINT_RX, url)
assert m, 'Should always match since we\'re using the same regex as in add_callback()'
return m.group(1), m.group(2)
def _generate_sciobj_bytes(pid, n_count):
pid_hash_int = int(hashlib.md5(pid).hexdigest(), 16)
random.seed(pid_hash_int)
return bytearray(random.getrandbits(8) for _ in xrange(n_count))
|
Add module for mocking node.get() with Responses#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock MNRead.get() → OctetStream
GET /object/{id}
Will always return the same bytes for a given PID.
"""
# Stdlib
import datetime
import hashlib
import random
import re
import urlparse
# 3rd party
import responses # pip install responses
import requests
# D1
import d1_common.types.dataoneTypes_v2_0 as v2
# App
import d1_common.const
import d1_common.url
# Config
NUM_SCIOBJ_BYTES = 1024
GET_ENDPOINT_RX = r'v([123])/object/(.*)'
def init(base_url):
endpoint_rx_str = r'^' + d1_common.url.joinPathElements(base_url, GET_ENDPOINT_RX)
endpoint_rx = re.compile(endpoint_rx_str)
responses.add_callback(
responses.GET,
endpoint_rx,
callback=_request_callback,
content_type=d1_common.const.CONTENT_TYPE_OCTETSTREAM,
)
def _request_callback(request):
major_version, pid = _parse_url(request.url)
try:
status_int = int(pid)
except ValueError:
body_str = _generate_sciobj_bytes(pid, NUM_SCIOBJ_BYTES)
return 200, {}, body_str
else:
body_str = 'Return code: {}'.format(status_int)
return status_int, {}, body_str
def _parse_url(url):
url_obj = urlparse.urlparse(url)
url = url_obj._replace(query=None).geturl()
m = re.search(GET_ENDPOINT_RX, url)
assert m, 'Should always match since we\'re using the same regex as in add_callback()'
return m.group(1), m.group(2)
def _generate_sciobj_bytes(pid, n_count):
pid_hash_int = int(hashlib.md5(pid).hexdigest(), 16)
random.seed(pid_hash_int)
return bytearray(random.getrandbits(8) for _ in xrange(n_count))
|
<commit_before><commit_msg>Add module for mocking node.get() with Responses<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock MNRead.get() → OctetStream
GET /object/{id}
Will always return the same bytes for a given PID.
"""
# Stdlib
import datetime
import hashlib
import random
import re
import urlparse
# 3rd party
import responses # pip install responses
import requests
# D1
import d1_common.types.dataoneTypes_v2_0 as v2
# App
import d1_common.const
import d1_common.url
# Config
NUM_SCIOBJ_BYTES = 1024
GET_ENDPOINT_RX = r'v([123])/object/(.*)'
def init(base_url):
endpoint_rx_str = r'^' + d1_common.url.joinPathElements(base_url, GET_ENDPOINT_RX)
endpoint_rx = re.compile(endpoint_rx_str)
responses.add_callback(
responses.GET,
endpoint_rx,
callback=_request_callback,
content_type=d1_common.const.CONTENT_TYPE_OCTETSTREAM,
)
def _request_callback(request):
major_version, pid = _parse_url(request.url)
try:
status_int = int(pid)
except ValueError:
body_str = _generate_sciobj_bytes(pid, NUM_SCIOBJ_BYTES)
return 200, {}, body_str
else:
body_str = 'Return code: {}'.format(status_int)
return status_int, {}, body_str
def _parse_url(url):
url_obj = urlparse.urlparse(url)
url = url_obj._replace(query=None).geturl()
m = re.search(GET_ENDPOINT_RX, url)
assert m, 'Should always match since we\'re using the same regex as in add_callback()'
return m.group(1), m.group(2)
def _generate_sciobj_bytes(pid, n_count):
pid_hash_int = int(hashlib.md5(pid).hexdigest(), 16)
random.seed(pid_hash_int)
return bytearray(random.getrandbits(8) for _ in xrange(n_count))
|
|
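A standalone sketch of the deterministic-bytes trick used by _generate_sciobj_bytes above, written against only the standard library; the function name here is illustrative:

import hashlib
import random

def deterministic_bytes(pid, n_count):
    # Seed the PRNG with a hash of the identifier so the same pid always
    # maps to the same pseudo-random byte sequence.
    random.seed(int(hashlib.md5(pid.encode('utf-8')).hexdigest(), 16))
    return bytearray(random.getrandbits(8) for _ in range(n_count))

# Repeated calls with the same pid return identical bytes.
assert deterministic_bytes('test-pid', 16) == deterministic_bytes('test-pid', 16)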
8223e9ffa61a2772a7a6f52244c5f1bbde4956b8
|
py/longest-palindrome.py
|
py/longest-palindrome.py
|
from collections import Counter
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
counter = Counter(s)
odd = 0
ans = 0
for char, cnt in counter.iteritems():
if cnt % 2 == 0:
ans += cnt
else:
odd = 1
ans += cnt - 1
return ans + odd
|
Add py solution for 409. Longest Palindrome
|
Add py solution for 409. Longest Palindrome
409. Longest Palindrome: https://leetcode.com/problems/longest-palindrome/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 409. Longest Palindrome
409. Longest Palindrome: https://leetcode.com/problems/longest-palindrome/
|
from collections import Counter
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
counter = Counter(s)
odd = 0
ans = 0
for char, cnt in counter.iteritems():
if cnt % 2 == 0:
ans += cnt
else:
odd = 1
ans += cnt - 1
return ans + odd
|
<commit_before><commit_msg>Add py solution for 409. Longest Palindrome
409. Longest Palindrome: https://leetcode.com/problems/longest-palindrome/<commit_after>
|
from collections import Counter
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
counter = Counter(s)
odd = 0
ans = 0
for char, cnt in counter.iteritems():
if cnt % 2 == 0:
ans += cnt
else:
odd = 1
ans += cnt - 1
return ans + odd
|
Add py solution for 409. Longest Palindrome
409. Longest Palindrome: https://leetcode.com/problems/longest-palindrome/from collections import Counter
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
counter = Counter(s)
odd = 0
ans = 0
for char, cnt in counter.iteritems():
if cnt % 2 == 0:
ans += cnt
else:
odd = 1
ans += cnt - 1
return ans + odd
|
<commit_before><commit_msg>Add py solution for 409. Longest Palindrome
409. Longest Palindrome: https://leetcode.com/problems/longest-palindrome/<commit_after>from collections import Counter
class Solution(object):
def longestPalindrome(self, s):
"""
:type s: str
:rtype: int
"""
counter = Counter(s)
odd = 0
ans = 0
for char, cnt in counter.iteritems():
if cnt % 2 == 0:
ans += cnt
else:
odd = 1
ans += cnt - 1
return ans + odd
|
|
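An equivalent standalone formulation of the counting argument in the solution above (names are illustrative): each pair of identical characters fills two symmetric positions, and at most one leftover character can occupy the centre.

from collections import Counter

def longest_palindrome_length(s):
    counts = Counter(s)
    pairs = sum(cnt // 2 for cnt in counts.values())       # symmetric pairs
    has_center = any(cnt % 2 for cnt in counts.values())   # odd leftover for the middle
    return 2 * pairs + (1 if has_center else 0)

print(longest_palindrome_length('abccccdd'))  # 7, e.g. 'dccaccd'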
8223d62c22d4c4f7a66e1e468de53556796a03a9
|
src/functions/exercise7.py
|
src/functions/exercise7.py
|
"""Module docstring.
This serves as a long usage message.
"""
import sys
import getopt
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
for arg in args:
process(arg) # process() is defined elsewhere
if __name__ == "__main__":
main()
|
Write a function that prints something n times including relative spaces
|
Write a function that prints something n times including relative spaces
|
Python
|
mit
|
let42/python-course
|
Write a function that prints something n times including relative spaces
|
"""Module docstring.
This serves as a long usage message.
"""
import sys
import getopt
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
for arg in args:
process(arg) # process() is defined elsewhere
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Write a function that prints something n times including relative spaces<commit_after>
|
"""Module docstring.
This serves as a long usage message.
"""
import sys
import getopt
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
for arg in args:
process(arg) # process() is defined elsewhere
if __name__ == "__main__":
main()
|
Write a function that prints something n times including relative spaces"""Module docstring.
This serves as a long usage message.
"""
import sys
import getopt
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
for arg in args:
process(arg) # process() is defined elsewhere
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Write a function that prints something n times including relative spaces<commit_after>"""Module docstring.
This serves as a long usage message.
"""
import sys
import getopt
def main():
# parse command line options
try:
opts, args = getopt.getopt(sys.argv[1:], "h", ["help"])
except getopt.error, msg:
print msg
print "for help use --help"
sys.exit(2)
# process options
for o, a in opts:
if o in ("-h", "--help"):
print __doc__
sys.exit(0)
# process arguments
for arg in args:
process(arg) # process() is defined elsewhere
if __name__ == "__main__":
main()
|
|
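The committed file above only adds the argument-parsing skeleton; one possible reading of the exercise described in the commit message, with the function name and exact spacing behaviour being assumptions, is:

def print_n_times(text, n):
    # Print `text` n times, shifting each repetition right by one extra space.
    for i in range(n):
        print(' ' * i + text)

print_n_times('hello', 3)
# hello
#  hello
#   hello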
98c07739702fbf3951ccd0359d04be80a303d9ce
|
run_time/src/gae_server/font_mapper.py
|
run_time/src/gae_server/font_mapper.py
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle
tachyfont_major_version = 1
tachyfont_minor_version = 0
BASE_DIR = path.dirname(__file__)
def fontname_to_zipfile(fontname):
family_dir = ''
if fontname[0:10] == 'NotoSansJP':
family_dir = 'NotoSansJP/'
zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
return zip_path
|
Add a fontname to TachyFont Jar file mapper.
|
Add a fontname to TachyFont Jar file mapper.
|
Python
|
apache-2.0
|
bstell/TachyFont,bstell/TachyFont,moyogo/tachyfont,bstell/TachyFont,moyogo/tachyfont,bstell/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,bstell/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,moyogo/tachyfont,moyogo/tachyfont,googlei18n/TachyFont
|
Add a fontname to TachyFont Jar file mapper.
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle
tachyfont_major_version = 1
tachyfont_minor_version = 0
BASE_DIR = path.dirname(__file__)
def fontname_to_zipfile(fontname):
family_dir = ''
if fontname[0:10] == 'NotoSansJP':
family_dir = 'NotoSansJP/'
zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
return zip_path
|
<commit_before><commit_msg>Add a fontname to TachyFont Jar file mapper.<commit_after>
|
"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle
tachyfont_major_version = 1
tachyfont_minor_version = 0
BASE_DIR = path.dirname(__file__)
def fontname_to_zipfile(fontname):
family_dir = ''
if fontname[0:10] == 'NotoSansJP':
family_dir = 'NotoSansJP/'
zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
return zip_path
|
Add a fontname to TachyFont Jar file mapper."""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle
tachyfont_major_version = 1
tachyfont_minor_version = 0
BASE_DIR = path.dirname(__file__)
def fontname_to_zipfile(fontname):
family_dir = ''
if fontname[0:10] == 'NotoSansJP':
family_dir = 'NotoSansJP/'
zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
return zip_path
|
<commit_before><commit_msg>Add a fontname to TachyFont Jar file mapper.<commit_after>"""
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle
tachyfont_major_version = 1
tachyfont_minor_version = 0
BASE_DIR = path.dirname(__file__)
def fontname_to_zipfile(fontname):
family_dir = ''
if fontname[0:10] == 'NotoSansJP':
family_dir = 'NotoSansJP/'
zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
return zip_path
|
|
4074c4fae998ac1bb6f49bb47b34f4890dc90532
|
test_pylast.py
|
test_pylast.py
|
#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import datetime
import time
import unittest
import pylast
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.username = "TODO"
password_hash = "TODO"
API_KEY = "TODO"
API_SECRET = "TODO"
self.network = pylast.LastFMNetwork(api_key = API_KEY, api_secret =
API_SECRET, username = self.username, password_hash = password_hash)
def test_scrobble(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertEqual(str(last_scrobble.track.artist), str(artist))
self.assertEqual(str(last_scrobble.track.title), str(title))
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
def test_unscrobble(self):
# Arrange
artist = "Test Artist 2"
title = "Test Title 2"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
library = pylast.Library(user = self.username, network = self.network)
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
lastfm_user = self.network.get_user(self.username)
# Act
library.remove_scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertNotEqual(str(last_scrobble.timestamp), str(timestamp))
def test_add_album(self):
# Arrange
library = pylast.Library(user = self.username, network = self.network)
album = self.network.get_album("Test Artist", "Test Album")
# Act
library.add_album(album)
# Assert
# Nothing here, just that no exception occurred
def test_get_venue(self):
# Arrange
venue_name = "Last.fm Office"
        country_name = "United Kingdom"
# Act
venue_search = self.network.search_for_venue(venue_name, country_name)
venue = venue_search.get_next_page()[0]
# Assert
self.assertEqual(str(venue.id), "8778225")
if __name__ == '__main__':
unittest.main()
# End of file
|
Add integration tests for pylast.py
|
Add integration tests for pylast.py
|
Python
|
apache-2.0
|
knockoutMice/pylast,hugovk/pylast,pylast/pylast,yanggao1119/pylast,knockoutMice/pylast,yanggao1119/pylast
|
Add integration tests for pylast.py
|
#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import datetime
import time
import unittest
import pylast
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.username = "TODO"
password_hash = "TODO"
API_KEY = "TODO"
API_SECRET = "TODO"
self.network = pylast.LastFMNetwork(api_key = API_KEY, api_secret =
API_SECRET, username = self.username, password_hash = password_hash)
def test_scrobble(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertEqual(str(last_scrobble.track.artist), str(artist))
self.assertEqual(str(last_scrobble.track.title), str(title))
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
def test_unscrobble(self):
# Arrange
artist = "Test Artist 2"
title = "Test Title 2"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
library = pylast.Library(user = self.username, network = self.network)
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
lastfm_user = self.network.get_user(self.username)
# Act
library.remove_scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertNotEqual(str(last_scrobble.timestamp), str(timestamp))
def test_add_album(self):
# Arrange
library = pylast.Library(user = self.username, network = self.network)
album = self.network.get_album("Test Artist", "Test Album")
# Act
library.add_album(album)
# Assert
# Nothing here, just that no exception occurred
def test_get_venue(self):
# Arrange
venue_name = "Last.fm Office"
        country_name = "United Kingdom"
# Act
venue_search = self.network.search_for_venue(venue_name, country_name)
venue = venue_search.get_next_page()[0]
# Assert
self.assertEqual(str(venue.id), "8778225")
if __name__ == '__main__':
unittest.main()
# End of file
|
<commit_before><commit_msg>Add integration tests for pylast.py<commit_after>
|
#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import datetime
import time
import unittest
import pylast
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.username = "TODO"
password_hash = "TODO"
API_KEY = "TODO"
API_SECRET = "TODO"
self.network = pylast.LastFMNetwork(api_key = API_KEY, api_secret =
API_SECRET, username = self.username, password_hash = password_hash)
def test_scrobble(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertEqual(str(last_scrobble.track.artist), str(artist))
self.assertEqual(str(last_scrobble.track.title), str(title))
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
def test_unscrobble(self):
# Arrange
artist = "Test Artist 2"
title = "Test Title 2"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
library = pylast.Library(user = self.username, network = self.network)
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
lastfm_user = self.network.get_user(self.username)
# Act
library.remove_scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertNotEqual(str(last_scrobble.timestamp), str(timestamp))
def test_add_album(self):
# Arrange
library = pylast.Library(user = self.username, network = self.network)
album = self.network.get_album("Test Artist", "Test Album")
# Act
library.add_album(album)
# Assert
# Nothing here, just that no exception occurred
def test_get_venue(self):
# Arrange
venue_name = "Last.fm Office"
        country_name = "United Kingdom"
# Act
venue_search = self.network.search_for_venue(venue_name, country_name)
venue = venue_search.get_next_page()[0]
# Assert
self.assertEqual(str(venue.id), "8778225")
if __name__ == '__main__':
unittest.main()
# End of file
|
Add integration tests for pylast.py#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import datetime
import time
import unittest
import pylast
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.username = "TODO"
password_hash = "TODO"
API_KEY = "TODO"
API_SECRET = "TODO"
self.network = pylast.LastFMNetwork(api_key = API_KEY, api_secret =
API_SECRET, username = self.username, password_hash = password_hash)
def test_scrobble(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertEqual(str(last_scrobble.track.artist), str(artist))
self.assertEqual(str(last_scrobble.track.title), str(title))
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
def test_unscrobble(self):
# Arrange
artist = "Test Artist 2"
title = "Test Title 2"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
library = pylast.Library(user = self.username, network = self.network)
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
lastfm_user = self.network.get_user(self.username)
# Act
library.remove_scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertNotEqual(str(last_scrobble.timestamp), str(timestamp))
def test_add_album(self):
# Arrange
library = pylast.Library(user = self.username, network = self.network)
album = self.network.get_album("Test Artist", "Test Album")
# Act
library.add_album(album)
# Assert
# Nothing here, just that no exception occurred
def test_get_venue(self):
# Arrange
venue_name = "Last.fm Office"
        country_name = "United Kingdom"
# Act
venue_search = self.network.search_for_venue(venue_name, country_name)
venue = venue_search.get_next_page()[0]
# Assert
self.assertEqual(str(venue.id), "8778225")
if __name__ == '__main__':
unittest.main()
# End of file
|
<commit_before><commit_msg>Add integration tests for pylast.py<commit_after>#!/usr/bin/env python
"""
Integration (not unit) tests for pylast.py
"""
import datetime
import time
import unittest
import pylast
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.username = "TODO"
password_hash = "TODO"
API_KEY = "TODO"
API_SECRET = "TODO"
self.network = pylast.LastFMNetwork(api_key = API_KEY, api_secret =
API_SECRET, username = self.username, password_hash = password_hash)
def test_scrobble(self):
# Arrange
artist = "Test Artist"
title = "Test Title"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
lastfm_user = self.network.get_user(self.username)
# Act
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertEqual(str(last_scrobble.track.artist), str(artist))
self.assertEqual(str(last_scrobble.track.title), str(title))
self.assertEqual(str(last_scrobble.timestamp), str(timestamp))
def test_unscrobble(self):
# Arrange
artist = "Test Artist 2"
title = "Test Title 2"
timestamp = int(time.mktime(datetime.datetime.now().timetuple()))
library = pylast.Library(user = self.username, network = self.network)
self.network.scrobble(artist = artist, title = title, timestamp = timestamp)
lastfm_user = self.network.get_user(self.username)
# Act
library.remove_scrobble(artist = artist, title = title, timestamp = timestamp)
# Assert
last_scrobble = lastfm_user.get_recent_tracks(limit = 1)[0]
self.assertNotEqual(str(last_scrobble.timestamp), str(timestamp))
def test_add_album(self):
# Arrange
library = pylast.Library(user = self.username, network = self.network)
album = self.network.get_album("Test Artist", "Test Album")
# Act
library.add_album(album)
# Assert
# Nothing here, just that no exception occurred
def test_get_venue(self):
# Arrange
venue_name = "Last.fm Office"
        country_name = "United Kingdom"
# Act
venue_search = self.network.search_for_venue(venue_name, country_name)
venue = venue_search.get_next_page()[0]
# Assert
self.assertEqual(str(venue.id), "8778225")
if __name__ == '__main__':
unittest.main()
# End of file
|
|
3def1498495e0abf230a3deb3873f6c502f3c6ad
|
molo/core/management/commands/remove_content_rotation_settings_from_sections.py
|
molo/core/management/commands/remove_content_rotation_settings_from_sections.py
|
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import SectionPage
class Command(BaseCommand):
def handle(self, **options):
SectionPage.objects.all().update(
content_rotation_start_date=None,
content_rotation_end_date=None,
monday_rotation=False,
tuesday_rotation=False,
wednesday_rotation=False,
thursday_rotation=False,
friday_rotation=False,
saturday_rotation=False,
sunday_rotation=False,
time=None)
|
Add management command for removing content rotation's settings on sections
|
Add management command for removing content rotation's settings on sections
|
Python
|
bsd-2-clause
|
praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo
|
Add management command for removing content rotation's settings on sections
|
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import SectionPage
class Command(BaseCommand):
def handle(self, **options):
SectionPage.objects.all().update(
content_rotation_start_date=None,
content_rotation_end_date=None,
monday_rotation=False,
tuesday_rotation=False,
wednesday_rotation=False,
thursday_rotation=False,
friday_rotation=False,
saturday_rotation=False,
sunday_rotation=False,
time=None)
|
<commit_before><commit_msg>Add management command for removing content rotation's settings on sections<commit_after>
|
from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import SectionPage
class Command(BaseCommand):
def handle(self, **options):
SectionPage.objects.all().update(
content_rotation_start_date=None,
content_rotation_end_date=None,
monday_rotation=False,
tuesday_rotation=False,
wednesday_rotation=False,
thursday_rotation=False,
friday_rotation=False,
saturday_rotation=False,
sunday_rotation=False,
time=None)
|
Add management command for removing content rotation's settings on sectionsfrom __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import SectionPage
class Command(BaseCommand):
def handle(self, **options):
SectionPage.objects.all().update(
content_rotation_start_date=None,
content_rotation_end_date=None,
monday_rotation=False,
tuesday_rotation=False,
wednesday_rotation=False,
thursday_rotation=False,
friday_rotation=False,
saturday_rotation=False,
sunday_rotation=False,
time=None)
|
<commit_before><commit_msg>Add management command for removing content rotation's settings on sections<commit_after>from __future__ import absolute_import, unicode_literals
from django.core.management.base import BaseCommand
from molo.core.models import SectionPage
class Command(BaseCommand):
def handle(self, **options):
SectionPage.objects.all().update(
content_rotation_start_date=None,
content_rotation_end_date=None,
monday_rotation=False,
tuesday_rotation=False,
wednesday_rotation=False,
thursday_rotation=False,
friday_rotation=False,
saturday_rotation=False,
sunday_rotation=False,
time=None)
|
|
a610154749d081e613b8bf58acf62af55958de9c
|
rev.py
|
rev.py
|
import fileinput
import re
import sys
pattern = re.compile("\s*version='([0-9.]+)',")
line = ""
maj = ""
min = ""
ver = ""
for line in fileinput.FileInput("setup.py", inplace=1):
m = pattern.match(line)
if m:
version = m.groups()[0]
maj, min, rev = version.split('.')
line = line.replace(version, "{0}.{1}.{2}".format(maj, min, int(rev)+1))
sys.stdout.write(line)
|
Automate the update of the version number in setup.py.
|
Automate the update of the version number in setup.py.
|
Python
|
apache-2.0
|
jeffreydwalter/arlo
|
Automate the update of the version number in setup.py.
|
import fileinput
import re
import sys
pattern = re.compile("\s*version='([0-9.]+)',")
line = ""
maj = ""
min = ""
ver = ""
for line in fileinput.FileInput("setup.py", inplace=1):
m = pattern.match(line)
if m:
version = m.groups()[0]
maj, min, rev = version.split('.')
line = line.replace(version, "{0}.{1}.{2}".format(maj, min, int(rev)+1))
sys.stdout.write(line)
|
<commit_before><commit_msg>Automate the update of the version number in setup.py.<commit_after>
|
import fileinput
import re
import sys
pattern = re.compile("\s*version='([0-9.]+)',")
line = ""
maj = ""
min = ""
ver = ""
for line in fileinput.FileInput("setup.py", inplace=1):
m = pattern.match(line)
if m:
version = m.groups()[0]
maj, min, rev = version.split('.')
line = line.replace(version, "{0}.{1}.{2}".format(maj, min, int(rev)+1))
sys.stdout.write(line)
|
Automate the update of the version number in setup.py.import fileinput
import re
import sys
pattern = re.compile("\s*version='([0-9.]+)',")
line = ""
maj = ""
min = ""
ver = ""
for line in fileinput.FileInput("setup.py", inplace=1):
m = pattern.match(line)
if m:
version = m.groups()[0]
maj, min, rev = version.split('.')
line = line.replace(version, "{0}.{1}.{2}".format(maj, min, int(rev)+1))
sys.stdout.write(line)
|
<commit_before><commit_msg>Automate the update of the version number in setup.py.<commit_after>import fileinput
import re
import sys
pattern = re.compile("\s*version='([0-9.]+)',")
line = ""
maj = ""
min = ""
ver = ""
for line in fileinput.FileInput("setup.py", inplace=1):
m = pattern.match(line)
if m:
version = m.groups()[0]
maj, min, rev = version.split('.')
line = line.replace(version, "{0}.{1}.{2}".format(maj, min, int(rev)+1))
sys.stdout.write(line)
|
|
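A self-contained dry run (the sample version line is made up) of the same substitution on an in-memory string, so the bump logic in rev.py can be checked without touching setup.py:

import re

pattern = re.compile(r"\s*version='([0-9.]+)',")
line = "    version='1.2.9',"
m = pattern.match(line)
if m:
    version = m.group(1)
    major, minor, rev = version.split('.')
    line = line.replace(version, '{0}.{1}.{2}'.format(major, minor, int(rev) + 1))
print(line)  # version='1.2.10',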
4e2172b8bd0953fd706f6c11029f9e4cfeb55407
|
tests/registryd/test_root_component.py
|
tests/registryd/test_root_component.py
|
import pytest
import dbus
COMPONENT_IFACE = 'org.a11y.atspi.Component'
COORD_TYPE_WINDOW = 1
LAYER_WIDGET = 3
def test_contains(registry_root, session_manager):
assert registry_root.Contains(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == False
def test_get_accessible_at_point(registry_root, session_manager):
(name, path) = registry_root.GetAccessibleAtPoint(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE)
assert path == '/org/a11y/atspi/null'
def test_get_extents(registry_root, session_manager):
assert registry_root.GetExtents(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0, 1024, 768)
def test_get_position(registry_root, session_manager):
assert registry_root.GetPosition(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0)
def test_get_size(registry_root, session_manager):
assert registry_root.GetSize(dbus_interface=COMPONENT_IFACE) == (1024, 768)
def test_get_layer(registry_root, session_manager):
assert registry_root.GetLayer(dbus_interface=COMPONENT_IFACE) == LAYER_WIDGET
def test_get_mdi_z_order(registry_root, session_manager):
assert registry_root.GetMDIZOrder(dbus_interface=COMPONENT_IFACE) == 0
def test_grab_focus(registry_root, session_manager):
assert registry_root.GrabFocus(dbus_interface=COMPONENT_IFACE) == False
def test_get_alpha(registry_root, session_manager):
assert registry_root.GetAlpha(dbus_interface=COMPONENT_IFACE) == 1.0
|
Add tests for the registry's root object's Component interface
|
Add tests for the registry's root object's Component interface
Not much to test, really, just characterizing what the implementation
does. All the methods are no-ops that return hardcoded values.
|
Python
|
lgpl-2.1
|
GNOME/at-spi2-core,GNOME/at-spi2-core,GNOME/at-spi2-core
|
Add tests for the registry's root object's Component interface
Not much to test, really, just characterizing what the implementation
does. All the methods are no-ops that return hardcoded values.
|
import pytest
import dbus
COMPONENT_IFACE = 'org.a11y.atspi.Component'
COORD_TYPE_WINDOW = 1
LAYER_WIDGET = 3
def test_contains(registry_root, session_manager):
assert registry_root.Contains(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == False
def test_get_accessible_at_point(registry_root, session_manager):
(name, path) = registry_root.GetAccessibleAtPoint(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE)
assert path == '/org/a11y/atspi/null'
def test_get_extents(registry_root, session_manager):
assert registry_root.GetExtents(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0, 1024, 768)
def test_get_position(registry_root, session_manager):
assert registry_root.GetPosition(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0)
def test_get_size(registry_root, session_manager):
assert registry_root.GetSize(dbus_interface=COMPONENT_IFACE) == (1024, 768)
def test_get_layer(registry_root, session_manager):
assert registry_root.GetLayer(dbus_interface=COMPONENT_IFACE) == LAYER_WIDGET
def test_get_mdi_z_order(registry_root, session_manager):
assert registry_root.GetMDIZOrder(dbus_interface=COMPONENT_IFACE) == 0
def test_grab_focus(registry_root, session_manager):
assert registry_root.GrabFocus(dbus_interface=COMPONENT_IFACE) == False
def test_get_alpha(registry_root, session_manager):
assert registry_root.GetAlpha(dbus_interface=COMPONENT_IFACE) == 1.0
|
<commit_before><commit_msg>Add tests for the registry's root object's Component interface
Not much to test, really, just characterizing what the implementation
does. All the methods are no-ops that return hardcoded values.<commit_after>
|
import pytest
import dbus
COMPONENT_IFACE = 'org.a11y.atspi.Component'
COORD_TYPE_WINDOW = 1
LAYER_WIDGET = 3
def test_contains(registry_root, session_manager):
assert registry_root.Contains(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == False
def test_get_accessible_at_point(registry_root, session_manager):
(name, path) = registry_root.GetAccessibleAtPoint(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE)
assert path == '/org/a11y/atspi/null'
def test_get_extents(registry_root, session_manager):
assert registry_root.GetExtents(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0, 1024, 768)
def test_get_position(registry_root, session_manager):
assert registry_root.GetPosition(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0)
def test_get_size(registry_root, session_manager):
assert registry_root.GetSize(dbus_interface=COMPONENT_IFACE) == (1024, 768)
def test_get_layer(registry_root, session_manager):
assert registry_root.GetLayer(dbus_interface=COMPONENT_IFACE) == LAYER_WIDGET
def test_get_mdi_z_order(registry_root, session_manager):
assert registry_root.GetMDIZOrder(dbus_interface=COMPONENT_IFACE) == 0
def test_grab_focus(registry_root, session_manager):
assert registry_root.GrabFocus(dbus_interface=COMPONENT_IFACE) == False
def test_get_alpha(registry_root, session_manager):
assert registry_root.GetAlpha(dbus_interface=COMPONENT_IFACE) == 1.0
|
Add tests for the registry's root object's Component interface
Not much to test, really, just characterizing what the implementation
does. All the methods are no-ops that return hardcoded values.import pytest
import dbus
COMPONENT_IFACE = 'org.a11y.atspi.Component'
COORD_TYPE_WINDOW = 1
LAYER_WIDGET = 3
def test_contains(registry_root, session_manager):
assert registry_root.Contains(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == False
def test_get_accessible_at_point(registry_root, session_manager):
(name, path) = registry_root.GetAccessibleAtPoint(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE)
assert path == '/org/a11y/atspi/null'
def test_get_extents(registry_root, session_manager):
assert registry_root.GetExtents(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0, 1024, 768)
def test_get_position(registry_root, session_manager):
assert registry_root.GetPosition(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0)
def test_get_size(registry_root, session_manager):
assert registry_root.GetSize(dbus_interface=COMPONENT_IFACE) == (1024, 768)
def test_get_layer(registry_root, session_manager):
assert registry_root.GetLayer(dbus_interface=COMPONENT_IFACE) == LAYER_WIDGET
def test_get_mdi_z_order(registry_root, session_manager):
assert registry_root.GetMDIZOrder(dbus_interface=COMPONENT_IFACE) == 0
def test_grab_focus(registry_root, session_manager):
assert registry_root.GrabFocus(dbus_interface=COMPONENT_IFACE) == False
def test_get_alpha(registry_root, session_manager):
assert registry_root.GetAlpha(dbus_interface=COMPONENT_IFACE) == 1.0
|
<commit_before><commit_msg>Add tests for the registry's root object's Component interface
Not much to test, really, just characterizing what the implementation
does. All the methods are no-ops that return hardcoded values.<commit_after>import pytest
import dbus
COMPONENT_IFACE = 'org.a11y.atspi.Component'
COORD_TYPE_WINDOW = 1
LAYER_WIDGET = 3
def test_contains(registry_root, session_manager):
assert registry_root.Contains(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == False
def test_get_accessible_at_point(registry_root, session_manager):
(name, path) = registry_root.GetAccessibleAtPoint(0, 0, COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE)
assert path == '/org/a11y/atspi/null'
def test_get_extents(registry_root, session_manager):
assert registry_root.GetExtents(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0, 1024, 768)
def test_get_position(registry_root, session_manager):
assert registry_root.GetPosition(COORD_TYPE_WINDOW, dbus_interface=COMPONENT_IFACE) == (0, 0)
def test_get_size(registry_root, session_manager):
assert registry_root.GetSize(dbus_interface=COMPONENT_IFACE) == (1024, 768)
def test_get_layer(registry_root, session_manager):
assert registry_root.GetLayer(dbus_interface=COMPONENT_IFACE) == LAYER_WIDGET
def test_get_mdi_z_order(registry_root, session_manager):
assert registry_root.GetMDIZOrder(dbus_interface=COMPONENT_IFACE) == 0
def test_grab_focus(registry_root, session_manager):
assert registry_root.GrabFocus(dbus_interface=COMPONENT_IFACE) == False
def test_get_alpha(registry_root, session_manager):
assert registry_root.GetAlpha(dbus_interface=COMPONENT_IFACE) == 1.0
|
|
f7a1595e39eeb754290c62e9194868d98d9755f4
|
tests/symbols/test_symbol_selection.py
|
tests/symbols/test_symbol_selection.py
|
import pytest
from tests.symbols import get_symbols
from thinglang.compiler.errors import NoMatchingOverload
from thinglang.compiler.references import Reference
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.named_access import NamedAccess
from thinglang.symbols.argument_selector import ArgumentSelector
SOURCE_OVERLOADING = '''
thing Container
thing Container1 extends Container
thing Container2 extends Container
as Container1
thing Container3 extends Container
as Container1
thing Container4 extends Container
as Container1
as Container2
thing Container1Child extends Container1
thing Container1Child2 extends Container1
as Container2
thing Container2Child extends Container2
thing A
does overloaded with Container1 container
does overloaded with Container2 container
does overloaded with Container2Child container
does overloaded with Container1 c1, Container2 c2
does overloaded with Container1 c1, Container2Child c2
'''
# TODO: verify no cast to base type!
SYMBOLS = get_symbols(SOURCE_OVERLOADING)
BASE = SYMBOLS.resolve(NamedAccess.auto('A.overloaded'))
def get_selection(*target_types):
selector = BASE.element.selector(SYMBOLS)
for target_type in target_types:
selector.constraint(Reference(Identifier(target_type)))
return selector.disambiguate(None)
def verify_selection(target_type, expected_index, expected_match):
target = get_selection(*target_type)
assert target.symbol.index == expected_index
assert target.match == expected_match
def test_exact_match():
verify_selection(['Container1'], 1, ArgumentSelector.EXACT)
verify_selection(['Container2'], 2, ArgumentSelector.EXACT) # Matches exactly, despite being castable
verify_selection(['Container2Child'], 3, ArgumentSelector.EXACT) # Matches exactly, despite being in an inheritance chain
verify_selection(['Container1', 'Container2'], 4, ArgumentSelector.EXACT)
verify_selection(['Container1', 'Container2Child'], 5, ArgumentSelector.EXACT)
def test_inheritance_match():
verify_selection(['Container1Child'], 1, ArgumentSelector.INHERITANCE)
verify_selection(['Container1Child2'], 1, ArgumentSelector.INHERITANCE) # Matches in an inheritance chain, despite being castable
def test_casted_match():
verify_selection(['Container3'], 1, ArgumentSelector.CAST)
def test_inheritance_directionality(): # Verify that a parent is not accepted in place of a child
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and not exc.value.cast_matches
def test_cast_ambiguity(): # Verify cast ambiguity
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container4')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and len(exc.value.cast_matches) == 2
|
Add test for symbol selection
|
Add test for symbol selection
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Add test for symbol selection
|
import pytest
from tests.symbols import get_symbols
from thinglang.compiler.errors import NoMatchingOverload
from thinglang.compiler.references import Reference
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.named_access import NamedAccess
from thinglang.symbols.argument_selector import ArgumentSelector
SOURCE_OVERLOADING = '''
thing Container
thing Container1 extends Container
thing Container2 extends Container
as Container1
thing Container3 extends Container
as Container1
thing Container4 extends Container
as Container1
as Container2
thing Container1Child extends Container1
thing Container1Child2 extends Container1
as Container2
thing Container2Child extends Container2
thing A
does overloaded with Container1 container
does overloaded with Container2 container
does overloaded with Container2Child container
does overloaded with Container1 c1, Container2 c2
does overloaded with Container1 c1, Container2Child c2
'''
# TODO: verify no cast to base type!
SYMBOLS = get_symbols(SOURCE_OVERLOADING)
BASE = SYMBOLS.resolve(NamedAccess.auto('A.overloaded'))
def get_selection(*target_types):
selector = BASE.element.selector(SYMBOLS)
for target_type in target_types:
selector.constraint(Reference(Identifier(target_type)))
return selector.disambiguate(None)
def verify_selection(target_type, expected_index, expected_match):
target = get_selection(*target_type)
assert target.symbol.index == expected_index
assert target.match == expected_match
def test_exact_match():
verify_selection(['Container1'], 1, ArgumentSelector.EXACT)
verify_selection(['Container2'], 2, ArgumentSelector.EXACT) # Matches exactly, despite being castable
verify_selection(['Container2Child'], 3, ArgumentSelector.EXACT) # Matches exactly, despite being in an inheritance chain
verify_selection(['Container1', 'Container2'], 4, ArgumentSelector.EXACT)
verify_selection(['Container1', 'Container2Child'], 5, ArgumentSelector.EXACT)
def test_inheritance_match():
verify_selection(['Container1Child'], 1, ArgumentSelector.INHERITANCE)
verify_selection(['Container1Child2'], 1, ArgumentSelector.INHERITANCE) # Matches in an inheritance chain, despite being castable
def test_casted_match():
verify_selection(['Container3'], 1, ArgumentSelector.CAST)
def test_inheritance_directionality(): # Verify that a parent is not accepted in place of a child
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and not exc.value.cast_matches
def test_cast_ambiguity(): # Verify cast ambiguity
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container4')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and len(exc.value.cast_matches) == 2
|
<commit_before><commit_msg>Add test for symbol selection<commit_after>
|
import pytest
from tests.symbols import get_symbols
from thinglang.compiler.errors import NoMatchingOverload
from thinglang.compiler.references import Reference
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.named_access import NamedAccess
from thinglang.symbols.argument_selector import ArgumentSelector
SOURCE_OVERLOADING = '''
thing Container
thing Container1 extends Container
thing Container2 extends Container
as Container1
thing Container3 extends Container
as Container1
thing Container4 extends Container
as Container1
as Container2
thing Container1Child extends Container1
thing Container1Child2 extends Container1
as Container2
thing Container2Child extends Container2
thing A
does overloaded with Container1 container
does overloaded with Container2 container
does overloaded with Container2Child container
does overloaded with Container1 c1, Container2 c2
does overloaded with Container1 c1, Container2Child c2
'''
# TODO: verify no cast to base type!
SYMBOLS = get_symbols(SOURCE_OVERLOADING)
BASE = SYMBOLS.resolve(NamedAccess.auto('A.overloaded'))
def get_selection(*target_types):
selector = BASE.element.selector(SYMBOLS)
for target_type in target_types:
selector.constraint(Reference(Identifier(target_type)))
return selector.disambiguate(None)
def verify_selection(target_type, expected_index, expected_match):
target = get_selection(*target_type)
assert target.symbol.index == expected_index
assert target.match == expected_match
def test_exact_match():
verify_selection(['Container1'], 1, ArgumentSelector.EXACT)
verify_selection(['Container2'], 2, ArgumentSelector.EXACT) # Matches exactly, despite being castable
verify_selection(['Container2Child'], 3, ArgumentSelector.EXACT) # Matches exactly, despite being in an inheritance chain
verify_selection(['Container1', 'Container2'], 4, ArgumentSelector.EXACT)
verify_selection(['Container1', 'Container2Child'], 5, ArgumentSelector.EXACT)
def test_inheritance_match():
verify_selection(['Container1Child'], 1, ArgumentSelector.INHERITANCE)
verify_selection(['Container1Child2'], 1, ArgumentSelector.INHERITANCE) # Matches in an inheritance chain, despite being castable
def test_casted_match():
verify_selection(['Container3'], 1, ArgumentSelector.CAST)
def test_inheritance_directionality(): # Verify that a parent is not accepted in place of a child
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and not exc.value.cast_matches
def test_cast_ambiguity(): # Verify cast ambiguity
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container4')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and len(exc.value.cast_matches) == 2
|
Add test for symbol selectionimport pytest
from tests.symbols import get_symbols
from thinglang.compiler.errors import NoMatchingOverload
from thinglang.compiler.references import Reference
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.named_access import NamedAccess
from thinglang.symbols.argument_selector import ArgumentSelector
SOURCE_OVERLOADING = '''
thing Container
thing Container1 extends Container
thing Container2 extends Container
as Container1
thing Container3 extends Container
as Container1
thing Container4 extends Container
as Container1
as Container2
thing Container1Child extends Container1
thing Container1Child2 extends Container1
as Container2
thing Container2Child extends Container2
thing A
does overloaded with Container1 container
does overloaded with Container2 container
does overloaded with Container2Child container
does overloaded with Container1 c1, Container2 c2
does overloaded with Container1 c1, Container2Child c2
'''
# TODO: verify no cast to base type!
SYMBOLS = get_symbols(SOURCE_OVERLOADING)
BASE = SYMBOLS.resolve(NamedAccess.auto('A.overloaded'))
def get_selection(*target_types):
selector = BASE.element.selector(SYMBOLS)
for target_type in target_types:
selector.constraint(Reference(Identifier(target_type)))
return selector.disambiguate(None)
def verify_selection(target_type, expected_index, expected_match):
target = get_selection(*target_type)
assert target.symbol.index == expected_index
assert target.match == expected_match
def test_exact_match():
verify_selection(['Container1'], 1, ArgumentSelector.EXACT)
verify_selection(['Container2'], 2, ArgumentSelector.EXACT) # Matches exactly, despite being castable
verify_selection(['Container2Child'], 3, ArgumentSelector.EXACT) # Matches exactly, despite being in an inheritance chain
verify_selection(['Container1', 'Container2'], 4, ArgumentSelector.EXACT)
verify_selection(['Container1', 'Container2Child'], 5, ArgumentSelector.EXACT)
def test_inheritance_match():
verify_selection(['Container1Child'], 1, ArgumentSelector.INHERITANCE)
verify_selection(['Container1Child2'], 1, ArgumentSelector.INHERITANCE) # Matches in an inheritance chain, despite being castable
def test_casted_match():
verify_selection(['Container3'], 1, ArgumentSelector.CAST)
def test_inheritance_directionality(): # Verify that a parent is not accepted in place of a child
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and not exc.value.cast_matches
def test_cast_ambiguity(): # Verify cast ambiguity
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container4')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and len(exc.value.cast_matches) == 2
|
<commit_before><commit_msg>Add test for symbol selection<commit_after>import pytest
from tests.symbols import get_symbols
from thinglang.compiler.errors import NoMatchingOverload
from thinglang.compiler.references import Reference
from thinglang.lexer.values.identifier import Identifier
from thinglang.parser.values.named_access import NamedAccess
from thinglang.symbols.argument_selector import ArgumentSelector
SOURCE_OVERLOADING = '''
thing Container
thing Container1 extends Container
thing Container2 extends Container
as Container1
thing Container3 extends Container
as Container1
thing Container4 extends Container
as Container1
as Container2
thing Container1Child extends Container1
thing Container1Child2 extends Container1
as Container2
thing Container2Child extends Container2
thing A
does overloaded with Container1 container
does overloaded with Container2 container
does overloaded with Container2Child container
does overloaded with Container1 c1, Container2 c2
does overloaded with Container1 c1, Container2Child c2
'''
# TODO: verify no cast to base type!
SYMBOLS = get_symbols(SOURCE_OVERLOADING)
BASE = SYMBOLS.resolve(NamedAccess.auto('A.overloaded'))
def get_selection(*target_types):
selector = BASE.element.selector(SYMBOLS)
for target_type in target_types:
selector.constraint(Reference(Identifier(target_type)))
return selector.disambiguate(None)
def verify_selection(target_type, expected_index, expected_match):
target = get_selection(*target_type)
assert target.symbol.index == expected_index
assert target.match == expected_match
def test_exact_match():
verify_selection(['Container1'], 1, ArgumentSelector.EXACT)
verify_selection(['Container2'], 2, ArgumentSelector.EXACT) # Matches exactly, despite being castable
verify_selection(['Container2Child'], 3, ArgumentSelector.EXACT) # Matches exactly, despite being in an inheritance chain
verify_selection(['Container1', 'Container2'], 4, ArgumentSelector.EXACT)
verify_selection(['Container1', 'Container2Child'], 5, ArgumentSelector.EXACT)
def test_inheritance_match():
verify_selection(['Container1Child'], 1, ArgumentSelector.INHERITANCE)
verify_selection(['Container1Child2'], 1, ArgumentSelector.INHERITANCE) # Matches in an inheritance chain, despite being castable
def test_casted_match():
verify_selection(['Container3'], 1, ArgumentSelector.CAST)
def test_inheritance_directionality(): # Verify that a parent is not accepted in place of a child
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and not exc.value.cast_matches
def test_cast_ambiguity(): # Verify cast ambiguity
with pytest.raises(NoMatchingOverload) as exc:
get_selection('Container4')
assert not exc.value.exact_matches and not exc.value.inheritance_matches and len(exc.value.cast_matches) == 2
|
|
aa5c4fde763467cae63c205df8e4aaf7328ab713
|
device/PRESUBMIT.py
|
device/PRESUBMIT.py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def CheckChangeOnUpload(input_api, output_api):
results = []
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
return results
|
Add 'git cl format' presubmit check to src/device
|
Add 'git cl format' presubmit check to src/device
Review URL: https://codereview.chromium.org/1097743004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#325940}
|
Python
|
bsd-3-clause
|
chuan9/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,Chilledheart/chromium,chuan9/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,Chilledheart/chromium,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,Fireblend/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,chuan9/chromium-crosswalk,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,hgl888/chromium-crosswalk,Pluto-tv/chromium-crosswalk
|
Add 'git cl format' presubmit check to src/device
Review URL: https://codereview.chromium.org/1097743004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#325940}
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def CheckChangeOnUpload(input_api, output_api):
results = []
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
return results
|
<commit_before><commit_msg>Add 'git cl format' presubmit check to src/device
Review URL: https://codereview.chromium.org/1097743004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#325940}<commit_after>
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def CheckChangeOnUpload(input_api, output_api):
results = []
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
return results
|
Add 'git cl format' presubmit check to src/device
Review URL: https://codereview.chromium.org/1097743004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#325940}# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def CheckChangeOnUpload(input_api, output_api):
results = []
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
return results
|
<commit_before><commit_msg>Add 'git cl format' presubmit check to src/device
Review URL: https://codereview.chromium.org/1097743004
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#325940}<commit_after># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def CheckChangeOnUpload(input_api, output_api):
results = []
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
return results
|
|
fb3b3d2f9798872742541f7eae2d7b3e2a8a95ab
|
pygraphc/abstraction/AutoAbstractionRecursion.py
|
pygraphc/abstraction/AutoAbstractionRecursion.py
|
import networkx as nx
import os
from pygraphc.preprocess.CreateGraphModel import CreateGraphModel
from pygraphc.clustering.Louvain import Louvain
class AutoAbstraction(object):
def __init__(self, log_file):
self.log_file = log_file
self.clusters = []
def __prepare_graph(self, cluster=None):
# get subgraph
if cluster:
subgraph = [int(node) for node in cluster]
graph_noattributes = self.graph_noattributes.subgraph(subgraph)
# create graph
else:
self.graph_model = CreateGraphModel(self.log_file)
self.graph = self.graph_model.create_graph()
self.graph_noattributes = self.graph_model.create_graph_noattributes()
self.graph_copy = self.graph.copy()
graph_noattributes = self.graph_noattributes
# write to gexf file
gexf_file = os.path.join('/', 'tmp', self.log_file.split('/')[-1] + '.gexf')
nx.write_gexf(graph_noattributes, gexf_file)
return gexf_file
def __get_community(self, cluster=None):
# prepare graph or subgraph
if cluster:
gexf_file = self.__prepare_graph(cluster)
else:
gexf_file = self.__prepare_graph()
# graph clustering based on Louvain community detection
louvain = Louvain(gexf_file)
clusters = louvain.get_cluster()
# stop-recursion case: if there is no more partition
if len(clusters.keys()) == 1:
self.clusters.append(clusters.values()[0])
print 'cluster with len=1', clusters.values()[0]
# recursion case: graph clustering
else:
for cluster_id, cluster in clusters.iteritems():
self.__get_community(cluster)
def get_abstraction(self):
self.__get_community()
# aa = AutoAbstraction('/home/hudan/Git/datasets/casper-rw/logs/messages')
# aa.get_abstraction()
|
Add new abstraction with recursion
|
Add new abstraction with recursion
|
Python
|
mit
|
studiawan/pygraphc
|
Add new abstraction with recursion
|
import networkx as nx
import os
from pygraphc.preprocess.CreateGraphModel import CreateGraphModel
from pygraphc.clustering.Louvain import Louvain
class AutoAbstraction(object):
def __init__(self, log_file):
self.log_file = log_file
self.clusters = []
def __prepare_graph(self, cluster=None):
# get subgraph
if cluster:
subgraph = [int(node) for node in cluster]
graph_noattributes = self.graph_noattributes.subgraph(subgraph)
# create graph
else:
self.graph_model = CreateGraphModel(self.log_file)
self.graph = self.graph_model.create_graph()
self.graph_noattributes = self.graph_model.create_graph_noattributes()
self.graph_copy = self.graph.copy()
graph_noattributes = self.graph_noattributes
# write to gexf file
gexf_file = os.path.join('/', 'tmp', self.log_file.split('/')[-1] + '.gexf')
nx.write_gexf(graph_noattributes, gexf_file)
return gexf_file
def __get_community(self, cluster=None):
# prepare graph or subgraph
if cluster:
gexf_file = self.__prepare_graph(cluster)
else:
gexf_file = self.__prepare_graph()
# graph clustering based on Louvain community detection
louvain = Louvain(gexf_file)
clusters = louvain.get_cluster()
# stop-recursion case: if there is no more partition
if len(clusters.keys()) == 1:
self.clusters.append(clusters.values()[0])
print 'cluster with len=1', clusters.values()[0]
# recursion case: graph clustering
else:
for cluster_id, cluster in clusters.iteritems():
self.__get_community(cluster)
def get_abstraction(self):
self.__get_community()
# aa = AutoAbstraction('/home/hudan/Git/datasets/casper-rw/logs/messages')
# aa.get_abstraction()
|
<commit_before><commit_msg>Add new abstraction with recursion<commit_after>
|
import networkx as nx
import os
from pygraphc.preprocess.CreateGraphModel import CreateGraphModel
from pygraphc.clustering.Louvain import Louvain
class AutoAbstraction(object):
def __init__(self, log_file):
self.log_file = log_file
self.clusters = []
def __prepare_graph(self, cluster=None):
# get subgraph
if cluster:
subgraph = [int(node) for node in cluster]
graph_noattributes = self.graph_noattributes.subgraph(subgraph)
# create graph
else:
self.graph_model = CreateGraphModel(self.log_file)
self.graph = self.graph_model.create_graph()
self.graph_noattributes = self.graph_model.create_graph_noattributes()
self.graph_copy = self.graph.copy()
graph_noattributes = self.graph_noattributes
# write to gexf file
gexf_file = os.path.join('/', 'tmp', self.log_file.split('/')[-1] + '.gexf')
nx.write_gexf(graph_noattributes, gexf_file)
return gexf_file
def __get_community(self, cluster=None):
# prepare graph or subgraph
if cluster:
gexf_file = self.__prepare_graph(cluster)
else:
gexf_file = self.__prepare_graph()
# graph clustering based on Louvain community detection
louvain = Louvain(gexf_file)
clusters = louvain.get_cluster()
# stop-recursion case: if there is no more partition
if len(clusters.keys()) == 1:
self.clusters.append(clusters.values()[0])
print 'cluster with len=1', clusters.values()[0]
# recursion case: graph clustering
else:
for cluster_id, cluster in clusters.iteritems():
self.__get_community(cluster)
def get_abstraction(self):
self.__get_community()
# aa = AutoAbstraction('/home/hudan/Git/datasets/casper-rw/logs/messages')
# aa.get_abstraction()
|
Add new abstraction with recursionimport networkx as nx
import os
from pygraphc.preprocess.CreateGraphModel import CreateGraphModel
from pygraphc.clustering.Louvain import Louvain
class AutoAbstraction(object):
def __init__(self, log_file):
self.log_file = log_file
self.clusters = []
def __prepare_graph(self, cluster=None):
# get subgraph
if cluster:
subgraph = [int(node) for node in cluster]
graph_noattributes = self.graph_noattributes.subgraph(subgraph)
# create graph
else:
self.graph_model = CreateGraphModel(self.log_file)
self.graph = self.graph_model.create_graph()
self.graph_noattributes = self.graph_model.create_graph_noattributes()
self.graph_copy = self.graph.copy()
graph_noattributes = self.graph_noattributes
# write to gexf file
gexf_file = os.path.join('/', 'tmp', self.log_file.split('/')[-1] + '.gexf')
nx.write_gexf(graph_noattributes, gexf_file)
return gexf_file
def __get_community(self, cluster=None):
# prepare graph or subgraph
if cluster:
gexf_file = self.__prepare_graph(cluster)
else:
gexf_file = self.__prepare_graph()
# graph clustering based on Louvain community detection
louvain = Louvain(gexf_file)
clusters = louvain.get_cluster()
# stop-recursion case: if there is no more partition
if len(clusters.keys()) == 1:
self.clusters.append(clusters.values()[0])
print 'cluster with len=1', clusters.values()[0]
# recursion case: graph clustering
else:
for cluster_id, cluster in clusters.iteritems():
self.__get_community(cluster)
def get_abstraction(self):
self.__get_community()
# aa = AutoAbstraction('/home/hudan/Git/datasets/casper-rw/logs/messages')
# aa.get_abstraction()
|
<commit_before><commit_msg>Add new abstraction with recursion<commit_after>import networkx as nx
import os
from pygraphc.preprocess.CreateGraphModel import CreateGraphModel
from pygraphc.clustering.Louvain import Louvain
class AutoAbstraction(object):
def __init__(self, log_file):
self.log_file = log_file
self.clusters = []
def __prepare_graph(self, cluster=None):
# get subgraph
if cluster:
subgraph = [int(node) for node in cluster]
graph_noattributes = self.graph_noattributes.subgraph(subgraph)
# create graph
else:
self.graph_model = CreateGraphModel(self.log_file)
self.graph = self.graph_model.create_graph()
self.graph_noattributes = self.graph_model.create_graph_noattributes()
self.graph_copy = self.graph.copy()
graph_noattributes = self.graph_noattributes
# write to gexf file
gexf_file = os.path.join('/', 'tmp', self.log_file.split('/')[-1] + '.gexf')
nx.write_gexf(graph_noattributes, gexf_file)
return gexf_file
def __get_community(self, cluster=None):
# prepare graph or subgraph
if cluster:
gexf_file = self.__prepare_graph(cluster)
else:
gexf_file = self.__prepare_graph()
# graph clustering based on Louvain community detection
louvain = Louvain(gexf_file)
clusters = louvain.get_cluster()
# stop-recursion case: if there is no more partition
if len(clusters.keys()) == 1:
self.clusters.append(clusters.values()[0])
print 'cluster with len=1', clusters.values()[0]
# recursion case: graph clustering
else:
for cluster_id, cluster in clusters.iteritems():
self.__get_community(cluster)
def get_abstraction(self):
self.__get_community()
# aa = AutoAbstraction('/home/hudan/Git/datasets/casper-rw/logs/messages')
# aa.get_abstraction()
|
|
80fd2d73f7a206b5b517cb455da457fed9dc6403
|
migrations/versions/0180_another_letter_org.py
|
migrations/versions/0180_another_letter_org.py
|
"""empty message
Revision ID: 0180_another_letter_org
Revises: 0179_billing_primary_const
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0180_another_letter_org'
down_revision = '0179_billing_primary_const'
from alembic import op
NEW_ORGANISATIONS = [
('504', 'Rother District Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add Rother District Council logo for letters
|
Add Rother District Council logo for letters
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add Rother District Council logo for letters
|
"""empty message
Revision ID: 0180_another_letter_org
Revises: 0179_billing_primary_const
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0180_another_letter_org'
down_revision = '0179_billing_primary_const'
from alembic import op
NEW_ORGANISATIONS = [
('504', 'Rother District Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add Rother District Council logo for letters<commit_after>
|
"""empty message
Revision ID: 0180_another_letter_org
Revises: 0179_billing_primary_const
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0180_another_letter_org'
down_revision = '0179_billing_primary_const'
from alembic import op
NEW_ORGANISATIONS = [
('504', 'Rother District Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add Rother District Council logo for letters"""empty message
Revision ID: 0180_another_letter_org
Revises: 0179_billing_primary_const
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0180_another_letter_org'
down_revision = '0179_billing_primary_const'
from alembic import op
NEW_ORGANISATIONS = [
('504', 'Rother District Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add Rother District Council logo for letters<commit_after>"""empty message
Revision ID: 0180_another_letter_org
Revises: 0179_billing_primary_const
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0180_another_letter_org'
down_revision = '0179_billing_primary_const'
from alembic import op
NEW_ORGANISATIONS = [
('504', 'Rother District Council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
16e52502bf55075c58022fa35e1673a8a0d5f4bc
|
networkx/utils/tests/test_unionfind.py
|
networkx/utils/tests/test_unionfind.py
|
from nose.tools import *
import networkx as nx
def test_unionfind():
# Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
# Previously (in 2.x), the UnionFind class could handle mixed types.
# But in Python 3.x, this causes a TypeError such as:
# TypeError: unorderable types: str() > int()
#
# Now we just make sure that no exception is raised.
x = nx.utils.UnionFind()
x.union(0, 'a')
|
Add a test for UnionFind fix.
|
Add a test for UnionFind fix.
|
Python
|
bsd-3-clause
|
beni55/networkx,jni/networkx,aureooms/networkx,wasade/networkx,RMKD/networkx,dhimmel/networkx,RMKD/networkx,Sixshaman/networkx,sharifulgeo/networkx,blublud/networkx,jfinkels/networkx,harlowja/networkx,dmoliveira/networkx,kernc/networkx,jakevdp/networkx,ghdk/networkx,aureooms/networkx,blublud/networkx,jni/networkx,ghdk/networkx,michaelpacer/networkx,dhimmel/networkx,jcurbelo/networkx,ionanrozenfeld/networkx,blublud/networkx,harlowja/networkx,debsankha/networkx,yashu-seth/networkx,sharifulgeo/networkx,jni/networkx,cmtm/networkx,nathania/networkx,RMKD/networkx,NvanAdrichem/networkx,ltiao/networkx,bzero/networkx,debsankha/networkx,harlowja/networkx,chrisnatali/networkx,tmilicic/networkx,farhaanbukhsh/networkx,kernc/networkx,bzero/networkx,OrkoHunter/networkx,sharifulgeo/networkx,JamesClough/networkx,kernc/networkx,nathania/networkx,ghdk/networkx,jakevdp/networkx,SanketDG/networkx,dmoliveira/networkx,bzero/networkx,ionanrozenfeld/networkx,dmoliveira/networkx,farhaanbukhsh/networkx,chrisnatali/networkx,chrisnatali/networkx,ionanrozenfeld/networkx,nathania/networkx,goulu/networkx,aureooms/networkx,jakevdp/networkx,farhaanbukhsh/networkx,debsankha/networkx,andnovar/networkx,dhimmel/networkx
|
Add a test for UnionFind fix.
|
from nose.tools import *
import networkx as nx
def test_unionfind():
# Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
# Previously (in 2.x), the UnionFind class could handle mixed types.
# But in Python 3.x, this causes a TypeError such as:
# TypeError: unorderable types: str() > int()
#
# Now we just make sure that no exception is raised.
x = nx.utils.UnionFind()
x.union(0, 'a')
|
<commit_before><commit_msg>Add a test for UnionFind fix.<commit_after>
|
from nose.tools import *
import networkx as nx
def test_unionfind():
# Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
# Previously (in 2.x), the UnionFind class could handle mixed types.
# But in Python 3.x, this causes a TypeError such as:
# TypeError: unorderable types: str() > int()
#
# Now we just make sure that no exception is raised.
x = nx.utils.UnionFind()
x.union(0, 'a')
|
Add a test for UnionFind fix.from nose.tools import *
import networkx as nx
def test_unionfind():
# Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
# Previously (in 2.x), the UnionFind class could handle mixed types.
# But in Python 3.x, this causes a TypeError such as:
# TypeError: unorderable types: str() > int()
#
# Now we just make sure that no exception is raised.
x = nx.utils.UnionFind()
x.union(0, 'a')
|
<commit_before><commit_msg>Add a test for UnionFind fix.<commit_after>from nose.tools import *
import networkx as nx
def test_unionfind():
# Fixed by: 2cddd5958689bdecdcd89b91ac9aaf6ce0e4f6b8
# Previously (in 2.x), the UnionFind class could handle mixed types.
# But in Python 3.x, this causes a TypeError such as:
# TypeError: unorderable types: str() > int()
#
# Now we just make sure that no exception is raised.
x = nx.utils.UnionFind()
x.union(0, 'a')
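A minimal illustrative sketch, assuming nothing about the UnionFind internals beyond what the commit message states: in Python 3, ordering comparisons between str and int raise TypeError, which is the failure mode the fixed union-by-size code ran into when roots of mixed types were compared directly.
try:
    'a' > 0  # allowed in Python 2.x; raises TypeError in Python 3.x
except TypeError as err:
    # wording varies by version, e.g. "unorderable types: str() > int()"
    print(err)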
|
|
d168599b9167ede2098aa2fe82375aa95e5ab8b3
|
dockerpuller/app.py
|
dockerpuller/app.py
|
from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
|
from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
if hook:
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid request: missing hook"), 400
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
|
Check if hook parameter is passed to the url
|
Check if hook parameter is passed to the url
|
Python
|
mit
|
nicocoffo/docker-puller,nicocoffo/docker-puller,glowdigitalmedia/docker-puller,glowdigitalmedia/docker-puller
|
from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
Check if hook parameter is passed to the url
|
from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
if hook:
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid request: missing hook"), 400
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
|
<commit_before>from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
<commit_msg>Check if hook parameter is passed to the url<commit_after>
|
from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
if hook:
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid request: missing hook"), 400
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
|
from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
Check if hook parameter is passed to the urlfrom flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
if hook:
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid request: missing hook"), 400
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
|
<commit_before>from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
<commit_msg>Check if hook parameter is passed to the url<commit_after>from flask import Flask
from flask import request
from flask import jsonify
import json
import subprocess
app = Flask(__name__)
config = None
@app.route('/', methods=['POST'])
def hook_listen():
if request.method == 'POST':
token = request.args.get('token')
if token == config['token']:
hook = request.args.get('hook')
if hook:
hook_value = config['hooks'].get(hook)
if hook_value:
#payload = request.get_json()
try:
subprocess.call(hook_value)
return jsonify(success=True), 200
except OSError as e:
return jsonify(success=False, error=str(e)), 400
else:
return jsonify(success=False, error="Hook not found"), 404
else:
return jsonify(success=False, error="Invalid request: missing hook"), 400
else:
return jsonify(success=False, error="Invalid token"), 400
def load_config():
with open('config.json') as config_file:
return json.load(config_file)
if __name__ == '__main__':
config = load_config()
app.run(host=config['host'], port=config['port'])
|
0bc0691c7714b7b5885ce2a9c05eb7eb35738c74
|
tests/test_decorators.py
|
tests/test_decorators.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from webhooks.exceptions import SenderNotCallable
from webhooks import webhook, unhashed_hook
def test_callable_sender():
@webhook(event="example200", sender_callable=123)
def basic(creator="pydanny"):
return {"husband": "Daniel Roy Greenfeld", "wife": "Audrey Roy Greenfeld"}
with pytest.raises(SenderNotCallable):
basic(creator='pydanny')
|
Add test for sender_callable check
|
Add test for sender_callable check
|
Python
|
bsd-3-clause
|
pydanny/webhooks
|
Add test for sender_callable check
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from webhooks.exceptions import SenderNotCallable
from webhooks import webhook, unhashed_hook
def test_callable_sender():
@webhook(event="example200", sender_callable=123)
def basic(creator="pydanny"):
return {"husband": "Daniel Roy Greenfeld", "wife": "Audrey Roy Greenfeld"}
with pytest.raises(SenderNotCallable):
basic(creator='pydanny')
|
<commit_before><commit_msg>Add test for sender_callable check<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from webhooks.exceptions import SenderNotCallable
from webhooks import webhook, unhashed_hook
def test_callable_sender():
@webhook(event="example200", sender_callable=123)
def basic(creator="pydanny"):
return {"husband": "Daniel Roy Greenfeld", "wife": "Audrey Roy Greenfeld"}
with pytest.raises(SenderNotCallable):
basic(creator='pydanny')
|
Add test for sender_callable check#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from webhooks.exceptions import SenderNotCallable
from webhooks import webhook, unhashed_hook
def test_callable_sender():
@webhook(event="example200", sender_callable=123)
def basic(creator="pydanny"):
return {"husband": "Daniel Roy Greenfeld", "wife": "Audrey Roy Greenfeld"}
with pytest.raises(SenderNotCallable):
basic(creator='pydanny')
|
<commit_before><commit_msg>Add test for sender_callable check<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from webhooks.exceptions import SenderNotCallable
from webhooks import webhook, unhashed_hook
def test_callable_sender():
@webhook(event="example200", sender_callable=123)
def basic(creator="pydanny"):
return {"husband": "Daniel Roy Greenfeld", "wife": "Audrey Roy Greenfeld"}
with pytest.raises(SenderNotCallable):
basic(creator='pydanny')
|
|
8c71a177c16762ab50dafe2528d24fab4ccf0925
|
py/minimum-moves-to-equal-array-elements-ii.py
|
py/minimum-moves-to-equal-array-elements-ii.py
|
class Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
median = nums[len(nums) / 2]
return sum(abs(x - median) for x in nums)
|
Add py solution for 462. Minimum Moves to Equal Array Elements II
|
Add py solution for 462. Minimum Moves to Equal Array Elements II
462. Minimum Moves to Equal Array Elements II: https://leetcode.com/problems/minimum-moves-to-equal-array-elements-ii/
Could be optimized to O(n) by q-select
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 462. Minimum Moves to Equal Array Elements II
462. Minimum Moves to Equal Array Elements II: https://leetcode.com/problems/minimum-moves-to-equal-array-elements-ii/
Could be optimized to O(n) by q-select
|
class Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
median = nums[len(nums) / 2]
return sum(abs(x - median) for x in nums)
|
<commit_before><commit_msg>Add py solution for 462. Minimum Moves to Equal Array Elements II
462. Minimum Moves to Equal Array Elements II: https://leetcode.com/problems/minimum-moves-to-equal-array-elements-ii/
Could be optimized to O(n) by q-select<commit_after>
|
class Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
median = nums[len(nums) / 2]
return sum(abs(x - median) for x in nums)
|
Add py solution for 462. Minimum Moves to Equal Array Elements II
462. Minimum Moves to Equal Array Elements II: https://leetcode.com/problems/minimum-moves-to-equal-array-elements-ii/
Could be optimized to O(n) by q-selectclass Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
median = nums[len(nums) / 2]
return sum(abs(x - median) for x in nums)
|
<commit_before><commit_msg>Add py solution for 462. Minimum Moves to Equal Array Elements II
462. Minimum Moves to Equal Array Elements II: https://leetcode.com/problems/minimum-moves-to-equal-array-elements-ii/
Could be optimized to O(n) by q-select<commit_after>class Solution(object):
def minMoves2(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums.sort()
median = nums[len(nums) / 2]
return sum(abs(x - median) for x in nums)
|
|
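The commit message above notes that this solution could be made O(n) by using quickselect instead of sorting to find the median. A minimal sketch of that idea with a randomized pivot follows; the helper names (kth_smallest, min_moves_quickselect) are illustrative and are not part of the original commit.

import random

def kth_smallest(nums, k):
    # Iterative quickselect: returns the k-th smallest (0-based) element
    # in expected O(n) time using a randomized pivot.
    nums = list(nums)  # work on a copy so the caller's list is untouched
    lo, hi = 0, len(nums) - 1
    while True:
        pivot = nums[random.randint(lo, hi)]
        i, j = lo, hi
        while i <= j:
            while nums[i] < pivot:
                i += 1
            while nums[j] > pivot:
                j -= 1
            if i <= j:
                nums[i], nums[j] = nums[j], nums[i]
                i += 1
                j -= 1
        if k <= j:
            hi = j        # k-th smallest is in the left partition
        elif k >= i:
            lo = i        # k-th smallest is in the right partition
        else:
            return nums[k]  # pivot region: element already in its final spot

def min_moves_quickselect(nums):
    # Same answer as the sorted version: sum of distances to the median.
    median = kth_smallest(nums, len(nums) // 2)
    return sum(abs(x - median) for x in nums)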
b9245a8acf0bed7e19f709490c4ba3788028da93
|
server/ec2spotmanager/migrations/0003_auto_20150504_1440.py
|
server/ec2spotmanager/migrations/0003_auto_20150504_1440.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ec2spotmanager', '0002_instancestatusentry_poolstatusentry'),
]
operations = [
migrations.RemoveField(
model_name='poolstatusentry',
name='instance',
),
migrations.AddField(
model_name='poolstatusentry',
name='pool',
field=models.ForeignKey(default=0, to='ec2spotmanager.InstancePool'),
preserve_default=False,
),
]
|
Fix error in PoolStatusEntry model
|
Fix error in PoolStatusEntry model
|
Python
|
mpl-2.0
|
MozillaSecurity/FuzzManager,lazyparser/FuzzManager,MozillaSecurity/FuzzManager,sigma-random/FuzzManager,cihatix/FuzzManager,lazyparser/FuzzManager,lazyparser/FuzzManager,sigma-random/FuzzManager,MozillaSecurity/FuzzManager,lazyparser/FuzzManager,cihatix/FuzzManager,sigma-random/FuzzManager,cihatix/FuzzManager,cihatix/FuzzManager,sigma-random/FuzzManager,MozillaSecurity/FuzzManager
|
Fix error in PoolStatusEntry model
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ec2spotmanager', '0002_instancestatusentry_poolstatusentry'),
]
operations = [
migrations.RemoveField(
model_name='poolstatusentry',
name='instance',
),
migrations.AddField(
model_name='poolstatusentry',
name='pool',
field=models.ForeignKey(default=0, to='ec2spotmanager.InstancePool'),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Fix error in PoolStatusEntry model<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ec2spotmanager', '0002_instancestatusentry_poolstatusentry'),
]
operations = [
migrations.RemoveField(
model_name='poolstatusentry',
name='instance',
),
migrations.AddField(
model_name='poolstatusentry',
name='pool',
field=models.ForeignKey(default=0, to='ec2spotmanager.InstancePool'),
preserve_default=False,
),
]
|
Fix error in PoolStatusEntry model# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ec2spotmanager', '0002_instancestatusentry_poolstatusentry'),
]
operations = [
migrations.RemoveField(
model_name='poolstatusentry',
name='instance',
),
migrations.AddField(
model_name='poolstatusentry',
name='pool',
field=models.ForeignKey(default=0, to='ec2spotmanager.InstancePool'),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Fix error in PoolStatusEntry model<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ec2spotmanager', '0002_instancestatusentry_poolstatusentry'),
]
operations = [
migrations.RemoveField(
model_name='poolstatusentry',
name='instance',
),
migrations.AddField(
model_name='poolstatusentry',
name='pool',
field=models.ForeignKey(default=0, to='ec2spotmanager.InstancePool'),
preserve_default=False,
),
]
|
|
98bd10cdf2c380b17c16a47a8f962dc384b3a18d
|
python/py-set-discard-remove-pop.py
|
python/py-set-discard-remove-pop.py
|
num_elements = int(input())
s = set(map(int, input().split()))
num_operations = int(input())
for _ in range(num_operations):
operation = input().split(" ")
if(operation[0] == "pop"):
s.pop()
else:
op, val = operation
s.discard(int(val))
print(sum(s))
|
Solve py set discard remove pop
|
Solve py set discard remove pop
|
Python
|
mit
|
rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank,rootulp/hackerrank
|
Solve py set discard remove pop
|
num_elements = int(input())
s = set(map(int, input().split()))
num_operations = int(input())
for _ in range(num_operations):
operation = input().split(" ")
if(operation[0] == "pop"):
s.pop()
else:
op, val = operation
s.discard(int(val))
print(sum(s))
|
<commit_before><commit_msg>Solve py set discard remove pop<commit_after>
|
num_elements = int(input())
s = set(map(int, input().split()))
num_operations = int(input())
for _ in range(num_operations):
operation = input().split(" ")
if(operation[0] == "pop"):
s.pop()
else:
op, val = operation
s.discard(int(val))
print(sum(s))
|
Solve py set discard remove popnum_elements = int(input())
s = set(map(int, input().split()))
num_operations = int(input())
for _ in range(num_operations):
operation = input().split(" ")
if(operation[0] == "pop"):
s.pop()
else:
op, val = operation
s.discard(int(val))
print(sum(s))
|
<commit_before><commit_msg>Solve py set discard remove pop<commit_after>num_elements = int(input())
s = set(map(int, input().split()))
num_operations = int(input())
for _ in range(num_operations):
operation = input().split(" ")
if(operation[0] == "pop"):
s.pop()
else:
op, val = operation
s.discard(int(val))
print(sum(s))
|
|
8e94da2cf788115a1562db253c96b1932b495ef3
|
make_chord.py
|
make_chord.py
|
from collections import OrderedDict
from itertools import cycle
import sys
# build the pitch table
note_names = ['A', 'A#/Bb', 'B', 'C', 'C#/Db', 'D', 'D#/Eb', 'E', 'F', 'F#/Gb', 'G', 'G#/Ab']
note_cycle = cycle(note_names)
piano = []
onumber = 0
for i in range(1, 89):
note = note_cycle.next()
if note == 'C':
onumber += 1
piano.append({
'number': i,
'name': [n + str(onumber) for n in note.split('/')],
'freq': (2 ** ((i - 49.0) / 12)) * 440
})
# invert it
freqs = {}
for key in piano:
for name in key['name']:
freqs[name] = key['freq']
# look at arguments for pitch names and build samples
from wavebender import *
flist = []
requested = sys.argv[1:]
amp = 0.8 / len(requested)
for arg in requested:
flist.append(sine_wave(freqs[arg], amplitude=amp))
channels = (tuple(flist),)
nframes = 44100 * 10
samples = compute_samples(channels, nframes)
write_wavefile(sys.stdout, samples, nchannels=1, nframes=nframes)
|
Add script for generating chords, used to make some of the sounds.
|
Add script for generating chords, used to make some of the sounds.
|
Python
|
bsd-3-clause
|
apendleton/valve-installation,apendleton/valve-installation
|
Add script for generating chords, used to make some of the sounds.
|
from collections import OrderedDict
from itertools import cycle
import sys
# build the pitch table
note_names = ['A', 'A#/Bb', 'B', 'C', 'C#/Db', 'D', 'D#/Eb', 'E', 'F', 'F#/Gb', 'G', 'G#/Ab']
note_cycle = cycle(note_names)
piano = []
onumber = 0
for i in range(1, 89):
note = note_cycle.next()
if note == 'C':
onumber += 1
piano.append({
'number': i,
'name': [n + str(onumber) for n in note.split('/')],
'freq': (2 ** ((i - 49.0) / 12)) * 440
})
# invert it
freqs = {}
for key in piano:
for name in key['name']:
freqs[name] = key['freq']
# look at arguments for pitch names and build samples
from wavebender import *
flist = []
requested = sys.argv[1:]
amp = 0.8 / len(requested)
for arg in requested:
flist.append(sine_wave(freqs[arg], amplitude=amp))
channels = (tuple(flist),)
nframes = 44100 * 10
samples = compute_samples(channels, nframes)
write_wavefile(sys.stdout, samples, nchannels=1, nframes=nframes)
|
<commit_before><commit_msg>Add script for generating chords, used to make some of the sounds.<commit_after>
|
from collections import OrderedDict
from itertools import cycle
import sys
# build the pitch table
note_names = ['A', 'A#/Bb', 'B', 'C', 'C#/Db', 'D', 'D#/Eb', 'E', 'F', 'F#/Gb', 'G', 'G#/Ab']
note_cycle = cycle(note_names)
piano = []
onumber = 0
for i in range(1, 89):
note = note_cycle.next()
if note == 'C':
onumber += 1
piano.append({
'number': i,
'name': [n + str(onumber) for n in note.split('/')],
'freq': (2 ** ((i - 49.0) / 12)) * 440
})
# invert it
freqs = {}
for key in piano:
for name in key['name']:
freqs[name] = key['freq']
# look at arguments for pitch names and build samples
from wavebender import *
flist = []
requested = sys.argv[1:]
amp = 0.8 / len(requested)
for arg in requested:
flist.append(sine_wave(freqs[arg], amplitude=amp))
channels = (tuple(flist),)
nframes = 44100 * 10
samples = compute_samples(channels, nframes)
write_wavefile(sys.stdout, samples, nchannels=1, nframes=nframes)
|
Add script for generating chords, used to make some of the sounds.from collections import OrderedDict
from itertools import cycle
import sys
# build the pitch table
note_names = ['A', 'A#/Bb', 'B', 'C', 'C#/Db', 'D', 'D#/Eb', 'E', 'F', 'F#/Gb', 'G', 'G#/Ab']
note_cycle = cycle(note_names)
piano = []
onumber = 0
for i in range(1, 89):
note = note_cycle.next()
if note == 'C':
onumber += 1
piano.append({
'number': i,
'name': [n + str(onumber) for n in note.split('/')],
'freq': (2 ** ((i - 49.0) / 12)) * 440
})
# invert it
freqs = {}
for key in piano:
for name in key['name']:
freqs[name] = key['freq']
# look at arguments for pitch names and build samples
from wavebender import *
flist = []
requested = sys.argv[1:]
amp = 0.8 / len(requested)
for arg in requested:
flist.append(sine_wave(freqs[arg], amplitude=amp))
channels = (tuple(flist),)
nframes = 44100 * 10
samples = compute_samples(channels, nframes)
write_wavefile(sys.stdout, samples, nchannels=1, nframes=nframes)
|
<commit_before><commit_msg>Add script for generating chords, used to make some of the sounds.<commit_after>from collections import OrderedDict
from itertools import cycle
import sys
# build the pitch table
note_names = ['A', 'A#/Bb', 'B', 'C', 'C#/Db', 'D', 'D#/Eb', 'E', 'F', 'F#/Gb', 'G', 'G#/Ab']
note_cycle = cycle(note_names)
piano = []
onumber = 0
for i in range(1, 89):
note = note_cycle.next()
if note == 'C':
onumber += 1
piano.append({
'number': i,
'name': [n + str(onumber) for n in note.split('/')],
'freq': (2 ** ((i - 49.0) / 12)) * 440
})
# invert it
freqs = {}
for key in piano:
for name in key['name']:
freqs[name] = key['freq']
# look at arguments for pitch names and build samples
from wavebender import *
flist = []
requested = sys.argv[1:]
amp = 0.8 / len(requested)
for arg in requested:
flist.append(sine_wave(freqs[arg], amplitude=amp))
channels = (tuple(flist),)
nframes = 44100 * 10
samples = compute_samples(channels, nframes)
write_wavefile(sys.stdout, samples, nchannels=1, nframes=nframes)
|
|
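The chord generator above builds its pitch table from the equal-temperament relation freq(n) = 440 * 2 ** ((n - 49) / 12), where n is the piano key number and key 49 is A4 at 440 Hz. A small sanity check of that formula, written for Python 3 (the original script is Python 2, e.g. note_cycle.next()); the expected values are standard reference pitches, and key_frequency is an illustrative helper, not part of the original commit.

def key_frequency(n):
    # Equal temperament: each semitone scales the frequency by 2 ** (1 / 12).
    return 440.0 * 2 ** ((n - 49) / 12)

assert abs(key_frequency(49) - 440.0) < 1e-9    # A4, the tuning reference
assert abs(key_frequency(40) - 261.63) < 0.01   # C4, middle C
assert abs(key_frequency(88) - 4186.01) < 0.01  # C8, top key of the piano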
c84e3394ed4829ff9a66167864a11a4ef6a2b62c
|
scripts/get_saml_cert_expiration.py
|
scripts/get_saml_cert_expiration.py
|
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def run(*args):
for client in Client.objects.all():
with LocalTenant(client):
try:
cert_string = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----'.format(
properties.TOKEN_AUTH['sp']['x509cert']
)
cert = x509.load_pem_x509_certificate(bytes(cert_string), default_backend())
print client.name, cert.not_valid_after
except (AttributeError, KeyError):
pass
except Exception, e:
print e
|
Add script to get certificate expiration date
|
Add script to get certificate expiration date
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
Add script to get certificate expiration date
|
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def run(*args):
for client in Client.objects.all():
with LocalTenant(client):
try:
cert_string = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----'.format(
properties.TOKEN_AUTH['sp']['x509cert']
)
cert = x509.load_pem_x509_certificate(bytes(cert_string), default_backend())
print client.name, cert.not_valid_after
except (AttributeError, KeyError):
pass
except Exception, e:
print e
|
<commit_before><commit_msg>Add script to get certificate expiration date<commit_after>
|
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def run(*args):
for client in Client.objects.all():
with LocalTenant(client):
try:
cert_string = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----'.format(
properties.TOKEN_AUTH['sp']['x509cert']
)
cert = x509.load_pem_x509_certificate(bytes(cert_string), default_backend())
print client.name, cert.not_valid_after
except (AttributeError, KeyError):
pass
except Exception, e:
print e
|
Add script to get certificate expiration datefrom cryptography import x509
from cryptography.hazmat.backends import default_backend
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def run(*args):
for client in Client.objects.all():
with LocalTenant(client):
try:
cert_string = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----'.format(
properties.TOKEN_AUTH['sp']['x509cert']
)
cert = x509.load_pem_x509_certificate(bytes(cert_string), default_backend())
print client.name, cert.not_valid_after
except (AttributeError, KeyError):
pass
except Exception, e:
print e
|
<commit_before><commit_msg>Add script to get certificate expiration date<commit_after>from cryptography import x509
from cryptography.hazmat.backends import default_backend
from bluebottle.clients import properties
from bluebottle.clients.models import Client
from bluebottle.clients.utils import LocalTenant
def run(*args):
for client in Client.objects.all():
with LocalTenant(client):
try:
cert_string = '-----BEGIN CERTIFICATE-----\n{}\n-----END CERTIFICATE-----'.format(
properties.TOKEN_AUTH['sp']['x509cert']
)
cert = x509.load_pem_x509_certificate(bytes(cert_string), default_backend())
print client.name, cert.not_valid_after
except (AttributeError, KeyError):
pass
except Exception, e:
print e
|
|
3f685e3873c18e1eb28b7a4121c552bbb697e0a4
|
scripts/generator.py
|
scripts/generator.py
|
#!/usr/bin/python3
from random import randint
output = ""
filename = "data"
class Generator:
def gen_date(self):
return str(randint(2013, 2015)) + "-" \
+ str(randint(1, 12)) + "-" \
+ str(randint(1, 31))
def gen_price(self):
return str(10 * randint(10, 100))
def gen_author(self):
users = [
"Intey",
"Andrey",
"Tatiana",
"Nigger",
]
return users[randint(1, len(users)-1)]
def gen_parts(self):
return str(randint(0, 15))
def gen_row(self, s):
return ":".join([s,
self.gen_price(),
self.gen_author(),
self.gen_date(),
self.gen_parts()]) + '\n'
def prepare_file(file_name):
gena = Generator()
with open(file_name, 'r') as f:
file_lines = []
for x in f.readlines():
new_line = gena.gen_row(x.rstrip('\n'))
# print(new_line)
file_lines.append(new_line)
# file_lines.sort(key=lambda line: int(line.split(":")[-1]))
with open(file_name, 'w') as f:
f.writelines(file_lines)
if __name__ == "__main__":
prepare_file(filename)
|
Add script to generate events.
|
Add script to generate events.
|
Python
|
mit
|
Intey/OhMyBank,Intey/OhMyBank,Intey/OhMyBank,Intey/OhMyBank
|
Add script to generate events.
|
#!/usr/bin/python3
from random import randint
output = ""
filename = "data"
class Generator:
def gen_date(self):
return str(randint(2013, 2015)) + "-" \
+ str(randint(1, 12)) + "-" \
+ str(randint(1, 31))
def gen_price(self):
return str(10 * randint(10, 100))
def gen_author(self):
users = [
"Intey",
"Andrey",
"Tatiana",
"Nigger",
]
return users[randint(1, len(users)-1)]
def gen_parts(self):
return str(randint(0, 15))
def gen_row(self, s):
return ":".join([s,
self.gen_price(),
self.gen_author(),
self.gen_date(),
self.gen_parts()]) + '\n'
def prepare_file(file_name):
gena = Generator()
with open(file_name, 'r') as f:
file_lines = []
for x in f.readlines():
new_line = gena.gen_row(x.rstrip('\n'))
# print(new_line)
file_lines.append(new_line)
# file_lines.sort(key=lambda line: int(line.split(":")[-1]))
with open(file_name, 'w') as f:
f.writelines(file_lines)
if __name__ == "__main__":
prepare_file(filename)
|
<commit_before><commit_msg>Add script to generate events.<commit_after>
|
#!/usr/bin/python3
from random import randint
output = ""
filename = "data"
class Generator:
def gen_date(self):
return str(randint(2013, 2015)) + "-" \
+ str(randint(1, 12)) + "-" \
+ str(randint(1, 31))
def gen_price(self):
return str(10 * randint(10, 100))
def gen_author(self):
users = [
"Intey",
"Andrey",
"Tatiana",
"Nigger",
]
return users[randint(1, len(users)-1)]
def gen_parts(self):
return str(randint(0, 15))
def gen_row(self, s):
return ":".join([s,
self.gen_price(),
self.gen_author(),
self.gen_date(),
self.gen_parts()]) + '\n'
def prepare_file(file_name):
gena = Generator()
with open(file_name, 'r') as f:
file_lines = []
for x in f.readlines():
new_line = gena.gen_row(x.rstrip('\n'))
# print(new_line)
file_lines.append(new_line)
# file_lines.sort(key=lambda line: int(line.split(":")[-1]))
with open(file_name, 'w') as f:
f.writelines(file_lines)
if __name__ == "__main__":
prepare_file(filename)
|
Add script to generate events.#!/usr/bin/python3
from random import randint
output = ""
filename = "data"
class Generator:
def gen_date(self):
return str(randint(2013, 2015)) + "-" \
+ str(randint(1, 12)) + "-" \
+ str(randint(1, 31))
def gen_price(self):
return str(10 * randint(10, 100))
def gen_author(self):
users = [
"Intey",
"Andrey",
"Tatiana",
"Nigger",
]
return users[randint(1, len(users)-1)]
def gen_parts(self):
return str(randint(0, 15))
def gen_row(self, s):
return ":".join([s,
self.gen_price(),
self.gen_author(),
self.gen_date(),
self.gen_parts()]) + '\n'
def prepare_file(file_name):
gena = Generator()
with open(file_name, 'r') as f:
file_lines = []
for x in f.readlines():
new_line = gena.gen_row(x.rstrip('\n'))
# print(new_line)
file_lines.append(new_line)
# file_lines.sort(key=lambda line: int(line.split(":")[-1]))
with open(file_name, 'w') as f:
f.writelines(file_lines)
if __name__ == "__main__":
prepare_file(filename)
|
<commit_before><commit_msg>Add script to generate events.<commit_after>#!/usr/bin/python3
from random import randint
output = ""
filename = "data"
class Generator:
def gen_date(self):
return str(randint(2013, 2015)) + "-" \
+ str(randint(1, 12)) + "-" \
+ str(randint(1, 31))
def gen_price(self):
return str(10 * randint(10, 100))
def gen_author(self):
users = [
"Intey",
"Andrey",
"Tatiana",
"Nigger",
]
return users[randint(1, len(users)-1)]
def gen_parts(self):
return str(randint(0, 15))
def gen_row(self, s):
return ":".join([s,
self.gen_price(),
self.gen_author(),
self.gen_date(),
self.gen_parts()]) + '\n'
def prepare_file(file_name):
gena = Generator()
with open(file_name, 'r') as f:
file_lines = []
for x in f.readlines():
new_line = gena.gen_row(x.rstrip('\n'))
# print(new_line)
file_lines.append(new_line)
# file_lines.sort(key=lambda line: int(line.split(":")[-1]))
with open(file_name, 'w') as f:
f.writelines(file_lines)
if __name__ == "__main__":
prepare_file(filename)
|
|
f5284cc7da9166a43e3cfbd901205f4446295f7a
|
inspectors/cpsc.py
|
inspectors/cpsc.py
|
#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.cpsc.gov/en/about-cpsc/inspector-general/
# Oldest report: 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Fix the links for BLACKLIST_REPORT_URLS
REPORTS_URL = "https://www.cpsc.gov/en/about-cpsc/inspector-general/"
BLACKLIST_REPORT_URLS = [
'https://www.cpsc.gov/Media/Documents/About/OIG/Audits/CPSC-Fiscal-Year-2009-Financial-Statements-released-November-13-2009/',
]
def run(options):
year_range = inspector.year_range(options)
doc = BeautifulSoup(utils.download(REPORTS_URL))
results = doc.select("ul.summary-list li")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
def report_from(result, year_range):
link = result.find("a")
report_url = urljoin(REPORTS_URL, link.get('href'))
if report_url in BLACKLIST_REPORT_URLS:
return
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
if not report_id:
import pdb;pdb.set_trace()
title = link.text
published_on_text = result.select("span.date")[0].text
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'cpsc',
'inspector_url': 'https://www.cpsc.gov/en/about-cpsc/inspector-general/',
'agency': 'cpsc',
'agency_name': 'Consumer Product Safety Commission',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
|
Add Consumer Product Safety Commission.
|
Add Consumer Product Safety Commission.
|
Python
|
cc0-1.0
|
divergentdave/inspectors-general,lukerosiak/inspectors-general
|
Add Consumer Product Safety Commission.
|
#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.cpsc.gov/en/about-cpsc/inspector-general/
# Oldest report: 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Fix the links for BLACKLIST_REPORT_URLS
REPORTS_URL = "https://www.cpsc.gov/en/about-cpsc/inspector-general/"
BLACKLIST_REPORT_URLS = [
'https://www.cpsc.gov/Media/Documents/About/OIG/Audits/CPSC-Fiscal-Year-2009-Financial-Statements-released-November-13-2009/',
]
def run(options):
year_range = inspector.year_range(options)
doc = BeautifulSoup(utils.download(REPORTS_URL))
results = doc.select("ul.summary-list li")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
def report_from(result, year_range):
link = result.find("a")
report_url = urljoin(REPORTS_URL, link.get('href'))
if report_url in BLACKLIST_REPORT_URLS:
return
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
if not report_id:
import pdb;pdb.set_trace()
title = link.text
published_on_text = result.select("span.date")[0].text
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'cpsc',
'inspector_url': 'https://www.cpsc.gov/en/about-cpsc/inspector-general/',
'agency': 'cpsc',
'agency_name': 'Consumer Product Safety Commission',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
|
<commit_before><commit_msg>Add Consumer Product Safety Commission.<commit_after>
|
#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.cpsc.gov/en/about-cpsc/inspector-general/
# Oldest report: 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Fix the links for BLACKLIST_REPORT_URLS
REPORTS_URL = "https://www.cpsc.gov/en/about-cpsc/inspector-general/"
BLACKLIST_REPORT_URLS = [
'https://www.cpsc.gov/Media/Documents/About/OIG/Audits/CPSC-Fiscal-Year-2009-Financial-Statements-released-November-13-2009/',
]
def run(options):
year_range = inspector.year_range(options)
doc = BeautifulSoup(utils.download(REPORTS_URL))
results = doc.select("ul.summary-list li")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
def report_from(result, year_range):
link = result.find("a")
report_url = urljoin(REPORTS_URL, link.get('href'))
if report_url in BLACKLIST_REPORT_URLS:
return
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
if not report_id:
import pdb;pdb.set_trace()
title = link.text
published_on_text = result.select("span.date")[0].text
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'cpsc',
'inspector_url': 'https://www.cpsc.gov/en/about-cpsc/inspector-general/',
'agency': 'cpsc',
'agency_name': 'Consumer Product Safety Commission',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
|
Add Consumer Product Safety Commission.#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.cpsc.gov/en/about-cpsc/inspector-general/
# Oldest report: 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Fix the links for BLACKLIST_REPORT_URLS
REPORTS_URL = "https://www.cpsc.gov/en/about-cpsc/inspector-general/"
BLACKLIST_REPORT_URLS = [
'https://www.cpsc.gov/Media/Documents/About/OIG/Audits/CPSC-Fiscal-Year-2009-Financial-Statements-released-November-13-2009/',
]
def run(options):
year_range = inspector.year_range(options)
doc = BeautifulSoup(utils.download(REPORTS_URL))
results = doc.select("ul.summary-list li")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
def report_from(result, year_range):
link = result.find("a")
report_url = urljoin(REPORTS_URL, link.get('href'))
if report_url in BLACKLIST_REPORT_URLS:
return
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
if not report_id:
import pdb;pdb.set_trace()
title = link.text
published_on_text = result.select("span.date")[0].text
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'cpsc',
'inspector_url': 'https://www.cpsc.gov/en/about-cpsc/inspector-general/',
'agency': 'cpsc',
'agency_name': 'Consumer Product Safety Commission',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
|
<commit_before><commit_msg>Add Consumer Product Safety Commission.<commit_after>#!/usr/bin/env python
import datetime
import logging
import os
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from utils import utils, inspector
# https://www.cpsc.gov/en/about-cpsc/inspector-general/
# Oldest report: 2003
# options:
# standard since/year options for a year range to fetch from.
#
# Notes for IG's web team:
# - Fix the links for BLACKLIST_REPORT_URLS
REPORTS_URL = "https://www.cpsc.gov/en/about-cpsc/inspector-general/"
BLACKLIST_REPORT_URLS = [
'https://www.cpsc.gov/Media/Documents/About/OIG/Audits/CPSC-Fiscal-Year-2009-Financial-Statements-released-November-13-2009/',
]
def run(options):
year_range = inspector.year_range(options)
doc = BeautifulSoup(utils.download(REPORTS_URL))
results = doc.select("ul.summary-list li")
for result in results:
report = report_from(result, year_range)
if report:
inspector.save_report(report)
def report_from(result, year_range):
link = result.find("a")
report_url = urljoin(REPORTS_URL, link.get('href'))
if report_url in BLACKLIST_REPORT_URLS:
return
report_filename = report_url.split("/")[-1]
report_id, _ = os.path.splitext(report_filename)
if not report_id:
import pdb;pdb.set_trace()
title = link.text
published_on_text = result.select("span.date")[0].text
published_on = datetime.datetime.strptime(published_on_text, '%B %d, %Y')
if published_on.year not in year_range:
logging.debug("[%s] Skipping, not in requested range." % report_url)
return
report = {
'inspector': 'cpsc',
'inspector_url': 'https://www.cpsc.gov/en/about-cpsc/inspector-general/',
'agency': 'cpsc',
'agency_name': 'Consumer Product Safety Commission',
'report_id': report_id,
'url': report_url,
'title': title,
'published_on': datetime.datetime.strftime(published_on, "%Y-%m-%d"),
}
return report
utils.run(run) if (__name__ == "__main__") else None
|
|
b1a851d6f5dd47790459564a55405627d9b7a9e4
|
scripts/webscraping/ist_news_titles.py
|
scripts/webscraping/ist_news_titles.py
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys, io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,'cp437','backslashreplace')
html = urlopen("http://tecnico.ulisboa.pt/pt/noticias/")
bsObj = BeautifulSoup(html, "html.parser")
for news_wrapper in bsObj.find("div", {"id":"content_wrapper"}).findAll("div", {"class":"news_wrapper"}):
news_grid = news_wrapper.find("div", {"class":"grid_9 omega"})
print("Date: " + news_grid.p.get_text())
print("Title: " + news_grid.h3.a.get_text())
|
Add news date and title scraper from ist's news page.
|
Add news date and title scraper from ist's news page.
|
Python
|
mit
|
iluxonchik/python-general-repo
|
Add news date and title scraper from ist's news page.
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys, io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,'cp437','backslashreplace')
html = urlopen("http://tecnico.ulisboa.pt/pt/noticias/")
bsObj = BeautifulSoup(html, "html.parser")
for news_wrapper in bsObj.find("div", {"id":"content_wrapper"}).findAll("div", {"class":"news_wrapper"}):
news_grid = news_wrapper.find("div", {"class":"grid_9 omega"})
print("Date: " + news_grid.p.get_text())
print("Title: " + news_grid.h3.a.get_text())
|
<commit_before><commit_msg>Add news date and title scraper from ist's news page.<commit_after>
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys, io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,'cp437','backslashreplace')
html = urlopen("http://tecnico.ulisboa.pt/pt/noticias/")
bsObj = BeautifulSoup(html, "html.parser")
for news_wrapper in bsObj.find("div", {"id":"content_wrapper"}).findAll("div", {"class":"news_wrapper"}):
news_grid = news_wrapper.find("div", {"class":"grid_9 omega"})
print("Date: " + news_grid.p.get_text())
print("Title: " + news_grid.h3.a.get_text())
|
Add news date and title scraper from ist's news page.from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys, io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,'cp437','backslashreplace')
html = urlopen("http://tecnico.ulisboa.pt/pt/noticias/")
bsObj = BeautifulSoup(html, "html.parser")
for news_wrapper in bsObj.find("div", {"id":"content_wrapper"}).findAll("div", {"class":"news_wrapper"}):
news_grid = news_wrapper.find("div", {"class":"grid_9 omega"})
print("Date: " + news_grid.p.get_text())
print("Title: " + news_grid.h3.a.get_text())
|
<commit_before><commit_msg>Add news date and title scraper from ist's news page.<commit_after>from urllib.request import urlopen
from bs4 import BeautifulSoup
import sys, io
sys.stdout = io.TextIOWrapper(sys.stdout.buffer,'cp437','backslashreplace')
html = urlopen("http://tecnico.ulisboa.pt/pt/noticias/")
bsObj = BeautifulSoup(html, "html.parser")
for news_wrapper in bsObj.find("div", {"id":"content_wrapper"}).findAll("div", {"class":"news_wrapper"}):
news_grid = news_wrapper.find("div", {"class":"grid_9 omega"})
print("Date: " + news_grid.p.get_text())
print("Title: " + news_grid.h3.a.get_text())
|
|
6f2a9cbf9e571855074e898d22480d61277a3eda
|
django_lightweight_queue/backends/db.py
|
django_lightweight_queue/backends/db.py
|
import time
import datetime
from django.db import connection, models, ProgrammingError
from ..job import Job
class DatabaseBackend(object):
TABLE = 'django_lightweight_queue'
FIELDS = (
models.AutoField(name='id', primary_key=True),
models.CharField(name='queue', max_length=255),
models.TextField(name='data'),
models.DateTimeField(name='created'),
)
def __init__(self):
qn = connection.ops.quote_name
sql = []
for x in self.FIELDS:
sql.append(' '.join((
qn(x.name),
x.db_type(connection=connection),
'PRIMARY KEY' if x.primary_key else '',
)))
cursor = connection.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS %s (\n%s\n);' % (
qn(self.TABLE),
',\n'.join(sql),
))
try:
cursor.execute('CREATE INDEX %s ON %s (%s, %s)' % (
qn('%s_idx' % self.TABLE),
qn(self.TABLE),
qn('queue'),
qn('created'),
))
except ProgrammingError:
# "IF NOT EXISTS" is not portable, so we just fail to create it
pass
# Don't share connections across fork()
connection.close()
def enqueue(self, job, queue):
cursor = connection.cursor()
cursor.execute("""
INSERT INTO %s (queue, data, created) VALUES (%%s, %%s, %%s)
""" % connection.ops.quote_name(self.TABLE), (
queue,
job.to_json(),
datetime.datetime.utcnow(),
))
def dequeue(self, queue, timeout):
cursor = connection.cursor()
cursor.execute("""
SELECT id, data FROM %s WHERE queue = %%s
ORDER BY created ASC LIMIT 1
""" % connection.ops.quote_name(self.TABLE), (queue,))
try:
id_, data = cursor.fetchall()[0]
except (IndexError, ProgrammingError):
time.sleep(timeout)
return
cursor.execute("""
DELETE FROM %s WHERE id = %%s
""" % connection.ops.quote_name(self.TABLE), (id_,))
try:
return Job.from_json(data)
except TypeError:
pass
|
Add experimental polling DB backend.
|
Add experimental polling DB backend.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>
|
Python
|
bsd-3-clause
|
thread/django-lightweight-queue,thread/django-lightweight-queue,lamby/django-lightweight-queue,prophile/django-lightweight-queue,prophile/django-lightweight-queue
|
Add experimental polling DB backend.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>
|
import time
import datetime
from django.db import connection, models, ProgrammingError
from ..job import Job
class DatabaseBackend(object):
TABLE = 'django_lightweight_queue'
FIELDS = (
models.AutoField(name='id', primary_key=True),
models.CharField(name='queue', max_length=255),
models.TextField(name='data'),
models.DateTimeField(name='created'),
)
def __init__(self):
qn = connection.ops.quote_name
sql = []
for x in self.FIELDS:
sql.append(' '.join((
qn(x.name),
x.db_type(connection=connection),
'PRIMARY KEY' if x.primary_key else '',
)))
cursor = connection.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS %s (\n%s\n);' % (
qn(self.TABLE),
',\n'.join(sql),
))
try:
cursor.execute('CREATE INDEX %s ON %s (%s, %s)' % (
qn('%s_idx' % self.TABLE),
qn(self.TABLE),
qn('queue'),
qn('created'),
))
except ProgrammingError:
# "IF NOT EXISTS" is not portable, so we just fail to create it
pass
# Don't share connections across fork()
connection.close()
def enqueue(self, job, queue):
cursor = connection.cursor()
cursor.execute("""
INSERT INTO %s (queue, data, created) VALUES (%%s, %%s, %%s)
""" % connection.ops.quote_name(self.TABLE), (
queue,
job.to_json(),
datetime.datetime.utcnow(),
))
def dequeue(self, queue, timeout):
cursor = connection.cursor()
cursor.execute("""
SELECT id, data FROM %s WHERE queue = %%s
ORDER BY created ASC LIMIT 1
""" % connection.ops.quote_name(self.TABLE), (queue,))
try:
id_, data = cursor.fetchall()[0]
except (IndexError, ProgrammingError):
time.sleep(timeout)
return
cursor.execute("""
DELETE FROM %s WHERE id = %%s
""" % connection.ops.quote_name(self.TABLE), (id_,))
try:
return Job.from_json(data)
except TypeError:
pass
|
<commit_before><commit_msg>Add experimental polling DB backend.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com><commit_after>
|
import time
import datetime
from django.db import connection, models, ProgrammingError
from ..job import Job
class DatabaseBackend(object):
TABLE = 'django_lightweight_queue'
FIELDS = (
models.AutoField(name='id', primary_key=True),
models.CharField(name='queue', max_length=255),
models.TextField(name='data'),
models.DateTimeField(name='created'),
)
def __init__(self):
qn = connection.ops.quote_name
sql = []
for x in self.FIELDS:
sql.append(' '.join((
qn(x.name),
x.db_type(connection=connection),
'PRIMARY KEY' if x.primary_key else '',
)))
cursor = connection.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS %s (\n%s\n);' % (
qn(self.TABLE),
',\n'.join(sql),
))
try:
cursor.execute('CREATE INDEX %s ON %s (%s, %s)' % (
qn('%s_idx' % self.TABLE),
qn(self.TABLE),
qn('queue'),
qn('created'),
))
except ProgrammingError:
# "IF NOT EXISTS" is not portable, so we just fail to create it
pass
# Don't share connections across fork()
connection.close()
def enqueue(self, job, queue):
cursor = connection.cursor()
cursor.execute("""
INSERT INTO %s (queue, data, created) VALUES (%%s, %%s, %%s)
""" % connection.ops.quote_name(self.TABLE), (
queue,
job.to_json(),
datetime.datetime.utcnow(),
))
def dequeue(self, queue, timeout):
cursor = connection.cursor()
cursor.execute("""
SELECT id, data FROM %s WHERE queue = %%s
ORDER BY created ASC LIMIT 1
""" % connection.ops.quote_name(self.TABLE), (queue,))
try:
id_, data = cursor.fetchall()[0]
except (IndexError, ProgrammingError):
time.sleep(timeout)
return
cursor.execute("""
DELETE FROM %s WHERE id = %%s
""" % connection.ops.quote_name(self.TABLE), (id_,))
try:
return Job.from_json(data)
except TypeError:
pass
|
Add experimental polling DB backend.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com>import time
import datetime
from django.db import connection, models, ProgrammingError
from ..job import Job
class DatabaseBackend(object):
TABLE = 'django_lightweight_queue'
FIELDS = (
models.AutoField(name='id', primary_key=True),
models.CharField(name='queue', max_length=255),
models.TextField(name='data'),
models.DateTimeField(name='created'),
)
def __init__(self):
qn = connection.ops.quote_name
sql = []
for x in self.FIELDS:
sql.append(' '.join((
qn(x.name),
x.db_type(connection=connection),
'PRIMARY KEY' if x.primary_key else '',
)))
cursor = connection.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS %s (\n%s\n);' % (
qn(self.TABLE),
',\n'.join(sql),
))
try:
cursor.execute('CREATE INDEX %s ON %s (%s, %s)' % (
qn('%s_idx' % self.TABLE),
qn(self.TABLE),
qn('queue'),
qn('created'),
))
except ProgrammingError:
# "IF NOT EXISTS" is not portable, so we just fail to create it
pass
# Don't share connections across fork()
connection.close()
def enqueue(self, job, queue):
cursor = connection.cursor()
cursor.execute("""
INSERT INTO %s (queue, data, created) VALUES (%%s, %%s, %%s)
""" % connection.ops.quote_name(self.TABLE), (
queue,
job.to_json(),
datetime.datetime.utcnow(),
))
def dequeue(self, queue, timeout):
cursor = connection.cursor()
cursor.execute("""
SELECT id, data FROM %s WHERE queue = %%s
ORDER BY created ASC LIMIT 1
""" % connection.ops.quote_name(self.TABLE), (queue,))
try:
id_, data = cursor.fetchall()[0]
except (IndexError, ProgrammingError):
time.sleep(timeout)
return
cursor.execute("""
DELETE FROM %s WHERE id = %%s
""" % connection.ops.quote_name(self.TABLE), (id_,))
try:
return Job.from_json(data)
except TypeError:
pass
|
<commit_before><commit_msg>Add experimental polling DB backend.
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@thread.com><commit_after>import time
import datetime
from django.db import connection, models, ProgrammingError
from ..job import Job
class DatabaseBackend(object):
TABLE = 'django_lightweight_queue'
FIELDS = (
models.AutoField(name='id', primary_key=True),
models.CharField(name='queue', max_length=255),
models.TextField(name='data'),
models.DateTimeField(name='created'),
)
def __init__(self):
qn = connection.ops.quote_name
sql = []
for x in self.FIELDS:
sql.append(' '.join((
qn(x.name),
x.db_type(connection=connection),
'PRIMARY KEY' if x.primary_key else '',
)))
cursor = connection.cursor()
cursor.execute('CREATE TABLE IF NOT EXISTS %s (\n%s\n);' % (
qn(self.TABLE),
',\n'.join(sql),
))
try:
cursor.execute('CREATE INDEX %s ON %s (%s, %s)' % (
qn('%s_idx' % self.TABLE),
qn(self.TABLE),
qn('queue'),
qn('created'),
))
except ProgrammingError:
# "IF NOT EXISTS" is not portable, so we just fail to create it
pass
# Don't share connections across fork()
connection.close()
def enqueue(self, job, queue):
cursor = connection.cursor()
cursor.execute("""
INSERT INTO %s (queue, data, created) VALUES (%%s, %%s, %%s)
""" % connection.ops.quote_name(self.TABLE), (
queue,
job.to_json(),
datetime.datetime.utcnow(),
))
def dequeue(self, queue, timeout):
cursor = connection.cursor()
cursor.execute("""
SELECT id, data FROM %s WHERE queue = %%s
ORDER BY created ASC LIMIT 1
""" % connection.ops.quote_name(self.TABLE), (queue,))
try:
id_, data = cursor.fetchall()[0]
except (IndexError, ProgrammingError):
time.sleep(timeout)
return
cursor.execute("""
DELETE FROM %s WHERE id = %%s
""" % connection.ops.quote_name(self.TABLE), (id_,))
try:
return Job.from_json(data)
except TypeError:
pass
|
|
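For context on the polling backend above: dequeue() returns a Job when one is available, and otherwise sleeps for the given timeout and returns None. A minimal sketch of a worker loop built on that contract; the queue name 'default' is made up for illustration, the real django-lightweight-queue worker wires this up differently, and how a Job is executed is not shown in the commit, so that step is left as a placeholder.

backend = DatabaseBackend()

while True:
    # Blocks for up to ~1 second when the queue is empty, then returns None.
    job = backend.dequeue('default', timeout=1)
    if job is not None:
        # Hand the job to whatever runs it; the Job execution API is not
        # part of this commit, so this is deliberately left unspecified.
        pass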
3c8eb0563f3997fc068d039b18452eaa98da3122
|
download_avatars.py
|
download_avatars.py
|
#!/usr/bin/env python3
import PIL.Image
import io
import json
import requests
import post_list
import web_cache
# Split this file into two modules, because we need to move web_cache out of
# the way between the two steps. (We want to isolate the avatar HTTP requests)
# into its own thing.
def _make_avatar_url_list():
seen = set()
with open("avatar_urls", "wt") as fp:
for post in post_list.load_posts():
url = "https://thearchdruidreport.blogspot.com/feeds/%s/comments/default" \
"?alt=json&v=2&orderby=published&reverse=false&max-results=1000" % post.postid
js = json.loads(web_cache.get(url).decode("utf8"))
for comment in js["feed"]["entry"]:
(author,) = comment["author"]
avatar = author["gd$image"]
int(avatar["width"])
int(avatar["height"])
src = avatar["src"]
if src not in seen:
seen.add(src)
assert "\n" not in src
fp.write(src + "\n")
def _fetch_avatar_urls():
urls = open("avatar_urls", "r").read().splitlines()
for i, url in enumerate(urls):
print("[%d/%d] fetching %s ..." % (i + 1, len(urls), url))
try:
img = PIL.Image.open(io.BytesIO(web_cache.get(url)))
except:
print("WARNING: Bad avatar URL: %s" % url)
#_make_avatar_url_list()
_fetch_avatar_urls()
|
Add a script useful for downloading large avatar images from Atom feeds
|
Add a script useful for downloading large avatar images from Atom feeds
|
Python
|
mit
|
squirrel2038/thearchdruidreport-archive,squirrel2038/thearchdruidreport-archive,squirrel2038/thearchdruidreport-archive
|
Add a script useful for downloading large avatar images from Atom feeds
|
#!/usr/bin/env python3
import PIL.Image
import io
import json
import requests
import post_list
import web_cache
# Split this file into two modules, because we need to move web_cache out of
# the way between the two steps. (We want to isolate the avatar HTTP requests)
# into its own thing.
def _make_avatar_url_list():
seen = set()
with open("avatar_urls", "wt") as fp:
for post in post_list.load_posts():
url = "https://thearchdruidreport.blogspot.com/feeds/%s/comments/default" \
"?alt=json&v=2&orderby=published&reverse=false&max-results=1000" % post.postid
js = json.loads(web_cache.get(url).decode("utf8"))
for comment in js["feed"]["entry"]:
(author,) = comment["author"]
avatar = author["gd$image"]
int(avatar["width"])
int(avatar["height"])
src = avatar["src"]
if src not in seen:
seen.add(src)
assert "\n" not in src
fp.write(src + "\n")
def _fetch_avatar_urls():
urls = open("avatar_urls", "r").read().splitlines()
for i, url in enumerate(urls):
print("[%d/%d] fetching %s ..." % (i + 1, len(urls), url))
try:
img = PIL.Image.open(io.BytesIO(web_cache.get(url)))
except:
print("WARNING: Bad avatar URL: %s" % url)
#_make_avatar_url_list()
_fetch_avatar_urls()
|
<commit_before><commit_msg>Add a script useful for downloading large avatar images from Atom feeds<commit_after>
|
#!/usr/bin/env python3
import PIL.Image
import io
import json
import requests
import post_list
import web_cache
# Split this file into two modules, because we need to move web_cache out of
# the way between the two steps. (We want to isolate the avatar HTTP requests)
# into its own thing.
def _make_avatar_url_list():
seen = set()
with open("avatar_urls", "wt") as fp:
for post in post_list.load_posts():
url = "https://thearchdruidreport.blogspot.com/feeds/%s/comments/default" \
"?alt=json&v=2&orderby=published&reverse=false&max-results=1000" % post.postid
js = json.loads(web_cache.get(url).decode("utf8"))
for comment in js["feed"]["entry"]:
(author,) = comment["author"]
avatar = author["gd$image"]
int(avatar["width"])
int(avatar["height"])
src = avatar["src"]
if src not in seen:
seen.add(src)
assert "\n" not in src
fp.write(src + "\n")
def _fetch_avatar_urls():
urls = open("avatar_urls", "r").read().splitlines()
for i, url in enumerate(urls):
print("[%d/%d] fetching %s ..." % (i + 1, len(urls), url))
try:
img = PIL.Image.open(io.BytesIO(web_cache.get(url)))
except:
print("WARNING: Bad avatar URL: %s" % url)
#_make_avatar_url_list()
_fetch_avatar_urls()
|
Add a script useful for downloading large avatar images from Atom feeds#!/usr/bin/env python3
import PIL.Image
import io
import json
import requests
import post_list
import web_cache
# Split this file into two modules, because we need to move web_cache out of
# the way between the two steps. (We want to isolate the avatar HTTP requests)
# into its own thing.
def _make_avatar_url_list():
seen = set()
with open("avatar_urls", "wt") as fp:
for post in post_list.load_posts():
url = "https://thearchdruidreport.blogspot.com/feeds/%s/comments/default" \
"?alt=json&v=2&orderby=published&reverse=false&max-results=1000" % post.postid
js = json.loads(web_cache.get(url).decode("utf8"))
for comment in js["feed"]["entry"]:
(author,) = comment["author"]
avatar = author["gd$image"]
int(avatar["width"])
int(avatar["height"])
src = avatar["src"]
if src not in seen:
seen.add(src)
assert "\n" not in src
fp.write(src + "\n")
def _fetch_avatar_urls():
urls = open("avatar_urls", "r").read().splitlines()
for i, url in enumerate(urls):
print("[%d/%d] fetching %s ..." % (i + 1, len(urls), url))
try:
img = PIL.Image.open(io.BytesIO(web_cache.get(url)))
except:
print("WARNING: Bad avatar URL: %s" % url)
#_make_avatar_url_list()
_fetch_avatar_urls()
|
<commit_before><commit_msg>Add a script useful for downloading large avatar images from Atom feeds<commit_after>#!/usr/bin/env python3
import PIL.Image
import io
import json
import requests
import post_list
import web_cache
# Split this file into two modules, because we need to move web_cache out of
# the way between the two steps. (We want to isolate the avatar HTTP requests)
# into its own thing.
def _make_avatar_url_list():
seen = set()
with open("avatar_urls", "wt") as fp:
for post in post_list.load_posts():
url = "https://thearchdruidreport.blogspot.com/feeds/%s/comments/default" \
"?alt=json&v=2&orderby=published&reverse=false&max-results=1000" % post.postid
js = json.loads(web_cache.get(url).decode("utf8"))
for comment in js["feed"]["entry"]:
(author,) = comment["author"]
avatar = author["gd$image"]
int(avatar["width"])
int(avatar["height"])
src = avatar["src"]
if src not in seen:
seen.add(src)
assert "\n" not in src
fp.write(src + "\n")
def _fetch_avatar_urls():
urls = open("avatar_urls", "r").read().splitlines()
for i, url in enumerate(urls):
print("[%d/%d] fetching %s ..." % (i + 1, len(urls), url))
try:
img = PIL.Image.open(io.BytesIO(web_cache.get(url)))
except:
print("WARNING: Bad avatar URL: %s" % url)
#_make_avatar_url_list()
_fetch_avatar_urls()
|
|
0ca24ff03f6382c23995f662b678e457a8394140
|
debian/bump-symbols.py
|
debian/bump-symbols.py
|
#!/usr/bin/python
#
# Bump symbol versions of libvirt0
# Usage: ./bump-symbol-versions 1.2.16~rc2
import os
import re
import sys
import shutil
import subprocess
#import gbp.git.GitRepository
symbols_file = 'debian/libvirt0.symbols'
symbols_new_file = symbols_file + '.new'
symbols = open(symbols_file)
symbols_new = open('%s.new' % symbols_file, 'w+')
if len(sys.argv) != 2:
print >>sys.stderr, "Need a version"
sys.exit(1)
version = sys.argv[1]
s_version = version.split('~', 1)[0]
for line in symbols.readlines():
m = re.match('(?P<pre>.*LIBVIRT_(?P<admin>ADMIN_)?PRIVATE_)(?P<v>[a-z0-9.]+) ',
line)
if m:
if not m.group('admin'):
symbols_new.write(' *@LIBVIRT_%s %s\n' % (s_version, version))
symbols_new.write("%s%s %s\n" %
(m.group('pre'), s_version, version))
else:
symbols_new.write(line)
symbols.close()
symbols_new.close()
os.unlink(symbols_file)
shutil.move(symbols_new_file, symbols_file)
subprocess.call(['git', 'commit', '-m', 'Bump symbol versions', symbols_file])
|
Add script to bump symbol versions
|
Add script to bump symbol versions
|
Python
|
lgpl-2.1
|
agx/libvirt-debian,agx/libvirt-debian,agx/libvirt-debian,agx/libvirt-debian,agx/libvirt-debian
|
Add script to bump symbol versions
|
#!/usr/bin/python
#
# Bump symbol versions of libvirt0
# Usage: ./bump-symbol-versions 1.2.16~rc2
import os
import re
import sys
import shutil
import subprocess
#import gbp.git.GitRepository
symbols_file = 'debian/libvirt0.symbols'
symbols_new_file = symbols_file + '.new'
symbols = open(symbols_file)
symbols_new = open('%s.new' % symbols_file, 'w+')
if len(sys.argv) != 2:
print >>sys.stderr, "Need a version"
sys.exit(1)
version = sys.argv[1]
s_version = version.split('~', 1)[0]
for line in symbols.readlines():
m = re.match('(?P<pre>.*LIBVIRT_(?P<admin>ADMIN_)?PRIVATE_)(?P<v>[a-z0-9.]+) ',
line)
if m:
if not m.group('admin'):
symbols_new.write(' *@LIBVIRT_%s %s\n' % (s_version, version))
symbols_new.write("%s%s %s\n" %
(m.group('pre'), s_version, version))
else:
symbols_new.write(line)
symbols.close()
symbols_new.close()
os.unlink(symbols_file)
shutil.move(symbols_new_file, symbols_file)
subprocess.call(['git', 'commit', '-m', 'Bump symbol versions', symbols_file])
|
<commit_before><commit_msg>Add script to bump symbol versions<commit_after>
|
#!/usr/bin/python
#
# Bump symbol versions of libvirt0
# Usage: ./bump-symbol-versions 1.2.16~rc2
import os
import re
import sys
import shutil
import subprocess
#import gbp.git.GitRepository
symbols_file = 'debian/libvirt0.symbols'
symbols_new_file = symbols_file + '.new'
symbols = open(symbols_file)
symbols_new = open('%s.new' % symbols_file, 'w+')
if len(sys.argv) != 2:
print >>sys.stderr, "Need a version"
sys.exit(1)
version = sys.argv[1]
s_version = version.split('~', 1)[0]
for line in symbols.readlines():
m = re.match('(?P<pre>.*LIBVIRT_(?P<admin>ADMIN_)?PRIVATE_)(?P<v>[a-z0-9.]+) ',
line)
if m:
if not m.group('admin'):
symbols_new.write(' *@LIBVIRT_%s %s\n' % (s_version, version))
symbols_new.write("%s%s %s\n" %
(m.group('pre'), s_version, version))
else:
symbols_new.write(line)
symbols.close()
symbols_new.close()
os.unlink(symbols_file)
shutil.move(symbols_new_file, symbols_file)
subprocess.call(['git', 'commit', '-m', 'Bump symbol versions', symbols_file])
|
Add script to bump symbol versions#!/usr/bin/python
#
# Bump symbol versions of libvirt0
# Usage: ./bump-symbol-versions 1.2.16~rc2
import os
import re
import sys
import shutil
import subprocess
#import gbp.git.GitRepository
symbols_file = 'debian/libvirt0.symbols'
symbols_new_file = symbols_file + '.new'
symbols = open(symbols_file)
symbols_new = open('%s.new' % symbols_file, 'w+')
if len(sys.argv) != 2:
print >>sys.stderr, "Need a version"
sys.exit(1)
version = sys.argv[1]
s_version = version.split('~', 1)[0]
for line in symbols.readlines():
m = re.match('(?P<pre>.*LIBVIRT_(?P<admin>ADMIN_)?PRIVATE_)(?P<v>[a-z0-9.]+) ',
line)
if m:
if not m.group('admin'):
symbols_new.write(' *@LIBVIRT_%s %s\n' % (s_version, version))
symbols_new.write("%s%s %s\n" %
(m.group('pre'), s_version, version))
else:
symbols_new.write(line)
symbols.close()
symbols_new.close()
os.unlink(symbols_file)
shutil.move(symbols_new_file, symbols_file)
subprocess.call(['git', 'commit', '-m', 'Bump symbol versions', symbols_file])
|
<commit_before><commit_msg>Add script to bump symbol versions<commit_after>#!/usr/bin/python
#
# Bump symbol versions of libvirt0
# Usage: ./bump-symbol-versions 1.2.16~rc2
import os
import re
import sys
import shutil
import subprocess
#import gbp.git.GitRepository
symbols_file = 'debian/libvirt0.symbols'
symbols_new_file = symbols_file + '.new'
symbols = open(symbols_file)
symbols_new = open('%s.new' % symbols_file, 'w+')
if len(sys.argv) != 2:
print >>sys.stderr, "Need a version"
sys.exit(1)
version = sys.argv[1]
s_version = version.split('~', 1)[0]
for line in symbols.readlines():
m = re.match('(?P<pre>.*LIBVIRT_(?P<admin>ADMIN_)?PRIVATE_)(?P<v>[a-z0-9.]+) ',
line)
if m:
if not m.group('admin'):
symbols_new.write(' *@LIBVIRT_%s %s\n' % (s_version, version))
symbols_new.write("%s%s %s\n" %
(m.group('pre'), s_version, version))
else:
symbols_new.write(line)
symbols.close()
symbols_new.close()
os.unlink(symbols_file)
shutil.move(symbols_new_file, symbols_file)
subprocess.call(['git', 'commit', '-m', 'Bump symbol versions', symbols_file])
|
|
a5c99fe8e37079a2663fe90644d3925d6dc7a5d0
|
examples/offline_examples/test_request_fixture.py
|
examples/offline_examples/test_request_fixture.py
|
import pytest
@pytest.mark.offline
def test_request_fixture(request):
sb = request.getfixturevalue('sb')
sb.open("data:text/html,<p>Hello<br><input></p>")
sb.assert_element("html > body")
sb.assert_text("Hello", "body p")
sb.type("input", "Goodbye")
sb.click("body p")
sb.tearDown()
|
Add another example that works offline
|
Add another example that works offline
|
Python
|
mit
|
seleniumbase/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase
|
Add another example that works offline
|
import pytest
@pytest.mark.offline
def test_request_fixture(request):
sb = request.getfixturevalue('sb')
sb.open("data:text/html,<p>Hello<br><input></p>")
sb.assert_element("html > body")
sb.assert_text("Hello", "body p")
sb.type("input", "Goodbye")
sb.click("body p")
sb.tearDown()
|
<commit_before><commit_msg>Add another example that works offline<commit_after>
|
import pytest
@pytest.mark.offline
def test_request_fixture(request):
sb = request.getfixturevalue('sb')
sb.open("data:text/html,<p>Hello<br><input></p>")
sb.assert_element("html > body")
sb.assert_text("Hello", "body p")
sb.type("input", "Goodbye")
sb.click("body p")
sb.tearDown()
|
Add another example that works offlineimport pytest
@pytest.mark.offline
def test_request_fixture(request):
sb = request.getfixturevalue('sb')
sb.open("data:text/html,<p>Hello<br><input></p>")
sb.assert_element("html > body")
sb.assert_text("Hello", "body p")
sb.type("input", "Goodbye")
sb.click("body p")
sb.tearDown()
|
<commit_before><commit_msg>Add another example that works offline<commit_after>import pytest
@pytest.mark.offline
def test_request_fixture(request):
sb = request.getfixturevalue('sb')
sb.open("data:text/html,<p>Hello<br><input></p>")
sb.assert_element("html > body")
sb.assert_text("Hello", "body p")
sb.type("input", "Goodbye")
sb.click("body p")
sb.tearDown()
|
|
a2296ae2165b60ba182d540f729a099183169c92
|
problem_40.py
|
problem_40.py
|
from time import time
def main():
fractional_part = ''
i = 1
while len(fractional_part) < 1000000:
fractional_part += str(i)
i += 1
prod = 1
for i in [1, 10, 100, 1000, 10000, 100000, 1000000]:
prod *= int(fractional_part[i-1])
print 'Product:', prod
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 40, decimal fraction digits
|
Add problem 40, decimal fraction digits
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 40, decimal fraction digits
|
from time import time
def main():
fractional_part = ''
i = 1
while len(fractional_part) < 1000000:
fractional_part += str(i)
i += 1
prod = 1
for i in [1, 10, 100, 1000, 10000, 100000, 1000000]:
prod *= int(fractional_part[i-1])
print 'Product:', prod
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 40, decimal fraction digits<commit_after>
|
from time import time
def main():
fractional_part = ''
i = 1
while len(fractional_part) < 1000000:
fractional_part += str(i)
i += 1
prod = 1
for i in [1, 10, 100, 1000, 10000, 100000, 1000000]:
prod *= int(fractional_part[i-1])
print 'Product:', prod
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 40, decimal fraction digitsfrom time import time
def main():
fractional_part = ''
i = 1
while len(fractional_part) < 1000000:
fractional_part += str(i)
i += 1
prod = 1
for i in [1, 10, 100, 1000, 10000, 100000, 1000000]:
prod *= int(fractional_part[i-1])
print 'Product:', prod
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 40, decimal fraction digits<commit_after>from time import time
def main():
fractional_part = ''
i = 1
while len(fractional_part) < 1000000:
fractional_part += str(i)
i += 1
prod = 1
for i in [1, 10, 100, 1000, 10000, 100000, 1000000]:
prod *= int(fractional_part[i-1])
print 'Product:', prod
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
|
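The brute-force solution above concatenates the first million fractional digits of Champernowne's constant into one string before indexing it, which is simple and fast enough here. For illustration only (not part of the commit), a sketch of a constant-memory lookup of the same 1-based digit positions:

def champernowne_digit(n):
    # n-th digit (1-based) of 0.123456789101112... without building the string.
    digits, count, start = 1, 9, 1
    while n > digits * count:
        n -= digits * count
        digits += 1
        count *= 10
        start *= 10
    number = start + (n - 1) // digits
    return int(str(number)[(n - 1) % digits])

product = 1
for i in [1, 10, 100, 1000, 10000, 100000, 1000000]:
    product *= champernowne_digit(i)
print(product)  # should match the brute-force result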
9d77092729e534b19d75b38dd700df25a009fa49
|
toolbox/convexify_costs.py
|
toolbox/convexify_costs.py
|
import sys
import commentjson as json
import os
import argparse
import numpy as np
def listify(l):
return [[e] for e in l]
def convexify(l):
features = np.array(l)
if features.shape[1] != 1:
raise InvalidArgumentException('This script can only convexify feature vectors with one feature per state!')
bestState = np.argmin(features)
for direction in [-1, 1]:
pos = bestState + direction
previousGradient = 0
while pos >= 0 and pos < features.shape[0]:
newGradient = features[pos] - features[pos-direction]
if abs(newGradient) < abs(previousGradient):
# cost function got too flat, set feature value to match old slope
features[pos] = features[pos-direction] + previousGradient
else:
# all good, continue with new slope
previousGradient = newGradient
pos += direction
return listify(features.flatten())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Take a json file containing a result to a set of HDF5 events files')
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file containing the model with convexified costs')
args = parser.parse_args()
with open(args.model_filename, 'r') as f:
model = json.load(f)
if not model['settings']['statesShareWeights']:
raise InvalidArgumentException('This script can only convexify feature vectors with shared weights!')
segmentationHypotheses = model['segmentationHypotheses']
for seg in segmentationHypotheses:
for f in ['features', 'appearanceFeatures', 'disappearanceFeatures']:
if f in seg:
seg[f] = convexify(seg[f])
# division features are always convex (is just a line)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
link['features'] = convexify(link['features'])
with open(args.result_filename, 'w') as f:
json.dump(model, f, indent=4, separators=(',', ': '))
|
Add script to convexify the energies of a conservation tracking JSON model
|
Add script to convexify the energies of a conservation tracking JSON model
|
Python
|
mit
|
chaubold/hytra,chaubold/hytra,chaubold/hytra
|
Add script to convexify the energies of a conservation tracking JSON model
|
import sys
import commentjson as json
import os
import argparse
import numpy as np
def listify(l):
return [[e] for e in l]
def convexify(l):
features = np.array(l)
if features.shape[1] != 1:
raise InvalidArgumentException('This script can only convexify feature vectors with one feature per state!')
bestState = np.argmin(features)
for direction in [-1, 1]:
pos = bestState + direction
previousGradient = 0
while pos >= 0 and pos < features.shape[0]:
newGradient = features[pos] - features[pos-direction]
if abs(newGradient) < abs(previousGradient):
# cost function got too flat, set feature value to match old slope
features[pos] = features[pos-direction] + previousGradient
else:
# all good, continue with new slope
previousGradient = newGradient
pos += direction
return listify(features.flatten())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Take a json file containing a result to a set of HDF5 events files')
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file containing the model with convexified costs')
args = parser.parse_args()
with open(args.model_filename, 'r') as f:
model = json.load(f)
if not model['settings']['statesShareWeights']:
raise InvalidArgumentException('This script can only convexify feature vectors with shared weights!')
segmentationHypotheses = model['segmentationHypotheses']
for seg in segmentationHypotheses:
for f in ['features', 'appearanceFeatures', 'disappearanceFeatures']:
if f in seg:
seg[f] = convexify(seg[f])
# division features are always convex (is just a line)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
link['features'] = convexify(link['features'])
with open(args.result_filename, 'w') as f:
json.dump(model, f, indent=4, separators=(',', ': '))
|
<commit_before><commit_msg>Add script to convexify the energies of a conservation tracking JSON model<commit_after>
|
import sys
import commentjson as json
import os
import argparse
import numpy as np
def listify(l):
return [[e] for e in l]
def convexify(l):
features = np.array(l)
if features.shape[1] != 1:
raise InvalidArgumentException('This script can only convexify feature vectors with one feature per state!')
bestState = np.argmin(features)
for direction in [-1, 1]:
pos = bestState + direction
previousGradient = 0
while pos >= 0 and pos < features.shape[0]:
newGradient = features[pos] - features[pos-direction]
if abs(newGradient) < abs(previousGradient):
# cost function got too flat, set feature value to match old slope
features[pos] = features[pos-direction] + previousGradient
else:
# all good, continue with new slope
previousGradient = newGradient
pos += direction
return listify(features.flatten())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Take a json file containing a result to a set of HDF5 events files')
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file containing the model with convexified costs')
args = parser.parse_args()
with open(args.model_filename, 'r') as f:
model = json.load(f)
if not model['settings']['statesShareWeights']:
raise InvalidArgumentException('This script can only convexify feature vectors with shared weights!')
segmentationHypotheses = model['segmentationHypotheses']
for seg in segmentationHypotheses:
for f in ['features', 'appearanceFeatures', 'disappearanceFeatures']:
if f in seg:
seg[f] = convexify(seg[f])
# division features are always convex (is just a line)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
link['features'] = convexify(link['features'])
with open(args.result_filename, 'w') as f:
json.dump(model, f, indent=4, separators=(',', ': '))
|
Add script to convexify the energies of a conservation tracking JSON modelimport sys
import commentjson as json
import os
import argparse
import numpy as np
def listify(l):
return [[e] for e in l]
def convexify(l):
features = np.array(l)
if features.shape[1] != 1:
raise InvalidArgumentException('This script can only convexify feature vectors with one feature per state!')
bestState = np.argmin(features)
for direction in [-1, 1]:
pos = bestState + direction
previousGradient = 0
while pos >= 0 and pos < features.shape[0]:
newGradient = features[pos] - features[pos-direction]
if abs(newGradient) < abs(previousGradient):
# cost function got too flat, set feature value to match old slope
features[pos] = features[pos-direction] + previousGradient
else:
# all good, continue with new slope
previousGradient = newGradient
pos += direction
return listify(features.flatten())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Take a json file containing a result to a set of HDF5 events files')
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file containing the model with convexified costs')
args = parser.parse_args()
with open(args.model_filename, 'r') as f:
model = json.load(f)
if not model['settings']['statesShareWeights']:
raise InvalidArgumentException('This script can only convexify feature vectors with shared weights!')
segmentationHypotheses = model['segmentationHypotheses']
for seg in segmentationHypotheses:
for f in ['features', 'appearanceFeatures', 'disappearanceFeatures']:
if f in seg:
seg[f] = convexify(seg[f])
# division features are always convex (is just a line)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
link['features'] = convexify(link['features'])
with open(args.result_filename, 'w') as f:
json.dump(model, f, indent=4, separators=(',', ': '))
|
<commit_before><commit_msg>Add script to convexify the energies of a conservation tracking JSON model<commit_after>import sys
import commentjson as json
import os
import argparse
import numpy as np
def listify(l):
return [[e] for e in l]
def convexify(l):
features = np.array(l)
if features.shape[1] != 1:
raise InvalidArgumentException('This script can only convexify feature vectors with one feature per state!')
bestState = np.argmin(features)
for direction in [-1, 1]:
pos = bestState + direction
previousGradient = 0
while pos >= 0 and pos < features.shape[0]:
newGradient = features[pos] - features[pos-direction]
if abs(newGradient) < abs(previousGradient):
# cost function got too flat, set feature value to match old slope
features[pos] = features[pos-direction] + previousGradient
else:
# all good, continue with new slope
previousGradient = newGradient
pos += direction
return listify(features.flatten())
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Take a json file containing a result to a set of HDF5 events files')
parser.add_argument('--model', required=True, type=str, dest='model_filename',
help='Filename of the json model description')
parser.add_argument('--output', required=True, type=str, dest='result_filename',
help='Filename of the json file containing the model with convexified costs')
args = parser.parse_args()
with open(args.model_filename, 'r') as f:
model = json.load(f)
if not model['settings']['statesShareWeights']:
raise InvalidArgumentException('This script can only convexify feature vectors with shared weights!')
segmentationHypotheses = model['segmentationHypotheses']
for seg in segmentationHypotheses:
for f in ['features', 'appearanceFeatures', 'disappearanceFeatures']:
if f in seg:
seg[f] = convexify(seg[f])
# division features are always convex (is just a line)
linkingHypotheses = model['linkingHypotheses']
for link in linkingHypotheses:
link['features'] = convexify(link['features'])
with open(args.result_filename, 'w') as f:
json.dump(model, f, indent=4, separators=(',', ': '))
|
|
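One caveat about the script above: `InvalidArgumentException` is neither imported nor defined, so either error branch would surface as a `NameError` instead of the intended message; a built-in such as `ValueError` (or a project-specific exception, if hytra defines one) would be needed. Incidentally, the argparse description string still refers to HDF5 events files, apparently copied from a neighbouring script. Purely as an illustration of what the convexification does, here is a hedged standalone sketch of the same loop on a toy cost vector, not the commit's code:

import numpy as np

def convexify(costs):
    # Walk outwards from the cheapest state and never let the magnitude of the
    # cost-curve slope decrease, mirroring the loop in the commit above.
    features = np.array(costs, dtype=float)
    if features.shape[1] != 1:
        raise ValueError('expected exactly one feature per state')
    best = int(np.argmin(features))
    for direction in (-1, 1):
        pos = best + direction
        prev_gradient = 0.0
        while 0 <= pos < features.shape[0]:
            gradient = features[pos] - features[pos - direction]
            if abs(gradient) < abs(prev_gradient):
                features[pos] = features[pos - direction] + prev_gradient
            else:
                prev_gradient = gradient
            pos += direction
    return [[float(v)] for v in features.flatten()]

print(convexify([[3.0], [1.0], [1.5], [1.2], [5.0]]))
# the dip at index 3 is lifted to 2.0 so the slopes keep increasing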
609784dc106e01800eed0a7ccf88f82d6977d408
|
babybuddy/migrations/0008_auto_20200120_0622.py
|
babybuddy/migrations/0008_auto_20200120_0622.py
|
# Generated by Django 3.0.2 on 2020-01-20 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0007_auto_20190607_1422'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='language',
field=models.CharField(choices=[('en', 'English'), ('fr', 'French'), ('de', 'German'), ('es', 'Spanish'), ('sv', 'Swedish'), ('tr', 'Turkish')], default='en', max_length=255, verbose_name='Language'),
),
]
|
Add missed language update migrations
|
Add missed language update migrations
This migration should have been included in `2627b1c`.
|
Python
|
bsd-2-clause
|
cdubz/babybuddy,cdubz/babybuddy,cdubz/babybuddy
|
Add missed language update migrations
This migration should have been included in `2627b1c`.
|
# Generated by Django 3.0.2 on 2020-01-20 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0007_auto_20190607_1422'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='language',
field=models.CharField(choices=[('en', 'English'), ('fr', 'French'), ('de', 'German'), ('es', 'Spanish'), ('sv', 'Swedish'), ('tr', 'Turkish')], default='en', max_length=255, verbose_name='Language'),
),
]
|
<commit_before><commit_msg>Add missed language update migrations
This migration should have been included in `2627b1c`.<commit_after>
|
# Generated by Django 3.0.2 on 2020-01-20 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0007_auto_20190607_1422'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='language',
field=models.CharField(choices=[('en', 'English'), ('fr', 'French'), ('de', 'German'), ('es', 'Spanish'), ('sv', 'Swedish'), ('tr', 'Turkish')], default='en', max_length=255, verbose_name='Language'),
),
]
|
Add missed language update migrations
This migration should have been included in `2627b1c`.# Generated by Django 3.0.2 on 2020-01-20 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0007_auto_20190607_1422'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='language',
field=models.CharField(choices=[('en', 'English'), ('fr', 'French'), ('de', 'German'), ('es', 'Spanish'), ('sv', 'Swedish'), ('tr', 'Turkish')], default='en', max_length=255, verbose_name='Language'),
),
]
|
<commit_before><commit_msg>Add missed language update migrations
This migration should have been included in `2627b1c`.<commit_after># Generated by Django 3.0.2 on 2020-01-20 14:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('babybuddy', '0007_auto_20190607_1422'),
]
operations = [
migrations.AlterField(
model_name='settings',
name='language',
field=models.CharField(choices=[('en', 'English'), ('fr', 'French'), ('de', 'German'), ('es', 'Spanish'), ('sv', 'Swedish'), ('tr', 'Turkish')], default='en', max_length=255, verbose_name='Language'),
),
]
|
|
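Because this migration was only noticed after the original language change landed, it is the kind of gap that Django's `makemigrations --check` flag exists to catch in CI. A hedged sketch of such a guard follows; the job wiring is hypothetical, the command itself is standard Django:

# Hypothetical CI step: exit non-zero when model changes have no migration.
# Shell equivalent: python manage.py makemigrations --check --dry-run
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "manage.py", "makemigrations", "--check", "--dry-run"]
)
if result.returncode != 0:
    sys.exit("model changes detected that are missing a migration")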
b0dfbb63a306255bc08eae2e7dd9360ca56a366f
|
osf/migrations/0100_set_access_request_enabled.py
|
osf/migrations/0100_set_access_request_enabled.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
from django.db import migrations, models ,connection
from osf.models import AbstractNode
class Migration(migrations.Migration):
dependencies = [
('osf', '0099_merge_20180427_1109'),
]
def add_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=True)
def remove_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=None)
operations = [
migrations.AlterField(
model_name='noderequestaction',
name='permissions',
field=models.CharField(choices=[(b'read', b'Read'), (b'write', b'Write'), (b'admin', b'Admin')], default=b'read', max_length=5),
),
migrations.RunPython(add_default_access_requests_enabled, remove_default_access_requests_enabled),
]
|
Add default value of access requests enabled to existing projects made before model added
|
Add default value of access requests enabled to existing projects made before model added
[#PLAT-835]
Also alter permissions on noderequestaction model, since choices were altered from when the original migration was made
|
Python
|
apache-2.0
|
aaxelb/osf.io,caseyrollins/osf.io,cslzchen/osf.io,sloria/osf.io,caseyrollins/osf.io,mfraezz/osf.io,mfraezz/osf.io,cslzchen/osf.io,icereval/osf.io,adlius/osf.io,mattclark/osf.io,erinspace/osf.io,felliott/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,aaxelb/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,mattclark/osf.io,adlius/osf.io,sloria/osf.io,adlius/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,felliott/osf.io,baylee-d/osf.io,aaxelb/osf.io,sloria/osf.io,adlius/osf.io,pattisdr/osf.io,mattclark/osf.io,brianjgeiger/osf.io,erinspace/osf.io,erinspace/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,HalcyonChimera/osf.io,felliott/osf.io,felliott/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,icereval/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,icereval/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io
|
Add default value of access requests enabled to existing projects made before model added
[#PLAT-835]
Also alter permissions on noderequestaction model, since choices were altered from when the original migration was made
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
from django.db import migrations, models ,connection
from osf.models import AbstractNode
class Migration(migrations.Migration):
dependencies = [
('osf', '0099_merge_20180427_1109'),
]
def add_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=True)
def remove_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=None)
operations = [
migrations.AlterField(
model_name='noderequestaction',
name='permissions',
field=models.CharField(choices=[(b'read', b'Read'), (b'write', b'Write'), (b'admin', b'Admin')], default=b'read', max_length=5),
),
migrations.RunPython(add_default_access_requests_enabled, remove_default_access_requests_enabled),
]
|
<commit_before><commit_msg>Add default value of access requests enabled to existing projects made before model added
[#PLAT-835]
Also alter permissions on noderequestaction model, since choices were altered from when the original migration was made<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
from django.db import migrations, models ,connection
from osf.models import AbstractNode
class Migration(migrations.Migration):
dependencies = [
('osf', '0099_merge_20180427_1109'),
]
def add_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=True)
def remove_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=None)
operations = [
migrations.AlterField(
model_name='noderequestaction',
name='permissions',
field=models.CharField(choices=[(b'read', b'Read'), (b'write', b'Write'), (b'admin', b'Admin')], default=b'read', max_length=5),
),
migrations.RunPython(add_default_access_requests_enabled, remove_default_access_requests_enabled),
]
|
Add default value of access requests enabled to existing projects made before model added
[#PLAT-835]
Also alter permissions on noderequestaction model, since choices were altered from when the original migration was made# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
from django.db import migrations, models ,connection
from osf.models import AbstractNode
class Migration(migrations.Migration):
dependencies = [
('osf', '0099_merge_20180427_1109'),
]
def add_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=True)
def remove_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=None)
operations = [
migrations.AlterField(
model_name='noderequestaction',
name='permissions',
field=models.CharField(choices=[(b'read', b'Read'), (b'write', b'Write'), (b'admin', b'Admin')], default=b'read', max_length=5),
),
migrations.RunPython(add_default_access_requests_enabled, remove_default_access_requests_enabled),
]
|
<commit_before><commit_msg>Add default value of access requests enabled to existing projects made before model added
[#PLAT-835]
Also alter permissions on noderequestaction model, since choices were altered from when the original migration was made<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
from django.db import migrations, models ,connection
from osf.models import AbstractNode
class Migration(migrations.Migration):
dependencies = [
('osf', '0099_merge_20180427_1109'),
]
def add_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=True)
def remove_default_access_requests_enabled(self, *args, **kwargs):
# Get the date the original noderequest migration was applied
sql = "SELECT applied from django_migrations WHERE app = 'osf' AND name = '0077_add_noderequest_model';"
with connection.cursor() as cursor:
cursor.execute(sql)
date_noderequest_migration = cursor.fetchall()[0][0]
# Get all projects created before that
AbstractNode.objects.filter(created__lte=date_noderequest_migration).update(access_requests_enabled=None)
operations = [
migrations.AlterField(
model_name='noderequestaction',
name='permissions',
field=models.CharField(choices=[(b'read', b'Read'), (b'write', b'Write'), (b'admin', b'Admin')], default=b'read', max_length=5),
),
migrations.RunPython(add_default_access_requests_enabled, remove_default_access_requests_enabled),
]
|
|
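The RunPython callables above import `AbstractNode` straight from `osf.models` and key the cutoff on when migration 0077 was applied, which matches this project's conventions. The more defensive textbook pattern for Django data migrations resolves the historical model through the `apps` registry instead; a hedged sketch of that variant is below (the null-check filter is a simplification of the date-based cutoff, used here only for illustration):

from django.db import migrations

def set_access_requests_enabled(apps, schema_editor):
    # Resolve the historical model state rather than importing osf.models,
    # so the migration keeps working after later schema changes.
    AbstractNode = apps.get_model('osf', 'AbstractNode')
    AbstractNode.objects.filter(access_requests_enabled__isnull=True).update(
        access_requests_enabled=True,
    )

class Migration(migrations.Migration):
    dependencies = [('osf', '0099_merge_20180427_1109')]
    operations = [
        migrations.RunPython(set_access_requests_enabled,
                             migrations.RunPython.noop),
    ]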
39c50fe7d4713b9d0a8e4618a829d94b4fe7456c
|
van_der_pol_sync.py
|
van_der_pol_sync.py
|
from __future__ import division
import sys
import numpy as np
sys.path.append('/media/ixaxaar/Steam/src/nest/local/lib/python2.7/site-packages/')
import nest
import nest.raster_plot
import nest.voltage_trace
import uuid
import pylab
nest.SetKernelStatus({"resolution": .001})
u = uuid.uuid4()
nest.CopyModel('ac_generator', u, {'amplitude': 1., 'frequency': 20.})
ac = nest.Create(u)
n = ()
for i in xrange(1,10):
r = np.random.uniform(1000)
print r
n += nest.Create("relaxos_van_der_pol", 1, {"epsilon": r/1000, "input_currents_ex": r/1000})
d = nest.Create("spike_detector")
v = nest.Create('voltmeter', 1, {"withgid": True, "withtime": True})
# nest.Connect(ac, n, 'all_to_all', {'weight': .05, 'model': 'static_synapse'})
nest.Connect(n, n, 'all_to_all', {'weight': .1, 'model': 'static_synapse'})
nest.Connect(v, n)
nest.Simulate("1000")
nest.voltage_trace.from_device(v)
pylab.show()
|
Add code to test van der pol model
|
Add code to test van der pol model
|
Python
|
mit
|
synergetics/nest_expermiments,synergetics/nest_expermiments
|
Add code to test van der pol model
|
from __future__ import division
import sys
import numpy as np
sys.path.append('/media/ixaxaar/Steam/src/nest/local/lib/python2.7/site-packages/')
import nest
import nest.raster_plot
import nest.voltage_trace
import uuid
import pylab
nest.SetKernelStatus({"resolution": .001})
u = uuid.uuid4()
nest.CopyModel('ac_generator', u, {'amplitude': 1., 'frequency': 20.})
ac = nest.Create(u)
n = ()
for i in xrange(1,10):
r = np.random.uniform(1000)
print r
n += nest.Create("relaxos_van_der_pol", 1, {"epsilon": r/1000, "input_currents_ex": r/1000})
d = nest.Create("spike_detector")
v = nest.Create('voltmeter', 1, {"withgid": True, "withtime": True})
# nest.Connect(ac, n, 'all_to_all', {'weight': .05, 'model': 'static_synapse'})
nest.Connect(n, n, 'all_to_all', {'weight': .1, 'model': 'static_synapse'})
nest.Connect(v, n)
nest.Simulate("1000")
nest.voltage_trace.from_device(v)
pylab.show()
|
<commit_before><commit_msg>Add code to test van der pol model<commit_after>
|
from __future__ import division
import sys
import numpy as np
sys.path.append('/media/ixaxaar/Steam/src/nest/local/lib/python2.7/site-packages/')
import nest
import nest.raster_plot
import nest.voltage_trace
import uuid
import pylab
nest.SetKernelStatus({"resolution": .001})
u = uuid.uuid4()
nest.CopyModel('ac_generator', u, {'amplitude': 1., 'frequency': 20.})
ac = nest.Create(u)
n = ()
for i in xrange(1,10):
r = np.random.uniform(1000)
print r
n += nest.Create("relaxos_van_der_pol", 1, {"epsilon": r/1000, "input_currents_ex": r/1000})
d = nest.Create("spike_detector")
v = nest.Create('voltmeter', 1, {"withgid": True, "withtime": True})
# nest.Connect(ac, n, 'all_to_all', {'weight': .05, 'model': 'static_synapse'})
nest.Connect(n, n, 'all_to_all', {'weight': .1, 'model': 'static_synapse'})
nest.Connect(v, n)
nest.Simulate("1000")
nest.voltage_trace.from_device(v)
pylab.show()
|
Add code to test van der pol model
from __future__ import division
import sys
import numpy as np
sys.path.append('/media/ixaxaar/Steam/src/nest/local/lib/python2.7/site-packages/')
import nest
import nest.raster_plot
import nest.voltage_trace
import uuid
import pylab
nest.SetKernelStatus({"resolution": .001})
u = uuid.uuid4()
nest.CopyModel('ac_generator', u, {'amplitude': 1., 'frequency': 20.})
ac = nest.Create(u)
n = ()
for i in xrange(1,10):
r = np.random.uniform(1000)
print r
n += nest.Create("relaxos_van_der_pol", 1, {"epsilon": r/1000, "input_currents_ex": r/1000})
d = nest.Create("spike_detector")
v = nest.Create('voltmeter', 1, {"withgid": True, "withtime": True})
# nest.Connect(ac, n, 'all_to_all', {'weight': .05, 'model': 'static_synapse'})
nest.Connect(n, n, 'all_to_all', {'weight': .1, 'model': 'static_synapse'})
nest.Connect(v, n)
nest.Simulate("1000")
nest.voltage_trace.from_device(v)
pylab.show()
|
<commit_before><commit_msg>Add code to test van der pol model<commit_after>
from __future__ import division
import sys
import numpy as np
sys.path.append('/media/ixaxaar/Steam/src/nest/local/lib/python2.7/site-packages/')
import nest
import nest.raster_plot
import nest.voltage_trace
import uuid
import pylab
nest.SetKernelStatus({"resolution": .001})
u = uuid.uuid4()
nest.CopyModel('ac_generator', u, {'amplitude': 1., 'frequency': 20.})
ac = nest.Create(u)
n = ()
for i in xrange(1,10):
r = np.random.uniform(1000)
print r
n += nest.Create("relaxos_van_der_pol", 1, {"epsilon": r/1000, "input_currents_ex": r/1000})
d = nest.Create("spike_detector")
v = nest.Create('voltmeter', 1, {"withgid": True, "withtime": True})
# nest.Connect(ac, n, 'all_to_all', {'weight': .05, 'model': 'static_synapse'})
nest.Connect(n, n, 'all_to_all', {'weight': .1, 'model': 'static_synapse'})
nest.Connect(v, n)
nest.Simulate("1000")
nest.voltage_trace.from_device(v)
pylab.show()
|
|
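Two type details in the script above are worth double-checking when adapting it: `uuid.uuid4()` returns a UUID object while `nest.CopyModel` expects the new model name as a plain string, and `nest.Simulate` takes the simulation time in milliseconds as a number rather than the string "1000". A hedged sketch of those calls with explicit types (model and parameter names copied from the script; not a verified NEST run):

import uuid
import nest  # assumes the same PyNEST installation used by the script above

model_name = str(uuid.uuid4())  # CopyModel wants a string model name
nest.CopyModel('ac_generator', model_name,
               {'amplitude': 1.0, 'frequency': 20.0})
ac = nest.Create(model_name)

nest.Simulate(1000.0)  # simulation time in ms as a float, not "1000"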
d14130c30f776d9b10ab48c993096dce251aba28
|
get_hrs_cc_streamflow_list.py
|
get_hrs_cc_streamflow_list.py
|
import pandas as pd
from kiwis_pie import KIWIS
k = KIWIS('http://www.bom.gov.au/waterdata/services')
def get_cc_hrs_station_list(update = False):
"""
Return list of station IDs that exist in HRS and are supplied by providers that license their data under the Creative Commons license.
:param update: Flag to indicate if cached station information should be fetched from WISKI again (and saved to disk as CSV).
:type update: boolean
"""
if update:
stations = k.get_timeseries_list(parametertype_name = 'Water Course Discharge', ts_name = 'DMQaQc.Merged.DailyMean.09HR')
stations.to_csv('available_watercoursedischarge_stations.csv')
else:
stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)
hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)
station_subset = stations.ix[stations.station_no.isin(hrs_stations.station_id)]
if update:
station_attrs = []
for i, station in station_subset.iterrows():
attrs = k.get_station_list(station_no = station.station_no, parametertype_name = 'Water Course Discharge', return_fields=['station_id','custom_attributes'])
station_attrs.append(attrs.set_index('station_id'))
station_attributes = pd.concat(station_attrs)
station_attributes.to_csv('station_attributes.csv')
else:
station_attributes = pd.read_csv('station_attributes.csv', index_col=0)
cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)
station_list = station_attributes.ix[station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)].index.values
return station_list
if __name__ == "__main__":
for station in get_cc_hrs_station_list():
print(station)
|
Add script to get list of HRS station IDs
|
Add script to get list of HRS station IDs
From providers that license their data as Creative Commons
|
Python
|
bsd-3-clause
|
amacd31/hydromet-toolkit,amacd31/hydromet-toolkit
|
Add script to get list of HRS station IDs
From providers that license their data as Creative Commons
|
import pandas as pd
from kiwis_pie import KIWIS
k = KIWIS('http://www.bom.gov.au/waterdata/services')
def get_cc_hrs_station_list(update = False):
"""
Return list of station IDs that exist in HRS and are supplied by providers that license their data under the Creative Commons license.
:param update: Flag to indicate if cached station information should be fetched from WISKI again (and saved to disk as CSV).
:type update: boolean
"""
if update:
stations = k.get_timeseries_list(parametertype_name = 'Water Course Discharge', ts_name = 'DMQaQc.Merged.DailyMean.09HR')
stations.to_csv('available_watercoursedischarge_stations.csv')
else:
stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)
hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)
station_subset = stations.ix[stations.station_no.isin(hrs_stations.station_id)]
if update:
station_attrs = []
for i, station in station_subset.iterrows():
attrs = k.get_station_list(station_no = station.station_no, parametertype_name = 'Water Course Discharge', return_fields=['station_id','custom_attributes'])
station_attrs.append(attrs.set_index('station_id'))
station_attributes = pd.concat(station_attrs)
station_attributes.to_csv('station_attributes.csv')
else:
station_attributes = pd.read_csv('station_attributes.csv', index_col=0)
cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)
station_list = station_attributes.ix[station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)].index.values
return station_list
if __name__ == "__main__":
for station in get_cc_hrs_station_list():
print(station)
|
<commit_before><commit_msg>Add script to get list of HRS station IDs
From providers that license their data as Creative Commons<commit_after>
|
import pandas as pd
from kiwis_pie import KIWIS
k = KIWIS('http://www.bom.gov.au/waterdata/services')
def get_cc_hrs_station_list(update = False):
"""
Return list of station IDs that exist in HRS and are supplied by providers that license their data under the Creative Commons license.
:param update: Flag to indicate if cached station information should be fetched from WISKI again (and saved to disk as CSV).
:type update: boolean
"""
if update:
stations = k.get_timeseries_list(parametertype_name = 'Water Course Discharge', ts_name = 'DMQaQc.Merged.DailyMean.09HR')
stations.to_csv('available_watercoursedischarge_stations.csv')
else:
stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)
hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)
station_subset = stations.ix[stations.station_no.isin(hrs_stations.station_id)]
if update:
station_attrs = []
for i, station in station_subset.iterrows():
attrs = k.get_station_list(station_no = station.station_no, parametertype_name = 'Water Course Discharge', return_fields=['station_id','custom_attributes'])
station_attrs.append(attrs.set_index('station_id'))
station_attributes = pd.concat(station_attrs)
station_attributes.to_csv('station_attributes.csv')
else:
station_attributes = pd.read_csv('station_attributes.csv', index_col=0)
cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)
station_list = station_attributes.ix[station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)].index.values
return station_list
if __name__ == "__main__":
for station in get_cc_hrs_station_list():
print(station)
|
Add script to get list of HRS station IDs
From providers that license their data as Creative Commonsimport pandas as pd
from kiwis_pie import KIWIS
k = KIWIS('http://www.bom.gov.au/waterdata/services')
def get_cc_hrs_station_list(update = False):
"""
Return list of station IDs that exist in HRS and are supplied by providers that license their data under the Creative Commons license.
:param update: Flag to indicate if cached station information should be fetched from WISKI again (and saved to disk as CSV).
:type update: boolean
"""
if update:
stations = k.get_timeseries_list(parametertype_name = 'Water Course Discharge', ts_name = 'DMQaQc.Merged.DailyMean.09HR')
stations.to_csv('available_watercoursedischarge_stations.csv')
else:
stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)
hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)
station_subset = stations.ix[stations.station_no.isin(hrs_stations.station_id)]
if update:
station_attrs = []
for i, station in station_subset.iterrows():
attrs = k.get_station_list(station_no = station.station_no, parametertype_name = 'Water Course Discharge', return_fields=['station_id','custom_attributes'])
station_attrs.append(attrs.set_index('station_id'))
station_attributes = pd.concat(station_attrs)
station_attributes.to_csv('station_attributes.csv')
else:
station_attributes = pd.read_csv('station_attributes.csv', index_col=0)
cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)
station_list = station_attributes.ix[station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)].index.values
return station_list
if __name__ == "__main__":
for station in get_cc_hrs_station_list():
print(station)
|
<commit_before><commit_msg>Add script to get list of HRS station IDs
From providers that license their data as Creative Commons<commit_after>import pandas as pd
from kiwis_pie import KIWIS
k = KIWIS('http://www.bom.gov.au/waterdata/services')
def get_cc_hrs_station_list(update = False):
"""
Return list of station IDs that exist in HRS and are supplied by providers that license their data under the Creative Commons license.
:param update: Flag to indicate if cached station information should be fetched from WISKI again (and saved to disk as CSV).
:type update: boolean
"""
if update:
stations = k.get_timeseries_list(parametertype_name = 'Water Course Discharge', ts_name = 'DMQaQc.Merged.DailyMean.09HR')
stations.to_csv('available_watercoursedischarge_stations.csv')
else:
stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)
hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)
station_subset = stations.ix[stations.station_no.isin(hrs_stations.station_id)]
if update:
station_attrs = []
for i, station in station_subset.iterrows():
attrs = k.get_station_list(station_no = station.station_no, parametertype_name = 'Water Course Discharge', return_fields=['station_id','custom_attributes'])
station_attrs.append(attrs.set_index('station_id'))
station_attributes = pd.concat(station_attrs)
station_attributes.to_csv('station_attributes.csv')
else:
station_attributes = pd.read_csv('station_attributes.csv', index_col=0)
cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)
station_list = station_attributes.ix[station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)].index.values
return station_list
if __name__ == "__main__":
for station in get_cc_hrs_station_list():
print(station)
|
|
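The selections above use the `.ix` indexer, which later pandas releases deprecated and then removed; both lookups are plain boolean-mask selections and translate directly to `.loc`. A hedged sketch of the same two filters written that way (column names taken from the script, the cached CSV files are assumed to exist):

import pandas as pd

stations = pd.read_csv('available_watercoursedischarge_stations.csv', index_col=0)
hrs_stations = pd.read_csv('hrs_station_list.csv', skiprows=1)

# .loc with a boolean mask replaces the deprecated .ix indexer
station_subset = stations.loc[stations.station_no.isin(hrs_stations.station_id)]

station_attributes = pd.read_csv('station_attributes.csv', index_col=0)
cc_providers = pd.read_csv('cc_providers.csv', skiprows=8)
station_list = station_attributes.loc[
    station_attributes.DATA_OWNER.isin(cc_providers.ProviderID.values)
].index.values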
aec88e4f9cf2d9ee7f9fe876a7b884028b6c190c
|
bin/buildHierarchiqueDiagram.py
|
bin/buildHierarchiqueDiagram.py
|
#!/usr/bin/env python
from datetime import datetime
import os
import argparse
import re
from graphviz import Digraph
PATH = os.path.dirname(os.path.abspath(__file__))
FROM_REGEX = re.compile(ur'^FROM\s+(?P<image>[^:]+)(:(?P<tag>.+))?', re.MULTILINE)
CONTAINERS = {}
def get_current_date():
import datetime
return datetime.date.today().strftime("%d.%m.%Y")
def processDockerfile(inputFile):
outputFile = os.path.splitext(inputFile)
outputFile = os.path.join(os.path.dirname(outputFile[0]),os.path.basename(outputFile[0]))
dockerImage = os.path.basename(os.path.dirname(os.path.dirname(outputFile)))
dockerTag = os.path.basename(os.path.dirname(outputFile))
with open(inputFile, 'r') as fileInput:
DockerfileContent = fileInput.read()
data = ([m.groupdict() for m in FROM_REGEX.finditer(DockerfileContent)])[0]
CONTAINERS["webdevops/%s"%dockerImage] = data.get('image')
def main(args):
dockerfilePath = os.path.abspath(args.dockerfile)
u = Digraph('webdevops', filename='webdevops.gv')
u.body.append('size="10,10"')
u.body.append(r'label = "\n\nWebdevops Containers\n at :%s"' % get_current_date() )
u.node_attr.update(color='lightblue2', style='filled', shape='box')
# Parse Docker file
for root, dirs, files in os.walk(dockerfilePath):
for file in files:
if file.endswith("Dockerfile"):
processDockerfile(os.path.join(root, file))
# Build and render graph
for image, base in CONTAINERS.items():
if "webdevops" in base:
u.edge(base, image)
else:
u.node(image)
print u.source
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dockerfile' ,help='',type=str)
args = parser.parse_args()
main(args)
|
Add Script to generate a container schema from DockerFile
|
Add Script to generate a container schema from DockerFile
|
Python
|
mit
|
webdevops/Dockerfile,webdevops/Dockerfile,webdevops/Dockerfile,webdevops/Dockerfile,webdevops/Dockerfile,webdevops/Dockerfile
|
Add Script to generate a container schema from DockerFile
|
#!/usr/bin/env python
from datetime import datetime
import os
import argparse
import re
from graphviz import Digraph
PATH = os.path.dirname(os.path.abspath(__file__))
FROM_REGEX = re.compile(ur'^FROM\s+(?P<image>[^:]+)(:(?P<tag>.+))?', re.MULTILINE)
CONTAINERS = {}
def get_current_date():
import datetime
return datetime.date.today().strftime("%d.%m.%Y")
def processDockerfile(inputFile):
outputFile = os.path.splitext(inputFile)
outputFile = os.path.join(os.path.dirname(outputFile[0]),os.path.basename(outputFile[0]))
dockerImage = os.path.basename(os.path.dirname(os.path.dirname(outputFile)))
dockerTag = os.path.basename(os.path.dirname(outputFile))
with open(inputFile, 'r') as fileInput:
DockerfileContent = fileInput.read()
data = ([m.groupdict() for m in FROM_REGEX.finditer(DockerfileContent)])[0]
CONTAINERS["webdevops/%s"%dockerImage] = data.get('image')
def main(args):
dockerfilePath = os.path.abspath(args.dockerfile)
u = Digraph('webdevops', filename='webdevops.gv')
u.body.append('size="10,10"')
u.body.append(r'label = "\n\nWebdevops Containers\n at :%s"' % get_current_date() )
u.node_attr.update(color='lightblue2', style='filled', shape='box')
# Parse Docker file
for root, dirs, files in os.walk(dockerfilePath):
for file in files:
if file.endswith("Dockerfile"):
processDockerfile(os.path.join(root, file))
# Build and render graph
for image, base in CONTAINERS.items():
if "webdevops" in base:
u.edge(base, image)
else:
u.node(image)
print u.source
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dockerfile' ,help='',type=str)
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add Script to generate a container schema from DockerFile<commit_after>
|
#!/usr/bin/env python
from datetime import datetime
import os
import argparse
import re
from graphviz import Digraph
PATH = os.path.dirname(os.path.abspath(__file__))
FROM_REGEX = re.compile(ur'^FROM\s+(?P<image>[^:]+)(:(?P<tag>.+))?', re.MULTILINE)
CONTAINERS = {}
def get_current_date():
import datetime
return datetime.date.today().strftime("%d.%m.%Y")
def processDockerfile(inputFile):
outputFile = os.path.splitext(inputFile)
outputFile = os.path.join(os.path.dirname(outputFile[0]),os.path.basename(outputFile[0]))
dockerImage = os.path.basename(os.path.dirname(os.path.dirname(outputFile)))
dockerTag = os.path.basename(os.path.dirname(outputFile))
with open(inputFile, 'r') as fileInput:
DockerfileContent = fileInput.read()
data = ([m.groupdict() for m in FROM_REGEX.finditer(DockerfileContent)])[0]
CONTAINERS["webdevops/%s"%dockerImage] = data.get('image')
def main(args):
dockerfilePath = os.path.abspath(args.dockerfile)
u = Digraph('webdevops', filename='webdevops.gv')
u.body.append('size="10,10"')
u.body.append(r'label = "\n\nWebdevops Containers\n at :%s"' % get_current_date() )
u.node_attr.update(color='lightblue2', style='filled', shape='box')
# Parse Docker file
for root, dirs, files in os.walk(dockerfilePath):
for file in files:
if file.endswith("Dockerfile"):
processDockerfile(os.path.join(root, file))
# Build and render graph
for image, base in CONTAINERS.items():
if "webdevops" in base:
u.edge(base, image)
else:
u.node(image)
print u.source
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dockerfile' ,help='',type=str)
args = parser.parse_args()
main(args)
|
Add Script to generate a container schema from DockerFile#!/usr/bin/env python
from datetime import datetime
import os
import argparse
import re
from graphviz import Digraph
PATH = os.path.dirname(os.path.abspath(__file__))
FROM_REGEX = re.compile(ur'^FROM\s+(?P<image>[^:]+)(:(?P<tag>.+))?', re.MULTILINE)
CONTAINERS = {}
def get_current_date():
import datetime
return datetime.date.today().strftime("%d.%m.%Y")
def processDockerfile(inputFile):
outputFile = os.path.splitext(inputFile)
outputFile = os.path.join(os.path.dirname(outputFile[0]),os.path.basename(outputFile[0]))
dockerImage = os.path.basename(os.path.dirname(os.path.dirname(outputFile)))
dockerTag = os.path.basename(os.path.dirname(outputFile))
with open(inputFile, 'r') as fileInput:
DockerfileContent = fileInput.read()
data = ([m.groupdict() for m in FROM_REGEX.finditer(DockerfileContent)])[0]
CONTAINERS["webdevops/%s"%dockerImage] = data.get('image')
def main(args):
dockerfilePath = os.path.abspath(args.dockerfile)
u = Digraph('webdevops', filename='webdevops.gv')
u.body.append('size="10,10"')
u.body.append(r'label = "\n\nWebdevops Containers\n at :%s"' % get_current_date() )
u.node_attr.update(color='lightblue2', style='filled', shape='box')
# Parse Docker file
for root, dirs, files in os.walk(dockerfilePath):
for file in files:
if file.endswith("Dockerfile"):
processDockerfile(os.path.join(root, file))
# Build and render graph
for image, base in CONTAINERS.items():
if "webdevops" in base:
u.edge(base, image)
else:
u.node(image)
print u.source
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dockerfile' ,help='',type=str)
args = parser.parse_args()
main(args)
|
<commit_before><commit_msg>Add Script to generate a container schema from DockerFile<commit_after>#!/usr/bin/env python
from datetime import datetime
import os
import argparse
import re
from graphviz import Digraph
PATH = os.path.dirname(os.path.abspath(__file__))
FROM_REGEX = re.compile(ur'^FROM\s+(?P<image>[^:]+)(:(?P<tag>.+))?', re.MULTILINE)
CONTAINERS = {}
def get_current_date():
import datetime
return datetime.date.today().strftime("%d.%m.%Y")
def processDockerfile(inputFile):
outputFile = os.path.splitext(inputFile)
outputFile = os.path.join(os.path.dirname(outputFile[0]),os.path.basename(outputFile[0]))
dockerImage = os.path.basename(os.path.dirname(os.path.dirname(outputFile)))
dockerTag = os.path.basename(os.path.dirname(outputFile))
with open(inputFile, 'r') as fileInput:
DockerfileContent = fileInput.read()
data = ([m.groupdict() for m in FROM_REGEX.finditer(DockerfileContent)])[0]
CONTAINERS["webdevops/%s"%dockerImage] = data.get('image')
def main(args):
dockerfilePath = os.path.abspath(args.dockerfile)
u = Digraph('webdevops', filename='webdevops.gv')
u.body.append('size="10,10"')
u.body.append(r'label = "\n\nWebdevops Containers\n at :%s"' % get_current_date() )
u.node_attr.update(color='lightblue2', style='filled', shape='box')
# Parse Docker file
for root, dirs, files in os.walk(dockerfilePath):
for file in files:
if file.endswith("Dockerfile"):
processDockerfile(os.path.join(root, file))
# Build and render graph
for image, base in CONTAINERS.items():
if "webdevops" in base:
u.edge(base, image)
else:
u.node(image)
print u.source
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dockerfile' ,help='',type=str)
args = parser.parse_args()
main(args)
|
|
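Note that the `ur''` regex prefix and the bare `print` statement tie the script above to Python 2. Independently of that, the graphviz `Digraph` it builds can be rendered to a file instead of printing the DOT source; a hedged sketch of that usage (the edge shown is made up, and rendering requires the Graphviz binaries to be installed):

from graphviz import Digraph

u = Digraph('webdevops', filename='webdevops.gv')
u.node_attr.update(color='lightblue2', style='filled', shape='box')
u.edge('ubuntu:16.04', 'webdevops/base')  # hypothetical example edge

# Writes out/webdevops.gv and renders out/webdevops.gv.pdf
u.render(directory='out', cleanup=True)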
c4d583966ef1a4d9bdb57715ef5e766ba62fbed6
|
jacquard/directory/tests/test_django.py
|
jacquard/directory/tests/test_django.py
|
from jacquard.directory.base import UserEntry
from jacquard.directory.django import DjangoDirectory
import pytest
import unittest.mock
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
if sqlalchemy is not None:
test_database = sqlalchemy.create_engine('sqlite://')
test_database.execute("""
CREATE TABLE auth_user(
id INTEGER NOT NULL PRIMARY KEY,
date_joined DATETIME NOT NULL,
is_superuser BOOLEAN NOT NULL
)
""")
test_database.execute("""
INSERT INTO auth_user(id, date_joined, is_superuser) VALUES
(1, date('now'), 1),
(2, date('now'), 0),
(3, date('now'), 0)
""")
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_extant_user():
directory = DjangoDirectory('')
user_one = directory.lookup('1')
assert list(user_one.tags) == ['superuser']
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_missing_user():
directory = DjangoDirectory('')
user_zero = directory.lookup('0')
assert user_zero is None
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_all_users():
directory = DjangoDirectory('')
users = directory.all_users()
assert [x.id for x in users] == [1, 2, 3]
|
Add tests for the Django directory
|
Add tests for the Django directory
|
Python
|
mit
|
prophile/jacquard,prophile/jacquard
|
Add tests for the Django directory
|
from jacquard.directory.base import UserEntry
from jacquard.directory.django import DjangoDirectory
import pytest
import unittest.mock
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
if sqlalchemy is not None:
test_database = sqlalchemy.create_engine('sqlite://')
test_database.execute("""
CREATE TABLE auth_user(
id INTEGER NOT NULL PRIMARY KEY,
date_joined DATETIME NOT NULL,
is_superuser BOOLEAN NOT NULL
)
""")
test_database.execute("""
INSERT INTO auth_user(id, date_joined, is_superuser) VALUES
(1, date('now'), 1),
(2, date('now'), 0),
(3, date('now'), 0)
""")
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_extant_user():
directory = DjangoDirectory('')
user_one = directory.lookup('1')
assert list(user_one.tags) == ['superuser']
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_missing_user():
directory = DjangoDirectory('')
user_zero = directory.lookup('0')
assert user_zero is None
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_all_users():
directory = DjangoDirectory('')
users = directory.all_users()
assert [x.id for x in users] == [1, 2, 3]
|
<commit_before><commit_msg>Add tests for the Django directory<commit_after>
|
from jacquard.directory.base import UserEntry
from jacquard.directory.django import DjangoDirectory
import pytest
import unittest.mock
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
if sqlalchemy is not None:
test_database = sqlalchemy.create_engine('sqlite://')
test_database.execute("""
CREATE TABLE auth_user(
id INTEGER NOT NULL PRIMARY KEY,
date_joined DATETIME NOT NULL,
is_superuser BOOLEAN NOT NULL
)
""")
test_database.execute("""
INSERT INTO auth_user(id, date_joined, is_superuser) VALUES
(1, date('now'), 1),
(2, date('now'), 0),
(3, date('now'), 0)
""")
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_extant_user():
directory = DjangoDirectory('')
user_one = directory.lookup('1')
assert list(user_one.tags) == ['superuser']
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_missing_user():
directory = DjangoDirectory('')
user_zero = directory.lookup('0')
assert user_zero is None
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_all_users():
directory = DjangoDirectory('')
users = directory.all_users()
assert [x.id for x in users] == [1, 2, 3]
|
Add tests for the Django directoryfrom jacquard.directory.base import UserEntry
from jacquard.directory.django import DjangoDirectory
import pytest
import unittest.mock
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
if sqlalchemy is not None:
test_database = sqlalchemy.create_engine('sqlite://')
test_database.execute("""
CREATE TABLE auth_user(
id INTEGER NOT NULL PRIMARY KEY,
date_joined DATETIME NOT NULL,
is_superuser BOOLEAN NOT NULL
)
""")
test_database.execute("""
INSERT INTO auth_user(id, date_joined, is_superuser) VALUES
(1, date('now'), 1),
(2, date('now'), 0),
(3, date('now'), 0)
""")
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_extant_user():
directory = DjangoDirectory('')
user_one = directory.lookup('1')
assert list(user_one.tags) == ['superuser']
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_missing_user():
directory = DjangoDirectory('')
user_zero = directory.lookup('0')
assert user_zero is None
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_all_users():
directory = DjangoDirectory('')
users = directory.all_users()
assert [x.id for x in users] == [1, 2, 3]
|
<commit_before><commit_msg>Add tests for the Django directory<commit_after>from jacquard.directory.base import UserEntry
from jacquard.directory.django import DjangoDirectory
import pytest
import unittest.mock
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
if sqlalchemy is not None:
test_database = sqlalchemy.create_engine('sqlite://')
test_database.execute("""
CREATE TABLE auth_user(
id INTEGER NOT NULL PRIMARY KEY,
date_joined DATETIME NOT NULL,
is_superuser BOOLEAN NOT NULL
)
""")
test_database.execute("""
INSERT INTO auth_user(id, date_joined, is_superuser) VALUES
(1, date('now'), 1),
(2, date('now'), 0),
(3, date('now'), 0)
""")
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_extant_user():
directory = DjangoDirectory('')
user_one = directory.lookup('1')
assert list(user_one.tags) == ['superuser']
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_missing_user():
directory = DjangoDirectory('')
user_zero = directory.lookup('0')
assert user_zero is None
@pytest.mark.skipif(
sqlalchemy is None,
reason="sqlalchemy not installed",
)
@unittest.mock.patch('sqlalchemy.create_engine', lambda *args: test_database)
def test_get_all_users():
directory = DjangoDirectory('')
users = directory.all_users()
assert [x.id for x in users] == [1, 2, 3]
|
|
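The module-level try/except plus the repeated `skipif` decorators above are a perfectly valid way to guard an optional dependency; pytest's `importorskip` is a more compact alternative that skips the whole module at collection time. A hedged sketch (SQLAlchemy 1.x style `engine.execute`, matching the original tests; purely illustrative):

import pytest

# Skips every test in this module during collection if sqlalchemy is absent,
# replacing the try/except + per-test skipif pattern used above.
sqlalchemy = pytest.importorskip("sqlalchemy")

test_database = sqlalchemy.create_engine("sqlite://")

def test_engine_is_usable():
    assert test_database.execute("SELECT 1").scalar() == 1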
6babb6e64e93ed74a72203fdc67955ae8ca3bfb3
|
testing/benchmark.py
|
testing/benchmark.py
|
"""
Benchmarking and performance tests.
"""
import pytest
from pluggy import _MultiCall, HookImpl
from pluggy import HookspecMarker, HookimplMarker
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def MC(methods, kwargs, firstresult=False):
hookfuncs = []
for method in methods:
f = HookImpl(None, "<temp>", method, method.example_impl)
hookfuncs.append(f)
return _MultiCall(hookfuncs, kwargs, {"firstresult": firstresult})
@hookimpl(hookwrapper=True)
def m1(arg1, arg2, arg3):
yield
@hookimpl
def m2(arg1, arg2, arg3):
return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def w1(arg1, arg2, arg3):
yield
@hookimpl(hookwrapper=True)
def w2(arg1, arg2, arg3):
yield
def inner_exec(methods):
return MC(methods, {'arg1': 1, 'arg2': 2, 'arg3': 3}).execute()
@pytest.mark.benchmark
def test_hookimpls_speed(benchmark):
benchmark(inner_exec, [m1, m2])
@pytest.mark.benchmark
def test_hookwrappers_speed(benchmark):
benchmark(inner_exec, [w1, w2])
@pytest.mark.benchmark
def test_impls_and_wrappers_speed(benchmark):
benchmark(inner_exec, [m1, m2, w1, w2])
|
Add a baseline set of _MultiCall performance tests
|
Add a baseline set of _MultiCall performance tests
This begins an effort to incorporate run-time speed tests using
`pytest-benchmark`. This initial test set audits the `_MultiCall`
loop with hookimpls, hookwrappers and the combination of both.
The intention is to eventually have a reliable set of tests which
enable making core component modifications without disrupting
performance as per #37.
|
Python
|
mit
|
RonnyPfannschmidt/pluggy,nicoddemus/pluggy,hpk42/pluggy,pytest-dev/pluggy,pytest-dev/pluggy,RonnyPfannschmidt/pluggy,tgoodlet/pluggy
|
Add a baseline set of _MultiCall performance tests
This begins an effort to incorporate run-time speed tests using
`pytest-benchmark`. This initial test set audits the `_MultiCall`
loop with hookimpls, hookwrappers and the combination of both.
The intention is to eventually have a reliable set of tests which
enable making core component modifications without disrupting
performance as per #37.
|
"""
Benchmarking and performance tests.
"""
import pytest
from pluggy import _MultiCall, HookImpl
from pluggy import HookspecMarker, HookimplMarker
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def MC(methods, kwargs, firstresult=False):
hookfuncs = []
for method in methods:
f = HookImpl(None, "<temp>", method, method.example_impl)
hookfuncs.append(f)
return _MultiCall(hookfuncs, kwargs, {"firstresult": firstresult})
@hookimpl(hookwrapper=True)
def m1(arg1, arg2, arg3):
yield
@hookimpl
def m2(arg1, arg2, arg3):
return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def w1(arg1, arg2, arg3):
yield
@hookimpl(hookwrapper=True)
def w2(arg1, arg2, arg3):
yield
def inner_exec(methods):
return MC(methods, {'arg1': 1, 'arg2': 2, 'arg3': 3}).execute()
@pytest.mark.benchmark
def test_hookimpls_speed(benchmark):
benchmark(inner_exec, [m1, m2])
@pytest.mark.benchmark
def test_hookwrappers_speed(benchmark):
benchmark(inner_exec, [w1, w2])
@pytest.mark.benchmark
def test_impls_and_wrappers_speed(benchmark):
benchmark(inner_exec, [m1, m2, w1, w2])
|
<commit_before><commit_msg>Add a baseline set of _MultiCall performance tests
This begins an effort to incorporate run-time speed tests using
`pytest-benchmark`. This initial test set audits the `_MultiCall`
loop with hookimpls, hookwrappers and the combination of both.
The intention is to eventually have a reliable set of tests which
enable making core component modifications without disrupting
performance as per #37.<commit_after>
|
"""
Benchmarking and performance tests.
"""
import pytest
from pluggy import _MultiCall, HookImpl
from pluggy import HookspecMarker, HookimplMarker
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def MC(methods, kwargs, firstresult=False):
hookfuncs = []
for method in methods:
f = HookImpl(None, "<temp>", method, method.example_impl)
hookfuncs.append(f)
return _MultiCall(hookfuncs, kwargs, {"firstresult": firstresult})
@hookimpl(hookwrapper=True)
def m1(arg1, arg2, arg3):
yield
@hookimpl
def m2(arg1, arg2, arg3):
return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def w1(arg1, arg2, arg3):
yield
@hookimpl(hookwrapper=True)
def w2(arg1, arg2, arg3):
yield
def inner_exec(methods):
return MC(methods, {'arg1': 1, 'arg2': 2, 'arg3': 3}).execute()
@pytest.mark.benchmark
def test_hookimpls_speed(benchmark):
benchmark(inner_exec, [m1, m2])
@pytest.mark.benchmark
def test_hookwrappers_speed(benchmark):
benchmark(inner_exec, [w1, w2])
@pytest.mark.benchmark
def test_impls_and_wrappers_speed(benchmark):
benchmark(inner_exec, [m1, m2, w1, w2])
|
Add a baseline set of _MultiCall performance tests
This begins an effort to incorporate run-time speed tests using
`pytest-benchmark`. This initial test set audits the `_MultiCall`
loop with hookimpls, hookwrappers and the combination of both.
The intention is to eventually have a reliable set of tests which
enable making core component modifications without disrupting
performance as per #37."""
Benchmarking and performance tests.
"""
import pytest
from pluggy import _MultiCall, HookImpl
from pluggy import HookspecMarker, HookimplMarker
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def MC(methods, kwargs, firstresult=False):
hookfuncs = []
for method in methods:
f = HookImpl(None, "<temp>", method, method.example_impl)
hookfuncs.append(f)
return _MultiCall(hookfuncs, kwargs, {"firstresult": firstresult})
@hookimpl(hookwrapper=True)
def m1(arg1, arg2, arg3):
yield
@hookimpl
def m2(arg1, arg2, arg3):
return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def w1(arg1, arg2, arg3):
yield
@hookimpl(hookwrapper=True)
def w2(arg1, arg2, arg3):
yield
def inner_exec(methods):
return MC(methods, {'arg1': 1, 'arg2': 2, 'arg3': 3}).execute()
@pytest.mark.benchmark
def test_hookimpls_speed(benchmark):
benchmark(inner_exec, [m1, m2])
@pytest.mark.benchmark
def test_hookwrappers_speed(benchmark):
benchmark(inner_exec, [w1, w2])
@pytest.mark.benchmark
def test_impls_and_wrappers_speed(benchmark):
benchmark(inner_exec, [m1, m2, w1, w2])
|
<commit_before><commit_msg>Add a baseline set of _MultiCall performance tests
This begins an effort to incorporate run-time speed tests using
`pytest-benchmark`. This initial test set audits the `_MultiCall`
loop with hookimpls, hookwrappers and the combination of both.
The intention is to eventually have a reliable set of tests which
enable making core component modifications without disrupting
performance as per #37.<commit_after>"""
Benchmarking and performance tests.
"""
import pytest
from pluggy import _MultiCall, HookImpl
from pluggy import HookspecMarker, HookimplMarker
hookspec = HookspecMarker("example")
hookimpl = HookimplMarker("example")
def MC(methods, kwargs, firstresult=False):
hookfuncs = []
for method in methods:
f = HookImpl(None, "<temp>", method, method.example_impl)
hookfuncs.append(f)
return _MultiCall(hookfuncs, kwargs, {"firstresult": firstresult})
@hookimpl(hookwrapper=True)
def m1(arg1, arg2, arg3):
yield
@hookimpl
def m2(arg1, arg2, arg3):
return arg1, arg2, arg3
@hookimpl(hookwrapper=True)
def w1(arg1, arg2, arg3):
yield
@hookimpl(hookwrapper=True)
def w2(arg1, arg2, arg3):
yield
def inner_exec(methods):
return MC(methods, {'arg1': 1, 'arg2': 2, 'arg3': 3}).execute()
@pytest.mark.benchmark
def test_hookimpls_speed(benchmark):
benchmark(inner_exec, [m1, m2])
@pytest.mark.benchmark
def test_hookwrappers_speed(benchmark):
benchmark(inner_exec, [w1, w2])
@pytest.mark.benchmark
def test_impls_and_wrappers_speed(benchmark):
benchmark(inner_exec, [m1, m2, w1, w2])
|
|
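For readers unfamiliar with the `benchmark` fixture used in the record above: pytest-benchmark injects a callable fixture, and `benchmark(fn, *args)` times repeated calls while returning the function's own result. A minimal sketch under that assumption; the summing workload is an illustrative stand-in, not part of pluggy:

# Minimal pytest-benchmark sketch; busy_work is an illustrative stand-in workload.
import pytest

def busy_work(n):
    # Cheap stand-in for the hook-call loop measured in the tests above.
    return sum(i * i for i in range(n))

@pytest.mark.benchmark
def test_busy_work_speed(benchmark):
    # benchmark(callable, *args) runs the callable repeatedly, records timing
    # statistics, and returns the callable's own result.
    result = benchmark(busy_work, 1000)
    assert result == sum(i * i for i in range(1000))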
1831dbd065a8776a77d18e10b44f84c99bca4c75
|
spacy/tests/textcat/test_textcat.py
|
spacy/tests/textcat/test_textcat.py
|
from __future__ import unicode_literals
from ...language import Language
def test_simple_train():
nlp = Language()
nlp.add_pipe(nlp.create_pipe('textcat'))
nlp.get_pipe('textcat').add_label('is_good')
nlp.begin_training()
for i in range(5):
for text, answer in [('aaaa', 1.), ('bbbb', 0), ('aa', 1.),
('bbbbbbbbb', 0.), ('aaaaaa', 1)]:
nlp.update([text], [{'cats': {'answer': answer}}])
doc = nlp(u'aaa')
assert 'is_good' in doc.cats
assert doc.cats['is_good'] >= 0.5
|
Add test of simple textcat workflow
|
Add test of simple textcat workflow
|
Python
|
mit
|
aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,honnibal/spaCy,honnibal/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,spacy-io/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,recognai/spaCy,recognai/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,aikramer2/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy
|
Add test of simple textcat workflow
|
from __future__ import unicode_literals
from ...language import Language
def test_simple_train():
nlp = Language()
nlp.add_pipe(nlp.create_pipe('textcat'))
nlp.get_pipe('textcat').add_label('is_good')
nlp.begin_training()
for i in range(5):
for text, answer in [('aaaa', 1.), ('bbbb', 0), ('aa', 1.),
('bbbbbbbbb', 0.), ('aaaaaa', 1)]:
nlp.update([text], [{'cats': {'answer': answer}}])
doc = nlp(u'aaa')
assert 'is_good' in doc.cats
assert doc.cats['is_good'] >= 0.5
|
<commit_before><commit_msg>Add test of simple textcat workflow<commit_after>
|
from __future__ import unicode_literals
from ...language import Language
def test_simple_train():
nlp = Language()
nlp.add_pipe(nlp.create_pipe('textcat'))
nlp.get_pipe('textcat').add_label('is_good')
nlp.begin_training()
for i in range(5):
for text, answer in [('aaaa', 1.), ('bbbb', 0), ('aa', 1.),
('bbbbbbbbb', 0.), ('aaaaaa', 1)]:
nlp.update([text], [{'cats': {'answer': answer}}])
doc = nlp(u'aaa')
assert 'is_good' in doc.cats
assert doc.cats['is_good'] >= 0.5
|
Add test of simple textcat workflowfrom __future__ import unicode_literals
from ...language import Language
def test_simple_train():
nlp = Language()
nlp.add_pipe(nlp.create_pipe('textcat'))
nlp.get_pipe('textcat').add_label('is_good')
nlp.begin_training()
for i in range(5):
for text, answer in [('aaaa', 1.), ('bbbb', 0), ('aa', 1.),
('bbbbbbbbb', 0.), ('aaaaaa', 1)]:
nlp.update([text], [{'cats': {'answer': answer}}])
doc = nlp(u'aaa')
assert 'is_good' in doc.cats
assert doc.cats['is_good'] >= 0.5
|
<commit_before><commit_msg>Add test of simple textcat workflow<commit_after>from __future__ import unicode_literals
from ...language import Language
def test_simple_train():
nlp = Language()
nlp.add_pipe(nlp.create_pipe('textcat'))
nlp.get_pipe('textcat').add_label('is_good')
nlp.begin_training()
for i in range(5):
for text, answer in [('aaaa', 1.), ('bbbb', 0), ('aa', 1.),
('bbbbbbbbb', 0.), ('aaaaaa', 1)]:
nlp.update([text], [{'cats': {'answer': answer}}])
doc = nlp(u'aaa')
assert 'is_good' in doc.cats
assert doc.cats['is_good'] >= 0.5
|
|
1a1e9123313fdedab14700ead90748d9e6182a42
|
migrations/versions/da8b38b5bdd5_add_board_moderator_roles.py
|
migrations/versions/da8b38b5bdd5_add_board_moderator_roles.py
|
"""Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=False),
sa.Column('moderator_id', sa.Integer(), nullable=False),
sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
sa.PrimaryKeyConstraint('board_id', 'moderator_id')
)
op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=True),
sa.Column('moderator_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
)
op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
|
Add revision for new boardmoderator columns
|
Add revision for new boardmoderator columns
|
Python
|
mit
|
Floens/uchan,Floens/uchan,Floens/uchan,Floens/uchan,Floens/uchan
|
Add revision for new boardmoderator columns
|
"""Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=False),
sa.Column('moderator_id', sa.Integer(), nullable=False),
sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
sa.PrimaryKeyConstraint('board_id', 'moderator_id')
)
op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=True),
sa.Column('moderator_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
)
op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
|
<commit_before><commit_msg>Add revision for new boardmoderator columns<commit_after>
|
"""Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=False),
sa.Column('moderator_id', sa.Integer(), nullable=False),
sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
sa.PrimaryKeyConstraint('board_id', 'moderator_id')
)
op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=True),
sa.Column('moderator_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
)
op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
|
Add revision for new boardmoderator columns"""Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=False),
sa.Column('moderator_id', sa.Integer(), nullable=False),
sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
sa.PrimaryKeyConstraint('board_id', 'moderator_id')
)
op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=True),
sa.Column('moderator_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
)
op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
|
<commit_before><commit_msg>Add revision for new boardmoderator columns<commit_after>"""Add board moderator roles
Revision ID: da8b38b5bdd5
Revises: 90ac01a2df
Create Date: 2016-05-03 09:32:06.756899
"""
# revision identifiers, used by Alembic.
revision = 'da8b38b5bdd5'
down_revision = '90ac01a2df'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.drop_index(op.f('ix_boardmoderator_board_id'), table_name='boardmoderator')
op.drop_index(op.f('ix_boardmoderator_moderator_id'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=False),
sa.Column('moderator_id', sa.Integer(), nullable=False),
sa.Column('roles', postgresql.ARRAY(sa.String()), nullable=False),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], ),
sa.PrimaryKeyConstraint('board_id', 'moderator_id')
)
op.create_index(op.f('ix_boardmoderator_roles'), 'boardmoderator', ['roles'], unique=False)
def downgrade():
op.drop_index(op.f('ix_boardmoderator_roles'), table_name='boardmoderator')
op.drop_table('boardmoderator')
op.create_table('boardmoderator',
sa.Column('board_id', sa.Integer(), nullable=True),
sa.Column('moderator_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['board_id'], ['board.id'], ),
sa.ForeignKeyConstraint(['moderator_id'], ['moderator.id'], )
)
op.create_index(op.f('ix_boardmoderator_board_id'), 'boardmoderator', ['board_id'], unique=False)
op.create_index(op.f('ix_boardmoderator_moderator_id'), 'boardmoderator', ['moderator_id'], unique=False)
|
|
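Side note on the `roles` column in the migration above: SQLAlchemy's PostgreSQL dialect supplies the ARRAY type, and the resulting DDL can be previewed without a live database. A small sketch under that assumption (the table name is illustrative):

# Sketch of declaring a PostgreSQL ARRAY column like the migration's 'roles' field;
# the table name is illustrative and no database connection is required.
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from sqlalchemy.schema import CreateTable

metadata = sa.MetaData()
example = sa.Table(
    "boardmoderator_example",
    metadata,
    sa.Column("board_id", sa.Integer, primary_key=True),
    sa.Column("moderator_id", sa.Integer, primary_key=True),
    sa.Column("roles", postgresql.ARRAY(sa.String), nullable=False),
)

# Render the CREATE TABLE statement using the PostgreSQL dialect.
print(CreateTable(example).compile(dialect=postgresql.dialect()))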
a8b48d9174ce9c30166c0c2a8011c2c40624c4bd
|
locations/spiders/planned_parenthood.py
|
locations/spiders/planned_parenthood.py
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class PlannedParenthoodSpider(scrapy.Spider):
name = "planned_parenthood"
allowed_domains = ["www.plannedparenthood.org"]
start_urls = (
'https://www.plannedparenthood.org/health-center',
)
def parse(self, response):
state_urls = response.xpath('//ul[@class="quicklist-list"]/li/a/@href').extract()
for path in state_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_state,
)
def parse_state(self, response):
venue_urls = response.xpath('//ul[@class="quicklist-list"]/li/p/a/@href').extract()
for path in venue_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_venue,
)
def parse_venue(self, response):
properties = {
'addr:full': response.xpath('//*[@itemprop="streetAddress"]/text()')[0].extract(),
'addr:city': response.xpath('//*[@itemprop="addressLocality"]/text()')[0].extract(),
'addr:state': response.xpath('//*[@itemprop="addressRegion"]/text()')[0].extract(),
'addr:postcode': response.xpath('//*[@itemprop="postalCode"]/text()')[0].extract(),
'ref': response.url,
'website': response.url,
}
map_image_url = response.xpath('//img[@class="address-map"]/@src')[0].extract()
match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url)
lon_lat = [
float(match.group(2)),
float(match.group(1)),
]
yield GeojsonPointItem(
properties=properties,
lon_lat=lon_lat,
)
|
Add a spider for Planned Parenthood
|
Add a spider for Planned Parenthood
Fixes #184
|
Python
|
mit
|
iandees/all-the-places,iandees/all-the-places,iandees/all-the-places
|
Add a spider for Planned Parenthood
Fixes #184
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class PlannedParenthoodSpider(scrapy.Spider):
name = "planned_parenthood"
allowed_domains = ["www.plannedparenthood.org"]
start_urls = (
'https://www.plannedparenthood.org/health-center',
)
def parse(self, response):
state_urls = response.xpath('//ul[@class="quicklist-list"]/li/a/@href').extract()
for path in state_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_state,
)
def parse_state(self, response):
venue_urls = response.xpath('//ul[@class="quicklist-list"]/li/p/a/@href').extract()
for path in venue_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_venue,
)
def parse_venue(self, response):
properties = {
'addr:full': response.xpath('//*[@itemprop="streetAddress"]/text()')[0].extract(),
'addr:city': response.xpath('//*[@itemprop="addressLocality"]/text()')[0].extract(),
'addr:state': response.xpath('//*[@itemprop="addressRegion"]/text()')[0].extract(),
'addr:postcode': response.xpath('//*[@itemprop="postalCode"]/text()')[0].extract(),
'ref': response.url,
'website': response.url,
}
map_image_url = response.xpath('//img[@class="address-map"]/@src')[0].extract()
match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url)
lon_lat = [
float(match.group(2)),
float(match.group(1)),
]
yield GeojsonPointItem(
properties=properties,
lon_lat=lon_lat,
)
|
<commit_before><commit_msg>Add a spider for Planned Parenthood
Fixes #184<commit_after>
|
# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class PlannedParenthoodSpider(scrapy.Spider):
name = "planned_parenthood"
allowed_domains = ["www.plannedparenthood.org"]
start_urls = (
'https://www.plannedparenthood.org/health-center',
)
def parse(self, response):
state_urls = response.xpath('//ul[@class="quicklist-list"]/li/a/@href').extract()
for path in state_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_state,
)
def parse_state(self, response):
venue_urls = response.xpath('//ul[@class="quicklist-list"]/li/p/a/@href').extract()
for path in venue_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_venue,
)
def parse_venue(self, response):
properties = {
'addr:full': response.xpath('//*[@itemprop="streetAddress"]/text()')[0].extract(),
'addr:city': response.xpath('//*[@itemprop="addressLocality"]/text()')[0].extract(),
'addr:state': response.xpath('//*[@itemprop="addressRegion"]/text()')[0].extract(),
'addr:postcode': response.xpath('//*[@itemprop="postalCode"]/text()')[0].extract(),
'ref': response.url,
'website': response.url,
}
map_image_url = response.xpath('//img[@class="address-map"]/@src')[0].extract()
match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url)
lon_lat = [
float(match.group(2)),
float(match.group(1)),
]
yield GeojsonPointItem(
properties=properties,
lon_lat=lon_lat,
)
|
Add a spider for Planned Parenthood
Fixes #184# -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class PlannedParenthoodSpider(scrapy.Spider):
name = "planned_parenthood"
allowed_domains = ["www.plannedparenthood.org"]
start_urls = (
'https://www.plannedparenthood.org/health-center',
)
def parse(self, response):
state_urls = response.xpath('//ul[@class="quicklist-list"]/li/a/@href').extract()
for path in state_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_state,
)
def parse_state(self, response):
venue_urls = response.xpath('//ul[@class="quicklist-list"]/li/p/a/@href').extract()
for path in venue_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_venue,
)
def parse_venue(self, response):
properties = {
'addr:full': response.xpath('//*[@itemprop="streetAddress"]/text()')[0].extract(),
'addr:city': response.xpath('//*[@itemprop="addressLocality"]/text()')[0].extract(),
'addr:state': response.xpath('//*[@itemprop="addressRegion"]/text()')[0].extract(),
'addr:postcode': response.xpath('//*[@itemprop="postalCode"]/text()')[0].extract(),
'ref': response.url,
'website': response.url,
}
map_image_url = response.xpath('//img[@class="address-map"]/@src')[0].extract()
match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url)
lon_lat = [
float(match.group(2)),
float(match.group(1)),
]
yield GeojsonPointItem(
properties=properties,
lon_lat=lon_lat,
)
|
<commit_before><commit_msg>Add a spider for Planned Parenthood
Fixes #184<commit_after># -*- coding: utf-8 -*-
import scrapy
import json
import re
from locations.items import GeojsonPointItem
class PlannedParenthoodSpider(scrapy.Spider):
name = "planned_parenthood"
allowed_domains = ["www.plannedparenthood.org"]
start_urls = (
'https://www.plannedparenthood.org/health-center',
)
def parse(self, response):
state_urls = response.xpath('//ul[@class="quicklist-list"]/li/a/@href').extract()
for path in state_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_state,
)
def parse_state(self, response):
venue_urls = response.xpath('//ul[@class="quicklist-list"]/li/p/a/@href').extract()
for path in venue_urls:
yield scrapy.Request(
response.urljoin(path),
callback=self.parse_venue,
)
def parse_venue(self, response):
properties = {
'addr:full': response.xpath('//*[@itemprop="streetAddress"]/text()')[0].extract(),
'addr:city': response.xpath('//*[@itemprop="addressLocality"]/text()')[0].extract(),
'addr:state': response.xpath('//*[@itemprop="addressRegion"]/text()')[0].extract(),
'addr:postcode': response.xpath('//*[@itemprop="postalCode"]/text()')[0].extract(),
'ref': response.url,
'website': response.url,
}
map_image_url = response.xpath('//img[@class="address-map"]/@src')[0].extract()
match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url)
lon_lat = [
float(match.group(2)),
float(match.group(1)),
]
yield GeojsonPointItem(
properties=properties,
lon_lat=lon_lat,
)
|
|
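The spider above recovers latitude and longitude from a static-map image URL rather than from structured data. The same extraction in isolation, with a made-up URL standing in for the real asset:

# Standalone sketch of the coordinate extraction used by parse_venue above;
# the URL is a fabricated example, not a real asset from the site.
import re

map_image_url = "https://maps.example.com/staticmap?center=40.7128,-74.0060&zoom=14"
match = re.search(r"center=(.*?),(.*?)&zoom", map_image_url)
if match:
    lon_lat = [float(match.group(2)), float(match.group(1))]  # [longitude, latitude]
    print(lon_lat)  # [-74.006, 40.7128]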
45fea3847e2800a920ccb06e102ebaf9a5f9a4ce
|
tk/material/migrations/0002_auto_20170704_2155.py
|
tk/material/migrations/0002_auto_20170704_2155.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 19:55
from __future__ import unicode_literals
from django.db import migrations
import localized_fields.fields.field
class Migration(migrations.Migration):
dependencies = [
('material', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='approval',
options={'ordering': ['-requested'], 'verbose_name': 'Approval', 'verbose_name_plural': 'Approvals'},
),
migrations.AlterModelOptions(
name='goal',
options={'ordering': ['name'], 'verbose_name': 'Goal', 'verbose_name_plural': 'Goals'},
),
migrations.AlterModelOptions(
name='groupfeature',
options={'ordering': ['name'], 'verbose_name': 'Group feature', 'verbose_name_plural': 'Group features'},
),
migrations.AlterModelOptions(
name='location',
options={'ordering': ['name'], 'verbose_name': 'Location', 'verbose_name_plural': 'Locations'},
),
migrations.AlterModelOptions(
name='subject',
options={'ordering': ['name'], 'verbose_name': 'Subject', 'verbose_name_plural': 'Subjects'},
),
migrations.AlterField(
model_name='goal',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='groupfeature',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='location',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='subject',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
]
|
Add forgotten migration for newly introduced default ordering
|
Add forgotten migration for newly introduced default ordering
|
Python
|
agpl-3.0
|
GISAElkartea/tresna-kutxa,GISAElkartea/tresna-kutxa,GISAElkartea/tresna-kutxa,GISAElkartea/tresna-kutxa
|
Add forgotten migration for newly introduced default ordering
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 19:55
from __future__ import unicode_literals
from django.db import migrations
import localized_fields.fields.field
class Migration(migrations.Migration):
dependencies = [
('material', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='approval',
options={'ordering': ['-requested'], 'verbose_name': 'Approval', 'verbose_name_plural': 'Approvals'},
),
migrations.AlterModelOptions(
name='goal',
options={'ordering': ['name'], 'verbose_name': 'Goal', 'verbose_name_plural': 'Goals'},
),
migrations.AlterModelOptions(
name='groupfeature',
options={'ordering': ['name'], 'verbose_name': 'Group feature', 'verbose_name_plural': 'Group features'},
),
migrations.AlterModelOptions(
name='location',
options={'ordering': ['name'], 'verbose_name': 'Location', 'verbose_name_plural': 'Locations'},
),
migrations.AlterModelOptions(
name='subject',
options={'ordering': ['name'], 'verbose_name': 'Subject', 'verbose_name_plural': 'Subjects'},
),
migrations.AlterField(
model_name='goal',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='groupfeature',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='location',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='subject',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
]
|
<commit_before><commit_msg>Add forgotten migration for newly introduced default ordering<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 19:55
from __future__ import unicode_literals
from django.db import migrations
import localized_fields.fields.field
class Migration(migrations.Migration):
dependencies = [
('material', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='approval',
options={'ordering': ['-requested'], 'verbose_name': 'Approval', 'verbose_name_plural': 'Approvals'},
),
migrations.AlterModelOptions(
name='goal',
options={'ordering': ['name'], 'verbose_name': 'Goal', 'verbose_name_plural': 'Goals'},
),
migrations.AlterModelOptions(
name='groupfeature',
options={'ordering': ['name'], 'verbose_name': 'Group feature', 'verbose_name_plural': 'Group features'},
),
migrations.AlterModelOptions(
name='location',
options={'ordering': ['name'], 'verbose_name': 'Location', 'verbose_name_plural': 'Locations'},
),
migrations.AlterModelOptions(
name='subject',
options={'ordering': ['name'], 'verbose_name': 'Subject', 'verbose_name_plural': 'Subjects'},
),
migrations.AlterField(
model_name='goal',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='groupfeature',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='location',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='subject',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
]
|
Add forgotten migration for newly introduced default ordering# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 19:55
from __future__ import unicode_literals
from django.db import migrations
import localized_fields.fields.field
class Migration(migrations.Migration):
dependencies = [
('material', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='approval',
options={'ordering': ['-requested'], 'verbose_name': 'Approval', 'verbose_name_plural': 'Approvals'},
),
migrations.AlterModelOptions(
name='goal',
options={'ordering': ['name'], 'verbose_name': 'Goal', 'verbose_name_plural': 'Goals'},
),
migrations.AlterModelOptions(
name='groupfeature',
options={'ordering': ['name'], 'verbose_name': 'Group feature', 'verbose_name_plural': 'Group features'},
),
migrations.AlterModelOptions(
name='location',
options={'ordering': ['name'], 'verbose_name': 'Location', 'verbose_name_plural': 'Locations'},
),
migrations.AlterModelOptions(
name='subject',
options={'ordering': ['name'], 'verbose_name': 'Subject', 'verbose_name_plural': 'Subjects'},
),
migrations.AlterField(
model_name='goal',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='groupfeature',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='location',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='subject',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
]
|
<commit_before><commit_msg>Add forgotten migration for newly introduced default ordering<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-04 19:55
from __future__ import unicode_literals
from django.db import migrations
import localized_fields.fields.field
class Migration(migrations.Migration):
dependencies = [
('material', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='approval',
options={'ordering': ['-requested'], 'verbose_name': 'Approval', 'verbose_name_plural': 'Approvals'},
),
migrations.AlterModelOptions(
name='goal',
options={'ordering': ['name'], 'verbose_name': 'Goal', 'verbose_name_plural': 'Goals'},
),
migrations.AlterModelOptions(
name='groupfeature',
options={'ordering': ['name'], 'verbose_name': 'Group feature', 'verbose_name_plural': 'Group features'},
),
migrations.AlterModelOptions(
name='location',
options={'ordering': ['name'], 'verbose_name': 'Location', 'verbose_name_plural': 'Locations'},
),
migrations.AlterModelOptions(
name='subject',
options={'ordering': ['name'], 'verbose_name': 'Subject', 'verbose_name_plural': 'Subjects'},
),
migrations.AlterField(
model_name='goal',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='groupfeature',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='location',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
migrations.AlterField(
model_name='subject',
name='name',
field=localized_fields.fields.field.LocalizedField(max_length=512, required=[], uniqueness=[], verbose_name='name'),
),
]
|
|
1acbad02071a4d1ef953bc2c0643525e5d681d54
|
runlint.py
|
runlint.py
|
#!/usr/bin/env python
import optparse
import sys
from closure_linter import checker
from closure_linter import error_fixer
from closure_linter import gjslint
USAGE = """%prog [options] [file1] [file2]...
Run a JavaScript linter on one or more files.
This will invoke the linter, and optionally attempt to auto-fix style-violations on the specified JavaScript files.
"""
def check_files(filenames):
fake_args = [gjslint.__file__, '--nobeep'] + filenames
return gjslint.main(argv=fake_args) == 0
def fix_files(filenames):
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
for filename in filenames:
style_checker.Check(filename)
return 0
def main():
parser = optparse.OptionParser(USAGE)
parser.add_option('--autofix',
dest='autofix',
action='store_true',
default=False,
help='Whether or not to autofix')
options, args = parser.parse_args()
if options.autofix:
return fix_files(args)
else:
return check_files(args)
if __name__ == '__main__':
sys.exit(main())
|
Add in a script to run the linter manually
|
Add in a script to run the linter manually
Example uses:
$ runlint.py file1.js file2.js
$ runlint.py --autofix file3.js
|
Python
|
apache-2.0
|
Khan/khan-linter,Khan/khan-linter,Khan/khan-linter,Khan/khan-linter
|
Add in a script to run the linter manually
Example uses:
$ runlint.py file1.js file2.js
$ runlint.py --autofix file3.js
|
#!/usr/bin/env python
import optparse
import sys
from closure_linter import checker
from closure_linter import error_fixer
from closure_linter import gjslint
USAGE = """%prog [options] [file1] [file2]...
Run a JavaScript linter on one or more files.
This will invoke the linter, and optionally attempt to auto-fix style-violations on the specified JavaScript files.
"""
def check_files(filenames):
fake_args = [gjslint.__file__, '--nobeep'] + filenames
return gjslint.main(argv=fake_args) == 0
def fix_files(filenames):
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
for filename in filenames:
style_checker.Check(filename)
return 0
def main():
parser = optparse.OptionParser(USAGE)
parser.add_option('--autofix',
dest='autofix',
action='store_true',
default=False,
help='Whether or not to autofix')
options, args = parser.parse_args()
if options.autofix:
return fix_files(args)
else:
return check_files(args)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add in a script to run the linter manually
Example uses:
$ runlint.py file1.js file2.js
$ runlint.py --autofix file3.js<commit_after>
|
#!/usr/bin/env python
import optparse
import sys
from closure_linter import checker
from closure_linter import error_fixer
from closure_linter import gjslint
USAGE = """%prog [options] [file1] [file2]...
Run a JavaScript linter on one or more files.
This will invoke the linter, and optionally attempt to auto-fix style-violations on the specified JavaScript files.
"""
def check_files(filenames):
fake_args = [gjslint.__file__, '--nobeep'] + filenames
return gjslint.main(argv=fake_args) == 0
def fix_files(filenames):
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
for filename in filenames:
style_checker.Check(filename)
return 0
def main():
parser = optparse.OptionParser(USAGE)
parser.add_option('--autofix',
dest='autofix',
action='store_true',
default=False,
help='Whether or not to autofix')
options, args = parser.parse_args()
if options.autofix:
return fix_files(args)
else:
return check_files(args)
if __name__ == '__main__':
sys.exit(main())
|
Add in a script to run the linter manually
Example uses:
$ runlint.py file1.js file2.js
$ runlint.py --autofix file3.js#!/usr/bin/env python
import optparse
import sys
from closure_linter import checker
from closure_linter import error_fixer
from closure_linter import gjslint
USAGE = """%prog [options] [file1] [file2]...
Run a JavaScript linter on one or more files.
This will invoke the linter, and optionally attempt to auto-fix style-violations on the specified JavaScript files.
"""
def check_files(filenames):
fake_args = [gjslint.__file__, '--nobeep'] + filenames
return gjslint.main(argv=fake_args) == 0
def fix_files(filenames):
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
for filename in filenames:
style_checker.Check(filename)
return 0
def main():
parser = optparse.OptionParser(USAGE)
parser.add_option('--autofix',
dest='autofix',
action='store_true',
default=False,
help='Whether or not to autofix')
options, args = parser.parse_args()
if options.autofix:
return fix_files(args)
else:
return check_files(args)
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add in a script to run the linter manually
Example uses:
$ runlint.py file1.js file2.js
$ runlint.py --autofix file3.js<commit_after>#!/usr/bin/env python
import optparse
import sys
from closure_linter import checker
from closure_linter import error_fixer
from closure_linter import gjslint
USAGE = """%prog [options] [file1] [file2]...
Run a JavaScript linter on one or more files.
This will invoke the linter, and optionally attempt to auto-fix style-violations on the specified JavaScript files.
"""
def check_files(filenames):
fake_args = [gjslint.__file__, '--nobeep'] + filenames
return gjslint.main(argv=fake_args) == 0
def fix_files(filenames):
style_checker = checker.JavaScriptStyleChecker(error_fixer.ErrorFixer())
for filename in filenames:
style_checker.Check(filename)
return 0
def main():
parser = optparse.OptionParser(USAGE)
parser.add_option('--autofix',
dest='autofix',
action='store_true',
default=False,
help='Whether or not to autofix')
options, args = parser.parse_args()
if options.autofix:
return fix_files(args)
else:
return check_files(args)
if __name__ == '__main__':
sys.exit(main())
|
|
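The commit message above already shows the command-line usage; for completeness, a tiny sketch of how the optparse flag handling behaves when parsed programmatically (the file names are dummies):

# Tiny optparse sketch mirroring the --autofix flag above; file names are dummies.
import optparse

parser = optparse.OptionParser("%prog [options] [file1] [file2]...")
parser.add_option("--autofix", dest="autofix", action="store_true", default=False,
                  help="Whether or not to autofix")
options, args = parser.parse_args(["--autofix", "file1.js", "file2.js"])
print(options.autofix, args)  # True ['file1.js', 'file2.js']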
3e5d6e5dd31193f42ebddaeff856bfe53703a19e
|
models/fallahi_eval/evidence_sources.py
|
models/fallahi_eval/evidence_sources.py
|
from util import pklload
from collections import defaultdict
import indra.tools.assemble_corpus as ac
if __name__ == '__main__':
# Load cached Statements just before going into the model
stmts = pklload('pysb_stmts')
# Start a dictionary for source counts
sources_count = defaultdict(int)
# Count statements according to sources of evidence
for stmt in stmts:
sources = tuple(sorted(list(set([ev.source_api for ev in stmt.evidence]))))
sources_count[sources] += 1
# Statements from databases only
db_only = 0
# Statements from reading only
reading_only = 0
# Statements from databases and reading
mixture = 0
# Database sources
dbs = set(['bel', 'biopax', 'phosphosite', 'signor'])
# Reader sources
readers = set(['reach', 'trips', 'sparser', 'r3'])
for k, v in sources_count.items():
d = set(k).intersection(dbs)
r = set(k).intersection(readers)
if d and r:
mixture += v
if d and not r:
db_only += v
if r and not d:
reading_only += v
for k, v in sorted(sources_count.items(), key=lambda x: x[1]):
print(k, v)
|
Add script to get evidence sources
|
Add script to get evidence sources
|
Python
|
bsd-2-clause
|
sorgerlab/indra,sorgerlab/indra,bgyori/indra,pvtodorov/indra,sorgerlab/indra,sorgerlab/belpy,bgyori/indra,johnbachman/belpy,johnbachman/belpy,bgyori/indra,johnbachman/indra,pvtodorov/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/belpy,pvtodorov/indra,pvtodorov/indra,johnbachman/indra,johnbachman/belpy
|
Add script to get evidence sources
|
from util import pklload
from collections import defaultdict
import indra.tools.assemble_corpus as ac
if __name__ == '__main__':
# Load cached Statements just before going into the model
stmts = pklload('pysb_stmts')
# Start a dictionary for source counts
sources_count = defaultdict(int)
# Count statements according to sources of evidence
for stmt in stmts:
sources = tuple(sorted(list(set([ev.source_api for ev in stmt.evidence]))))
sources_count[sources] += 1
# Statements from databases only
db_only = 0
# Statements from reading only
reading_only = 0
# Statements from databases and reading
mixture = 0
# Database sources
dbs = set(['bel', 'biopax', 'phosphosite', 'signor'])
# Reader sources
readers = set(['reach', 'trips', 'sparser', 'r3'])
for k, v in sources_count.items():
d = set(k).intersection(dbs)
r = set(k).intersection(readers)
if d and r:
mixture += v
if d and not r:
db_only += v
if r and not d:
reading_only += v
for k, v in sorted(sources_count.items(), key=lambda x: x[1]):
print(k, v)
|
<commit_before><commit_msg>Add script to get evidence sources<commit_after>
|
from util import pklload
from collections import defaultdict
import indra.tools.assemble_corpus as ac
if __name__ == '__main__':
# Load cached Statements just before going into the model
stmts = pklload('pysb_stmts')
# Start a dictionary for source counts
sources_count = defaultdict(int)
# Count statements according to sources of evidence
for stmt in stmts:
sources = tuple(sorted(list(set([ev.source_api for ev in stmt.evidence]))))
sources_count[sources] += 1
# Statements from databases only
db_only = 0
# Statements from reading only
reading_only = 0
# Statements from databases and reading
mixture = 0
# Database sources
dbs = set(['bel', 'biopax', 'phosphosite', 'signor'])
# Reader sources
readers = set(['reach', 'trips', 'sparser', 'r3'])
for k, v in sources_count.items():
d = set(k).intersection(dbs)
r = set(k).intersection(readers)
if d and r:
mixture += v
if d and not r:
db_only += v
if r and not d:
reading_only += v
for k, v in sorted(sources_count.items(), key=lambda x: x[1]):
print(k, v)
|
Add script to get evidence sourcesfrom util import pklload
from collections import defaultdict
import indra.tools.assemble_corpus as ac
if __name__ == '__main__':
# Load cached Statements just before going into the model
stmts = pklload('pysb_stmts')
# Start a dictionary for source counts
sources_count = defaultdict(int)
# Count statements according to sources of evidence
for stmt in stmts:
sources = tuple(sorted(list(set([ev.source_api for ev in stmt.evidence]))))
sources_count[sources] += 1
# Statements from databases only
db_only = 0
# Statements from reading only
reading_only = 0
# Statements from databases and reading
mixture = 0
# Database sources
dbs = set(['bel', 'biopax', 'phosphosite', 'signor'])
# Reader sources
readers = set(['reach', 'trips', 'sparser', 'r3'])
for k, v in sources_count.items():
d = set(k).intersection(dbs)
r = set(k).intersection(readers)
if d and r:
mixture += v
if d and not r:
db_only += v
if r and not d:
reading_only += v
for k, v in sorted(sources_count.items(), key=lambda x: x[1]):
print(k, v)
|
<commit_before><commit_msg>Add script to get evidence sources<commit_after>from util import pklload
from collections import defaultdict
import indra.tools.assemble_corpus as ac
if __name__ == '__main__':
# Load cached Statements just before going into the model
stmts = pklload('pysb_stmts')
# Start a dictionary for source counts
sources_count = defaultdict(int)
# Count statements according to sources of evidence
for stmt in stmts:
sources = tuple(sorted(list(set([ev.source_api for ev in stmt.evidence]))))
sources_count[sources] += 1
# Statements from databases only
db_only = 0
# Statements from reading only
reading_only = 0
# Statements from databases and reading
mixture = 0
# Database sources
dbs = set(['bel', 'biopax', 'phosphosite', 'signor'])
# Reader sources
readers = set(['reach', 'trips', 'sparser', 'r3'])
for k, v in sources_count.items():
d = set(k).intersection(dbs)
r = set(k).intersection(readers)
if d and r:
mixture += v
if d and not r:
db_only += v
if r and not d:
reading_only += v
for k, v in sorted(sources_count.items(), key=lambda x: x[1]):
print(k, v)
|
|
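The script above keys its counts on the sorted tuple of evidence sources per statement. The same counting pattern in isolation, with invented records standing in for INDRA Statements:

# Isolated sketch of the source-tuple counting pattern; records are invented stand-ins.
from collections import defaultdict

fake_statements = [
    {"sources": ["reach", "bel"]},
    {"sources": ["reach"]},
    {"sources": ["bel", "reach"]},
]

sources_count = defaultdict(int)
for stmt in fake_statements:
    key = tuple(sorted(set(stmt["sources"])))
    sources_count[key] += 1

print(dict(sources_count))  # {('bel', 'reach'): 2, ('reach',): 1}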
ccd1822d65f5565d4881e5a6a32b535e55cc2b50
|
zinnia/views/mixins/entry_preview.py
|
zinnia/views/mixins/entry_preview.py
|
"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
from zinnia.managers import PUBLISHED
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.status == PUBLISHED:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user in obj.authors.all()):
return obj
raise Http404(_('No entry found matching the query'))
|
Implement preview of entries for restricted users in EntryPreviewMixin
|
Implement preview of entries for restricted users in EntryPreviewMixin
|
Python
|
bsd-3-clause
|
1844144/django-blog-zinnia,ghachey/django-blog-zinnia,petecummings/django-blog-zinnia,marctc/django-blog-zinnia,bywbilly/django-blog-zinnia,ghachey/django-blog-zinnia,Zopieux/django-blog-zinnia,ghachey/django-blog-zinnia,extertioner/django-blog-zinnia,1844144/django-blog-zinnia,ZuluPro/django-blog-zinnia,extertioner/django-blog-zinnia,dapeng0802/django-blog-zinnia,bywbilly/django-blog-zinnia,Maplecroft/django-blog-zinnia,ZuluPro/django-blog-zinnia,marctc/django-blog-zinnia,Maplecroft/django-blog-zinnia,1844144/django-blog-zinnia,aorzh/django-blog-zinnia,Zopieux/django-blog-zinnia,Fantomas42/django-blog-zinnia,Fantomas42/django-blog-zinnia,aorzh/django-blog-zinnia,Maplecroft/django-blog-zinnia,dapeng0802/django-blog-zinnia,ZuluPro/django-blog-zinnia,Zopieux/django-blog-zinnia,petecummings/django-blog-zinnia,extertioner/django-blog-zinnia,bywbilly/django-blog-zinnia,aorzh/django-blog-zinnia,petecummings/django-blog-zinnia,marctc/django-blog-zinnia,dapeng0802/django-blog-zinnia,Fantomas42/django-blog-zinnia
|
Implement preview of entries for restricted users in EntryPreviewMixin
|
"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
from zinnia.managers import PUBLISHED
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.status == PUBLISHED:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user in obj.authors.all()):
return obj
raise Http404(_('No entry found matching the query'))
|
<commit_before><commit_msg>Implement preview of entries for restricted users in EntryPreviewMixin<commit_after>
|
"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
from zinnia.managers import PUBLISHED
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.status == PUBLISHED:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user in obj.authors.all()):
return obj
raise Http404(_('No entry found matching the query'))
|
Implement preview of entries for restricted users in EntryPreviewMixin"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
from zinnia.managers import PUBLISHED
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.status == PUBLISHED:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user in obj.authors.all()):
return obj
raise Http404(_('No entry found matching the query'))
|
<commit_before><commit_msg>Implement preview of entries for restricted users in EntryPreviewMixin<commit_after>"""Preview mixins for Zinnia views"""
from django.http import Http404
from django.utils.translation import ugettext as _
from zinnia.managers import PUBLISHED
class EntryPreviewMixin(object):
"""
Mixin implementing the preview of Entries.
"""
def get_object(self, queryset=None):
"""
If the status of the entry is not PUBLISHED,
a preview is requested, so we check if the user
has the 'zinnia.can_view_all' permission or if
it's an author of the entry.
"""
obj = super(EntryPreviewMixin, self).get_object(queryset)
if obj.status == PUBLISHED:
return obj
if (self.request.user.has_perm('zinnia.can_view_all') or
self.request.user in obj.authors.all()):
return obj
raise Http404(_('No entry found matching the query'))
|
|
7c10150d5e667921450e8663fa9440253a495160
|
gem/migrations/0014_convert_recomended_articles.py
|
gem/migrations/0014_convert_recomended_articles.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recomended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
class Migration(migrations.Migration):
dependencies = [
('gem', '0013_gemsettings_moderator_name'),
]
operations = [
migrations.RunPython(convert_articles),
]
|
Add migration for moving recommended articles to recommended section
|
Add migration for moving recommended articles to recommended section
|
Python
|
bsd-2-clause
|
praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem
|
Add migration for moving recommended articles to recommended section
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recomended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
class Migration(migrations.Migration):
dependencies = [
('gem', '0013_gemsettings_moderator_name'),
]
operations = [
migrations.RunPython(convert_articles),
]
|
<commit_before><commit_msg>Add migration for moving recommended articles to recommended section<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recomended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
class Migration(migrations.Migration):
dependencies = [
('gem', '0013_gemsettings_moderator_name'),
]
operations = [
migrations.RunPython(convert_articles),
]
|
Add migration for moving recomended articles recomended section# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recomended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
class Migration(migrations.Migration):
dependencies = [
('gem', '0013_gemsettings_moderator_name'),
]
operations = [
migrations.RunPython(convert_articles),
]
|
<commit_before><commit_msg>Add migration for moving recomended articles recomended section<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from molo.core.models import ArticlePage, ArticlePageRecommendedSections
from wagtail.wagtailcore.blocks import StreamValue
def create_recomended_articles(main_article, article_list):
'''
Creates recommended article objects from article_list
and _prepends_ to existing recommended articles.
'''
existing_recommended_articles = [
ra.recommended_article.specific
for ra in main_article.recommended_articles.all()]
ArticlePageRecommendedSections.objects.filter(page=main_article).delete()
for hyperlinked_article in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=hyperlinked_article).save()
# re-create existing recommended articles
for article in existing_recommended_articles:
if article not in article_list:
ArticlePageRecommendedSections(
page=main_article,
recommended_article=article).save()
def convert_articles(apps, schema_editor):
'''
Derived from https://github.com/wagtail/wagtail/issues/2110
'''
articles = ArticlePage.objects.all().exact_type(ArticlePage)
for article in articles:
stream_data = []
linked_articles = []
for block in article.body.stream_data:
if block['type'] == 'page':
if ArticlePage.objects.filter(id=block['value']):
linked_articles.append(ArticlePage.objects.get(
id=block['value']))
else:
# add block to new stream_data
stream_data.append(block)
if linked_articles:
create_recomended_articles(article, linked_articles)
stream_block = article.body.stream_block
article.body = StreamValue(stream_block, stream_data, is_lazy=True)
article.save()
section = article.get_parent().specific
section.enable_recommended_section = True
section.enable_next_section = True
section.save()
class Migration(migrations.Migration):
dependencies = [
('gem', '0013_gemsettings_moderator_name'),
]
operations = [
migrations.RunPython(convert_articles),
]
|
|
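One detail worth noting about the migration record above: `migrations.RunPython(convert_articles)` is declared without a reverse function, so the migration cannot be unapplied once run. If reversibility were wanted, the usual Django pattern is a no-op reverse. The following is a minimal sketch under that assumption; it reuses `convert_articles` from the migration above and is not part of the original commit:

from django.db import migrations

class Migration(migrations.Migration):
    dependencies = [
        ('gem', '0013_gemsettings_moderator_name'),
    ]
    operations = [
        # RunPython.noop lets `migrate` step back past this migration
        # without attempting to undo the data move.
        migrations.RunPython(convert_articles, migrations.RunPython.noop),
    ]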
abd05378eb6acf742f2deff4228a0bca4492521b
|
examples/htmlTableParser.py
|
examples/htmlTableParser.py
|
#
# htmlTableParser.py
#
# Example of parsing a simple HTML table into a list of rows, and optionally into a little database
#
# Copyright 2019, Paul McGuire
#
import pyparsing as pp
import urllib.request
# define basic HTML tags, and compose into a Table
table, table_end = pp.makeHTMLTags('table')
thead, thead_end = pp.makeHTMLTags('thead')
tbody, tbody_end = pp.makeHTMLTags('tbody')
tr, tr_end = pp.makeHTMLTags('tr')
th, th_end = pp.makeHTMLTags('th')
td, td_end = pp.makeHTMLTags('td')
a, a_end = pp.makeHTMLTags('a')
# method to strip HTML tags from a string - will be used to clean up content of table cells
strip_html = (pp.anyOpenTag | pp.anyCloseTag).suppress().transformString
# expression for parsing <a href="url">text</a> links, returning a (text, url) tuple
link = pp.Group(a + pp.SkipTo(a_end)('text') + a_end.suppress())
link.addParseAction(lambda t: (t[0].text, t[0].href))
# method to create table rows of header and data tags
def table_row(start_tag, end_tag):
body = pp.SkipTo(end_tag)
body.addParseAction(pp.tokenMap(str.strip),
pp.tokenMap(strip_html))
row = pp.Group(tr.suppress()
+ pp.ZeroOrMore(start_tag.suppress()
+ body
+ end_tag.suppress())
+ tr_end.suppress())
return row
th_row = table_row(th, th_end)
td_row = table_row(td, td_end)
# define expression for overall table - may vary slightly for different pages
html_table = table + tbody + pp.Optional(th_row('headers')) + pp.ZeroOrMore(td_row)('rows') + tbody_end + table_end
# read in a web page containing an interesting HTML table
with urllib.request.urlopen("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones") as page:
page_html = page.read().decode()
tz_table = html_table.searchString(page_html)[0]
# convert rows to dicts
rows = [dict(zip(tz_table.headers, row)) for row in tz_table.rows]
# make a dict keyed by TZ database name
tz_db = {row['TZ database name']: row for row in rows}
from pprint import pprint
pprint(tz_db['America/Chicago'])
|
Add example showing scraping/parsing of an HTML table into a Python dict
|
Add example showing scraping/parsing of an HTML table into a Python dict
|
Python
|
mit
|
pyparsing/pyparsing,pyparsing/pyparsing
|
Add example showing scraping/parsing of an HTML table into a Python dict
|
#
# htmlTableParser.py
#
# Example of parsing a simple HTML table into a list of rows, and optionally into a little database
#
# Copyright 2019, Paul McGuire
#
import pyparsing as pp
import urllib.request
# define basic HTML tags, and compose into a Table
table, table_end = pp.makeHTMLTags('table')
thead, thead_end = pp.makeHTMLTags('thead')
tbody, tbody_end = pp.makeHTMLTags('tbody')
tr, tr_end = pp.makeHTMLTags('tr')
th, th_end = pp.makeHTMLTags('th')
td, td_end = pp.makeHTMLTags('td')
a, a_end = pp.makeHTMLTags('a')
# method to strip HTML tags from a string - will be used to clean up content of table cells
strip_html = (pp.anyOpenTag | pp.anyCloseTag).suppress().transformString
# expression for parsing <a href="url">text</a> links, returning a (text, url) tuple
link = pp.Group(a + pp.SkipTo(a_end)('text') + a_end.suppress())
link.addParseAction(lambda t: (t[0].text, t[0].href))
# method to create table rows of header and data tags
def table_row(start_tag, end_tag):
body = pp.SkipTo(end_tag)
body.addParseAction(pp.tokenMap(str.strip),
pp.tokenMap(strip_html))
row = pp.Group(tr.suppress()
+ pp.ZeroOrMore(start_tag.suppress()
+ body
+ end_tag.suppress())
+ tr_end.suppress())
return row
th_row = table_row(th, th_end)
td_row = table_row(td, td_end)
# define expression for overall table - may vary slightly for different pages
html_table = table + tbody + pp.Optional(th_row('headers')) + pp.ZeroOrMore(td_row)('rows') + tbody_end + table_end
# read in a web page containing an interesting HTML table
with urllib.request.urlopen("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones") as page:
page_html = page.read().decode()
tz_table = html_table.searchString(page_html)[0]
# convert rows to dicts
rows = [dict(zip(tz_table.headers, row)) for row in tz_table.rows]
# make a dict keyed by TZ database name
tz_db = {row['TZ database name']: row for row in rows}
from pprint import pprint
pprint(tz_db['America/Chicago'])
|
<commit_before><commit_msg>Add example showing scraping/parsing of an HTML table into a Python dict<commit_after>
|
#
# htmlTableParser.py
#
# Example of parsing a simple HTML table into a list of rows, and optionally into a little database
#
# Copyright 2019, Paul McGuire
#
import pyparsing as pp
import urllib.request
# define basic HTML tags, and compose into a Table
table, table_end = pp.makeHTMLTags('table')
thead, thead_end = pp.makeHTMLTags('thead')
tbody, tbody_end = pp.makeHTMLTags('tbody')
tr, tr_end = pp.makeHTMLTags('tr')
th, th_end = pp.makeHTMLTags('th')
td, td_end = pp.makeHTMLTags('td')
a, a_end = pp.makeHTMLTags('a')
# method to strip HTML tags from a string - will be used to clean up content of table cells
strip_html = (pp.anyOpenTag | pp.anyCloseTag).suppress().transformString
# expression for parsing <a href="url">text</a> links, returning a (text, url) tuple
link = pp.Group(a + pp.SkipTo(a_end)('text') + a_end.suppress())
link.addParseAction(lambda t: (t[0].text, t[0].href))
# method to create table rows of header and data tags
def table_row(start_tag, end_tag):
body = pp.SkipTo(end_tag)
body.addParseAction(pp.tokenMap(str.strip),
pp.tokenMap(strip_html))
row = pp.Group(tr.suppress()
+ pp.ZeroOrMore(start_tag.suppress()
+ body
+ end_tag.suppress())
+ tr_end.suppress())
return row
th_row = table_row(th, th_end)
td_row = table_row(td, td_end)
# define expression for overall table - may vary slightly for different pages
html_table = table + tbody + pp.Optional(th_row('headers')) + pp.ZeroOrMore(td_row)('rows') + tbody_end + table_end
# read in a web page containing an interesting HTML table
with urllib.request.urlopen("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones") as page:
page_html = page.read().decode()
tz_table = html_table.searchString(page_html)[0]
# convert rows to dicts
rows = [dict(zip(tz_table.headers, row)) for row in tz_table.rows]
# make a dict keyed by TZ database name
tz_db = {row['TZ database name']: row for row in rows}
from pprint import pprint
pprint(tz_db['America/Chicago'])
|
Add example showing scraping/parsing of an HTML table into a Python dict#
# htmlTableParser.py
#
# Example of parsing a simple HTML table into a list of rows, and optionally into a little database
#
# Copyright 2019, Paul McGuire
#
import pyparsing as pp
import urllib.request
# define basic HTML tags, and compose into a Table
table, table_end = pp.makeHTMLTags('table')
thead, thead_end = pp.makeHTMLTags('thead')
tbody, tbody_end = pp.makeHTMLTags('tbody')
tr, tr_end = pp.makeHTMLTags('tr')
th, th_end = pp.makeHTMLTags('th')
td, td_end = pp.makeHTMLTags('td')
a, a_end = pp.makeHTMLTags('a')
# method to strip HTML tags from a string - will be used to clean up content of table cells
strip_html = (pp.anyOpenTag | pp.anyCloseTag).suppress().transformString
# expression for parsing <a href="url">text</a> links, returning a (text, url) tuple
link = pp.Group(a + pp.SkipTo(a_end)('text') + a_end.suppress())
link.addParseAction(lambda t: (t[0].text, t[0].href))
# method to create table rows of header and data tags
def table_row(start_tag, end_tag):
body = pp.SkipTo(end_tag)
body.addParseAction(pp.tokenMap(str.strip),
pp.tokenMap(strip_html))
row = pp.Group(tr.suppress()
+ pp.ZeroOrMore(start_tag.suppress()
+ body
+ end_tag.suppress())
+ tr_end.suppress())
return row
th_row = table_row(th, th_end)
td_row = table_row(td, td_end)
# define expression for overall table - may vary slightly for different pages
html_table = table + tbody + pp.Optional(th_row('headers')) + pp.ZeroOrMore(td_row)('rows') + tbody_end + table_end
# read in a web page containing an interesting HTML table
with urllib.request.urlopen("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones") as page:
page_html = page.read().decode()
tz_table = html_table.searchString(page_html)[0]
# convert rows to dicts
rows = [dict(zip(tz_table.headers, row)) for row in tz_table.rows]
# make a dict keyed by TZ database name
tz_db = {row['TZ database name']: row for row in rows}
from pprint import pprint
pprint(tz_db['America/Chicago'])
|
<commit_before><commit_msg>Add example showing scraping/parsing of an HTML table into a Python dict<commit_after>#
# htmlTableParser.py
#
# Example of parsing a simple HTML table into a list of rows, and optionally into a little database
#
# Copyright 2019, Paul McGuire
#
import pyparsing as pp
import urllib.request
# define basic HTML tags, and compose into a Table
table, table_end = pp.makeHTMLTags('table')
thead, thead_end = pp.makeHTMLTags('thead')
tbody, tbody_end = pp.makeHTMLTags('tbody')
tr, tr_end = pp.makeHTMLTags('tr')
th, th_end = pp.makeHTMLTags('th')
td, td_end = pp.makeHTMLTags('td')
a, a_end = pp.makeHTMLTags('a')
# method to strip HTML tags from a string - will be used to clean up content of table cells
strip_html = (pp.anyOpenTag | pp.anyCloseTag).suppress().transformString
# expression for parsing <a href="url">text</a> links, returning a (text, url) tuple
link = pp.Group(a + pp.SkipTo(a_end)('text') + a_end.suppress())
link.addParseAction(lambda t: (t[0].text, t[0].href))
# method to create table rows of header and data tags
def table_row(start_tag, end_tag):
body = pp.SkipTo(end_tag)
body.addParseAction(pp.tokenMap(str.strip),
pp.tokenMap(strip_html))
row = pp.Group(tr.suppress()
+ pp.ZeroOrMore(start_tag.suppress()
+ body
+ end_tag.suppress())
+ tr_end.suppress())
return row
th_row = table_row(th, th_end)
td_row = table_row(td, td_end)
# define expression for overall table - may vary slightly for different pages
html_table = table + tbody + pp.Optional(th_row('headers')) + pp.ZeroOrMore(td_row)('rows') + tbody_end + table_end
# read in a web page containing an interesting HTML table
with urllib.request.urlopen("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones") as page:
page_html = page.read().decode()
tz_table = html_table.searchString(page_html)[0]
# convert rows to dicts
rows = [dict(zip(tz_table.headers, row)) for row in tz_table.rows]
# make a dict keyed by TZ database name
tz_db = {row['TZ database name']: row for row in rows}
from pprint import pprint
pprint(tz_db['America/Chicago'])
|
|
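A small aside on the pyparsing example above: the `link` expression is built (its parse action returns a `(text, url)` tuple) but the script never actually uses it. A hedged usage sketch, assuming the same `page_html` variable; the exact indexing may vary slightly between pyparsing versions:

# print the first few hyperlinks found in the page
for match in link.searchString(page_html)[:5]:
    text, url = match[0]   # the (text, url) tuple produced by link's parse action
    print(text, '->', url)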
db9b756dbf68fde9930da8ab6b4594fa3f1d361e
|
migrations/versions/175_fix_recurring_override_cascade.py
|
migrations/versions/175_fix_recurring_override_cascade.py
|
"""fix recurring override cascade
Revision ID: 6e5b154d917
Revises: 41f957b595fc
Create Date: 2015-05-25 16:23:40.563050
"""
# revision identifiers, used by Alembic.
revision = '6e5b154d917'
down_revision = '4ef055945390'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`) ON DELETE CASCADE"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`)"))
|
Fix cascades for RecurringEventOverride table
|
Fix cascades for RecurringEventOverride table
Summary: We weren't defining an `ON DELETE CASCADE` cascade on the RecurringEventOverride table. This made it impossible to run the account reset script for accounts which had recurring event overrides. Fix this.
Test Plan: Ran the migration, checked that the account reset script worked.
Reviewers: jennie, kav-ya
Projects: #eas
Maniphest Tasks: T1383
Differential Revision: https://phab.nylas.com/D1557
|
Python
|
agpl-3.0
|
gale320/sync-engine,Eagles2F/sync-engine,nylas/sync-engine,nylas/sync-engine,Eagles2F/sync-engine,closeio/nylas,ErinCall/sync-engine,wakermahmud/sync-engine,Eagles2F/sync-engine,jobscore/sync-engine,gale320/sync-engine,jobscore/sync-engine,wakermahmud/sync-engine,gale320/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,gale320/sync-engine,jobscore/sync-engine,jobscore/sync-engine,PriviPK/privipk-sync-engine,wakermahmud/sync-engine,wakermahmud/sync-engine,ErinCall/sync-engine,closeio/nylas,nylas/sync-engine,Eagles2F/sync-engine,ErinCall/sync-engine,Eagles2F/sync-engine,closeio/nylas,PriviPK/privipk-sync-engine,gale320/sync-engine,PriviPK/privipk-sync-engine,PriviPK/privipk-sync-engine,nylas/sync-engine,wakermahmud/sync-engine,PriviPK/privipk-sync-engine,closeio/nylas
|
Fix cascades for RecurringEventOverride table
Summary: We weren't defining an `ON DELETE CASCADE` cascade on the RecurringEventOverride table. This made it impossible to run the account reset script for accounts which had recurring event overrides. Fix this.
Test Plan: Ran the migration, checked that the account reset script worked.
Reviewers: jennie, kav-ya
Projects: #eas
Maniphest Tasks: T1383
Differential Revision: https://phab.nylas.com/D1557
|
"""fix recurring override cascade
Revision ID: 6e5b154d917
Revises: 41f957b595fc
Create Date: 2015-05-25 16:23:40.563050
"""
# revision identifiers, used by Alembic.
revision = '6e5b154d917'
down_revision = '4ef055945390'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`) ON DELETE CASCADE"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`)"))
|
<commit_before><commit_msg>Fix cascades for RecurringEventOverride table
Summary: We weren't defining an `ON DELETE CASCADE` cascade on the RecurringEventOverride table. This made it impossible to run the account reset script for accounts which had recurring event overrides. Fix this.
Test Plan: Ran the migration, checked that the account reset script worked.
Reviewers: jennie, kav-ya
Projects: #eas
Maniphest Tasks: T1383
Differential Revision: https://phab.nylas.com/D1557<commit_after>
|
"""fix recurring override cascade
Revision ID: 6e5b154d917
Revises: 41f957b595fc
Create Date: 2015-05-25 16:23:40.563050
"""
# revision identifiers, used by Alembic.
revision = '6e5b154d917'
down_revision = '4ef055945390'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`) ON DELETE CASCADE"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`)"))
|
Fix cascades for RecurringEventOverride table
Summary: We weren't defining an `ON DELETE CASCADE` cascade on the RecurringEventOverride table. This made it impossible to run the account reset script for accounts which had recurring event overrides. Fix this.
Test Plan: Ran the migration, checked that the account reset script worked.
Reviewers: jennie, kav-ya
Projects: #eas
Maniphest Tasks: T1383
Differential Revision: https://phab.nylas.com/D1557"""fix recurring override cascade
Revision ID: 6e5b154d917
Revises: 41f957b595fc
Create Date: 2015-05-25 16:23:40.563050
"""
# revision identifiers, used by Alembic.
revision = '6e5b154d917'
down_revision = '4ef055945390'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`) ON DELETE CASCADE"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`)"))
|
<commit_before><commit_msg>Fix cascades for RecurringEventOverride table
Summary: We weren't defining an `ON DELETE CASCADE` cascade on the RecurringEventOverride table. This made it impossible to run the account reset script for accounts which had recurring event overrides. Fix this.
Test Plan: Ran the migration, checked that the account reset script worked.
Reviewers: jennie, kav-ya
Projects: #eas
Maniphest Tasks: T1383
Differential Revision: https://phab.nylas.com/D1557<commit_after>"""fix recurring override cascade
Revision ID: 6e5b154d917
Revises: 41f957b595fc
Create Date: 2015-05-25 16:23:40.563050
"""
# revision identifiers, used by Alembic.
revision = '6e5b154d917'
down_revision = '4ef055945390'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`) ON DELETE CASCADE"))
def downgrade():
conn = op.get_bind()
conn.execute(text("set @@lock_wait_timeout = 20;"))
conn.execute(text("SET FOREIGN_KEY_CHECKS=0;"))
conn.execute(text("ALTER TABLE recurringeventoverride DROP FOREIGN KEY "
"`recurringeventoverride_ibfk_2`"))
conn.execute(text("ALTER TABLE recurringeventoverride ADD CONSTRAINT recurringeventoverride_ibfk_2"
" FOREIGN KEY (`master_event_id`) REFERENCES `event` (`id`)"))
|
|
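The Summary above states the problem (no `ON DELETE CASCADE` on `recurringeventoverride.master_event_id`), and the ALTER TABLE statements in the migration are the actual fix. For readers who think in ORM terms, the same behaviour is normally declared on the foreign key column itself. This is a sketch with assumed model and column names, not the project's real model definition:

from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class RecurringEventOverride(Base):   # illustrative only
    __tablename__ = 'recurringeventoverride'
    id = Column(Integer, primary_key=True)
    # ondelete='CASCADE' mirrors the ALTER TABLE above: deleting the master
    # event also deletes its overrides at the database level.
    master_event_id = Column(Integer, ForeignKey('event.id', ondelete='CASCADE'))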
730548fe74dda462d7aac1e3c5ee8e8ba47f4371
|
scripts/extract_clips_from_hdf5_file.py
|
scripts/extract_clips_from_hdf5_file.py
|
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
Add script that extracts clips from HDF5 file.
|
Add script that extracts clips from HDF5 file.
|
Python
|
mit
|
HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper,HaroldMills/Vesper
|
Add script that extracts clips from HDF5 file.
|
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that extracts clips from HDF5 file.<commit_after>
|
from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
Add script that extracts clips from HDF5 file.from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script that extracts clips from HDF5 file.<commit_after>from pathlib import Path
import wave
import h5py
DIR_PATH = Path('/Users/harold/Desktop/Clips')
INPUT_FILE_PATH = DIR_PATH / 'Clips.h5'
CLIP_COUNT = 5
def main():
with h5py.File(INPUT_FILE_PATH, 'r') as file_:
clip_group = file_['clips']
for i, clip_id in enumerate(clip_group):
if i == CLIP_COUNT:
break
samples, sample_rate = read_clip(clip_group, clip_id)
print(clip_id, len(samples), samples.dtype, sample_rate)
write_wave_file(clip_id, samples, sample_rate)
def read_clip(clip_group, clip_id):
clip = clip_group[clip_id]
samples = clip[:]
sample_rate = clip.attrs['sample_rate']
return samples, sample_rate
def write_wave_file(i, samples, sample_rate):
file_name = f'{i}.wav'
file_path = DIR_PATH / file_name
with wave.open(str(file_path), 'wb') as file_:
file_.setparams((1, 2, sample_rate, len(samples), 'NONE', ''))
file_.writeframes(samples.tobytes())
if __name__ == '__main__':
main()
|
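The extraction script above assumes an HDF5 layout with a `clips` group, one dataset per clip ID, and a `sample_rate` attribute on each dataset, with 2-byte samples given the `setparams` call. For completeness, a file in that layout could be produced with h5py roughly as follows; the data here is synthetic and the layout is an assumption, not part of the original project:

import numpy as np
import h5py

def write_clips(path, clips, sample_rate):
    # clips: dict mapping clip ID -> 1-D int16 NumPy array of samples
    with h5py.File(path, 'w') as file_:
        group = file_.create_group('clips')
        for clip_id, samples in clips.items():
            ds = group.create_dataset(str(clip_id), data=samples)
            ds.attrs['sample_rate'] = sample_rate

write_clips('Clips.h5', {'clip_0': np.zeros(22050, dtype=np.int16)}, 22050)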