Dataset preview, one commit record per row. Per-column statistics as reported by the viewer:

| column | type | min | max |
|---|---|---|---|
| commit | stringlengths | 40 | 40 |
| old_file | stringlengths | 4 | 118 |
| new_file | stringlengths | 4 | 118 |
| old_contents | stringlengths | 0 | 2.94k |
| new_contents | stringlengths | 1 | 4.43k |
| subject | stringlengths | 15 | 444 |
| message | stringlengths | 16 | 3.45k |
| lang | stringclasses | 1 value | |
| license | stringclasses | 13 values | |
| repos | stringlengths | 5 | 43.2k |
| prompt | stringlengths | 17 | 4.58k |
| response | stringlengths | 1 | 4.43k |
| prompt_tagged | stringlengths | 58 | 4.62k |
| response_tagged | stringlengths | 1 | 4.43k |
| text | stringlengths | 132 | 7.29k |
| text_tagged | stringlengths | 173 | 7.33k |

The prompt, response, text, and *_tagged columns are derived from message, old_contents, and new_contents: prompt concatenates the old contents with the commit message, response is the new contents, text is prompt plus response, and the tagged variants wrap the same pieces in <commit_before>, <commit_msg>, and <commit_after> markers.
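A minimal sketch of how those derived columns line up, with the template inferred from the visible rows (the function name and row layout are assumptions for illustration, not a script shipped with the dataset):

```python
def build_derived_columns(row):
    """Assemble the prompt/response/text variants from one commit row.

    Template inferred from the preview rows: the plain columns
    concatenate the fields directly, the *_tagged columns wrap them
    in explicit commit markers.
    """
    msg = row["message"]
    old = row["old_contents"]
    new = row["new_contents"]
    prompt = old + msg  # old file contents followed by the commit message
    prompt_tagged = "<commit_before>%s<commit_msg>%s<commit_after>" % (old, msg)
    return {
        "prompt": prompt,
        "response": new,                     # the post-commit file contents
        "text": prompt + new,
        "prompt_tagged": prompt_tagged,
        "response_tagged": new,
        "text_tagged": prompt_tagged + new,
    }
```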
---
commit: f6fcd5324993a6804b8ec5b17e63b8235ca4b730
old_file: check_cuda_version.py
new_file: check_cuda_version.py
old_contents: (empty)
new_contents:

```python
#!/usr/bin/env python
"""
Simple script for checking installed CUDA version.
"""
import ctypes

try:
    _libcudart = ctypes.cdll.LoadLibrary('libcudart.so')
except:
    print 'CUDA runtime library not found'
else:
    _libcudart.cudaDriverGetVersion.restype = int
    _libcudart.cudaDriverGetVersion.argtypes = [ctypes.POINTER(ctypes.c_int)]
    version = ctypes.c_int()
    status = _libcudart.cudaDriverGetVersion(ctypes.byref(version))
    if status != 0:
        print 'CUDA runtime library found: version unknown'
    else:
        major = version.value/1000
        minor = (version.value%1000)/10
        print 'CUDA runtime library found: version %s' % (major + minor/10.0)
```

subject: Add script for checking installed CUDA version.
message: Add script for checking installed CUDA version.
lang: Python
license: bsd-3-clause
repos: cerrno/neurokernel
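The sample above is Python 2 (print statements, integer division). A rough Python 3 equivalent, assuming the same libcudart entry point, could look like the sketch below; note that cudaDriverGetVersion reports the CUDA version supported by the installed driver:

```python
#!/usr/bin/env python3
"""Python 3 sketch of the same check; libcudart.so must be on the loader path."""
import ctypes

try:
    libcudart = ctypes.CDLL('libcudart.so')
except OSError:
    print('CUDA runtime library not found')
else:
    version = ctypes.c_int()
    status = libcudart.cudaDriverGetVersion(ctypes.byref(version))
    if status != 0:
        print('CUDA runtime library found: version unknown')
    else:
        # The encoded value is 1000*major + 10*minor, e.g. 9020 -> 9.2
        major, rest = divmod(version.value, 1000)
        print('CUDA runtime library found: version %d.%d' % (major, rest // 10))
```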
---
commit: 5e31b9bc0817047c1e63f69ad38a4a9e9dbd29ec
old_file: fiasco/generator.py
new_file: fiasco/generator.py
old_contents: (empty)
new_contents:

```python
from . import model


class Connection(object):
    def __init__(self, left, right, relationship, detail):
        self.left = left
        self.right = right
        self.relationship = relationship
        self.detail = detail


class ConnectionModel(object):
    """The build method should yield a tuple of every
    possible character combination for this connection model."""
    def __init__(self, character_set):
        self.character_set = character_set

    def build(self):
        """Should yield a tuple of characters."""
        pass


class Circular(ConnectionModel):
    """A circular connection model, that is to say,
    the basic loop around the table that is a standard
    Fiasco

    >>> c = Circular(["Joebob", "Sallybob", "Jimbob"])
    >>> l = []
    >>> for tpl in c.build():
    ...     l.append(tpl)
    >>> l
    [('Jimbob', 'Joebob'), ('Joebob', 'Sallybob'), ('Sallybob', 'Jimbob')]
    """
    def __init__(self, character_set):
        ConnectionModel.__init__(self, character_set)

    def build(self):
        prev = self.character_set[-1]
        for c in self.character_set:
            yield prev, c
            prev = c


if __name__ == '__main__':
    import doctest
    doctest.testmod()
```

subject: Build a connection model system which will help with generating new Fiascos based on a playset and a set of characters.
message: Build a connection model system which will help with generating new Fiascos based on a playset and a set of characters.
lang: Python
license: mit
repos: RemyPorter/FiascoGenerator
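ConnectionModel's docstring frames build as an extension point (yield every possible character combination), so models other than Circular are clearly intended. A hypothetical Pairwise subclass, not part of the commit, shows what a second model might look like:

```python
from itertools import combinations

class Pairwise(ConnectionModel):
    """Hypothetical model: connect every character to every other one.

    >>> p = Pairwise(["Joebob", "Sallybob", "Jimbob"])
    >>> list(p.build())
    [('Joebob', 'Sallybob'), ('Joebob', 'Jimbob'), ('Sallybob', 'Jimbob')]
    """
    def build(self):
        # combinations() preserves input order, matching the doctest above
        for pair in combinations(self.character_set, 2):
            yield pair
```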
---
commit: a817600e1b30544a0244e76c414244b5182cdf08
old_file: Python/makeBarcode.py
new_file: Python/makeBarcode.py
old_contents: (empty)
new_contents:

```python
import argparse

def makeBarcode(label):
    print("^XA")  # start of label format
    print("^DFFORMAT^FS")  # download and store format, name of format, end of field data (FS = field stop)
    print("^LH0,0")  # label home position (label home = LH)
    print("^FO400,20^AFN,60,20^FN1^FS")  # AF = assign font F, field number 1 (FN1), print text at position field origin (FO) rel. to home
    print("^FO120,5^BCN,70,N,N^FN2^FS")  # BC = Code 128 barcode, field number 2, normal orientation, height 70, no interpretation line
    print("^XZ")  # end format
    print("^XA")  # start of label
    print("^XFFORMAT^FS")  # recall the stored format
    print("^FN1^FD{}^FS".format(label))  # this is readable
    print("^FN2^FD{}^FS".format(label))  # this is the barcode
    print("^XZ")

def main(args):
    makeBarcode(args)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Format a barcode for the Zebra printer")
    parser.add_argument("label", help="The text to print as barcode and text")
    args = parser.parse_args()
    main(args.label)
```

subject: Add script to generate plate barcodes for the Zebra printer
message: Add script to generate plate barcodes for the Zebra printer
lang: Python
license: apache-2.0
repos: jgruselius/misc,jgruselius/misc,jgruselius/misc,jgruselius/misc,jgruselius/misc
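The script only writes ZPL to stdout, so getting it onto a label needs a delivery step. Many networked Zebra printers accept raw ZPL on TCP port 9100; a minimal sketch of that hand-off, where the printer address is a placeholder and the script is assumed to sit at its repo path:

```python
import socket
import subprocess

# Capture the ZPL that makeBarcode.py prints, then push it to a Zebra
# printer listening on the conventional raw-printing port. The host
# address below is purely illustrative.
PRINTER = ("192.168.1.50", 9100)  # hypothetical printer address

zpl = subprocess.check_output(["python", "Python/makeBarcode.py", "PLATE-0001"])
with socket.create_connection(PRINTER, timeout=5) as conn:
    conn.sendall(zpl)
```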
---
commit: 84b1d56be22320a21bc4a0b100bd1ac488b72711
old_file: tekka/helper/history.py
new_file: tekka/helper/history.py
old_contents: (empty)
new_contents:

```python
""" provide functions for history handling.
note that history stuff is only possible
if maki is not running remote.
"""
import os
import re

from ..com import sushi
from ..typecheck import types

FILEPATTERN = re.compile('[0-9]+-[0-9]+\.txt')

def get_log_dir():
    return sushi.config_get("directories", "logs")

def get_available_servers():
    """ return a list with all servers containing logs """
    if sushi.remote:
        return []
    log_dir = get_log_dir()
    return [dir for dir in os.listdir(log_dir)
            if os.path.isdir(os.path.join(log_dir, dir))]

@types(server=basestring)
def get_available_conversations(server):
    """ return a list with all available logged channels/queries """
    if sushi.remote or not server:
        return []
    log_dir = os.path.join(get_log_dir(), server)
    if not os.path.exists(log_dir):
        return []
    return [dir for dir in os.listdir(log_dir)
            if os.path.isdir(os.path.join(log_dir, dir))]

@types(server=basestring, target=basestring)
def get_available_logs(server, target):
    """ return a list with all available logs for the target """
    if sushi.remote or not server or not target:
        return []
    log_dir = os.path.join(get_log_dir(), server, target)
    if not os.path.exists(log_dir):
        return []
    return [f for f in os.listdir(log_dir)
            if FILEPATTERN.match(f)
            and os.path.isfile(os.path.join(log_dir, f))]
```

subject: Add helper for log retrieval
message: Add helper for log retrieval
lang: Python
license: bsd-2-clause
repos: sushi-irc/tekka
---
commit: c8bb2fdf3a3e48945c2bed100c1856c14a75e8f7
old_file: tests/test_issue_410.py
new_file: tests/test_issue_410.py
old_contents: (empty)
new_contents:

```python
"""Test for issue `#410`_.

This tests for at least one of the bugs reported in `#410`_, namely
that paginated pages were not being built if they had no children.

.. _#410: https://github.com/lektor/lektor/issues/410
"""
import os

import pytest


@pytest.fixture(
    params=[
        pytest.param(True, id="paginated"),
        pytest.param(False, id="non-paginated"),
    ]
)
def scratch_project_data(scratch_project_data, request):
    # Specialize the inherited scratch project (from conftest.py)
    # by (possibly) enabling pagination for models/page.ini.
    is_paginated = request.param
    page_ini = scratch_project_data.join("models", "page.ini")
    if is_paginated:
        page_ini.write_text(
            "\n".join(
                [
                    page_ini.read_text("utf-8"),
                    "[pagination]",
                    "enabled = yes",
                    "",
                ]
            ),
            "utf-8",
        )
    return scratch_project_data


def test_build_childless_page(scratch_builder):
    # Test that a basic childless page gets built (whether it is
    # paginated or not)
    scratch_builder.build_all()
    index_html = os.path.join(scratch_builder.destination_path, "index.html")
    assert os.path.exists(index_html)
```

subject: Test that childless pages get built, even if paginated
message: Test that childless pages get built, even if paginated
lang: Python
license: bsd-3-clause
repos: lektor/lektor,lektor/lektor,lektor/lektor,lektor/lektor
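The fixture here shadows a scratch_project_data fixture from conftest.py: pytest resolves the module-level definition first and injects the conftest one as its argument, so the override can tweak the project before any builder fixture sees it. The conftest side is not part of this row; the sketch below is a hypothetical shape, for illustration only:

```python
# Hypothetical conftest.py counterpart (not from the commit); the real
# fixture ships with Lektor's test suite and is richer than this.
import pytest


@pytest.fixture
def scratch_project_data(tmpdir):
    # Lay out a minimal project skeleton as a py.path.local directory;
    # the test module's same-named fixture receives this object and may
    # append a [pagination] section to models/page.ini before building.
    project = tmpdir.mkdir("scratch-proj")
    project.mkdir("models").join("page.ini").write_text(u"[model]\nname = Page\n", "utf-8")
    return project
```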
---
commit: 3316b3fdde76125515e056abf403f509a6b7c454
old_file: lookup_from_filesystem.py
new_file: lookup_from_filesystem.py
old_contents: (empty)
new_contents:

```python
from __future__ import print_function
import os
import sys

from movie_util import filenames_to_search_strings, print_movies
from filmtipset_util import get_grades

def is_proper_movie_file(filename):
    FILE_ENDINGS = [".mkv", ".mp4", ".avi", ".iso", "mpeg", ]
    if filename[-4:] in FILE_ENDINGS:
        return True
    elif (filename.find(".") == -1 and not filename.endswith("-ignore")):
        return True
    return False

def get_movies(dir):
    movies = os.listdir(dir)
    movies = [movie for movie in movies if is_proper_movie_file(movie)]
    return movies

def main(directory, debug=False):
    print("Loading movies from: %s" % directory)
    movies = get_movies(directory)
    movies = filenames_to_search_strings(movies)
    graded = get_grades(movies, debug=debug)
    print_movies("Movies seen, by grade", sorted(filter(lambda x: x[1] == u'seen', graded), reverse=True))
    print_movies("Movies not seen, by grade", sorted(filter(lambda x: x[1] != u'seen', graded), reverse=True))

# Possible calls:
# lookup_from_filesystem.py /My-Movies/
# lookup_from_filesystem.py /My-Movies/ --verbose
if __name__ == "__main__":
    directory = sys.argv[1]
    debug = False
    if len(sys.argv) == 3:
        if sys.argv[2] == "--verbose":
            debug = True
    main(directory=directory, debug=debug)
```

subject: Add utility for looking up grades for files in a directory.
message: Add utility for looking up grades for files in a directory.
lang: Python
license: mit
repos: EmilStenstrom/nephele
---
commit: 9bffaf3054960b88daec22e1206fe15e345716fc
old_file: tests/terminal_tests/TerminalCreationTest.py
new_file: tests/terminal_tests/TerminalCreationTest.py
old_contents: (empty)
new_contents:

```python
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Terminal

class TempClass:
    pass

class TerminalCreationTest(TestCase):
    def test_createWithSymbol(self):
        ter = Terminal('a', None)
        self.assertEqual('a', ter.symbol(), 'Terminal should return same symbol')

    def test_createWithNumber(self):
        ter = Terminal(5, None)
        self.assertEqual(5, ter.symbol(), 'Terminal should return same number')

    def test_createWithClass(self):
        ter = Terminal(TempClass, None)
        self.assertEqual(TempClass, ter.symbol(), 'Terminal should return same class')

    def test_createWithInstance(self):
        inst = TempClass()
        ter = Terminal(inst, None)
        self.assertEqual(inst, ter.symbol(), 'Terminal should return same instance')
```

subject: Add creation tests for Terminal
message: Add creation tests for Terminal
lang: Python
license: mit
repos: PatrikValkovic/grammpy
---
commit: 28e9129a71cac0ab60071d6e2a6bd258312703a8
old_file: example_script3.py
new_file: example_script3.py
old_contents: (empty)
new_contents:

```python
"""
Usage: python -m recipy example_script3.py OUTPUT.npy
"""
from __future__ import print_function
import sys

import numpy

if len(sys.argv) < 2:
    print(__doc__, file=sys.stderr)
    sys.exit(1)

arr = numpy.arange(10)
arr = arr + 500
# We've made a fairly big change here!
numpy.save(sys.argv[1], arr)
```

subject: Add example script for python -m recipy usage
message: Add example script for python -m recipy usage
lang: Python
license: apache-2.0
repos: github4ry/recipy,musically-ut/recipy,github4ry/recipy,MBARIMike/recipy,MichielCottaar/recipy,MBARIMike/recipy,recipy/recipy,recipy/recipy,MichielCottaar/recipy,musically-ut/recipy
---
commit: 46c241e7cfae717d5dfd5352846c2568079280a4
old_file: b_spline.py
new_file: b_spline.py
old_contents: (empty)
new_contents:

```python
''' Module provides functions for points interpolation using b-splines of 2nd order '''
import numpy as np
from matplotlib import pyplot as plt

def b_spline_2(x):
    n = len(x)
    x = np.hstack([np.linspace(0, n-1, n)[np.newaxis].T, x])
    M0 = np.array([[2, -4, 2], [0, 4, -3], [0, 0, 1]])
    M1 = np.array([[1, -2, 1], [1, 2, -2], [0, 0, 1]])
    M2 = np.array([[1, -2, 1], [1, 2, -3], [0, 0, 2]])
    lst = [0.5*x.T[:,0:3].dot(M0)]
    for i in range(1,n-3):
        lst.append(0.5*x.T[:,i:i+3].dot(M1))
    lst.append(0.5 * x.T[:,-3:].dot(M2))
    param = [[(lst[i][1:])[:, 1:]] for i in range(len(lst))]
    param = np.reshape(param, [len(lst), 12])
    np.save('param_spline_2', param)
    return lst

def interpolate(points, graph=False):
    res = 100  # step of x axis
    SP = b_spline_2(points)  # type: np.ndarray
    if graph:
        t = np.arange(0, points.shape[0])
        t2 = np.array([np.linspace(0, 1, res)**0, np.linspace(0, 1, res), np.linspace(0, 1, res)**2])
        x = np.array([np.array(SP[0]).dot(t2)][0])
        for K in range(1, len(SP)):
            f = np.array(SP[K]).dot(t2)
            x = np.hstack([x, np.array(SP[K]).dot(t2)])
        # plotting against vector x[0] instead of t
        plt.plot(t, points.T[0], t, points.T[1], t, points.T[2], t, points.T[3], t, points.T[4], t, points.T[5],
                 x[0], x[6], x[0], x[1], x[0], x[2], x[0], x[3], x[0], x[4], x[0], x[5])
        plt.show()
```

subject: Create module for points interpolation using B-spline of 2nd degree.
message: Create module for points interpolation using B-spline of 2nd degree.
lang: Python
license: mit
repos: petroolg/robo-spline
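For orientation, a small driver showing how interpolate might be invoked. The plotting branch hard-codes points.T[0] through points.T[5], so the input needs exactly six data columns; the random walk here is illustrative only:

```python
import numpy as np
from b_spline import interpolate

# 20 samples of a six-dimensional trajectory (e.g. six robot joints).
# interpolate() prepends a time column internally and, with graph=True,
# plots the raw points against the fitted spline segments.
points = np.cumsum(np.random.randn(20, 6), axis=0)
interpolate(points, graph=True)
```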
---
commit: 62bef762a3d999e1d9f3b551320e22619d83b7ca
old_file: src/etc/latest-unix-snaps.py
new_file: src/etc/latest-unix-snaps.py
old_contents: (empty)
new_contents:

```python
#!/usr/bin/env python
import os, tarfile, hashlib, re, shutil, sys
from snapshot import *

f = open(snapshotfile)
date = None
rev = None
platform = None
snap = None
i = 0

newestSet = {}
for line in f.readlines():
    i += 1
    parsed = parse_line(i, line)
    if (not parsed): continue
    if parsed["type"] == "snapshot":
        if (len(newestSet) == 0 or parsed["date"] > newestSet["date"]):
            newestSet["date"] = parsed["date"]
            newestSet["rev"] = parsed["rev"]
            newestSet["files"] = []
            addingMode = True
        else:
            addingMode = False
    elif addingMode == True and parsed["type"] == "file":
        tux = re.compile("linux", re.IGNORECASE)
        if (tux.match(parsed["platform"]) != None):
            ff = {}
            ff["platform"] = parsed["platform"]
            ff["hash"] = parsed["hash"]
            newestSet["files"] += [ff]

def download_new_file(date, rev, platform, hsh):
    snap = full_snapshot_name(date, rev, platform, hsh)
    dl = os.path.join(download_dir_base, snap)
    url = download_url_base + "/" + snap
    if (not os.path.exists(dl)):
        print("downloading " + url)
        get_url_to_file(url, dl)
    if (snap_filename_hash_part(snap) == hash_file(dl)):
        print("got download with ok hash")
    else:
        raise Exception("bad hash on download")

for ff in newestSet["files"]:
    download_new_file(newestSet["date"], newestSet["rev"], ff["platform"], ff["hash"])
```

subject: Add a Python script which downloads only the latest Linux snapshots (derived from other scripts here)
message: Add a Python script which downloads only the latest Linux snapshots (derived from other scripts here)
lang: Python
license: apache-2.0
repos:
pshc/rust,TheNeikos/rust,AerialX/rust,jroesch/rust,reem/rust,rprichard/rust,mahkoh/rust,LeoTestard/rust,jroesch/rust,philyoon/rust,P1start/rust,P1start/rust,pythonesque/rust,zachwick/rust,ebfull/rust,mvdnes/rust,pshc/rust,aepsil0n/rust,robertg/rust,carols10cents/rust,emk/rust,sarojaba/rust-doc-korean,rohitjoshi/rust,servo/rust,cllns/rust,rohitjoshi/rust,rprichard/rust,carols10cents/rust,cllns/rust,zaeleus/rust,philyoon/rust,seanrivera/rust,aidancully/rust,mitsuhiko/rust,seanrivera/rust,mihneadb/rust,jroesch/rust,aepsil0n/rust,aepsil0n/rust,kimroen/rust,reem/rust,aturon/rust,servo/rust,carols10cents/rust,sarojaba/rust-doc-korean,nham/rust,andars/rust,rprichard/rust,dinfuehr/rust,0x73/rust,GrahamDennis/rand,pythonesque/rust,l0kod/rust,kmcallister/rust,l0kod/rust,vhbit/rust,j16r/rust,XMPPwocky/rust,AerialX/rust,stepancheg/rust-ide-rust,TheNeikos/rust,sae-bom/rust,P1start/rust,defuz/rust,pythonesque/rust,seanrivera/rust,SiegeLord/rust,miniupnp/rust,pythonesque/rust,ebfull/rust,krzysz00/rust,fabricedesre/rust,GBGamer/rust,pczarn/rust,ruud-v-a/rust,erickt/rust,ejjeong/rust,aidancully/rust,LeoTestard/rust,defuz/rust,jroesch/rust,jashank/rust,nwin/rust,jashank/rust,mvdnes/rust,mdinger/rust,gifnksm/rust,pshc/rust,pshc/rust,defuz/rust,zubron/rust,andars/rust,waynenilsen/rand,stepancheg/rust-ide-rust,mihneadb/rust,sae-bom/rust,victorvde/rust,pshc/rust,P1start/rust,fabricedesre/rust,richo/rust,zubron/rust,retep998/rand,fabricedesre/rust,quornian/rust,ruud-v-a/rust,philyoon/rust,TheNeikos/rust,bombless/rust,zubron/rust,michaelballantyne/rust-gpu,omasanori/rust,servo/rust,l0kod/rust,jbclements/rust,ebfull/rust,jbclements/rust,zubron/rust,kmcallister/rust,erickt/rust,andars/rust,quornian/rust,defuz/rust,servo/rust,kmcallister/rust,krzysz00/rust,0x73/rust,0x73/rust,miniupnp/rust,dinfuehr/rust,richo/rust,jbclements/rust,vhbit/rust,gifnksm/rust,kimroen/rust,shepmaster/rand,miniupnp/rust,stepancheg/rust-ide-rust,erickt/rust,pshc/rust,j16r/rust,mvdnes/rust,ruud-v-a/rust,untitaker/rust,jashank/rust,emk/rust,mdinger/rust,emk/rust,emk/rust,AerialX/rust-rt-minimal,stepancheg/rust-ide-rust,l0kod/rust,sae-bom/rust,kmcallister/rust,seanrivera/rust,KokaKiwi/rust,victorvde/rust,aneeshusa/rust,Ryman/rust,emk/rust,avdi/rust,jroesch/rust,richo/rust,kimroen/rust,l0kod/rust,dwillmer/rust,fabricedesre/rust,pelmers/rust,krzysz00/rust,victorvde/rust,l0kod/rust,TheNeikos/rust,KokaKiwi/rust,nham/rust,gifnksm/rust,bombless/rust,hauleth/rust,servo/rust,aturon/rust,fabricedesre/rust,quornian/rust,dwillmer/rust,KokaKiwi/rust,aepsil0n/rust,zaeleus/rust,ruud-v-a/rust,aidancully/rust,Ryman/rust,ebfull/rust,kwantam/rust,mihneadb/rust,AerialX/rust-rt-minimal,hauleth/rust,barosl/rust,pshc/rust,emk/rust,XMPPwocky/rust,nham/rust,pelmers/rust,vhbit/rust,pczarn/rust,kwantam/rust,carols10cents/rust,pczarn/rust,nham/rust,jbclements/rust,michaelballantyne/rust-gpu,Ryman/rust,graydon/rust,richo/rust,dwillmer/rust,michaelballantyne/rust-gpu,omasanori/rust,aidancully/rust,bombless/rust,erickt/rust,graydon/rust,mahkoh/rust,TheNeikos/rust,aneeshusa/rust,j16r/rust,philyoon/rust,pshc/rust,vhbit/rust,bhickey/rand,mdinger/rust,rprichard/rust,zaeleus/rust,rprichard/rust,dwillmer/rust,aturon/rust,sarojaba/rust-doc-korean,zachwick/rust,ebfull/rand,GBGamer/rust,graydon/rust,pythonesque/rust,hauleth/rust,mahkoh/rust,ebfull/rust,graydon/rust,sarojaba/rust-doc-korean,ktossell/rust,omasanori/rust,XMPPwocky/rust,XMPPwocky/rust,ktossell/rust,nwin/rust,stepancheg/rust-ide-rust,seanrivera/rust,jbclements/rust,GBGamer/rust,0x73/rust,victorvde/rust,zachwick/rust,rober
tg/rust,servo/rust,graydon/rust,sae-bom/rust,ejjeong/rust,aidancully/rust,aneeshusa/rust,kimroen/rust,jroesch/rust,avdi/rust,zaeleus/rust,pelmers/rust,mdinger/rust,gifnksm/rust,victorvde/rust,aturon/rust,aturon/rust,ebfull/rust,miniupnp/rust,nham/rust,fabricedesre/rust,l0kod/rust,quornian/rust,kimroen/rust,nwin/rust,SiegeLord/rust,pczarn/rust,barosl/rust,kwantam/rust,mvdnes/rust,michaelballantyne/rust-gpu,mahkoh/rust,bombless/rust,mitsuhiko/rust,erickt/rust,barosl/rust,untitaker/rust,aepsil0n/rust,nwin/rust,krzysz00/rust,bombless/rust-docs-chinese,barosl/rust,untitaker/rust,andars/rust,pythonesque/rust,0x73/rust,nham/rust,quornian/rust,jbclements/rust,pczarn/rust,ejjeong/rust,reem/rust,KokaKiwi/rust,mvdnes/rust,vhbit/rust,jashank/rust,mihneadb/rust,ktossell/rust,LeoTestard/rust,mihneadb/rust,AerialX/rust-rt-minimal,zachwick/rust,ejjeong/rust,barosl/rust,AerialX/rust,mdinger/rust,krzysz00/rust,0x73/rust,KokaKiwi/rust,avdi/rust,jashank/rust,rprichard/rust,reem/rust,nwin/rust,seanrivera/rust,Ryman/rust,sae-bom/rust,cllns/rust,mitsuhiko/rust,mdinger/rust,robertg/rust,victorvde/rust,omasanori/rust,XMPPwocky/rust,rohitjoshi/rust,untitaker/rust,richo/rust,ktossell/rust,jbclements/rust,GBGamer/rust,SiegeLord/rust,LeoTestard/rust,pelmers/rust,vhbit/rust,krzysz00/rust,aidancully/rust,LeoTestard/rust,Ryman/rust,ruud-v-a/rust,dwillmer/rust,nham/rust,michaelballantyne/rust-gpu,carols10cents/rust,andars/rust,rohitjoshi/rust,mahkoh/rust,Ryman/rust,kwantam/rust,dinfuehr/rust,miniupnp/rust,kimroen/rust,barosl/rust,TheNeikos/rust,cllns/rust,zubron/rust,AerialX/rust-rt-minimal,avdi/rust,mvdnes/rust,j16r/rust,pczarn/rust,ktossell/rust,pelmers/rust,gifnksm/rust,cllns/rust,aturon/rust,cllns/rust,bluss/rand,mitsuhiko/rust,sarojaba/rust-doc-korean,stepancheg/rust-ide-rust,kwantam/rust,miniupnp/rust,mahkoh/rust,nwin/rust,dwillmer/rust,zaeleus/rust,reem/rust,arthurprs/rand,dinfuehr/rust,AerialX/rust-rt-minimal,kimroen/rust,LeoTestard/rust,robertg/rust,fabricedesre/rust,sarojaba/rust-doc-korean,GBGamer/rust,servo/rust,miniupnp/rust,dwillmer/rust,michaelballantyne/rust-gpu,hauleth/rust,philyoon/rust,rohitjoshi/rust,jashank/rust,kmcallister/rust,untitaker/rust,AerialX/rust-rt-minimal,carols10cents/rust,aturon/rust,reem/rust,jashank/rust,jashank/rust,jbclements/rust,vhbit/rust,dwillmer/rust,l0kod/rust,AerialX/rust,j16r/rust,mitsuhiko/rust,quornian/rust,zubron/rust,aepsil0n/rust,andars/rust,sae-bom/rust,mitsuhiko/rust,zachwick/rust,pczarn/rust,dinfuehr/rust,avdi/rust,nwin/rust,P1start/rust,quornian/rust,rohitjoshi/rust,XMPPwocky/rust,aneeshusa/rust,KokaKiwi/rust,zubron/rust,ejjeong/rust,mihneadb/rust,SiegeLord/rust,kmcallister/rust,richo/rust,kmcallister/rust,AerialX/rust,achanda/rand,erickt/rust,hauleth/rust,AerialX/rust,Ryman/rust,michaelballantyne/rust-gpu,omasanori/rust,zachwick/rust,P1start/rust,graydon/rust,untitaker/rust,LeoTestard/rust,gifnksm/rust,GBGamer/rust,0x73/rust,barosl/rust,avdi/rust,ktossell/rust,nwin/rust,bombless/rust,bombless/rust,zubron/rust,miniupnp/rust,SiegeLord/rust,dinfuehr/rust,jbclements/rust,philyoon/rust,ruud-v-a/rust,pythonesque/rust,defuz/rust,ktossell/rust,vhbit/rust,zaeleus/rust,GBGamer/rust,emk/rust,pelmers/rust,jroesch/rust,jroesch/rust,P1start/rust,SiegeLord/rust,sarojaba/rust-doc-korean,SiegeLord/rust,omasanori/rust,stepancheg/rust-ide-rust,kwantam/rust,j16r/rust,mitsuhiko/rust,ejjeong/rust,robertg/rust,j16r/rust,robertg/rust,defuz/rust,GBGamer/rust,huonw/rand,aneeshusa/rust,aneeshusa/rust,erickt/rust,hauleth/rust
|
Add a Python script which downloads only the latest Linux snapshots (derived from other scripts here)
|
#!/usr/bin/env python
import os, tarfile, hashlib, re, shutil, sys
from snapshot import *
f = open(snapshotfile)
date = None
rev = None
platform = None
snap = None
i = 0
newestSet = {}
addingMode = False  # guard: a "file" line before any "snapshot" line must not raise NameError
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if (not parsed): continue
if parsed["type"] == "snapshot":
if (len(newestSet) == 0 or parsed["date"] > newestSet["date"]):
newestSet["date"] = parsed["date"]
newestSet["rev"] = parsed["rev"]
newestSet["files"] = []
addingMode = True
else:
addingMode = False
elif addingMode == True and parsed["type"] == "file":
tux = re.compile("linux", re.IGNORECASE)
if (tux.match(parsed["platform"]) != None):
ff = {}
ff["platform"] = parsed["platform"]
ff["hash"] = parsed["hash"]
newestSet["files"] += [ff]
def download_new_file (date, rev, platform, hsh):
snap = full_snapshot_name(date, rev, platform, hsh)
dl = os.path.join(download_dir_base, snap)
url = download_url_base + "/" + snap
if (not os.path.exists(dl)):
print("downloading " + url)
get_url_to_file(url, dl)
if (snap_filename_hash_part(snap) == hash_file(dl)):
print("got download with ok hash")
else:
raise Exception("bad hash on download")
for ff in newestSet["files"]:
download_new_file (newestSet["date"], newestSet["rev"], ff["platform"], ff["hash"])
|
<commit_before><commit_msg>Add a Python script which downloads only the latest Linux snapshots (derived from other scripts here)<commit_after>
|
#!/usr/bin/env python
import os, tarfile, hashlib, re, shutil, sys
from snapshot import *
f = open(snapshotfile)
date = None
rev = None
platform = None
snap = None
i = 0
newestSet = {}
addingMode = False  # guard: a "file" line before any "snapshot" line must not raise NameError
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if (not parsed): continue
if parsed["type"] == "snapshot":
if (len(newestSet) == 0 or parsed["date"] > newestSet["date"]):
newestSet["date"] = parsed["date"]
newestSet["rev"] = parsed["rev"]
newestSet["files"] = []
addingMode = True
else:
addingMode = False
elif addingMode == True and parsed["type"] == "file":
tux = re.compile("linux", re.IGNORECASE)
if (tux.match(parsed["platform"]) != None):
ff = {}
ff["platform"] = parsed["platform"]
ff["hash"] = parsed["hash"]
newestSet["files"] += [ff]
def download_new_file (date, rev, platform, hsh):
snap = full_snapshot_name(date, rev, platform, hsh)
dl = os.path.join(download_dir_base, snap)
url = download_url_base + "/" + snap
if (not os.path.exists(dl)):
print("downloading " + url)
get_url_to_file(url, dl)
if (snap_filename_hash_part(snap) == hash_file(dl)):
print("got download with ok hash")
else:
raise Exception("bad hash on download")
for ff in newestSet["files"]:
download_new_file (newestSet["date"], newestSet["rev"], ff["platform"], ff["hash"])
|
Add a Python script which downloads only the latest Linux snapshots (derived from other scripts here)#!/usr/bin/env python
import os, tarfile, hashlib, re, shutil, sys
from snapshot import *
f = open(snapshotfile)
date = None
rev = None
platform = None
snap = None
i = 0
newestSet = {}
addingMode = False  # guard: a "file" line before any "snapshot" line must not raise NameError
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if (not parsed): continue
if parsed["type"] == "snapshot":
if (len(newestSet) == 0 or parsed["date"] > newestSet["date"]):
newestSet["date"] = parsed["date"]
newestSet["rev"] = parsed["rev"]
newestSet["files"] = []
addingMode = True
else:
addingMode = False
elif addingMode == True and parsed["type"] == "file":
tux = re.compile("linux", re.IGNORECASE)
if (tux.match(parsed["platform"]) != None):
ff = {}
ff["platform"] = parsed["platform"]
ff["hash"] = parsed["hash"]
newestSet["files"] += [ff]
def download_new_file (date, rev, platform, hsh):
snap = full_snapshot_name(date, rev, platform, hsh)
dl = os.path.join(download_dir_base, snap)
url = download_url_base + "/" + snap
if (not os.path.exists(dl)):
print("downloading " + url)
get_url_to_file(url, dl)
if (snap_filename_hash_part(snap) == hash_file(dl)):
print("got download with ok hash")
else:
raise Exception("bad hash on download")
for ff in newestSet["files"]:
download_new_file (newestSet["date"], newestSet["rev"], ff["platform"], ff["hash"])
|
<commit_before><commit_msg>Add a Python script which downloads only the latest Linux snapshots (derived from other scripts here)<commit_after>#!/usr/bin/env python
import os, tarfile, hashlib, re, shutil, sys
from snapshot import *
f = open(snapshotfile)
date = None
rev = None
platform = None
snap = None
i = 0
newestSet = {}
addingMode = False  # guard: a "file" line before any "snapshot" line must not raise NameError
for line in f.readlines():
i += 1
parsed = parse_line(i, line)
if (not parsed): continue
if parsed["type"] == "snapshot":
if (len(newestSet) == 0 or parsed["date"] > newestSet["date"]):
newestSet["date"] = parsed["date"]
newestSet["rev"] = parsed["rev"]
newestSet["files"] = []
addingMode = True
else:
addingMode = False
elif addingMode == True and parsed["type"] == "file":
tux = re.compile("linux", re.IGNORECASE)
if (tux.match(parsed["platform"]) != None):
ff = {}
ff["platform"] = parsed["platform"]
ff["hash"] = parsed["hash"]
newestSet["files"] += [ff]
def download_new_file (date, rev, platform, hsh):
snap = full_snapshot_name(date, rev, platform, hsh)
dl = os.path.join(download_dir_base, snap)
url = download_url_base + "/" + snap
if (not os.path.exists(dl)):
print("downloading " + url)
get_url_to_file(url, dl)
if (snap_filename_hash_part(snap) == hash_file(dl)):
print("got download with ok hash")
else:
raise Exception("bad hash on download")
for ff in newestSet["files"]:
download_new_file (newestSet["date"], newestSet["rev"], ff["platform"], ff["hash"])
|
|
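A minimal, self-contained sketch of the newest-snapshot selection logic above, with stand-in parsed records; parse_line() and the snapshot module's return shape are not part of this record, so the dict fields here are an assumption inferred from how the script uses them:

import re

# stand-ins for what snapshot.parse_line() appears to yield (assumed shape)
parsed_lines = [
    {"type": "snapshot", "date": "2013-01-01", "rev": "aaaa"},
    {"type": "file", "platform": "linux-x86_64", "hash": "h1"},
    {"type": "snapshot", "date": "2013-06-01", "rev": "bbbb"},
    {"type": "file", "platform": "linux-i386", "hash": "h2"},
    {"type": "file", "platform": "macos-x86_64", "hash": "h3"},
]

newest = {}
adding = False
tux = re.compile("linux", re.IGNORECASE)
for parsed in parsed_lines:
    if parsed["type"] == "snapshot":
        adding = len(newest) == 0 or parsed["date"] > newest["date"]
        if adding:
            newest = {"date": parsed["date"], "rev": parsed["rev"], "files": []}
    elif adding and parsed["type"] == "file" and tux.match(parsed["platform"]):
        newest["files"].append({"platform": parsed["platform"], "hash": parsed["hash"]})

print(newest)  # keeps only the 2013-06-01 snapshot's Linux files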
e46f171f87d756bcd2f25dbc5d5a56422e1bcbd8
|
kufpybio/gorestapi.py
|
kufpybio/gorestapi.py
|
# https://www.ebi.ac.uk/QuickGO/WebServices.html
import os
import csv
import restapi
class GORESTAPI(restapi.RESTAPI):
def __init__(self, download_folder="go_files"):
self._download_folder = download_folder
self._base_url = "https://www.ebi.ac.uk/QuickGO/GTerm?"
if not os.path.exists(self._download_folder):
os.makedirs(self._download_folder)
def go_term_information_xml(self, go_id):
"""e.g. GO:0003824"""
data = self._get_data(
"%s/%s.xml", "id=%s&format=oboxml", go_id)
        return data
|
Add a REST API for GO
|
Add a REST API for GO
|
Python
|
isc
|
konrad/kufpybio
|
Add a REST API for GO
|
# https://www.ebi.ac.uk/QuickGO/WebServices.html
import os
import csv
import restapi
class GORESTAPI(restapi.RESTAPI):
def __init__(self, download_folder="go_files"):
self._download_folder = download_folder
self._base_url = "https://www.ebi.ac.uk/QuickGO/GTerm?"
if not os.path.exists(self._download_folder):
os.makedirs(self._download_folder)
def go_term_information_xml(self, go_id):
"""e.g. GO:0003824"""
data = self._get_data(
"%s/%s.xml", "id=%s&format=oboxml", go_id)
        return data
|
<commit_before><commit_msg>Add a REST API for GO<commit_after>
|
# https://www.ebi.ac.uk/QuickGO/WebServices.html
import os
import csv
import restapi
class GORESTAPI(restapi.RESTAPI):
def __init__(self, download_folder="go_files"):
self._download_folder = download_folder
self._base_url = "https://www.ebi.ac.uk/QuickGO/GTerm?"
if not os.path.exists(self._download_folder):
os.makedirs(self._download_folder)
def go_term_information_xml(self, go_id):
"""e.g. GO:0003824"""
data = self._get_data(
"%s/%s.xml", "id=%s&format=oboxml", go_id)
        return data
|
Add a REST API for GO# https://www.ebi.ac.uk/QuickGO/WebServices.html
import os
import csv
import restapi
class GORESTAPI(restapi.RESTAPI):
def __init__(self, download_folder="go_files"):
self._download_folder = download_folder
self._base_url = "https://www.ebi.ac.uk/QuickGO/GTerm?"
if not os.path.exists(self._download_folder):
os.makedirs(self._download_folder)
def go_term_information_xml(self, go_id):
"""e.g. GO:0003824"""
data = self._get_data(
"%s/%s.xml", "id=%s&format=oboxml", go_id)
        return data
|
<commit_before><commit_msg>Add a REST API for GO<commit_after># https://www.ebi.ac.uk/QuickGO/WebServices.html
import os
import csv
import restapi
class GORESTAPI(restapi.RESTAPI):
def __init__(self, download_folder="go_files"):
self._download_folder = download_folder
self._base_url = "https://www.ebi.ac.uk/QuickGO/GTerm?"
if not os.path.exists(self._download_folder):
os.makedirs(self._download_folder)
def go_term_information_xml(self, go_id):
"""e.g. GO:0003824"""
data = self._get_data(
"%s/%s.xml", "id=%s&format=oboxml", go_id)
        return data
|
|
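The inherited _get_data() helper from the restapi base class is not shown in this record; a hedged stand-in using the requests library illustrates the QuickGO URL the class builds (the cache-to-download_folder behaviour is an assumption):

import os
import requests

def fetch_go_term_xml(go_id, folder="go_files"):
    # same QuickGO endpoint and oboxml format as GORESTAPI above
    os.makedirs(folder, exist_ok=True)
    path = os.path.join(folder, go_id.replace(":", "_") + ".xml")
    if not os.path.exists(path):
        url = "https://www.ebi.ac.uk/QuickGO/GTerm?id=%s&format=oboxml" % go_id
        response = requests.get(url)
        response.raise_for_status()
        with open(path, "w") as fh:
            fh.write(response.text)
    with open(path) as fh:
        return fh.read()

# fetch_go_term_xml("GO:0003824")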
d59103daa62897996b3585c2a826b092caf95a76
|
non_deterministic.py
|
non_deterministic.py
|
# Non-Deterministic Turing Machine Simulator
class Queue():
def __init__(self):
self.queue = []
def enqueue(self, state, head, string, iter_count):
self.queue.append((state, head, string, iter_count))
def dequeue(self):
item = self.queue[0]
self.queue = self.queue[1:]
return item
def is_empty(self):
return len(self.queue) == 0
class TuringMachine():
def __init__(self, transitions, accepted_states, max_iterations):
self.transitions = transitions
self.accepted_states = accepted_states
self.max_iterations = max_iterations
def validate_string(self, string):
head = 0
state = self.transitions[0][0]
iter_count = 1
self.queue = Queue()
        self.queue.enqueue(state, head, string, iter_count)  # argument order must match enqueue()
outputs = self.validate_symbol()
return self.output(outputs)
def validate_symbol(self):
if self.queue.is_empty():
return [0]
(state, head, string, iter_count) = self.queue.dequeue()
outputs = []
symbol = string[head]
for (current_state, current_symbol, next_symbol, move, next_state) in self.transitions:
if state == current_state and (symbol == current_symbol or current_symbol == 'x'):
                if next_state in self.accepted_states and head == len(string) - 1:
                    return [1]
                if iter_count > self.max_iterations:
                    outputs = outputs + ['u']
                else:
                    head_copy, string_copy = head, list(string)  # copy the tape so branches don't share it
if next_symbol != 'x':
string_copy[head] = next_symbol
(head_copy, string_copy) = self.update_values(head_copy, string_copy, move)
self.queue.enqueue(next_state, head_copy, string_copy, iter_count + 1)
outputs = outputs + self.validate_symbol()
return outputs
def update_values(self, head, string, move):
if move == 'r':
head += 1
if head == len(string):
string = string + ['_']
elif move == 'l':
head -= 1
            if head < 0:  # moved off the left end: extend the tape
                string = ['_'] + string
                head = 0
return (head, string)
def output(self, outputs):
if 1 in outputs:
return 'Accepted'
if 0 in outputs:
return 'Rejected'
return 'Undefined'
# Example: Automata that accepts strings that ends with 'b'
transitions = [(0, 'x', 'x', 'r', 0), (0, 'b', 'b', 's', 1)]
accepted_states = [1]
max_iterations = 50
input_strings = ['aa', 'ab', 'aaab', 'ababab', 'bba']
turing_machine = TuringMachine(transitions, accepted_states, max_iterations)
for string in input_strings:
output = turing_machine.validate_string(list(string))
print(string, output)
|
Implement non-deterministic turing machine in Python
|
Implement non-deterministic turing machine in Python
|
Python
|
mit
|
yedhukrishnan/turing-machine,yedhukrishnan/turing-machine
|
Implement non-deterministic turing machine in Python
|
# Non-Deterministic Turing Machine Simulator
class Queue():
def __init__(self):
self.queue = []
def enqueue(self, state, head, string, iter_count):
self.queue.append((state, head, string, iter_count))
def dequeue(self):
item = self.queue[0]
self.queue = self.queue[1:]
return item
def is_empty(self):
return len(self.queue) == 0
class TuringMachine():
def __init__(self, transitions, accepted_states, max_iterations):
self.transitions = transitions
self.accepted_states = accepted_states
self.max_iterations = max_iterations
def validate_string(self, string):
head = 0
state = self.transitions[0][0]
iter_count = 1
self.queue = Queue()
        self.queue.enqueue(state, head, string, iter_count)  # argument order must match enqueue()
outputs = self.validate_symbol()
return self.output(outputs)
def validate_symbol(self):
if self.queue.is_empty():
return [0]
(state, head, string, iter_count) = self.queue.dequeue()
outputs = []
symbol = string[head]
for (current_state, current_symbol, next_symbol, move, next_state) in self.transitions:
if state == current_state and (symbol == current_symbol or current_symbol == 'x'):
                if next_state in self.accepted_states and head == len(string) - 1:
                    return [1]
                if iter_count > self.max_iterations:
                    outputs = outputs + ['u']
                else:
                    head_copy, string_copy = head, list(string)  # copy the tape so branches don't share it
if next_symbol != 'x':
string_copy[head] = next_symbol
(head_copy, string_copy) = self.update_values(head_copy, string_copy, move)
self.queue.enqueue(next_state, head_copy, string_copy, iter_count + 1)
outputs = outputs + self.validate_symbol()
return outputs
def update_values(self, head, string, move):
if move == 'r':
head += 1
if head == len(string):
string = string + ['_']
elif move == 'l':
head -= 1
            if head < 0:  # moved off the left end: extend the tape
                string = ['_'] + string
                head = 0
return (head, string)
def output(self, outputs):
if 1 in outputs:
return 'Accepted'
if 0 in outputs:
return 'Rejected'
return 'Undefined'
# Example: Automata that accepts strings that ends with 'b'
transitions = [(0, 'x', 'x', 'r', 0), (0, 'b', 'b', 's', 1)]
accepted_states = [1]
max_iterations = 50
input_strings = ['aa', 'ab', 'aaab', 'ababab', 'bba']
turing_machine = TuringMachine(transitions, accepted_states, max_iterations)
for string in input_strings:
output = turing_machine.validate_string(list(string))
print(string, output)
|
<commit_before><commit_msg>Implement non-deterministic turing machine in Python<commit_after>
|
# Non-Deterministic Turing Machine Simulator
class Queue():
def __init__(self):
self.queue = []
def enqueue(self, state, head, string, iter_count):
self.queue.append((state, head, string, iter_count))
def dequeue(self):
item = self.queue[0]
self.queue = self.queue[1:]
return item
def is_empty(self):
return len(self.queue) == 0
class TuringMachine():
def __init__(self, transitions, accepted_states, max_iterations):
self.transitions = transitions
self.accepted_states = accepted_states
self.max_iterations = max_iterations
def validate_string(self, string):
head = 0
state = self.transitions[0][0]
iter_count = 1
self.queue = Queue()
        self.queue.enqueue(state, head, string, iter_count)  # argument order must match enqueue()
outputs = self.validate_symbol()
return self.output(outputs)
def validate_symbol(self):
if self.queue.is_empty():
return [0]
(state, head, string, iter_count) = self.queue.dequeue()
outputs = []
symbol = string[head]
for (current_state, current_symbol, next_symbol, move, next_state) in self.transitions:
if state == current_state and (symbol == current_symbol or current_symbol == 'x'):
                if next_state in self.accepted_states and head == len(string) - 1:
                    return [1]
                if iter_count > self.max_iterations:
                    outputs = outputs + ['u']
                else:
                    head_copy, string_copy = head, list(string)  # copy the tape so branches don't share it
if next_symbol != 'x':
string_copy[head] = next_symbol
(head_copy, string_copy) = self.update_values(head_copy, string_copy, move)
self.queue.enqueue(next_state, head_copy, string_copy, iter_count + 1)
outputs = outputs + self.validate_symbol()
return outputs
def update_values(self, head, string, move):
if move == 'r':
head += 1
if head == len(string):
string = string + ['_']
elif move == 'l':
head -= 1
            if head < 0:  # moved off the left end: extend the tape
                string = ['_'] + string
                head = 0
return (head, string)
def output(self, outputs):
if 1 in outputs:
return 'Accepted'
if 0 in outputs:
return 'Rejected'
return 'Undefined'
# Example: Automata that accepts strings that ends with 'b'
transitions = [(0, 'x', 'x', 'r', 0), (0, 'b', 'b', 's', 1)]
accepted_states = [1]
max_iterations = 50
input_strings = ['aa', 'ab', 'aaab', 'ababab', 'bba']
turing_machine = TuringMachine(transitions, accepted_states, max_iterations)
for string in input_strings:
output = turing_machine.validate_string(list(string))
print(string, output)
|
Implement non-deterministic turing machine in Python# Non-Deterministic Turing Machine Simulator
class Queue():
def __init__(self):
self.queue = []
def enqueue(self, state, head, string, iter_count):
self.queue.append((state, head, string, iter_count))
def dequeue(self):
item = self.queue[0]
self.queue = self.queue[1:]
return item
def is_empty(self):
return len(self.queue) == 0
class TuringMachine():
def __init__(self, transitions, accepted_states, max_iterations):
self.transitions = transitions
self.accepted_states = accepted_states
self.max_iterations = max_iterations
def validate_string(self, string):
head = 0
state = self.transitions[0][0]
iter_count = 1
self.queue = Queue()
        self.queue.enqueue(state, head, string, iter_count)  # argument order must match enqueue()
outputs = self.validate_symbol()
return self.output(outputs)
def validate_symbol(self):
if self.queue.is_empty():
return [0]
(state, head, string, iter_count) = self.queue.dequeue()
outputs = []
symbol = string[head]
for (current_state, current_symbol, next_symbol, move, next_state) in self.transitions:
if state == current_state and (symbol == current_symbol or current_symbol == 'x'):
                if next_state in self.accepted_states and head == len(string) - 1:
                    return [1]
                if iter_count > self.max_iterations:
                    outputs = outputs + ['u']
                else:
                    head_copy, string_copy = head, list(string)  # copy the tape so branches don't share it
if next_symbol != 'x':
string_copy[head] = next_symbol
(head_copy, string_copy) = self.update_values(head_copy, string_copy, move)
self.queue.enqueue(next_state, head_copy, string_copy, iter_count + 1)
outputs = outputs + self.validate_symbol()
return outputs
def update_values(self, head, string, move):
if move == 'r':
head += 1
if head == len(string):
string = string + ['_']
elif move == 'l':
head -= 1
            if head < 0:  # moved off the left end: extend the tape
                string = ['_'] + string
                head = 0
return (head, string)
def output(self, outputs):
if 1 in outputs:
return 'Accepted'
if 0 in outputs:
return 'Rejected'
return 'Undefined'
# Example: Automata that accepts strings that ends with 'b'
transitions = [(0, 'x', 'x', 'r', 0), (0, 'b', 'b', 's', 1)]
accepted_states = [1]
max_iterations = 50
input_strings = ['aa', 'ab', 'aaab', 'ababab', 'bba']
turing_machine = TuringMachine(transitions, accepted_states, max_iterations)
for string in input_strings:
output = turing_machine.validate_string(list(string))
print(string, output)
|
<commit_before><commit_msg>Implement non-deterministic turing machine in Python<commit_after># Non-Deterministic Turing Machine Simulator
class Queue():
def __init__(self):
self.queue = []
def enqueue(self, state, head, string, iter_count):
self.queue.append((state, head, string, iter_count))
def dequeue(self):
item = self.queue[0]
self.queue = self.queue[1:]
return item
def is_empty(self):
return len(self.queue) == 0
class TuringMachine():
def __init__(self, transitions, accepted_states, max_iterations):
self.transitions = transitions
self.accepted_states = accepted_states
self.max_iterations = max_iterations
def validate_string(self, string):
head = 0
state = self.transitions[0][0]
iter_count = 1
self.queue = Queue()
        self.queue.enqueue(state, head, string, iter_count)  # argument order must match enqueue()
outputs = self.validate_symbol()
return self.output(outputs)
def validate_symbol(self):
if self.queue.is_empty():
return [0]
(state, head, string, iter_count) = self.queue.dequeue()
outputs = []
symbol = string[head]
for (current_state, current_symbol, next_symbol, move, next_state) in self.transitions:
if state == current_state and (symbol == current_symbol or current_symbol == 'x'):
                if next_state in self.accepted_states and head == len(string) - 1:
                    return [1]
                if iter_count > self.max_iterations:
                    outputs = outputs + ['u']
                else:
                    head_copy, string_copy = head, list(string)  # copy the tape so branches don't share it
if next_symbol != 'x':
string_copy[head] = next_symbol
(head_copy, string_copy) = self.update_values(head_copy, string_copy, move)
self.queue.enqueue(next_state, head_copy, string_copy, iter_count + 1)
outputs = outputs + self.validate_symbol()
return outputs
def update_values(self, head, string, move):
if move == 'r':
head += 1
if head == len(string):
string = string + ['_']
elif move == 'l':
head -= 1
            if head < 0:  # moved off the left end: extend the tape
                string = ['_'] + string
                head = 0
return (head, string)
def output(self, outputs):
if 1 in outputs:
return 'Accepted'
if 0 in outputs:
return 'Rejected'
return 'Undefined'
# Example: Automata that accepts strings that ends with 'b'
transitions = [(0, 'x', 'x', 'r', 0), (0, 'b', 'b', 's', 1)]
accepted_states = [1]
max_iterations = 50
input_strings = ['aa', 'ab', 'aaab', 'ababab', 'bba']
turing_machine = TuringMachine(transitions, accepted_states, max_iterations)
for string in input_strings:
output = turing_machine.validate_string(list(string))
print(string, output)
|
|
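A second worked example reusing the TuringMachine class above — a machine accepting strings that contain at least one 'a' — shows how the 'x' wildcard and the breadth-first queue handle branching:

# state 0 scans right over 'b's; the first 'a' moves to accepting state 1,
# which then wildcard-scans right to the end of the input
transitions = [(0, 'b', 'b', 'r', 0),
               (0, 'a', 'a', 'r', 1),
               (1, 'x', 'x', 'r', 1)]
machine = TuringMachine(transitions, accepted_states=[1], max_iterations=50)
for s in ['bbb', 'ba', 'aab']:
    print(s, machine.validate_string(list(s)))  # Rejected, Accepted, Accepted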
61be745b641689addc9f009311d28a5775d5a18b
|
ctconfig.py
|
ctconfig.py
|
import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
# Options for testing
define(
"output_routes",
default=False,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
|
import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
define(
"output_routes",
default=True,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
|
Set `output_routes` to True by default
|
Set `output_routes` to True by default
|
Python
|
agpl-3.0
|
hfaran/LivesPool,hfaran/LivesPool,hfaran/LivesPool,hfaran/LivesPool
|
import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
# Options for testing
define(
"output_routes",
default=False,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
Set `output_routes` to True by default
|
import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
define(
"output_routes",
default=True,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
|
<commit_before>import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
# Options for testing
define(
"output_routes",
default=False,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
<commit_msg>Set `output_routes` to True by default<commit_after>
|
import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
define(
"output_routes",
default=True,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
|
import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
# Options for testing
define(
"output_routes",
default=False,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
Set `output_routes` to True by defaultimport logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
define(
"output_routes",
default=True,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
|
<commit_before>import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
# Options for testing
define(
"output_routes",
default=False,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
<commit_msg>Set `output_routes` to True by default<commit_after>import logging
import json
from tornado.options import define, options
_CONFIG_FILENAME = "cutthroat.conf"
def define_options():
"""Define defaults for most custom options"""
# Log file and config file paths
options.log_file_prefix = "/var/log/cutthroat/cutthroat.log"
define(
"conf_file_path",
default="/etc/cutthroat/{}".format(_CONFIG_FILENAME),
help="Path for the JSON configuration file with customized options",
type="str"
)
# Port
define(
"port",
default=8888,
help="run on the given port",
type=int
)
# Database options
define(
"sqlite_db",
default="cutthroat.db"
)
define(
"output_routes",
default=True,
type=bool,
help="If enabled, outputs all application routes to `routes.json`"
)
|
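A hedged sketch of how these definitions are typically consumed at startup; the real cutthroat entry point is not part of this record, and applying the JSON overrides by hand (per the conf_file_path help text) is an assumption:

import json
import os.path
import tornado.options
from tornado.options import options

define_options()
tornado.options.parse_command_line()  # e.g. --port=9000
if os.path.exists(options.conf_file_path):
    with open(options.conf_file_path) as fh:
        for key, value in json.load(fh).items():
            setattr(options, key, value)  # only works for options defined above
print(options.port, options.output_routes)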
8c20a2c570aac422106e866d63a8c1e40cc2a98f
|
components/google-cloud/google_cloud_pipeline_components/container/experimental/__init__.py
|
components/google-cloud/google_cloud_pipeline_components/container/experimental/__init__.py
|
# Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline container experimental root."""
|
Add init file to container/experimental directory to recognize as a python module.
|
Add init file to container/experimental directory to recognize as a python module.
PiperOrigin-RevId: 447585056
|
Python
|
apache-2.0
|
kubeflow/pipelines,kubeflow/pipelines,kubeflow/pipelines,kubeflow/pipelines,kubeflow/pipelines,kubeflow/pipelines
|
Add init file to container/experimental directory to recognize as a python module.
PiperOrigin-RevId: 447585056
|
# Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline container experimental root."""
|
<commit_before><commit_msg>Add init file to container/experimental directory to recognize as a python module.
PiperOrigin-RevId: 447585056<commit_after>
|
# Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline container experimental root."""
|
Add init file to container/experimental directory to recognize as a python module.
PiperOrigin-RevId: 447585056# Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline container experimental root."""
|
<commit_before><commit_msg>Add init file to container/experimental directory to recognize as a python module.
PiperOrigin-RevId: 447585056<commit_after># Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Google Cloud Pipeline container experimental root."""
|
|
b9093b09b1bfa1e7bb50a41d03eab61c3a3d9fc5
|
test/test_commonsdowloader.py
|
test/test_commonsdowloader.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Unit tests."""
import unittest
import commonsdownloader
class TestCommonsDownloader(unittest.TestCase):
"""Testing methods from commonsdownloader."""
def test_clean_up_filename(self):
"""Test clean_up_filename."""
values = [('Example.jpg', 'Example.jpg'),
('Example.jpg ', 'Example.jpg'),
(' Example.jpg', 'Example.jpg'),
('My Example.jpg', 'My_Example.jpg')]
for (input_value, expected_value) in values:
self.assertEqual(commonsdownloader.clean_up_filename(input_value),
expected_value)
if __name__ == "__main__":
unittest.main()
|
Add unittests for the commonsdownloader module
|
Add unittests for the commonsdownloader module
Add a unittest module to test the methods in the commonsdownloader module.
Add test for method clean_up_filename()
|
Python
|
mit
|
Commonists/CommonsDownloader
|
Add unittests for the commonsdownloader module
Add a unittest module to test the methods in the commonsdownloader module.
Add test for method clean_up_filename()
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Unit tests."""
import unittest
import commonsdownloader
class TestCommonsDownloader(unittest.TestCase):
"""Testing methods from commonsdownloader."""
def test_clean_up_filename(self):
"""Test clean_up_filename."""
values = [('Example.jpg', 'Example.jpg'),
('Example.jpg ', 'Example.jpg'),
(' Example.jpg', 'Example.jpg'),
('My Example.jpg', 'My_Example.jpg')]
for (input_value, expected_value) in values:
self.assertEqual(commonsdownloader.clean_up_filename(input_value),
expected_value)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unittests for the commonsdownloader module
Add a unittest module to test the methods in the commonsdownloader module.
Add test for method clean_up_filename()<commit_after>
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Unit tests."""
import unittest
import commonsdownloader
class TestCommonsDownloader(unittest.TestCase):
"""Testing methods from commonsdownloader."""
def test_clean_up_filename(self):
"""Test clean_up_filename."""
values = [('Example.jpg', 'Example.jpg'),
('Example.jpg ', 'Example.jpg'),
(' Example.jpg', 'Example.jpg'),
('My Example.jpg', 'My_Example.jpg')]
for (input_value, expected_value) in values:
self.assertEqual(commonsdownloader.clean_up_filename(input_value),
expected_value)
if __name__ == "__main__":
unittest.main()
|
Add unittests for the commonsdownloader module
Add a unittest module to test the methods in the commonsdownloader module.
Add test for method clean_up_filename()#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Unit tests."""
import unittest
import commonsdownloader
class TestCommonsDownloader(unittest.TestCase):
"""Testing methods from commonsdownloader."""
def test_clean_up_filename(self):
"""Test clean_up_filename."""
values = [('Example.jpg', 'Example.jpg'),
('Example.jpg ', 'Example.jpg'),
(' Example.jpg', 'Example.jpg'),
('My Example.jpg', 'My_Example.jpg')]
for (input_value, expected_value) in values:
self.assertEqual(commonsdownloader.clean_up_filename(input_value),
expected_value)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add unittests for the commonsdownloader module
Add a unittest module to test the methods in the commonsdownloader module.
Add test for method clean_up_filename()<commit_after>#!/usr/bin/env python
# -*- coding: latin-1 -*-
"""Unit tests."""
import unittest
import commonsdownloader
class TestCommonsDownloader(unittest.TestCase):
"""Testing methods from commonsdownloader."""
def test_clean_up_filename(self):
"""Test clean_up_filename."""
values = [('Example.jpg', 'Example.jpg'),
('Example.jpg ', 'Example.jpg'),
(' Example.jpg', 'Example.jpg'),
('My Example.jpg', 'My_Example.jpg')]
for (input_value, expected_value) in values:
self.assertEqual(commonsdownloader.clean_up_filename(input_value),
expected_value)
if __name__ == "__main__":
unittest.main()
|
|
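The clean_up_filename() implementation under test is not shown in this record; a minimal sketch satisfying exactly the four cases above would be:

def clean_up_filename(file_name):
    # strip surrounding whitespace, use underscores as Commons filenames do
    return file_name.strip().replace(' ', '_')

assert clean_up_filename(' My Example.jpg ') == 'My_Example.jpg'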
90a0ed45ed56467f3083b262708f81434aa9aaa9
|
tests/test_pipeline_rnaseq.py
|
tests/test_pipeline_rnaseq.py
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.rnaseq
@pytest.mark.pipeline
def test_rnaseq_pipeline():
    """
    Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_rnaseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GRCh38.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/ERR030872_1.fastq \
--file2 /<dataset_dir>/ERR030872_2.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
resource_path + 'kallisto.Human.GRCh38.fasta',
resource_path + 'kallisto.Human.ERR030872_1.fastq',
resource_path + 'kallisto.Human.ERR030872_2.fastq'
],
{'assembly' : 'GRCh38'},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("RNA SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test the pipeline code for the RNA-seq pipeline
|
Test the pipeline code for the RNA-seq pipeline
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq,Multiscale-Genomics/mg-process-fastq
|
Test the pipeline code for the RNA-seq pipeline
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.rnaseq
@pytest.mark.pipeline
def test_rnaseq_pipeline():
    """
    Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_rnaseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GRCh38.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/ERR030872_1.fastq \
--file2 /<dataset_dir>/ERR030872_2.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
resource_path + 'kallisto.Human.GRCh38.fasta',
resource_path + 'kallisto.Human.ERR030872_1.fastq',
resource_path + 'kallisto.Human.ERR030872_2.fastq'
],
{'assembly' : 'GRCh38'},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("RNA SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test the pipeline code for the RNA-seq pipeline<commit_after>
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.rnaseq
@pytest.mark.pipeline
def test_rnaseq_pipeline():
    """
    Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_rnaseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GRCh38.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/ERR030872_1.fastq \
--file2 /<dataset_dir>/ERR030872_2.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
resource_path + 'kallisto.Human.GRCh38.fasta',
resource_path + 'kallisto.Human.ERR030872_1.fastq',
resource_path + 'kallisto.Human.ERR030872_2.fastq'
],
{'assembly' : 'GRCh38'},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("RNA SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
Test the pipeline code for the RNA-seq pipeline"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.rnaseq
@pytest.mark.pipeline
def test_rnaseq_pipeline():
    """
    Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_rnaseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GRCh38.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/ERR030872_1.fastq \
--file2 /<dataset_dir>/ERR030872_2.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
resource_path + 'kallisto.Human.GRCh38.fasta',
resource_path + 'kallisto.Human.ERR030872_1.fastq',
resource_path + 'kallisto.Human.ERR030872_2.fastq'
],
{'assembly' : 'GRCh38'},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("RNA SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
<commit_before><commit_msg>Test the pipeline code for the RNA-seq pipeline<commit_after>"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import os.path
import pytest # pylint: disable=unused-import
from process_rnaseq import process_rnaseq
@pytest.mark.rnaseq
@pytest.mark.pipeline
def test_rnaseq_pipeline():
    """
    Test case to ensure that the RNA-seq pipeline code works.
Running the pipeline with the test data from the command line:
.. code-block:: none
runcompss \
--lang=python \
--library_path=${HOME}/bin \
--pythonpath=/<pyenv_virtenv_dir>/lib/python2.7/site-packages/ \
--log_level=debug \
process_rnaseq.py \
--taxon_id 9606 \
--genome /<dataset_dir>/Human.GRCh38.fasta \
--assembly GRCh38 \
--file /<dataset_dir>/ERR030872_1.fastq \
--file2 /<dataset_dir>/ERR030872_2.fastq
"""
resource_path = os.path.join(os.path.dirname(__file__), "data/")
rs_handle = process_rnaseq()
rs_files, rs_meta = rs_handle.run(
[
resource_path + 'kallisto.Human.GRCh38.fasta',
resource_path + 'kallisto.Human.ERR030872_1.fastq',
resource_path + 'kallisto.Human.ERR030872_2.fastq'
],
{'assembly' : 'GRCh38'},
[]
)
print(rs_files)
# Add tests for all files created
for f_out in rs_files:
print("RNA SEQ RESULTS FILE:", f_out)
assert os.path.isfile(f_out) is True
assert os.path.getsize(f_out) > 0
|
|
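The @pytest.mark.rnaseq / @pytest.mark.pipeline markers exist so subsets of the suite can be selected from the command line; a conftest.py sketch registering them (so pytest does not warn about unknown markers) would be:

# conftest.py sketch; the suite can then be narrowed with, e.g.
#   pytest -m "rnaseq and pipeline" tests/test_pipeline_rnaseq.py
def pytest_configure(config):
    config.addinivalue_line("markers", "rnaseq: RNA-seq specific tests")
    config.addinivalue_line("markers", "pipeline: tests that run a whole pipeline")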
e9c0aaf5434f30d66e9b3827261c8576e16e7083
|
get_data.py
|
get_data.py
|
#!/usr/bin/env python
from rajab_roza import RajabRoza
lat = 51.0 + 32.0/60.0
lng = -22.0/60.0
start_year = 1435
end_year = 1436
filename = "1435-1436.yml"
if __name__ == '__main__':
rajab_roza = RajabRoza(lat, lng, start_year, end_year)
rajab_roza.get_roza_durations()
rajab_roza.save_to_yaml(filename)
|
Add basic script to get roza durations for assigned parameters.
|
Add basic script to get roza durations for assigned parameters.
|
Python
|
mit
|
mygulamali/rajab_roza
|
Add basic script to get roza durations for assigned parameters.
|
#!/usr/bin/env python
from rajab_roza import RajabRoza
lat = 51.0 + 32.0/60.0
lng = -22.0/60.0
start_year = 1435
end_year = 1436
filename = "1435-1436.yml"
if __name__ == '__main__':
rajab_roza = RajabRoza(lat, lng, start_year, end_year)
rajab_roza.get_roza_durations()
rajab_roza.save_to_yaml(filename)
|
<commit_before><commit_msg>Add basic script to get roza durations for assigned parameters.<commit_after>
|
#!/usr/bin/env python
from rajab_roza import RajabRoza
lat = 51.0 + 32.0/60.0
lng = -22.0/60.0
start_year = 1435
end_year = 1436
filename = "1435-1436.yml"
if __name__ == '__main__':
rajab_roza = RajabRoza(lat, lng, start_year, end_year)
rajab_roza.get_roza_durations()
rajab_roza.save_to_yaml(filename)
|
Add basic script to get roza durations for assigned parameters.#!/usr/bin/env python
from rajab_roza import RajabRoza
lat = 51.0 + 32.0/60.0
lng = -22.0/60.0
start_year = 1435
end_year = 1436
filename = "1435-1436.yml"
if __name__ == '__main__':
rajab_roza = RajabRoza(lat, lng, start_year, end_year)
rajab_roza.get_roza_durations()
rajab_roza.save_to_yaml(filename)
|
<commit_before><commit_msg>Add basic script to get roza durations for assigned parameters.<commit_after>#!/usr/bin/env python
from rajab_roza import RajabRoza
lat = 51.0 + 32.0/60.0
lng = -22.0/60.0
start_year = 1435
end_year = 1436
filename = "1435-1436.yml"
if __name__ == '__main__':
rajab_roza = RajabRoza(lat, lng, start_year, end_year)
rajab_roza.get_roza_durations()
rajab_roza.save_to_yaml(filename)
|
|
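The lat/lng literals above are degrees-plus-minutes arithmetic (51 deg 32 min N, 0 deg 22 min W — roughly London, though the record does not say so); a small helper makes the conversion explicit:

def dms_to_decimal(degrees, minutes, direction):
    # 'S' and 'W' coordinates are negative in decimal form
    value = degrees + minutes / 60.0
    return -value if direction in ('S', 'W') else value

assert abs(dms_to_decimal(51, 32, 'N') - (51.0 + 32.0 / 60.0)) < 1e-9
assert abs(dms_to_decimal(0, 22, 'W') - (-22.0 / 60.0)) < 1e-9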
934cafa73a15705c4d7c33733c61f8c272b9971e
|
profile_basic_test.py
|
profile_basic_test.py
|
from pyresttest import resttest
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids
import cProfile
cProfile.run('resttest.command_line_run(["http://localhost:8000","pyresttest/content-test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["http://localhost:8000","schema_test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["https://api.github.com","github_api_test.yaml"])', sort='tottime')
|
Add script that profiles basic test run
|
Add script that profiles basic test run
|
Python
|
apache-2.0
|
svanoort/pyresttest,suvarnaraju/pyresttest,suvarnaraju/pyresttest,alazaro/pyresttest,satish-suradkar/pyresttest,MorrisJobke/pyresttest,netjunki/pyresttest,wirewit/pyresttest,janusnic/pyresttest,TimYi/pyresttest,wirewit/pyresttest,MorrisJobke/pyresttest,TimYi/pyresttest,sunyanhui/pyresttest,netjunki/pyresttest,janusnic/pyresttest,holdenweb/pyresttest,svanoort/pyresttest,alazaro/pyresttest,satish-suradkar/pyresttest,holdenweb/pyresttest,sunyanhui/pyresttest
|
Add script that profiles basic test run
|
from pyresttest import resttest
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids
import cProfile
cProfile.run('resttest.command_line_run(["http://localhost:8000","pyresttest/content-test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["http://localhost:8000","schema_test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["https://api.github.com","github_api_test.yaml"])', sort='tottime')
|
<commit_before><commit_msg>Add script that profiles basic test run<commit_after>
|
from pyresttest import resttest
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids
import cProfile
cProfile.run('resttest.command_line_run(["http://localhost:8000","pyresttest/content-test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["http://localhost:8000","schema_test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["https://api.github.com","github_api_test.yaml"])', sort='tottime')
|
Add script that profiles basic test runfrom pyresttest import resttest
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids
import cProfile
cProfile.run('resttest.command_line_run(["http://localhost:8000","pyresttest/content-test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["http://localhost:8000","schema_test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["https://api.github.com","github_api_test.yaml"])', sort='tottime')
|
<commit_before><commit_msg>Add script that profiles basic test run<commit_after>from pyresttest import resttest
from pyresttest.benchmarks import Benchmark
from pyresttest.binding import Context
from pyresttest.contenthandling import ContentHandler
from pyresttest.generators import factory_generate_ids
import cProfile
cProfile.run('resttest.command_line_run(["http://localhost:8000","pyresttest/content-test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["http://localhost:8000","schema_test.yaml"])', sort='tottime')
#cProfile.run('resttest.command_line_run(["https://api.github.com","github_api_test.yaml"])', sort='tottime')
|
|
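A variant of the same run that writes the profile to disk, so pstats can re-sort and truncate it instead of printing everything once:

import cProfile
import pstats

cProfile.run(
    'resttest.command_line_run(["http://localhost:8000", "pyresttest/content-test.yaml"])',
    'content-test.prof')
stats = pstats.Stats('content-test.prof')
stats.sort_stats('tottime').print_stats(25)  # top 25 functions by own time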
11f2470adb5b52e32e08a041bf868591e858e4ed
|
doggunk4.py
|
doggunk4.py
|
import curses
# -*- coding: utf-8 -*-
dogdance1=[
' ▄','▄', '▄', #3
'▄▄▄▄', '▄', '▄', #6
'▄\n', ' ', ' ',
' ', '▄', '▄\n',#12
' ', ' ' , '▄',#15
' ', '▄', ' ',#18
'▄▄', '▄▄\n', ' ',#21
' ', '▄', ' ',#24
' ', '▄', ' ',#27
'▄', ' ', '▄▄▄▄▄▄',#30
'▄\n', ' ', ' ',#33
'▄▄▄▄',' ',' ',#36
'▀\n', ' ', ' ',#39
' \n', ' ', ' ', #42
'▄', '▀\n', ' ',#45
' ', ' ', '▄▄ ▄▄▄▄ ▄▄ ', #48
' \n', ' ', ' ',#51
' ', '▄', '▀▀',#54
'▄', '▀', ' ▀',#57
'▄', '▀', ' ',#60
' ', ' ', '▄',#63
'▀\n', ' ', '▀ ▀']#66
dog1pallete=[1,3,1, #3
3,1,3,#6
1,1,3,#9
2,3,1, #12
4,2,3, #15
2,3,2, #18
3,1,4, #21
2,2,2, #24
4,3,2, #27
2,2,3, #30
1,4,2, #33
3,2,4, #36
1,4,2, #39
4,4,2, #42
2,1,1, #45
4,2,2, #48
4,1,4, #51
2,2,1, #54
2,1,1, #57
2,1,1, #60
4,2,2, #63
             1,1,1] #66
dogdance2=[' ▄▄▄▄▄▄▄▄▄▄',
' ▄▄',
' ▄ ▄ ▄▄▄▄ ▄▄▄',
' ▄ ▄ ▄ ▄▄▄ ',
' ▄▄▄▄ ',
' ',
' ▄▀',
' ▄▄ ▄▄▄▄ ▄▄ ',
' ▀▄▀ ▄▀ ▄▀▀▄▀',
' ▀ ▀']
def draw_dog1(scr):
i = 0
#for num in dog1pallete:
# scr.addstr(dogdance1[0][i],curses.color_pair(num))
# i = i + 1
#
for num in dog1pallete:
scr.addstr(dogdance1[i],curses.color_pair(num))
i = i + 1
|
WORK WORK WOKR ROWK KROW
|
WORK WORK WOKR ROWK KROW
|
Python
|
mit
|
David-OC/dancingdog,David-OC/dancingdog
|
WORK WORK WOKR ROWK KROW
|
import curses
# -*- coding: utf-8 -*-
dogdance1=[
' ▄','▄', '▄', #3
'▄▄▄▄', '▄', '▄', #6
'▄\n', ' ', ' ',
' ', '▄', '▄\n',#12
' ', ' ' , '▄',#15
' ', '▄', ' ',#18
'▄▄', '▄▄\n', ' ',#21
' ', '▄', ' ',#24
' ', '▄', ' ',#27
'▄', ' ', '▄▄▄▄▄▄',#30
'▄\n', ' ', ' ',#33
'▄▄▄▄',' ',' ',#36
'▀\n', ' ', ' ',#39
' \n', ' ', ' ', #42
'▄', '▀\n', ' ',#45
' ', ' ', '▄▄ ▄▄▄▄ ▄▄ ', #48
' \n', ' ', ' ',#51
' ', '▄', '▀▀',#54
'▄', '▀', ' ▀',#57
'▄', '▀', ' ',#60
' ', ' ', '▄',#63
'▀\n', ' ', '▀ ▀']#66
dog1pallete=[1,3,1, #3
3,1,3,#6
1,1,3,#9
2,3,1, #12
4,2,3, #15
2,3,2, #18
3,1,4, #21
2,2,2, #24
4,3,2, #27
2,2,3, #30
1,4,2, #33
3,2,4, #36
1,4,2, #39
4,4,2, #42
2,1,1, #45
4,2,2, #48
4,1,4, #51
2,2,1, #54
2,1,1, #57
2,1,1, #60
4,2,2, #63
             1,1,1] #66
dogdance2=[' ▄▄▄▄▄▄▄▄▄▄',
' ▄▄',
' ▄ ▄ ▄▄▄▄ ▄▄▄',
' ▄ ▄ ▄ ▄▄▄ ',
' ▄▄▄▄ ',
' ',
' ▄▀',
' ▄▄ ▄▄▄▄ ▄▄ ',
' ▀▄▀ ▄▀ ▄▀▀▄▀',
' ▀ ▀']
def draw_dog1(scr):
i = 0
#for num in dog1pallete:
# scr.addstr(dogdance1[0][i],curses.color_pair(num))
# i = i + 1
#
for num in dog1pallete:
scr.addstr(dogdance1[i],curses.color_pair(num))
i = i + 1
|
<commit_before><commit_msg>WORK WORK WOKR ROWK KROW<commit_after>
|
import curses
# -*- coding: utf-8 -*-
dogdance1=[
' ▄','▄', '▄', #3
'▄▄▄▄', '▄', '▄', #6
'▄\n', ' ', ' ',
' ', '▄', '▄\n',#12
' ', ' ' , '▄',#15
' ', '▄', ' ',#18
'▄▄', '▄▄\n', ' ',#21
' ', '▄', ' ',#24
' ', '▄', ' ',#27
'▄', ' ', '▄▄▄▄▄▄',#30
'▄\n', ' ', ' ',#33
'▄▄▄▄',' ',' ',#36
'▀\n', ' ', ' ',#39
' \n', ' ', ' ', #42
'▄', '▀\n', ' ',#45
' ', ' ', '▄▄ ▄▄▄▄ ▄▄ ', #48
' \n', ' ', ' ',#51
' ', '▄', '▀▀',#54
'▄', '▀', ' ▀',#57
'▄', '▀', ' ',#60
' ', ' ', '▄',#63
'▀\n', ' ', '▀ ▀']#66
dog1pallete=[1,3,1, #3
3,1,3,#6
1,1,3,#9
2,3,1, #12
4,2,3, #15
2,3,2, #18
3,1,4, #21
2,2,2, #24
4,3,2, #27
2,2,3, #30
1,4,2, #33
3,2,4, #36
1,4,2, #39
4,4,2, #42
2,1,1, #45
4,2,2, #48
4,1,4, #51
2,2,1, #54
2,1,1, #57
2,1,1, #60
4,2,2, #63
             1,1,1] #66
dogdance2=[' ▄▄▄▄▄▄▄▄▄▄',
' ▄▄',
' ▄ ▄ ▄▄▄▄ ▄▄▄',
' ▄ ▄ ▄ ▄▄▄ ',
' ▄▄▄▄ ',
' ',
' ▄▀',
' ▄▄ ▄▄▄▄ ▄▄ ',
' ▀▄▀ ▄▀ ▄▀▀▄▀',
' ▀ ▀']
def draw_dog1(scr):
i = 0
#for num in dog1pallete:
# scr.addstr(dogdance1[0][i],curses.color_pair(num))
# i = i + 1
#
for num in dog1pallete:
scr.addstr(dogdance1[i],curses.color_pair(num))
i = i + 1
|
WORK WORK WOKR ROWK KROWimport curses
# -*- coding: utf-8 -*-
dogdance1=[
' ▄','▄', '▄', #3
'▄▄▄▄', '▄', '▄', #6
'▄\n', ' ', ' ',
' ', '▄', '▄\n',#12
' ', ' ' , '▄',#15
' ', '▄', ' ',#18
'▄▄', '▄▄\n', ' ',#21
' ', '▄', ' ',#24
' ', '▄', ' ',#27
'▄', ' ', '▄▄▄▄▄▄',#30
'▄\n', ' ', ' ',#33
'▄▄▄▄',' ',' ',#36
'▀\n', ' ', ' ',#39
' \n', ' ', ' ', #42
'▄', '▀\n', ' ',#45
' ', ' ', '▄▄ ▄▄▄▄ ▄▄ ', #48
' \n', ' ', ' ',#51
' ', '▄', '▀▀',#54
'▄', '▀', ' ▀',#57
'▄', '▀', ' ',#60
' ', ' ', '▄',#63
'▀\n', ' ', '▀ ▀']#66
dog1pallete=[1,3,1, #3
3,1,3,#6
1,1,3,#9
2,3,1, #12
4,2,3, #15
2,3,2, #18
3,1,4, #21
2,2,2, #24
4,3,2, #27
2,2,3, #30
1,4,2, #33
3,2,4, #36
1,4,2, #39
4,4,2, #42
2,1,1, #45
4,2,2, #48
4,1,4, #51
2,2,1, #54
2,1,1, #57
2,1,1, #60
4,2,2, #63
             1,1,1] #66
dogdance2=[' ▄▄▄▄▄▄▄▄▄▄',
' ▄▄',
' ▄ ▄ ▄▄▄▄ ▄▄▄',
' ▄ ▄ ▄ ▄▄▄ ',
' ▄▄▄▄ ',
' ',
' ▄▀',
' ▄▄ ▄▄▄▄ ▄▄ ',
' ▀▄▀ ▄▀ ▄▀▀▄▀',
' ▀ ▀']
def draw_dog1(scr):
i = 0
#for num in dog1pallete:
# scr.addstr(dogdance1[0][i],curses.color_pair(num))
# i = i + 1
#
for num in dog1pallete:
scr.addstr(dogdance1[i],curses.color_pair(num))
i = i + 1
|
<commit_before><commit_msg>WORK WORK WOKR ROWK KROW<commit_after>import curses
# -*- coding: utf-8 -*-
dogdance1=[
' ▄','▄', '▄', #3
'▄▄▄▄', '▄', '▄', #6
'▄\n', ' ', ' ',
' ', '▄', '▄\n',#12
' ', ' ' , '▄',#15
' ', '▄', ' ',#18
'▄▄', '▄▄\n', ' ',#21
' ', '▄', ' ',#24
' ', '▄', ' ',#27
'▄', ' ', '▄▄▄▄▄▄',#30
'▄\n', ' ', ' ',#33
'▄▄▄▄',' ',' ',#36
'▀\n', ' ', ' ',#39
' \n', ' ', ' ', #42
'▄', '▀\n', ' ',#45
' ', ' ', '▄▄ ▄▄▄▄ ▄▄ ', #48
' \n', ' ', ' ',#51
' ', '▄', '▀▀',#54
'▄', '▀', ' ▀',#57
'▄', '▀', ' ',#60
' ', ' ', '▄',#63
'▀\n', ' ', '▀ ▀']#66
dog1pallete=[1,3,1, #3
3,1,3,#6
1,1,3,#9
2,3,1, #12
4,2,3, #15
2,3,2, #18
3,1,4, #21
2,2,2, #24
4,3,2, #27
2,2,3, #30
1,4,2, #33
3,2,4, #36
1,4,2, #39
4,4,2, #42
2,1,1, #45
4,2,2, #48
4,1,4, #51
2,2,1, #54
2,1,1, #57
2,1,1, #60
4,2,2, #63
             1,1,1] #66
dogdance2=[' ▄▄▄▄▄▄▄▄▄▄',
' ▄▄',
' ▄ ▄ ▄▄▄▄ ▄▄▄',
' ▄ ▄ ▄ ▄▄▄ ',
' ▄▄▄▄ ',
' ',
' ▄▀',
' ▄▄ ▄▄▄▄ ▄▄ ',
' ▀▄▀ ▄▀ ▄▀▀▄▀',
' ▀ ▀']
def draw_dog1(scr):
i = 0
#for num in dog1pallete:
# scr.addstr(dogdance1[0][i],curses.color_pair(num))
# i = i + 1
#
for num in dog1pallete:
scr.addstr(dogdance1[i],curses.color_pair(num))
i = i + 1
|
|
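draw_dog1 assumes curses colour pairs 1-4 are already initialised; nothing in the commit sets them up. A minimal, hypothetical driver for it (the pair-to-colour mapping below is invented, any curses colours would do):

import curses

def main(stdscr):
    curses.start_color()
    # Assumed assignments for the pair numbers 1-4 used by dog1pallete.
    curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLACK)
    curses.init_pair(2, curses.COLOR_YELLOW, curses.COLOR_BLACK)
    curses.init_pair(3, curses.COLOR_CYAN, curses.COLOR_BLACK)
    curses.init_pair(4, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
    draw_dog1(stdscr)  # the function defined in the commit above
    stdscr.getch()

curses.wrapper(main)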
2fa1e73c44b03ab81e72d14863fd80cff010f0d7
|
tests/test_comparisons.py
|
tests/test_comparisons.py
|
from itertools import combinations
from unittest import TestCase
from ordering import Ordering
class TestComparisons(TestCase):
def setUp(self) -> None:
self.ordering = Ordering[int]()
self.ordering.insert_start(0)
self.ordering.insert_after(0, 1)
self.ordering.insert_before(0, 2)
self.ordering.insert_end(3)
self.ordering.insert_start(4)
self.ordering.insert_after(3, 5)
self.ordering.insert_before(3, 6)
self.ordering_list = [4, 2, 0, 1, 6, 3, 5]
def test_iterate_correct_order(self) -> None:
self.assertListEqual(
list(self.ordering),
self.ordering_list
)
def test_comparisons(self) -> None:
for a, b in combinations(self.ordering_list, 2):
self.assertTrue(self.ordering.compare(a, b))
self.assertFalse(self.ordering.compare(b, a))
|
Add unit test for complicated comparisons
|
Add unit test for complicated comparisons
|
Python
|
mit
|
madman-bob/python-order-maintenance
|
Add unit test for complicated comparisons
|
from itertools import combinations
from unittest import TestCase
from ordering import Ordering
class TestComparisons(TestCase):
def setUp(self) -> None:
self.ordering = Ordering[int]()
self.ordering.insert_start(0)
self.ordering.insert_after(0, 1)
self.ordering.insert_before(0, 2)
self.ordering.insert_end(3)
self.ordering.insert_start(4)
self.ordering.insert_after(3, 5)
self.ordering.insert_before(3, 6)
self.ordering_list = [4, 2, 0, 1, 6, 3, 5]
def test_iterate_correct_order(self) -> None:
self.assertListEqual(
list(self.ordering),
self.ordering_list
)
def test_comparisons(self) -> None:
for a, b in combinations(self.ordering_list, 2):
self.assertTrue(self.ordering.compare(a, b))
self.assertFalse(self.ordering.compare(b, a))
|
<commit_before><commit_msg>Add unit test for complicated comparisons<commit_after>
|
from itertools import combinations
from unittest import TestCase
from ordering import Ordering
class TestComparisons(TestCase):
def setUp(self) -> None:
self.ordering = Ordering[int]()
self.ordering.insert_start(0)
self.ordering.insert_after(0, 1)
self.ordering.insert_before(0, 2)
self.ordering.insert_end(3)
self.ordering.insert_start(4)
self.ordering.insert_after(3, 5)
self.ordering.insert_before(3, 6)
self.ordering_list = [4, 2, 0, 1, 6, 3, 5]
def test_iterate_correct_order(self) -> None:
self.assertListEqual(
list(self.ordering),
self.ordering_list
)
def test_comparisons(self) -> None:
for a, b in combinations(self.ordering_list, 2):
self.assertTrue(self.ordering.compare(a, b))
self.assertFalse(self.ordering.compare(b, a))
|
Add unit test for complicated comparisonsfrom itertools import combinations
from unittest import TestCase
from ordering import Ordering
class TestComparisons(TestCase):
def setUp(self) -> None:
self.ordering = Ordering[int]()
self.ordering.insert_start(0)
self.ordering.insert_after(0, 1)
self.ordering.insert_before(0, 2)
self.ordering.insert_end(3)
self.ordering.insert_start(4)
self.ordering.insert_after(3, 5)
self.ordering.insert_before(3, 6)
self.ordering_list = [4, 2, 0, 1, 6, 3, 5]
def test_iterate_correct_order(self) -> None:
self.assertListEqual(
list(self.ordering),
self.ordering_list
)
def test_comparisons(self) -> None:
for a, b in combinations(self.ordering_list, 2):
self.assertTrue(self.ordering.compare(a, b))
self.assertFalse(self.ordering.compare(b, a))
|
<commit_before><commit_msg>Add unit test for complicated comparisons<commit_after>from itertools import combinations
from unittest import TestCase
from ordering import Ordering
class TestComparisons(TestCase):
def setUp(self) -> None:
self.ordering = Ordering[int]()
self.ordering.insert_start(0)
self.ordering.insert_after(0, 1)
self.ordering.insert_before(0, 2)
self.ordering.insert_end(3)
self.ordering.insert_start(4)
self.ordering.insert_after(3, 5)
self.ordering.insert_before(3, 6)
self.ordering_list = [4, 2, 0, 1, 6, 3, 5]
def test_iterate_correct_order(self) -> None:
self.assertListEqual(
list(self.ordering),
self.ordering_list
)
def test_comparisons(self) -> None:
for a, b in combinations(self.ordering_list, 2):
self.assertTrue(self.ordering.compare(a, b))
self.assertFalse(self.ordering.compare(b, a))
|
|
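The Ordering API exercised above maintains a total order with fast insertion and comparison. As a point of reference only, not the package's actual implementation, the same observable behaviour can be modelled with a plain list at O(n) cost per operation:

class NaiveOrdering:
    """List-backed reference model: compare(a, b) is True iff a precedes b."""
    def __init__(self):
        self._items = []
    def insert_start(self, item):
        self._items.insert(0, item)
    def insert_end(self, item):
        self._items.append(item)
    def insert_after(self, ref, item):
        self._items.insert(self._items.index(ref) + 1, item)
    def insert_before(self, ref, item):
        self._items.insert(self._items.index(ref), item)
    def compare(self, a, b):
        return self._items.index(a) < self._items.index(b)
    def __iter__(self):
        return iter(self._items)

Replaying the setUp sequence against NaiveOrdering yields the same expected order [4, 2, 0, 1, 6, 3, 5] that the test pins down.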
5fa0ad818cbb1afd17d819ec6649430d16726f7c
|
euler029.py
|
euler029.py
|
#!/usr/bin/python
power_list = set()
for a in range(2, 101):
for b in range(2, 101):
power_list.add(a ** b)
print(len(power_list))
|
Add solution for problem 29
|
Add solution for problem 29
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 29
|
#!/usr/bin/python
power_list = set()
for a in range(2, 101):
for b in range(2, 101):
power_list.add(a ** b)
print(len(power_list))
|
<commit_before><commit_msg>Add solution for problem 29<commit_after>
|
#!/usr/bin/python
power_list = set()
for a in range(2, 101):
for b in range(2, 101):
power_list.add(a ** b)
print(len(power_list))
|
Add solution for problem 29#!/usr/bin/python
power_list = set()
for a in range(2, 101):
for b in range(2, 101):
power_list.add(a ** b)
print(len(power_list))
|
<commit_before><commit_msg>Add solution for problem 29<commit_after>#!/usr/bin/python
power_list = set()
for a in range(2, 101):
for b in range(2, 101):
power_list.add(a ** b)
print(len(power_list))
|
|
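The nested loops rely on the set to deduplicate collisions such as 2 ** 4 and 4 ** 2, which both evaluate to 16. The same computation collapses to a single set comprehension, and for 2 <= a, b <= 100 it comes out to 9183 distinct terms:

print(len({a ** b for a in range(2, 101) for b in range(2, 101)}))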
09ff9e9967ad53b6ee2bff5cb38874d3b2e6d35a
|
build/unix/rootmapcat.py
|
build/unix/rootmapcat.py
|
#! /usr/bin/env python
'''
A utility to smartly "cat" rootmap files.
'''
from __future__ import print_function
import argparse
import sys
#-------------------------------------------------------------------------------
def getParser():
parser = argparse.ArgumentParser(description='Get input rootmaps and output rootmap.')
parser.add_argument("rootmaps", nargs='+', help='The name of the rootmaps separated by a space.')
parser.add_argument("-o", "--output", dest='output',
default="all.rootmap", help='The output rootmap name.')
return parser
#-------------------------------------------------------------------------------
class Rootmap(object):
def __init__(self):
self.fwdDecls = []
self.sections = {}
def ParseAndAddMany(self,rootmapnames):
for rootmapname in rootmapnames:
self.ParseAndAdd(rootmapname)
def ParseAndAdd(self,rootmapname):
ifile = open(rootmapname)
rootmapLines = ifile.readlines()
ifile.close()
fwdDeclsSet = set()
fwdDeclsSection = False
keysSection = True
for line in rootmapLines:
if line.startswith("{ decls }"):
fwdDeclsSection = True
keysSection = False
continue
if line.startswith("[ "):
fwdDeclsSection = False
keysSection = True
secName = line
if line == "\n": continue
if fwdDeclsSection:
fwdDeclsSet.add(line)
if keysSection:
if self.sections.has_key(secName):
self.sections[secName].append(line)
else:
self.sections[secName] = []
self.fwdDecls.extend(fwdDeclsSet)
def Print(self,outrootmapname):
# Now we reduce the fwd declarations
self.fwdDecls = sorted(list(set(self.fwdDecls)))
        ofile = open(outrootmapname, "w")
if len(self.fwdDecls) != 0:
ofile.write("{ decls }\n")
for fwdDecl in self.fwdDecls:
ofile.write(fwdDecl)
ofile.write("\n")
for libname, keylines in self.sections.items():
ofile.write(libname)
for keyline in keylines:
ofile.write(keyline)
ofile.write("\n")
ofile.close()
#-------------------------------------------------------------------------------
def merge(rmapsnames, outrootmapname):
rm = Rootmap()
rm.ParseAndAddMany(rmapsnames)
rm.Print(outrootmapname)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
parser = getParser()
args = parser.parse_args()
rmapsnames = args.rootmaps
outrootmapname = args.output
sys.exit(merge(rmapsnames, outrootmapname))
|
Concatenate rootmaps in a smart way
|
Concatenate rootmaps in a smart way
- Avoid duplicates in the fwd declaration section
- Reduce keys sections if the library is the same
Merging rootmaps speeds up ROOT startup, especially on file systems
like afs or cvmfs.
|
Python
|
lgpl-2.1
|
thomaskeck/root,CristinaCristescu/root,karies/root,buuck/root,CristinaCristescu/root,agarciamontoro/root,gganis/root,veprbl/root,zzxuanyuan/root,root-mirror/root,abhinavmoudgil95/root,olifre/root,gbitzes/root,mhuwiler/rootauto,buuck/root,bbockelm/root,karies/root,karies/root,sawenzel/root,CristinaCristescu/root,gganis/root,Y--/root,thomaskeck/root,veprbl/root,root-mirror/root,davidlt/root,BerserkerTroll/root,gganis/root,krafczyk/root,thomaskeck/root,pspe/root,beniz/root,root-mirror/root,pspe/root,CristinaCristescu/root,zzxuanyuan/root-compressor-dummy,gbitzes/root,bbockelm/root,krafczyk/root,root-mirror/root,zzxuanyuan/root,agarciamontoro/root,krafczyk/root,veprbl/root,zzxuanyuan/root,simonpf/root,Y--/root,davidlt/root,karies/root,simonpf/root,simonpf/root,karies/root,buuck/root,beniz/root,zzxuanyuan/root,satyarth934/root,gganis/root,lgiommi/root,pspe/root,mhuwiler/rootauto,root-mirror/root,BerserkerTroll/root,BerserkerTroll/root,simonpf/root,agarciamontoro/root,Y--/root,gbitzes/root,bbockelm/root,thomaskeck/root,zzxuanyuan/root-compressor-dummy,bbockelm/root,BerserkerTroll/root,simonpf/root,beniz/root,abhinavmoudgil95/root,CristinaCristescu/root,olifre/root,buuck/root,gbitzes/root,olifre/root,georgtroska/root,sawenzel/root,BerserkerTroll/root,lgiommi/root,mhuwiler/rootauto,abhinavmoudgil95/root,satyarth934/root,beniz/root,olifre/root,davidlt/root,mhuwiler/rootauto,agarciamontoro/root,simonpf/root,veprbl/root,veprbl/root,root-mirror/root,buuck/root,georgtroska/root,gbitzes/root,mhuwiler/rootauto,karies/root,bbockelm/root,bbockelm/root,karies/root,krafczyk/root,BerserkerTroll/root,simonpf/root,Y--/root,beniz/root,karies/root,georgtroska/root,georgtroska/root,zzxuanyuan/root,gganis/root,gganis/root,zzxuanyuan/root-compressor-dummy,zzxuanyuan/root,georgtroska/root,lgiommi/root,beniz/root,mhuwiler/rootauto,BerserkerTroll/root,satyarth934/root,Y--/root,gganis/root,davidlt/root,pspe/root,veprbl/root,lgiommi/root,olifre/root,Y--/root,buuck/root,gbitzes/root,krafczyk/root,mhuwiler/rootauto,agarciamontoro/root,pspe/root,davidlt/root,olifre/root,agarciamontoro/root,root-mirror/root,gganis/root,CristinaCristescu/root,simonpf/root,sawenzel/root,zzxuanyuan/root-compressor-dummy,davidlt/root,agarciamontoro/root,simonpf/root,krafczyk/root,georgtroska/root,davidlt/root,lgiommi/root,Y--/root,CristinaCristescu/root,sawenzel/root,krafczyk/root,thomaskeck/root,davidlt/root,zzxuanyuan/root,pspe/root,georgtroska/root,pspe/root,thomaskeck/root,sawenzel/root,abhinavmoudgil95/root,davidlt/root,sawenzel/root,sawenzel/root,gbitzes/root,buuck/root,zzxuanyuan/root,zzxuanyuan/root-compressor-dummy,satyarth934/root,krafczyk/root,pspe/root,satyarth934/root,karies/root,karies/root,thomaskeck/root,gganis/root,Y--/root,Y--/root,satyarth934/root,CristinaCristescu/root,olifre/root,abhinavmoudgil95/root,thomaskeck/root,pspe/root,agarciamontoro/root,georgtroska/root,mhuwiler/rootauto,olifre/root,gganis/root,thomaskeck/root,root-mirror/root,CristinaCristescu/root,georgtroska/root,krafczyk/root,zzxuanyuan/root-compressor-dummy,bbockelm/root,olifre/root,mhuwiler/rootauto,Y--/root,mhuwiler/rootauto,satyarth934/root,georgtroska/root,satyarth934/root,bbockelm/root,gganis/root,davidlt/root,lgiommi/root,simonpf/root,BerserkerTroll/root,zzxuanyuan/root,abhinavmoudgil95/root,beniz/root,lgiommi/root,zzxuanyuan/root,root-mirror/root,bbockelm/root,lgiommi/root,veprbl/root,CristinaCristescu/root,buuck/root,root-mirror/root,zzxuanyuan/root-compressor-dummy,Y--/root,lgiommi/root,olifre/root,buuck/root,olifre/root,BerserkerTroll/root,mhuwiler/ro
otauto,beniz/root,BerserkerTroll/root,lgiommi/root,beniz/root,buuck/root,zzxuanyuan/root,davidlt/root,satyarth934/root,zzxuanyuan/root-compressor-dummy,beniz/root,krafczyk/root,satyarth934/root,abhinavmoudgil95/root,abhinavmoudgil95/root,zzxuanyuan/root-compressor-dummy,agarciamontoro/root,georgtroska/root,veprbl/root,thomaskeck/root,abhinavmoudgil95/root,sawenzel/root,lgiommi/root,veprbl/root,pspe/root,CristinaCristescu/root,sawenzel/root,pspe/root,beniz/root,simonpf/root,bbockelm/root,bbockelm/root,buuck/root,BerserkerTroll/root,agarciamontoro/root,satyarth934/root,agarciamontoro/root,gbitzes/root,gbitzes/root,abhinavmoudgil95/root,sawenzel/root,gbitzes/root,gbitzes/root,zzxuanyuan/root-compressor-dummy,krafczyk/root,zzxuanyuan/root-compressor-dummy,veprbl/root,sawenzel/root,abhinavmoudgil95/root,veprbl/root,zzxuanyuan/root,root-mirror/root,karies/root
|
Concatenate rootmaps in a smart way
- Avoid duplicates in the fwd declaration section
- Reduce keys sections if the library is the same
Merging rootmaps speeds up ROOT startup, especially on file systems
like afs or cvmfs.
|
#! /usr/bin/env python
'''
A utility to smartly "cat" rootmap files.
'''
from __future__ import print_function
import argparse
import sys
#-------------------------------------------------------------------------------
def getParser():
parser = argparse.ArgumentParser(description='Get input rootmaps and output rootmap.')
parser.add_argument("rootmaps", nargs='+', help='The name of the rootmaps separated by a space.')
parser.add_argument("-o", "--output", dest='output',
default="all.rootmap", help='The output rootmap name.')
return parser
#-------------------------------------------------------------------------------
class Rootmap(object):
def __init__(self):
self.fwdDecls = []
self.sections = {}
def ParseAndAddMany(self,rootmapnames):
for rootmapname in rootmapnames:
self.ParseAndAdd(rootmapname)
def ParseAndAdd(self,rootmapname):
ifile = open(rootmapname)
rootmapLines = ifile.readlines()
ifile.close()
fwdDeclsSet = set()
fwdDeclsSection = False
keysSection = True
for line in rootmapLines:
if line.startswith("{ decls }"):
fwdDeclsSection = True
keysSection = False
continue
if line.startswith("[ "):
fwdDeclsSection = False
keysSection = True
secName = line
if line == "\n": continue
if fwdDeclsSection:
fwdDeclsSet.add(line)
if keysSection:
if self.sections.has_key(secName):
self.sections[secName].append(line)
else:
self.sections[secName] = []
self.fwdDecls.extend(fwdDeclsSet)
def Print(self,outrootmapname):
# Now we reduce the fwd declarations
self.fwdDecls = sorted(list(set(self.fwdDecls)))
        ofile = open(outrootmapname, "w")
if len(self.fwdDecls) != 0:
ofile.write("{ decls }\n")
for fwdDecl in self.fwdDecls:
ofile.write(fwdDecl)
ofile.write("\n")
for libname, keylines in self.sections.items():
ofile.write(libname)
for keyline in keylines:
ofile.write(keyline)
ofile.write("\n")
ofile.close()
#-------------------------------------------------------------------------------
def merge(rmapsnames, outrootmapname):
rm = Rootmap()
rm.ParseAndAddMany(rmapsnames)
rm.Print(outrootmapname)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
parser = getParser()
args = parser.parse_args()
rmapsnames = args.rootmaps
outrootmapname = args.output
sys.exit(merge(rmapsnames, outrootmapname))
|
<commit_before><commit_msg>Concatenate rootmaps in a smart way
- Avoid duplicates in the fwd declaration section
- Reduce keys sections if the library is the same
Merging rootmaps speeds up ROOT startup, especially on file systems
like afs or cvmfs.<commit_after>
|
#! /usr/bin/env python
'''
A utility to smartly "cat" rootmap files.
'''
from __future__ import print_function
import argparse
import sys
#-------------------------------------------------------------------------------
def getParser():
parser = argparse.ArgumentParser(description='Get input rootmaps and output rootmap.')
parser.add_argument("rootmaps", nargs='+', help='The name of the rootmaps separated by a space.')
parser.add_argument("-o", "--output", dest='output',
default="all.rootmap", help='The output rootmap name.')
return parser
#-------------------------------------------------------------------------------
class Rootmap(object):
def __init__(self):
self.fwdDecls = []
self.sections = {}
def ParseAndAddMany(self,rootmapnames):
for rootmapname in rootmapnames:
self.ParseAndAdd(rootmapname)
def ParseAndAdd(self,rootmapname):
ifile = open(rootmapname)
rootmapLines = ifile.readlines()
ifile.close()
fwdDeclsSet = set()
fwdDeclsSection = False
keysSection = True
for line in rootmapLines:
if line.startswith("{ decls }"):
fwdDeclsSection = True
keysSection = False
continue
if line.startswith("[ "):
fwdDeclsSection = False
keysSection = True
secName = line
if line == "\n": continue
if fwdDeclsSection:
fwdDeclsSet.add(line)
if keysSection:
if self.sections.has_key(secName):
self.sections[secName].append(line)
else:
self.sections[secName] = []
self.fwdDecls.extend(fwdDeclsSet)
def Print(self,outrootmapname):
# Now we reduce the fwd declarations
self.fwdDecls = sorted(list(set(self.fwdDecls)))
        ofile = open(outrootmapname, "w")
if len(self.fwdDecls) != 0:
ofile.write("{ decls }\n")
for fwdDecl in self.fwdDecls:
ofile.write(fwdDecl)
ofile.write("\n")
for libname, keylines in self.sections.items():
ofile.write(libname)
for keyline in keylines:
ofile.write(keyline)
ofile.write("\n")
ofile.close()
#-------------------------------------------------------------------------------
def merge(rmapsnames, outrootmapname):
rm = Rootmap()
rm.ParseAndAddMany(rmapsnames)
rm.Print(outrootmapname)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
parser = getParser()
args = parser.parse_args()
rmapsnames = args.rootmaps
outrootmapname = args.output
sys.exit(merge(rmapsnames, outrootmapname))
|
Concatenate rootmaps in a smart way
- Avoid duplicates in the fwd declaration section
- Reduce keys sections if the library is the same
Merging rootmaps speeds up ROOT startup, especially on file systems
like afs or cvmfs.#! /usr/bin/env python
'''
A utility to smartly "cat" rootmap files.
'''
from __future__ import print_function
import argparse
import sys
#-------------------------------------------------------------------------------
def getParser():
parser = argparse.ArgumentParser(description='Get input rootmaps and output rootmap.')
parser.add_argument("rootmaps", nargs='+', help='The name of the rootmaps separated by a space.')
parser.add_argument("-o", "--output", dest='output',
default="all.rootmap", help='The output rootmap name.')
return parser
#-------------------------------------------------------------------------------
class Rootmap(object):
def __init__(self):
self.fwdDecls = []
self.sections = {}
def ParseAndAddMany(self,rootmapnames):
for rootmapname in rootmapnames:
self.ParseAndAdd(rootmapname)
def ParseAndAdd(self,rootmapname):
ifile = open(rootmapname)
rootmapLines = ifile.readlines()
ifile.close()
fwdDeclsSet = set()
fwdDeclsSection = False
keysSection = True
for line in rootmapLines:
if line.startswith("{ decls }"):
fwdDeclsSection = True
keysSection = False
continue
if line.startswith("[ "):
fwdDeclsSection = False
keysSection = True
secName = line
if line == "\n": continue
if fwdDeclsSection:
fwdDeclsSet.add(line)
if keysSection:
if self.sections.has_key(secName):
self.sections[secName].append(line)
else:
self.sections[secName] = []
self.fwdDecls.extend(fwdDeclsSet)
def Print(self,outrootmapname):
# Now we reduce the fwd declarations
self.fwdDecls = sorted(list(set(self.fwdDecls)))
        ofile = open(outrootmapname, "w")
if len(self.fwdDecls) != 0:
ofile.write("{ decls }\n")
for fwdDecl in self.fwdDecls:
ofile.write(fwdDecl)
ofile.write("\n")
for libname, keylines in self.sections.items():
ofile.write(libname)
for keyline in keylines:
ofile.write(keyline)
ofile.write("\n")
ofile.close()
#-------------------------------------------------------------------------------
def merge(rmapsnames, outrootmapname):
rm = Rootmap()
rm.ParseAndAddMany(rmapsnames)
rm.Print(outrootmapname)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
parser = getParser()
args = parser.parse_args()
rmapsnames = args.rootmaps
outrootmapname = args.output
sys.exit(merge(rmapsnames, outrootmapname))
|
<commit_before><commit_msg>Concatenate rootmaps in a smart way
- Avoid duplicates in the fwd declaration section
- Reduce keys sections if the library is the same
Merging rootmaps speeds up ROOT startup, especially on file systems
like afs or cvmfs.<commit_after>#! /usr/bin/env python
'''
A utility to smartly "cat" rootmap files.
'''
from __future__ import print_function
import argparse
import sys
#-------------------------------------------------------------------------------
def getParser():
parser = argparse.ArgumentParser(description='Get input rootmaps and output rootmap.')
parser.add_argument("rootmaps", nargs='+', help='The name of the rootmaps separated by a space.')
parser.add_argument("-o", "--output", dest='output',
default="all.rootmap", help='The output rootmap name.')
return parser
#-------------------------------------------------------------------------------
class Rootmap(object):
def __init__(self):
self.fwdDecls = []
self.sections = {}
def ParseAndAddMany(self,rootmapnames):
for rootmapname in rootmapnames:
self.ParseAndAdd(rootmapname)
def ParseAndAdd(self,rootmapname):
ifile = open(rootmapname)
rootmapLines = ifile.readlines()
ifile.close()
fwdDeclsSet = set()
fwdDeclsSection = False
keysSection = True
for line in rootmapLines:
if line.startswith("{ decls }"):
fwdDeclsSection = True
keysSection = False
continue
if line.startswith("[ "):
fwdDeclsSection = False
keysSection = True
secName = line
if line == "\n": continue
if fwdDeclsSection:
fwdDeclsSet.add(line)
if keysSection:
if self.sections.has_key(secName):
self.sections[secName].append(line)
else:
self.sections[secName] = []
self.fwdDecls.extend(fwdDeclsSet)
def Print(self,outrootmapname):
# Now we reduce the fwd declarations
self.fwdDecls = sorted(list(set(self.fwdDecls)))
        ofile = open(outrootmapname, "w")
if len(self.fwdDecls) != 0:
ofile.write("{ decls }\n")
for fwdDecl in self.fwdDecls:
ofile.write(fwdDecl)
ofile.write("\n")
for libname, keylines in self.sections.items():
ofile.write(libname)
for keyline in keylines:
ofile.write(keyline)
ofile.write("\n")
ofile.close()
#-------------------------------------------------------------------------------
def merge(rmapsnames, outrootmapname):
rm = Rootmap()
rm.ParseAndAddMany(rmapsnames)
rm.Print(outrootmapname)
return 0
#-------------------------------------------------------------------------------
if __name__ == "__main__":
parser = getParser()
args = parser.parse_args()
rmapsnames = args.rootmaps
outrootmapname = args.output
sys.exit(merge(rmapsnames, outrootmapname))
|
|
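From the parser above, a rootmap is an optional { decls } block of forward declarations followed by [ library ] sections of key lines. A made-up round trip using the script's own merge() (file names, library names, and keys are illustrative only) shows what merging should achieve:

# Illustrative inputs only; names and keys are invented.
with open("a.rootmap", "w") as fp:
    fp.write("{ decls }\nclass TA;\n\n[ libA.so ]\nclass TA\n")
with open("b.rootmap", "w") as fp:
    fp.write("{ decls }\nclass TA;\n\n[ libA.so ]\nclass TB\n")
merge(["a.rootmap", "b.rootmap"], "all.rootmap")
# all.rootmap should now contain "class TA;" once and a single
# [ libA.so ] section listing both "class TA" and "class TB".

Equivalently, from the shell: python rootmapcat.py a.rootmap b.rootmap -o all.rootmap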
0125d6a8617d6ebc95c74f923ce3107ec538a297
|
test_hacks_monkeypatching.py
|
test_hacks_monkeypatching.py
|
import sys
import io
import hacks
def test_stdout_monkeypatching():
# Let's first patch stdout manually:
real_stdout = sys.stdout
fake_stdout = io.StringIO()
sys.stdout = fake_stdout # While it is monkey-patched, other users
    print('Hello')           # may write something else into our fake_stdout.
sys.stdout = real_stdout
assert fake_stdout.getvalue() == 'Hello\n'
# Now let's patch stdout with hacks:
sys.stdout = hacks.friendly_callable('stdout')(sys.stdout)
# Nothing bad should happen for now
fake_stdout = io.StringIO()
@hacks.around('stdout')
def capture_stdout(real_stdout_ignored):
return fake_stdout
with hacks.use(capture_stdout): # hacks-aided monkeypatching should not
print('Hello') # affect other users of stdout
assert fake_stdout.getvalue() == 'Hello\n'
# The other benefit is that hacks stack nicely
@hacks.around('stdout')
def zomg_ponies(stdout_to_modify):
class Ponyfier:
def write(self, text):
stdout_to_modify.write('🐎' * len(text))
return Ponyfier()
with hacks.use(capture_stdout):
with hacks.use(zomg_ponies): # A second hack stacks on top
print('oh no') # of the other one reasonably
assert fake_stdout.getvalue() == 'Hello\n' + '🐎' * len('oh no\n')
|
Add new illustrative test: monkeypatching stdout
|
Add new illustrative test: monkeypatching stdout
|
Python
|
mit
|
t184256/hacks
|
Add new illustrative test: monkeypatching stdout
|
import sys
import io
import hacks
def test_stdout_monkeypatching():
# Let's first patch stdout manually:
real_stdout = sys.stdout
fake_stdout = io.StringIO()
sys.stdout = fake_stdout # While it is monkey-patched, other users
    print('Hello')           # may write something else into our fake_stdout.
sys.stdout = real_stdout
assert fake_stdout.getvalue() == 'Hello\n'
# Now let's patch stdout with hacks:
sys.stdout = hacks.friendly_callable('stdout')(sys.stdout)
# Nothing bad should happen for now
fake_stdout = io.StringIO()
@hacks.around('stdout')
def capture_stdout(real_stdout_ignored):
return fake_stdout
with hacks.use(capture_stdout): # hacks-aided monkeypatching should not
print('Hello') # affect other users of stdout
assert fake_stdout.getvalue() == 'Hello\n'
# The other benefit is that hacks stack nicely
@hacks.around('stdout')
def zomg_ponies(stdout_to_modify):
class Ponyfier:
def write(self, text):
stdout_to_modify.write('🐎' * len(text))
return Ponyfier()
with hacks.use(capture_stdout):
with hacks.use(zomg_ponies): # A second hack stacks on top
print('oh no') # of the other one reasonably
assert fake_stdout.getvalue() == 'Hello\n' + '🐎' * len('oh no\n')
|
<commit_before><commit_msg>Add new illustrative test: monkeypatching stdout<commit_after>
|
import sys
import io
import hacks
def test_stdout_monkeypatching():
# Let's first patch stdout manually:
real_stdout = sys.stdout
fake_stdout = io.StringIO()
sys.stdout = fake_stdout # While it is monkey-patched, other users
    print('Hello')           # may write something else into our fake_stdout.
sys.stdout = real_stdout
assert fake_stdout.getvalue() == 'Hello\n'
# Now let's patch stdout with hacks:
sys.stdout = hacks.friendly_callable('stdout')(sys.stdout)
# Nothing bad should happen for now
fake_stdout = io.StringIO()
@hacks.around('stdout')
def capture_stdout(real_stdout_ignored):
return fake_stdout
with hacks.use(capture_stdout): # hacks-aided monkeypatching should not
print('Hello') # affect other users of stdout
assert fake_stdout.getvalue() == 'Hello\n'
# The other benefit is that hacks stack nicely
@hacks.around('stdout')
def zomg_ponies(stdout_to_modify):
class Ponyfier:
def write(self, text):
stdout_to_modify.write('🐎' * len(text))
return Ponyfier()
with hacks.use(capture_stdout):
with hacks.use(zomg_ponies): # A second hack stacks on top
print('oh no') # of the other one reasonably
assert fake_stdout.getvalue() == 'Hello\n' + '🐎' * len('oh no\n')
|
Add new illustrative test: monkeypatching stdoutimport sys
import io
import hacks
def test_stdout_monkeypatching():
# Let's first patch stdout manually:
real_stdout = sys.stdout
fake_stdout = io.StringIO()
sys.stdout = fake_stdout # While it is monkey-patched, other users
    print('Hello')           # may write something else into our fake_stdout.
sys.stdout = real_stdout
assert fake_stdout.getvalue() == 'Hello\n'
# Now let's patch stdout with hacks:
sys.stdout = hacks.friendly_callable('stdout')(sys.stdout)
# Nothing bad should happen for now
fake_stdout = io.StringIO()
@hacks.around('stdout')
def capture_stdout(real_stdout_ignored):
return fake_stdout
with hacks.use(capture_stdout): # hacks-aided monkeypatching should not
print('Hello') # affect other users of stdout
assert fake_stdout.getvalue() == 'Hello\n'
# The other benefit is that hacks stack nicely
@hacks.around('stdout')
def zomg_ponies(stdout_to_modify):
class Ponyfier:
def write(self, text):
stdout_to_modify.write('🐎' * len(text))
return Ponyfier()
with hacks.use(capture_stdout):
with hacks.use(zomg_ponies): # A second hack stacks on top
print('oh no') # of the other one reasonably
assert fake_stdout.getvalue() == 'Hello\n' + '🐎' * len('oh no\n')
|
<commit_before><commit_msg>Add new illustrative test: monkeypatching stdout<commit_after>import sys
import io
import hacks
def test_stdout_monkeypatching():
# Let's first patch stdout manually:
real_stdout = sys.stdout
fake_stdout = io.StringIO()
sys.stdout = fake_stdout # While it is monkey-patched, other users
    print('Hello')           # may write something else into our fake_stdout.
sys.stdout = real_stdout
assert fake_stdout.getvalue() == 'Hello\n'
# Now let's patch stdout with hacks:
sys.stdout = hacks.friendly_callable('stdout')(sys.stdout)
# Nothing bad should happen for now
fake_stdout = io.StringIO()
@hacks.around('stdout')
def capture_stdout(real_stdout_ignored):
return fake_stdout
with hacks.use(capture_stdout): # hacks-aided monkeypatching should not
print('Hello') # affect other users of stdout
assert fake_stdout.getvalue() == 'Hello\n'
# The other benefit is that hacks stack nicely
@hacks.around('stdout')
def zomg_ponies(stdout_to_modify):
class Ponyfier:
def write(self, text):
stdout_to_modify.write('🐎' * len(text))
return Ponyfier()
with hacks.use(capture_stdout):
with hacks.use(zomg_ponies): # A second hack stacks on top
print('oh no') # of the other one reasonably
assert fake_stdout.getvalue() == 'Hello\n' + '🐎' * len('oh no\n')
|
|
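For one-off capture without composition, the standard library's contextlib.redirect_stdout covers the same ground as the manual swap at the top of the test:

import io
from contextlib import redirect_stdout

fake_stdout = io.StringIO()
with redirect_stdout(fake_stdout):
    print('Hello')
assert fake_stdout.getvalue() == 'Hello\n'

What it does not give you is the per-caller stacking shown with zomg_ponies, which is the point of routing stdout through hacks.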
8656688c82334dcb1bf064687bc05b1ea9b6a9d0
|
mechanize.py
|
mechanize.py
|
#!/usr/bin/env python
import mechanize
def test_agent(url, user_agent):
browser = mechanize.Browser()
browser.addheaders = user_agent
page = browser.open(url)
source_code = page.read()
print source_code
user_agent = [('User-agent','Mozilla/5.0 (X11; U; Linux 2.4.2-2 i586; en-US; m18) Gecko/20010131 Netscape6/6.01')]
url = 'http://whatsmyuseragent.com/'
test_agent(url, user_agent)
|
Create basic user agent spoofer with Mechanize
|
Create basic user agent spoofer with Mechanize
|
Python
|
mit
|
jwarren116/network-tools-deux
|
Create basic user agent spoofer with Mechanize
|
#!/usr/bin/env python
import mechanize
def test_agent(url, user_agent):
browser = mechanize.Browser()
browser.addheaders = user_agent
page = browser.open(url)
source_code = page.read()
print source_code
user_agent = [('User-agent','Mozilla/5.0 (X11; U; Linux 2.4.2-2 i586; en-US; m18) Gecko/20010131 Netscape6/6.01')]
url = 'http://whatsmyuseragent.com/'
test_agent(url, user_agent)
|
<commit_before><commit_msg>Create basic user agent spoofer with Mechanize<commit_after>
|
#!/usr/bin/env python
import mechanize
def test_agent(url, user_agent):
browser = mechanize.Browser()
browser.addheaders = user_agent
page = browser.open(url)
source_code = page.read()
print source_code
user_agent = [('User-agent','Mozilla/5.0 (X11; U; Linux 2.4.2-2 i586; en-US; m18) Gecko/20010131 Netscape6/6.01')]
url = 'http://whatsmyuseragent.com/'
test_agent(url, user_agent)
|
Create basic user agent spoofer with Mechanize#!/usr/bin/env python
import mechanize
def test_agent(url, user_agent):
browser = mechanize.Browser()
browser.addheaders = user_agent
page = browser.open(url)
source_code = page.read()
print source_code
user_agent = [('User-agent','Mozilla/5.0 (X11; U; Linux 2.4.2-2 i586; en-US; m18) Gecko/20010131 Netscape6/6.01')]
url = 'http://whatsmyuseragent.com/'
test_agent(url, user_agent)
|
<commit_before><commit_msg>Create basic user agent spoofer with Mechanize<commit_after>#!/usr/bin/env
import mechanize
def test_agent(url, user_agent):
browser = mechanize.Browser()
browser.addheaders = user_agent
page = browser.open(url)
source_code = page.read()
print source_code
user_agent = [('User-agent','Mozilla/5.0 (X11; U; Linux 2.4.2-2 i586; en-US; m18) Gecko/20010131 Netscape6/6.01')]
url = 'http://whatsmyuseragent.com/'
test_agent(url, user_agent)
|
|
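Two caveats about the script above: a file named mechanize.py shadows the real mechanize package when run directly, since `import mechanize` resolves to the script itself, so in practice the file needs another name; and the same header spoof is a one-liner with requests (a substitution for illustration, not what the commit uses):

import requests

headers = {'User-Agent': 'Mozilla/5.0 (X11; U; Linux 2.4.2-2 i586; en-US; m18) '
                         'Gecko/20010131 Netscape6/6.01'}
response = requests.get('http://whatsmyuseragent.com/', headers=headers)
print(response.text)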
66c5e80afc4f520ddc239e6f458a4a2e2142fab1
|
tests/test_hdf5_adjacency.py
|
tests/test_hdf5_adjacency.py
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
from reader.hdf5_adjacency import adjacency
def test_range():
"""
Test the range function
"""
hdf5_handle = adjacency('test', '', 10000)
x = hdf5_handle.get_range('chr1', 100000, 200000, limit_chr='chr2')
assert 'results' in x
assert len(x['results']) > 0
|
Test script stub for testing the adjacency reader
|
Test script stub for testing the adjacency reader
|
Python
|
apache-2.0
|
Multiscale-Genomics/mg-dm-api,Multiscale-Genomics/mg-dm-api
|
Test script stub for testing the adjacency reader
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
from reader.hdf5_adjacency import adjacency
def test_range():
"""
Test the range function
"""
hdf5_handle = adjacency('test', '', 10000)
x = hdf5_handle.get_range('chr1', 100000, 200000, limit_chr='chr2')
assert 'results' in x
assert len(x['results']) > 0
|
<commit_before><commit_msg>Test script stub for testing the adjacency reader<commit_after>
|
"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
from reader.hdf5_adjacency import adjacency
def test_range():
"""
Test the range function
"""
hdf5_handle = adjacency('test', '', 10000)
x = hdf5_handle.get_range('chr1', 100000, 200000, limit_chr='chr2')
assert 'results' in x
assert len(x['results']) > 0
|
Test script stub for testing the adjacency reader"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
from reader.hdf5_adjacency import adjacency
def test_range():
"""
Test the range function
"""
hdf5_handle = adjacency('test', '', 10000)
x = hdf5_handle.get_range('chr1', 100000, 200000, limit_chr='chr2')
assert 'results' in x
assert len(x['results']) > 0
|
<commit_before><commit_msg>Test script stub for testing the adjacency reader<commit_after>"""
.. Copyright 2017 EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
import pytest
from reader.hdf5_adjacency import adjacency
def test_range():
"""
Test the range function
"""
hdf5_handle = adjacency('test', '', 10000)
x = hdf5_handle.get_range('chr1', 100000, 200000, limit_chr='chr2')
assert 'results' in x
assert len(x['results']) > 0
|
|
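As the stub grows beyond a single region, the call shape above parametrises cleanly with pytest; the second region below is a placeholder chosen only to illustrate the pattern, not a known test case:

import pytest

from reader.hdf5_adjacency import adjacency

@pytest.mark.parametrize("chromosome, start, end", [
    ("chr1", 100000, 200000),
    ("chr1", 200000, 300000),  # placeholder coordinates
])
def test_range_parametrized(chromosome, start, end):
    hdf5_handle = adjacency('test', '', 10000)
    x = hdf5_handle.get_range(chromosome, start, end, limit_chr='chr2')
    assert 'results' in x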
c7c72c2221109b6fd7b9bf7e18c6a6c3a0e65c1b
|
scripts/utils.py
|
scripts/utils.py
|
"""Helper script that contains many utilities.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from tagging.managers import TaggedItem
from pytask.taskapp.models import Task
def remove_textbook_from_chapter():
"""Removes the tag Textbook from Chapter.
"""
tasks = TaggedItem.objects.get_by_model(Task, 'Chapter')
for task in tasks:
tags = task.tags_field.split(',')
retags = []
for tag in tags:
if 'Textbook' not in tag:
retags.append(tag)
task.tags_field = ', '.join(retags)
task.save()
|
Add a utility script with a function to remove Textbook from current textbook chapter tasks.
|
Add a utility script with a function to remove Textbook from current textbook chapter tasks.
|
Python
|
agpl-3.0
|
madhusudancs/pytask,madhusudancs/pytask,madhusudancs/pytask
|
Add a utility script with a function to remove Textbook from current textbook chapter tasks.
|
"""Helper script that contains many utilities.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from tagging.managers import TaggedItem
from pytask.taskapp.models import Task
def remove_textbook_from_chapter():
"""Removes the tag Textbook from Chapter.
"""
tasks = TaggedItem.objects.get_by_model(Task, 'Chapter')
for task in tasks:
tags = task.tags_field.split(',')
retags = []
for tag in tags:
if 'Textbook' not in tag:
retags.append(tag)
task.tags_field = ', '.join(retags)
task.save()
|
<commit_before><commit_msg>Add a utility script with a function to remove Textbook from current textbook chapter tasks.<commit_after>
|
"""Helper script that contains many utilities.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from tagging.managers import TaggedItem
from pytask.taskapp.models import Task
def remove_textbook_from_chapter():
"""Removes the tag Textbook from Chapter.
"""
tasks = TaggedItem.objects.get_by_model(Task, 'Chapter')
for task in tasks:
tags = task.tags_field.split(',')
retags = []
for tag in tags:
if 'Textbook' not in tag:
retags.append(tag)
task.tags_field = ', '.join(retags)
task.save()
|
Add a utility script with a function to remove Textbook from current textbook chapter tasks."""Helper script that contains many utilities.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from tagging.managers import TaggedItem
from pytask.taskapp.models import Task
def remove_textbook_from_chapter():
"""Removes the tag Textbook from Chapter.
"""
tasks = TaggedItem.objects.get_by_model(Task, 'Chapter')
for task in tasks:
tags = task.tags_field.split(',')
retags = []
for tag in tags:
if 'Textbook' not in tag:
retags.append(tag)
task.tags_field = ', '.join(retags)
task.save()
|
<commit_before><commit_msg>Add a utility script with a function to remove Textbook from current textbook chapter tasks.<commit_after>"""Helper script that contains many utilities.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>',
]
from tagging.managers import TaggedItem
from pytask.taskapp.models import Task
def remove_textbook_from_chapter():
"""Removes the tag Textbook from Chapter.
"""
tasks = TaggedItem.objects.get_by_model(Task, 'Chapter')
for task in tasks:
tags = task.tags_field.split(',')
retags = []
for tag in tags:
if 'Textbook' not in tag:
retags.append(tag)
task.tags_field = ', '.join(retags)
task.save()
|
|
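One wrinkle in the function above: splitting tags_field on ',' keeps each tag's leading space, and rejoining with ', ' widens the gaps a little more on every run. A sketch of the same pass with whitespace normalised (same models and query, only the filtering tightened):

def remove_textbook_from_chapter():
    """Removes the tag Textbook from Chapter, normalising whitespace."""
    tasks = TaggedItem.objects.get_by_model(Task, 'Chapter')
    for task in tasks:
        tags = [tag.strip() for tag in task.tags_field.split(',')]
        retags = [tag for tag in tags if 'Textbook' not in tag]
        task.tags_field = ', '.join(retags)
        task.save()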
23ee1e71fd7811789c5425b6fc2cfcde2057b57f
|
examples/convert_samples.py
|
examples/convert_samples.py
|
#!/usr/bin/env python
"""A test script to parse the existing samples and convert them to JSON.
Not all of the examples currently work.
Example usage:
python convert_samples.py /path/to/cybox_v2.0_samples
"""
import os
import sys
import cybox.bindings.cybox_core as core_binding
from cybox.core import Observables
def from_file(filename):
cybox_obj = core_binding.parse(os.path.abspath(filename))
return Observables.from_obj(cybox_obj)
def main():
if len(sys.argv) < 2:
print "Argument required"
return
# The argument should be a directory containing XML files.
d = sys.argv[1]
output_dir = os.path.join(d, "json")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.isdir(output_dir):
print "{0} exists and is not a directory.".format(output_dir)
return
for f in os.listdir(sys.argv[1]):
orig_file = os.path.join(d, f)
if not os.path.isfile(orig_file):
            continue
output_fn = "{0}.json".format(os.path.splitext(f)[0])
output_fn = os.path.join(output_dir, output_fn)
with open(output_fn, "wt") as f:
try:
f.write(from_file(orig_file).to_json())
except Exception as e:
print "---------------------------------"
print "ERROR with {0}".format(orig_file)
print e
continue
if __name__ == "__main__":
main()
|
Add test script for existing samples
|
Add test script for existing samples
|
Python
|
bsd-3-clause
|
CybOXProject/python-cybox
|
Add test script for existing samples
|
#!/usr/bin/env python
"""A test script to parse the existing samples and convert them to JSON.
Not all of the examples currently work.
Example usage:
python convert_samples.py /path/to/cybox_v2.0_samples
"""
import os
import sys
import cybox.bindings.cybox_core as core_binding
from cybox.core import Observables
def from_file(filename):
cybox_obj = core_binding.parse(os.path.abspath(filename))
return Observables.from_obj(cybox_obj)
def main():
if len(sys.argv) < 2:
print "Argument required"
return
# The argument should be a directory containing XML files.
d = sys.argv[1]
output_dir = os.path.join(d, "json")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.isdir(output_dir):
print "{0} exists and is not a directory.".format(output_dir)
return
for f in os.listdir(sys.argv[1]):
orig_file = os.path.join(d, f)
if not os.path.isfile(orig_file):
            continue
output_fn = "{0}.json".format(os.path.splitext(f)[0])
output_fn = os.path.join(output_dir, output_fn)
with open(output_fn, "wt") as f:
try:
f.write(from_file(orig_file).to_json())
except Exception as e:
print "---------------------------------"
print "ERROR with {0}".format(orig_file)
print e
continue
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add test script for existing samples<commit_after>
|
#!/usr/bin/env python
"""A test script to parse the existing samples and convert them to JSON.
Not all of the examples currently work.
Example usage:
python convert_samples.py /path/to/cybox_v2.0_samples
"""
import os
import sys
import cybox.bindings.cybox_core as core_binding
from cybox.core import Observables
def from_file(filename):
cybox_obj = core_binding.parse(os.path.abspath(filename))
return Observables.from_obj(cybox_obj)
def main():
if len(sys.argv) < 2:
print "Argument required"
return
# The argument should be a directory containing XML files.
d = sys.argv[1]
output_dir = os.path.join(d, "json")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.isdir(output_dir):
print "{0} exists and is not a directory.".format(output_dir)
return
for f in os.listdir(sys.argv[1]):
orig_file = os.path.join(d, f)
if not os.path.isfile(orig_file):
            continue
output_fn = "{0}.json".format(os.path.splitext(f)[0])
output_fn = os.path.join(output_dir, output_fn)
with open(output_fn, "wt") as f:
try:
f.write(from_file(orig_file).to_json())
except Exception as e:
print "---------------------------------"
print "ERROR with {0}".format(orig_file)
print e
continue
if __name__ == "__main__":
main()
|
Add test script for existing samples#!/usr/bin/env python
"""A test script to parse the existing samples and convert them to JSON.
Not all of the examples currently work.
Example usage:
python convert_samples.py /path/to/cybox_v2.0_samples
"""
import os
import sys
import cybox.bindings.cybox_core as core_binding
from cybox.core import Observables
def from_file(filename):
cybox_obj = core_binding.parse(os.path.abspath(filename))
return Observables.from_obj(cybox_obj)
def main():
if len(sys.argv) < 2:
print "Argument required"
return
# The argument should be a directory containing XML files.
d = sys.argv[1]
output_dir = os.path.join(d, "json")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.isdir(output_dir):
print "{0} exists and is not a directory.".format(output_dir)
return
for f in os.listdir(sys.argv[1]):
orig_file = os.path.join(d, f)
if not os.path.isfile(orig_file):
            continue
output_fn = "{0}.json".format(os.path.splitext(f)[0])
output_fn = os.path.join(output_dir, output_fn)
with open(output_fn, "wt") as f:
try:
f.write(from_file(orig_file).to_json())
except Exception as e:
print "---------------------------------"
print "ERROR with {0}".format(orig_file)
print e
continue
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add test script for existing samples<commit_after>#!\usr\bin\env python
"""A test script to parse the existing samples and convert them to JSON.
Not all of the examples currently work.
Example usage:
python convert_samples.py /path/to/cybox_v2.0_samples
"""
import os
import sys
import cybox.bindings.cybox_core as core_binding
from cybox.core import Observables
def from_file(filename):
cybox_obj = core_binding.parse(os.path.abspath(filename))
return Observables.from_obj(cybox_obj)
def main():
if len(sys.argv) < 2:
print "Argument required"
return
# The argument should be a directory containing XML files.
d = sys.argv[1]
output_dir = os.path.join(d, "json")
if not os.path.exists(output_dir):
os.mkdir(output_dir)
if not os.path.isdir(output_dir):
print "{0} exists and is not a directory.".format(output_dir)
return
for f in os.listdir(sys.argv[1]):
orig_file = os.path.join(d, f)
if not os.path.isfile(orig_file):
            continue
output_fn = "{0}.json".format(os.path.splitext(f)[0])
output_fn = os.path.join(output_dir, output_fn)
with open(output_fn, "wt") as f:
try:
f.write(from_file(orig_file).to_json())
except Exception as e:
print "---------------------------------"
print "ERROR with {0}".format(orig_file)
print e
continue
if __name__ == "__main__":
main()
|
|
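Since the sample directory is expected to hold XML files, globbing for them avoids iterating over the json/ output directory the script itself creates. A sketch of just the loop head; the conversion body stays as in main() above:

import glob
import os

# Visit only the XML samples instead of every directory entry.
for orig_file in sorted(glob.glob(os.path.join(d, "*.xml"))):
    name = os.path.splitext(os.path.basename(orig_file))[0]
    output_fn = os.path.join(output_dir, "{0}.json".format(name))
    # ... convert orig_file to output_fn as in main() above ...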
6b3424f3cc33574c825b7c41d9d848e9d6b002a1
|
src/run_similarity.py
|
src/run_similarity.py
|
import argparse
import datetime
from os import getcwd
from os.path import isdir, exists
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
base_dir = getcwd()
output_loc = base_dir + "/%s.corpus_out"
dictionary_loc = output_loc + "/%scorpus.dict"
corpus_loc = output_loc + "/%scorpus.mm"
log_file = output_loc + "/Sim_runtimes.log"
sup_file_loc = output_loc + "/%d.%s"
def run_sim(ints, algorithm):
output_dir = output_loc % algorithm
if not exists(output_dir):
print "Output directory for %s must exist already. Run run_algorithm.py first." % algorithm
return
log = open(log_file % algorithm, 'a+')
for size in ints:
corpus_dict = dictionary_loc % size
corpus = corpus_loc % size
sup_file = sup_file_loc % (size, algorithm)
test_corpus = algorithms[algorithm].load(dictionary=corpus_dict, corpus=corpus, sup_file=sup_file)
# Run some queries on the corpus
# Log temporal time
log.write("%s %d query time:\t" % (algorithm, size) + str(test_corpus.get_train_time()) + "\n")
log.close()
def main():
parser = argparse.ArgumentParser(description='Run queries on bow corpus generated from the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('algorithm', help='algorithm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
run_sim(args.integers, args.algorithm)
if __name__ == "__main__":
main()
|
Add structure for query harness
|
Add structure for query harness
|
Python
|
mit
|
PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project
|
Add structure for query harness
|
import argparse
import datetime
from os import getcwd
from os.path import isdir, exists
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
base_dir = getcwd()
output_loc = base_dir + "/%s.corpus_out"
dictionary_loc = output_loc + "/%scorpus.dict"
corpus_loc = output_loc + "/%scorpus.mm"
log_file = output_loc + "/Sim_runtimes.log"
sup_file_loc = output_loc + "/%d.%s"
def run_sim(ints, algorithm):
output_dir = output_loc % algorithm
if not exists(output_dir):
print "Output directory for %s must exist already. Run run_algorithm.py first." % algorithm
return
log = open(log_file % algorithm, 'a+')
for size in ints:
corpus_dict = dictionary_loc % size
corpus = corpus_loc % size
sup_file = sup_file_loc % (size, algorithm)
test_corpus = algorithms[algorithm].load(dictionary=corpus_dict, corpus=corpus, sup_file=sup_file)
# Run some queries on the corpus
# Log temporal time
log.write("%s %d query time:\t" % (algorithm, size) + str(test_corpus.get_train_time()) + "\n")
log.close()
def main():
parser = argparse.ArgumentParser(description='Run queries on bow corpus generated from the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('algorithm', help='algorithm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
run_sim(args.integers, args.algorithm)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add structure for query harness<commit_after>
|
import argparse
import datetime
from os import getcwd
from os.path import isdir, exists
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
base_dir = getcwd()
output_loc = base_dir + "/%s.corpus_out"
dictionary_loc = output_loc + "/%scorpus.dict"
corpus_loc = output_loc + "/%scorpus.mm"
log_file = output_loc + "/Sim_runtimes.log"
sup_file_loc = output_loc + "/%d.%s"
def run_sim(ints, algorithm):
output_dir = output_loc % algorithm
if not exists(output_dir):
print "Output directory for %s must exist already. Run run_algorithm.py first." % algorithm
return
log = open(log_file % algorithm, 'a+')
for size in ints:
corpus_dict = dictionary_loc % size
corpus = corpus_loc % size
sup_file = sup_file_loc % (size, algorithm)
test_corpus = algorithms[algorithm].load(dictionary=corpus_dict, corpus=corpus, sup_file=sup_file)
# Run some queries on the corpus
# Log temporal time
log.write("%s %d query time:\t" % (algorithm, size) + str(test_corpus.get_train_time()) + "\n")
log.close()
def main():
parser = argparse.ArgumentParser(description='Run queries on bow corpus generated from the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('algorithm', help='algorithm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
run_sim(args.integers, args.algorithm)
if __name__ == "__main__":
main()
|
Add structure for query harnessimport argparse
import datetime
from os import getcwd
from os.path import isdir, exists
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
base_dir = getcwd()
output_loc = base_dir + "/%s.corpus_out"
dictionary_loc = output_loc + "/%scorpus.dict"
corpus_loc = output_loc + "/%scorpus.mm"
log_file = output_loc + "/Sim_runtimes.log"
sup_file_loc = output_loc + "/%d.%s"
def run_sim(ints, algorithm):
output_dir = output_loc % algorithm
if not exists(output_dir):
print "Output directory for %s must exist already. Run run_algorithm.py first." % algorithm
return
log = open(log_file % algorithm, 'a+')
for size in ints:
corpus_dict = dictionary_loc % size
corpus = corpus_loc % size
sup_file = sup_file_loc % (size, algorithm)
test_corpus = algorithms[algorithm].load(dictionary=corpus_dict, corpus=corpus, sup_file=sup_file)
# Run some queries on the corpus
# Log temporal time
log.write("%s %d query time:\t" % (algorithm, size) + str(test_corpus.get_train_time()) + "\n")
log.close()
def main():
parser = argparse.ArgumentParser(description='Run queries on bow corpus generated from the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('algorithm', help='algorithm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
run_sim(args.integers, args.algorithm)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add structure for query harness<commit_after>import argparse
import datetime
from os import getcwd
from os.path import isdir, exists
from project import corpus, knn_corpus, lda_corpus, word2vec_corpus
algorithms = {"lda": lda_corpus.LDACorpus,
"knn": knn_corpus.KNNCorpus,
"w2v": word2vec_corpus.W2VCorpus}
base_dir = getcwd()
output_loc = base_dir + "/%s.corpus_out"
dictionary_loc = output_loc + "/%scorpus.dict"
corpus_loc = output_loc + "/%scorpus.mm"
log_file = output_loc + "/Sim_runtimes.log"
sup_file_loc = output_loc + "/%d.%s"
def run_sim(ints, algorithm):
output_dir = output_loc % algorithm
if not exists(output_dir):
print "Output directory for %s must exist already. Run run_algorithm.py first." % algorithm
return
log = open(log_file % algorithm, 'a+')
for size in ints:
corpus_dict = dictionary_loc % size
corpus = corpus_loc % size
sup_file = sup_file_loc % (size, algorithm)
test_corpus = algorithms[algorithm].load(dictionary=corpus_dict, corpus=corpus, sup_file=sup_file)
# Run some queries on the corpus
# Log temporal time
log.write("%s %d query time:\t" % (algorithm, size) + str(test_corpus.get_train_time()) + "\n")
log.close()
def main():
parser = argparse.ArgumentParser(description='Run queries on bow corpus generated from the arxiv corpus.')
parser.add_argument('integers', metavar='N', type=int, nargs='+', help='size values for the corpus')
parser.add_argument('algorithm', help='algorithm to apply to the corpus', choices=algorithms)
args = parser.parse_args()
run_sim(args.integers, args.algorithm)
if __name__ == "__main__":
main()
|
|
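The "# Run some queries on the corpus" placeholder marks where per-query timing would go. One hypothetical shape for it; `queries` and the `query()` method are assumptions standing in for whatever similarity interface the corpus classes expose, not part of this commit:

import time

start = time.time()
for query in queries:
    test_corpus.query(query)
elapsed = datetime.timedelta(seconds=time.time() - start)
log.write("%s %d query time:\t%s\n" % (algorithm, size, elapsed))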
94c481fc17968030d7c25072985d70eb7b4413e1
|
tests/test_api.py
|
tests/test_api.py
|
from bmi_tester.api import check_bmi
def test_bmi_check(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi", input_file="input.yaml", extra_args=["-vvv"]
)
== 0
)
def test_bmi_check_with_manifest_as_list(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest=["input.yaml"],
)
== 0
)
def test_bmi_check_with_manifest_as_string(tmpdir):
with tmpdir.as_cwd():
with open("manifest.txt", "w") as fp:
fp.write("input.yaml")
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest="manifest.txt",
)
== 0
)
|
Add unit tests for check_bmi.
|
Add unit tests for check_bmi.
|
Python
|
mit
|
csdms/bmi-tester
|
Add unit tests for check_bmi.
|
from bmi_tester.api import check_bmi
def test_bmi_check(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi", input_file="input.yaml", extra_args=["-vvv"]
)
== 0
)
def test_bmi_check_with_manifest_as_list(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest=["input.yaml"],
)
== 0
)
def test_bmi_check_with_manifest_as_string(tmpdir):
with tmpdir.as_cwd():
with open("manifest.txt", "w") as fp:
fp.write("input.yaml")
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest="manifest.txt",
)
== 0
)
|
<commit_before><commit_msg>Add unit tests for check_bmi.<commit_after>
|
from bmi_tester.api import check_bmi
def test_bmi_check(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi", input_file="input.yaml", extra_args=["-vvv"]
)
== 0
)
def test_bmi_check_with_manifest_as_list(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest=["input.yaml"],
)
== 0
)
def test_bmi_check_with_manifest_as_string(tmpdir):
with tmpdir.as_cwd():
with open("manifest.txt", "w") as fp:
fp.write("input.yaml")
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest="manifest.txt",
)
== 0
)
|
Add unit tests for check_bmi.from bmi_tester.api import check_bmi
def test_bmi_check(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi", input_file="input.yaml", extra_args=["-vvv"]
)
== 0
)
def test_bmi_check_with_manifest_as_list(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest=["input.yaml"],
)
== 0
)
def test_bmi_check_with_manifest_as_string(tmpdir):
with tmpdir.as_cwd():
with open("manifest.txt", "w") as fp:
fp.write("input.yaml")
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest="manifest.txt",
)
== 0
)
|
<commit_before><commit_msg>Add unit tests for check_bmi.<commit_after>from bmi_tester.api import check_bmi
def test_bmi_check(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi", input_file="input.yaml", extra_args=["-vvv"]
)
== 0
)
def test_bmi_check_with_manifest_as_list(tmpdir):
with tmpdir.as_cwd():
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest=["input.yaml"],
)
== 0
)
def test_bmi_check_with_manifest_as_string(tmpdir):
with tmpdir.as_cwd():
with open("manifest.txt", "w") as fp:
fp.write("input.yaml")
with open("input.yaml", "w") as fp:
pass
assert (
check_bmi(
"bmi_tester.bmi:Bmi",
extra_args=["-vvv"],
input_file="input.yaml",
manifest="manifest.txt",
)
== 0
)
|
|
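The three tests above pin down that check_bmi accepts a manifest either as an in-memory list of file names or as the name of a manifest file with one entry per line. A sketch of the normalization such an API plausibly performs (an assumption for illustration, not the actual bmi_tester code):

def normalize_manifest(manifest):
    # Hypothetical helper: turn either accepted form into a list of names.
    if manifest is None:
        return []
    if isinstance(manifest, str):
        # Treat a string as the path to a manifest file, one entry per line.
        with open(manifest) as fp:
            return [line.strip() for line in fp if line.strip()]
    return list(manifest)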
cc14698280f5982c472c51185d57d5f5292ce518
|
byceps/blueprints/ticketing/views.py
|
byceps/blueprints/ticketing/views.py
|
"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
me = get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
me.id, g.party.id)
current_user_uses_any_ticket = find(lambda t: t.used_by_id == me.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
|
"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
current_user = _get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
current_user.id, g.party.id)
current_user_uses_any_ticket = find(
lambda t: t.used_by_id == current_user.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def _get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
|
Rename variable, prefix private function
|
Rename variable, prefix private function
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps
|
"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
me = get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
me.id, g.party.id)
current_user_uses_any_ticket = find(lambda t: t.used_by_id == me.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
Rename variable, prefix private function
|
"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
current_user = _get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
current_user.id, g.party.id)
current_user_uses_any_ticket = find(
lambda t: t.used_by_id == current_user.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def _get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
|
<commit_before>"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
me = get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
me.id, g.party.id)
current_user_uses_any_ticket = find(lambda t: t.used_by_id == me.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
<commit_msg>Rename variable, prefix private function<commit_after>
|
"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
current_user = _get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
current_user.id, g.party.id)
current_user_uses_any_ticket = find(
lambda t: t.used_by_id == current_user.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def _get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
|
"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
me = get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
me.id, g.party.id)
current_user_uses_any_ticket = find(lambda t: t.used_by_id == me.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
Rename variable, prefix private function"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
current_user = _get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
current_user.id, g.party.id)
current_user_uses_any_ticket = find(
lambda t: t.used_by_id == current_user.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def _get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
|
<commit_before>"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
me = get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
me.id, g.party.id)
current_user_uses_any_ticket = find(lambda t: t.used_by_id == me.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
<commit_msg>Rename variable, prefix private function<commit_after>"""
byceps.blueprints.ticketing.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Copyright: 2006-2017 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
from flask import abort, g
from ...services.ticketing import ticket_service
from ...util.framework.blueprint import create_blueprint
from ...util.iterables import find
from ...util.framework.templating import templated
blueprint = create_blueprint('ticketing', __name__)
@blueprint.route('/mine')
@templated
def index_mine():
"""List tickets related to the current user."""
current_user = _get_current_user_or_403()
tickets = ticket_service.find_tickets_related_to_user_for_party(
current_user.id, g.party.id)
current_user_uses_any_ticket = find(
lambda t: t.used_by_id == current_user.id, tickets)
return {
'tickets': tickets,
'current_user_uses_any_ticket': current_user_uses_any_ticket,
}
def _get_current_user_or_403():
user = g.current_user
if not user.is_active:
abort(403)
return user
|
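For context on the refactor above: the leading underscore marks _get_current_user_or_403 as module-private, and splitting the find(...) call across lines keeps it within the codebase's line-length conventions. The find helper from util.iterables presumably returns the first matching element or None; a sketch of that contract (an assumption, not the byceps source):

def find(predicate, iterable):
    # First element satisfying the predicate, or None if there is none.
    for item in iterable:
        if predicate(item):
            return item
    return None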
fba35ea4fdaf3a9076bcd9eefdcf1dce3d41b05a
|
src/templates.py
|
src/templates.py
|
import os
from . import reporting
def create_section(title, desc, props, subsects, figures_list):
assert isinstance(title, str)
assert isinstance(desc, str)
assert isinstance(props, list)
assert isinstance(figures_list, list)
section = reporting.Section(title, [])
section.add(reporting.BlockLatex(desc + "\n"))
for s in subsects:
section.add(s)
# Create figures in the appropriate directory
for f in figures_list:
section.add(reporting.FloatFigure(f))
section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
return section
def prepare_report(props, sects, filename, paperwidth=75, user_declarations=""):
"""Creates a LaTeX report of the results, where properties are shared for all subsections and
dimension for rows (e.g. benchmarks) is also the same for each subsection.
:param props: (list[dict]) list of properties dicts to be processed.
:param sects: ((title, desc, subsections, figures)), where figures are paths to images. Subsections
are specified as pairs (function, arguments), where function is supposed to return reporting.Subsection.
:param filename: (str) name of the LaTeX and PDF files to be generated.
:param paperwidth: (float) width of the page.
:param user_declarations: (str) user LaTeX code to be placed in the preamble of the document.
:return: None
"""
report = reporting.ReportPDF(geometry_params="[paperwidth={0}cm, paperheight=40cm, margin=0.3cm]".format(paperwidth),
packages=["pbox", "makecell"], user_declarations=user_declarations)
latex_sects = []
for title, desc, subsections, figures in sects:
subsects = []
for fun, args in subsections:
args2 = [props] + args
subsects.append(fun(*args2))
s = create_section(title, desc, props, subsects, figures)
latex_sects.append(s)
for s in latex_sects:
if s is not None:
report.add(s)
print("\n\nGenerating PDF report ...")
cwd = os.getcwd()
os.chdir("results/")
report.save_and_compile(filename)
os.chdir(cwd)
|
Add a template to readily generate PDF report from properties and dimensions
|
Add a template to readily generate PDF report from properties and dimensions
|
Python
|
mit
|
iwob/evoplotter,iwob/evoplotter
|
Add a template to readily generate PDF report from properties and dimensions
|
import os
from . import reporting
def create_section(title, desc, props, subsects, figures_list):
assert isinstance(title, str)
assert isinstance(desc, str)
assert isinstance(props, list)
assert isinstance(figures_list, list)
section = reporting.Section(title, [])
section.add(reporting.BlockLatex(desc + "\n"))
for s in subsects:
section.add(s)
# Create figures in the appropriate directory
for f in figures_list:
section.add(reporting.FloatFigure(f))
section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
return section
def prepare_report(props, sects, filename, paperwidth=75, user_declarations=""):
"""Creates a LaTeX report of the results, where properties are shared for all subsections and
dimension for rows (e.g. benchmarks) is also the same for each subsection.
:param props: (list[dict]) list of properties dicts to be processed.
:param sects: ((title, desc, subsections, figures)), where figures are paths to images. Subsections
are specified as pairs (function, arguments), where function is supposed to return reporting.Subsection.
:param filename: (str) name of the LaTeX and PDF files to be generated.
:param paperwidth: (float) width of the page.
:param user_declarations: (str) user LaTeX code to be placed in the preamble of the document.
:return: None
"""
report = reporting.ReportPDF(geometry_params="[paperwidth={0}cm, paperheight=40cm, margin=0.3cm]".format(paperwidth),
packages=["pbox", "makecell"], user_declarations=user_declarations)
latex_sects = []
for title, desc, subsections, figures in sects:
subsects = []
for fun, args in subsections:
args2 = [props] + args
subsects.append(fun(*args2))
s = create_section(title, desc, props, subsects, figures)
latex_sects.append(s)
for s in latex_sects:
if s is not None:
report.add(s)
print("\n\nGenerating PDF report ...")
cwd = os.getcwd()
os.chdir("results/")
report.save_and_compile(filename)
os.chdir(cwd)
|
<commit_before><commit_msg>Add a template to readily generate PDF report from properties and dimensions<commit_after>
|
import os
from . import reporting
def create_section(title, desc, props, subsects, figures_list):
assert isinstance(title, str)
assert isinstance(desc, str)
assert isinstance(props, list)
assert isinstance(figures_list, list)
section = reporting.Section(title, [])
section.add(reporting.BlockLatex(desc + "\n"))
for s in subsects:
section.add(s)
# Create figures in the appropriate directory
for f in figures_list:
section.add(reporting.FloatFigure(f))
section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
return section
def prepare_report(props, sects, filename, paperwidth=75, user_declarations=""):
"""Creates a LaTeX report of the results, where properties are shared for all subsections and
dimension for rows (e.g. benchmarks) is also the same for each subsection.
:param props: (list[dict]) list of properties dicts to be processed.
:param sects: ((title, desc, subsections, figures)), where figures are paths to images. Subsections
are specified as pairs (function, arguments), where function is supposed to return reporting.Subsection.
:param filename: (str) name of the LaTeX and PDF files to be generated.
:param paperwidth: (float) width of the page.
:param user_declarations: (str) user LaTeX code to be placed in the preamble of the document.
:return: None
"""
report = reporting.ReportPDF(geometry_params="[paperwidth={0}cm, paperheight=40cm, margin=0.3cm]".format(paperwidth),
packages=["pbox", "makecell"], user_declarations=user_declarations)
latex_sects = []
for title, desc, subsections, figures in sects:
subsects = []
for fun, args in subsections:
args2 = [props] + args
subsects.append(fun(*args2))
s = create_section(title, desc, props, subsects, figures)
latex_sects.append(s)
for s in latex_sects:
if s is not None:
report.add(s)
print("\n\nGenerating PDF report ...")
cwd = os.getcwd()
os.chdir("results/")
report.save_and_compile(filename)
os.chdir(cwd)
|
Add a template to readily generate PDF report from properties and dimensionsimport os
from . import reporting
def create_section(title, desc, props, subsects, figures_list):
assert isinstance(title, str)
assert isinstance(desc, str)
assert isinstance(props, list)
assert isinstance(figures_list, list)
section = reporting.Section(title, [])
section.add(reporting.BlockLatex(desc + "\n"))
for s in subsects:
section.add(s)
# Create figures in the appropriate directory
for f in figures_list:
section.add(reporting.FloatFigure(f))
section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
return section
def prepare_report(props, sects, filename, paperwidth=75, user_declarations=""):
"""Creates a LaTeX report of the results, where properties are shared for all subsections and
dimension for rows (e.g. benchmarks) is also the same for each subsection.
:param props: (list[dict]) list of properties dicts to be processed.
:param sects: ((title, desc, subsections, figures)), where figures are paths to images. Subsections
are specified as pairs (function, arguments), where function is supposed to return reporting.Subsection.
:param filename: (str) name of the LaTeX and PDF files to be generated.
:param paperwidth: (float) width of the page.
:param user_declarations: (str) user LaTeX code to be placed in the preamble of the document.
:return: None
"""
report = reporting.ReportPDF(geometry_params="[paperwidth={0}cm, paperheight=40cm, margin=0.3cm]".format(paperwidth),
packages=["pbox", "makecell"], user_declarations=user_declarations)
latex_sects = []
for title, desc, subsections, figures in sects:
subsects = []
for fun, args in subsections:
args2 = [props] + args
subsects.append(fun(*args2))
s = create_section(title, desc, props, subsects, figures)
latex_sects.append(s)
for s in latex_sects:
if s is not None:
report.add(s)
print("\n\nGenerating PDF report ...")
cwd = os.getcwd()
os.chdir("results/")
report.save_and_compile(filename)
os.chdir(cwd)
|
<commit_before><commit_msg>Add a template to readily generate PDF report from properties and dimensions<commit_after>import os
from . import reporting
def create_section(title, desc, props, subsects, figures_list):
assert isinstance(title, str)
assert isinstance(desc, str)
assert isinstance(props, list)
assert isinstance(figures_list, list)
section = reporting.Section(title, [])
section.add(reporting.BlockLatex(desc + "\n"))
for s in subsects:
section.add(s)
# Create figures in the appropriate directory
for f in figures_list:
section.add(reporting.FloatFigure(f))
section.add(reporting.BlockLatex(r"\vspace{1cm}" + "\n"))
return section
def prepare_report(props, sects, filename, paperwidth=75, user_declarations=""):
"""Creates a LaTeX report of the results, where properties are shared for all subsections and
dimension for rows (e.g. benchmarks) is also the same for each subsection.
:param props: (list[dict]) list of properties dicts to be processed.
:param sects: ((title, desc, subsections, figures)), where figures are paths to images. Subsections
are specified as pairs (function, arguments), where function is supposed to return reporting.Subsection.
:param filename: (str) name of the LaTeX and PDF files to be generated.
:param paperwidth: (float) width of the page.
:param user_declarations: (str) user LaTeX code to be placed in the preamble of the document.
:return: None
"""
report = reporting.ReportPDF(geometry_params="[paperwidth={0}cm, paperheight=40cm, margin=0.3cm]".format(paperwidth),
packages=["pbox", "makecell"], user_declarations=user_declarations)
latex_sects = []
for title, desc, subsections, figures in sects:
subsects = []
for fun, args in subsections:
args2 = [props] + args
subsects.append(fun(*args2))
s = create_section(title, desc, props, subsects, figures)
latex_sects.append(s)
for s in latex_sects:
if s is not None:
report.add(s)
print("\n\nGenerating PDF report ...")
cwd = os.getcwd()
os.chdir("results/")
report.save_and_compile(filename)
os.chdir(cwd)
|
|
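A hedged usage sketch for prepare_report above, showing the shape of the sects argument: each subsection entry is a (function, extra_args) pair, and the function is called with props prepended to the extra arguments. All names below are placeholders, not part of the project:

def build_summary(props, caption):
    # A real builder would construct and return a reporting.Subsection.
    return None

sects = [
    ("Results", "Aggregate metrics over all runs.",
     [(build_summary, ["summary"])],  # invoked as build_summary(props, "summary")
     []),                             # no figures for this section
]
# prepare_report(props, sects, "report", paperwidth=50)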
79a83a46f788814d186267eeea640bcb9127be6f
|
problem3.py
|
problem3.py
|
"""
In DNA strings, symbols 'A' and 'T' are complements of each other, as are 'C' and 'G'.
The reverse complement of a DNA string s is the string sc formed by reversing the symbols of s, then
taking the complement of each symbol (e.g., the reverse complement of "GTCA" is "TGAC").
Given: A DNA string s of length at most 1000 bp.
Return: The reverse complement sc of s.
"""
if __name__ == '__main__':
inverse = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'\n': '',
}
with open('data/rosalind_revc.txt') as f:
sequence = f.read()
complement = ''
for N in sequence:
complement = inverse[N] + complement
print complement
|
Add solution to complementing a strand of DNA
|
Add solution to complementing a strand of DNA
|
Python
|
mit
|
MichaelAquilina/rosalind-solutions
|
Add solution to complementing a strand of DNA
|
"""
In DNA strings, symbols 'A' and 'T' are complements of each other, as are 'C' and 'G'.
The reverse complement of a DNA string s is the string sc formed by reversing the symbols of s, then
taking the complement of each symbol (e.g., the reverse complement of "GTCA" is "TGAC").
Given: A DNA string s of length at most 1000 bp.
Return: The reverse complement sc of s.
"""
if __name__ == '__main__':
inverse = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'\n': '',
}
with open('data/rosalind_revc.txt') as f:
sequence = f.read()
complement = ''
for N in sequence:
complement = inverse[N] + complement
print complement
|
<commit_before><commit_msg>Add solution to complementing a strand of DNA<commit_after>
|
"""
In DNA strings, symbols 'A' and 'T' are complements of each other, as are 'C' and 'G'.
The reverse complement of a DNA string s is the string sc formed by reversing the symbols of s, then
taking the complement of each symbol (e.g., the reverse complement of "GTCA" is "TGAC").
Given: A DNA string s of length at most 1000 bp.
Return: The reverse complement sc of s.
"""
if __name__ == '__main__':
inverse = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'\n': '',
}
with open('data/rosalind_revc.txt') as f:
sequence = f.read()
complement = ''
for N in sequence:
complement = inverse[N] + complement
print complement
|
Add solution to complementing a strand of DNA"""
In DNA strings, symbols 'A' and 'T' are complements of each other, as are 'C' and 'G'.
The reverse complement of a DNA string s is the string sc formed by reversing the symbols of s, then
taking the complement of each symbol (e.g., the reverse complement of "GTCA" is "TGAC").
Given: A DNA string s of length at most 1000 bp.
Return: The reverse complement sc of s.
"""
if __name__ == '__main__':
inverse = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'\n': '',
}
with open('data/rosalind_revc.txt') as f:
sequence = f.read()
complement = ''
for N in sequence:
complement = inverse[N] + complement
print complement
|
<commit_before><commit_msg>Add solution to complementing a strand of DNA<commit_after>"""
In DNA strings, symbols 'A' and 'T' are complements of each other, as are 'C' and 'G'.
The reverse complement of a DNA string s is the string sc formed by reversing the symbols of s, then
taking the complement of each symbol (e.g., the reverse complement of "GTCA" is "TGAC").
Given: A DNA string s of length at most 1000 bp.
Return: The reverse complement sc of s.
"""
if __name__ == '__main__':
inverse = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'\n': '',
}
with open('data/rosalind_revc.txt') as f:
sequence = f.read()
complement = ''
for N in sequence:
complement = inverse[N] + complement
print complement
|
|
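Note that prepending to a string on every iteration, as the loop above does, copies the accumulator each time and makes the pass quadratic. An equivalent linear formulation in Python 3 uses str.translate plus a reversing slice (the input literal is a stand-in for the file contents with the newline stripped):

table = str.maketrans("ATCG", "TAGC")
sequence = "GTCA"  # stand-in for the file contents
print(sequence.translate(table)[::-1])  # -> TGAC, matching the docstring example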
9f88afa75279a6accd859a521afb2ec311874032
|
spiral_out.py
|
spiral_out.py
|
size = 7
m = [[0] * size for _ in range(size)]
start = size // 2
total = size * size
current = 1
i = j = start
direction = 0
current_leg_length = 1
leg_length_now = 0
current_leg_count = 0
while current <= total:
m[i][j] = current
current += 1
leg_length_now += 1
if direction == 0:
j -= 1
elif direction == 1:
i += 1
elif direction == 2:
j += 1
else:
i -= 1
if leg_length_now == current_leg_length:
leg_length_now = 0
direction = (direction + 1) % 4
if current_leg_count == 1:
current_leg_length += 1
current_leg_count = 0
else:
current_leg_count += 1
for i in range(size):
print(m[i])
|
Add program populating matrix with numbers starting with 1 in the matrix center and then spiralling out
|
Add program populating matrix with numbers starting with 1 in the matrix center and then spiralling out
|
Python
|
mit
|
dnl-blkv/algorithms
|
Add program populating matrix with numbers starting with 1 in the matrix center and then spiralling out
|
size = 7
m = [[0] * size for _ in range(size)]
start = size // 2
total = size * size
current = 1
i = j = start
direction = 0
current_leg_length = 1
leg_length_now = 0
current_leg_count = 0
while current <= total:
m[i][j] = current
current += 1
leg_length_now += 1
if direction == 0:
j -= 1
elif direction == 1:
i += 1
elif direction == 2:
j += 1
else:
i -= 1
if leg_length_now == current_leg_length:
leg_length_now = 0
direction = (direction + 1) % 4
if current_leg_count == 1:
current_leg_length += 1
current_leg_count = 0
else:
current_leg_count += 1
for i in range(size):
print(m[i])
|
<commit_before><commit_msg>Add program populating matrix with numbers starting with 1 in the matrix center and then spiralling out<commit_after>
|
size = 7
m = [[0] * size for _ in range(size)]
start = size // 2
total = size * size
current = 1
i = j = start
direction = 0
current_leg_length = 1
leg_length_now = 0
current_leg_count = 0
while current <= total:
m[i][j] = current
current += 1
leg_length_now += 1
if direction == 0:
j -= 1
elif direction == 1:
i += 1
elif direction == 2:
j += 1
else:
i -= 1
if leg_length_now == current_leg_length:
leg_length_now = 0
direction = (direction + 1) % 4
if current_leg_count == 1:
current_leg_length += 1
current_leg_count = 0
else:
current_leg_count += 1
for i in range(size):
print(m[i])
|
Add program populating matrix with numbers starting with 1 in the matrix center and then spiralling outsize = 7
m = [[0] * size for _ in range(size)]
start = size // 2
total = size * size
current = 1
i = j = start
direction = 0
current_leg_length = 1
leg_length_now = 0
current_leg_count = 0
while current <= total:
m[i][j] = current
current += 1
leg_length_now += 1
if direction == 0:
j -= 1
elif direction == 1:
i += 1
elif direction == 2:
j += 1
else:
i -= 1
if leg_length_now == current_leg_length:
leg_length_now = 0
direction = (direction + 1) % 4
if current_leg_count == 1:
current_leg_length += 1
current_leg_count = 0
else:
current_leg_count += 1
for i in range(size):
print(m[i])
|
<commit_before><commit_msg>Add program populating matrix with numbers starting with 1 in the matrix center and then spiralling out<commit_after>size = 7
m = [[0] * size for _ in range(size)]
start = size // 2
total = size * size
current = 1
i = j = start
direction = 0
current_leg_length = 1
leg_length_now = 0
current_leg_count = 0
while current <= total:
m[i][j] = current
current += 1
leg_length_now += 1
if direction == 0:
j -= 1
elif direction == 1:
i += 1
elif direction == 2:
j += 1
else:
i -= 1
if leg_length_now == current_leg_length:
leg_length_now = 0
direction = (direction + 1) % 4
if current_leg_count == 1:
current_leg_length += 1
current_leg_count = 0
else:
current_leg_count += 1
for i in range(size):
print(m[i])
|
|
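The control flow above walks legs of the spiral whose lengths follow the pattern 1, 1, 2, 2, 3, 3, ...: each leg length is used for two consecutive turns before it grows by one. A quick check of that pattern in isolation:

# Each leg length k is consumed twice before incrementing.
pattern = [k for k in range(1, 5) for _ in range(2)]
print(pattern)  # [1, 1, 2, 2, 3, 3, 4, 4]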
74a624b57e1f5ccc0f62665d3ff52eef38ca9192
|
tests/test_robot.py
|
tests/test_robot.py
|
import time
import unittest
from unittest import mock
from robot import game_specific
from robot.board import Board
from robot.robot import Robot
from tests.mock_robotd import MockRobotDFactoryMixin
class RobotTest(MockRobotDFactoryMixin, unittest.TestCase):
def mock_kill_after_delay(self):
return mock.patch('robot.robot.kill_after_delay')
def set_competition_mode(self):
board = Board(self.board_path(self.game_state))
board._send_and_receive({'mode': 'competition'})
def setUp(self):
mock = self.create_mock_robotd()
self.power_board = mock.new_powerboard()
self.game_state = mock.new_gamestate()
time.sleep(0.2)
self.mock = mock
def test_explicit_wait_start_development_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_explicit_wait_start_competition_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
def test_implicit_wait_start_development_mode(self):
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_implicit_wait_start_competition_mode(self):
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
|
Test Robot interactions with wait_start
|
Test Robot interactions with wait_start
|
Python
|
mit
|
sourcebots/robot-api,sourcebots/robot-api
|
Test Robot interactions with wait_start
|
import time
import unittest
from unittest import mock
from robot import game_specific
from robot.board import Board
from robot.robot import Robot
from tests.mock_robotd import MockRobotDFactoryMixin
class RobotTest(MockRobotDFactoryMixin, unittest.TestCase):
def mock_kill_after_delay(self):
return mock.patch('robot.robot.kill_after_delay')
def set_competition_mode(self):
board = Board(self.board_path(self.game_state))
board._send_and_receive({'mode': 'competition'})
def setUp(self):
mock = self.create_mock_robotd()
self.power_board = mock.new_powerboard()
self.game_state = mock.new_gamestate()
time.sleep(0.2)
self.mock = mock
def test_explicit_wait_start_development_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_explicit_wait_start_competition_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
def test_implicit_wait_start_development_mode(self):
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_implicit_wait_start_competition_mode(self):
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
|
<commit_before><commit_msg>Test Robot interactions with wait_start<commit_after>
|
import time
import unittest
from unittest import mock
from robot import game_specific
from robot.board import Board
from robot.robot import Robot
from tests.mock_robotd import MockRobotDFactoryMixin
class RobotTest(MockRobotDFactoryMixin, unittest.TestCase):
def mock_kill_after_delay(self):
return mock.patch('robot.robot.kill_after_delay')
def set_competition_mode(self):
board = Board(self.board_path(self.game_state))
board._send_and_receive({'mode': 'competition'})
def setUp(self):
mock = self.create_mock_robotd()
self.power_board = mock.new_powerboard()
self.game_state = mock.new_gamestate()
time.sleep(0.2)
self.mock = mock
def test_explicit_wait_start_development_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_explicit_wait_start_competition_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
def test_implicit_wait_start_development_mode(self):
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_implicit_wait_start_competition_mode(self):
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
|
Test Robot interactions with wait_startimport time
import unittest
from unittest import mock
from robot import game_specific
from robot.board import Board
from robot.robot import Robot
from tests.mock_robotd import MockRobotDFactoryMixin
class RobotTest(MockRobotDFactoryMixin, unittest.TestCase):
def mock_kill_after_delay(self):
return mock.patch('robot.robot.kill_after_delay')
def set_competition_mode(self):
board = Board(self.board_path(self.game_state))
board._send_and_receive({'mode': 'competition'})
def setUp(self):
mock = self.create_mock_robotd()
self.power_board = mock.new_powerboard()
self.game_state = mock.new_gamestate()
time.sleep(0.2)
self.mock = mock
def test_explicit_wait_start_development_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_explicit_wait_start_competition_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
def test_implicit_wait_start_development_mode(self):
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_implicit_wait_start_competition_mode(self):
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
|
<commit_before><commit_msg>Test Robot interactions with wait_start<commit_after>import time
import unittest
from unittest import mock
from robot import game_specific
from robot.board import Board
from robot.robot import Robot
from tests.mock_robotd import MockRobotDFactoryMixin
class RobotTest(MockRobotDFactoryMixin, unittest.TestCase):
def mock_kill_after_delay(self):
return mock.patch('robot.robot.kill_after_delay')
def set_competition_mode(self):
board = Board(self.board_path(self.game_state))
board._send_and_receive({'mode': 'competition'})
def setUp(self):
mock = self.create_mock_robotd()
self.power_board = mock.new_powerboard()
self.game_state = mock.new_gamestate()
time.sleep(0.2)
self.mock = mock
def test_explicit_wait_start_development_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_explicit_wait_start_competition_mode(self):
robot = Robot(
robotd_path=self.mock.root_dir,
wait_for_start_button=False,
)
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
robot.power_board.wait_start()
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
def test_implicit_wait_start_development_mode(self):
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
# default is development mode, which doesn't have a timeout
mock_kill_after_delay.assert_not_called()
def test_implicit_wait_start_competition_mode(self):
self.set_competition_mode()
with self.mock_kill_after_delay() as mock_kill_after_delay:
Robot(robotd_path=self.mock.root_dir)
mock_kill_after_delay.assert_called_once_with(
game_specific.GAME_DURATION_SECONDS,
game_specific.GAME_EXIT_MESSAGE,
)
|
|
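The tests above patch out robot.robot.kill_after_delay, so they assert only that it is (or is not) called, not what it does. As a mental model, a helper with that name could look like the following timer-based sketch; this is purely hypothetical and not the robot-api implementation:

import os
import signal
import sys
import threading

def kill_after_delay(seconds, message):
    # After the delay, print the exit message and terminate the process.
    def _expire():
        print(message, file=sys.stderr)
        os.kill(os.getpid(), signal.SIGTERM)
    timer = threading.Timer(seconds, _expire)
    timer.daemon = True
    timer.start()
    return timer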
1a11007938fbe2964fc121b57aadef8a9d9cb1a0
|
note/migrations/0004_auto_20150305_1003.py
|
note/migrations/0004_auto_20150305_1003.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, transaction
import django.utils.timezone
# the function that will be used by the migration operation
@transaction.atomic
def copy_old_users(apps, schema_editor):
# the default user class
User = apps.get_model("auth", "User")
# my custom user class
MyUser = apps.get_model("note", "MyUser")
# the fields I want to copy from User to MyUser
fields = ['id', 'username', 'email', 'first_name', 'last_name',
'is_staff', 'is_active', 'date_joined', 'is_superuser',
'last_login']
# for loop to copy all users from one class to the other
for user in User.objects.all():
custom_user = MyUser()
for field in fields:
setattr(custom_user, field, getattr(user, field))
custom_user.save()
# also, groups and permissions should be copied
custom_user.groups.add(*user.groups.all())
custom_user.user_permissions.add(*user.user_permissions.all())
class Migration(migrations.Migration):
dependencies = [
('note', '0003_auto_20150305_0955'),
]
operations = [
migrations.RunPython(copy_old_users)
]
|
Migrate to a custom User class (4)
|
Migrate to a custom User class (4)
step4: create an empty migration script
python manage.py makemigrations --empty note
Then edit the script and add a function that will copy the users from
the old users table to the new one.
|
Python
|
bsd-2-clause
|
LeMeteore/boomer2
|
Migrate to a custom User class (4)
step4: create an empty migration script
python manage.py makemigrations --empty note
Then edit the script and add a function that will copy the users from
the old users table to the new one.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, transaction
import django.utils.timezone
# the function that will be used by the migration operation
@transaction.atomic
def copy_old_users(apps, schema_editor):
# the default user class
User = apps.get_model("auth", "User")
# my custom user class
MyUser = apps.get_model("note", "MyUser")
# the fields I want to copy from User to MyUser
fields = ['id', 'username', 'email', 'first_name', 'last_name',
'is_staff', 'is_active', 'date_joined', 'is_superuser',
'last_login']
# for loop to copy all users from one class to the other
for user in User.objects.all():
custom_user = MyUser()
for field in fields:
setattr(custom_user, field, getattr(user, field))
custom_user.save()
# also, groups and permissions should be copied
custom_user.groups.add(*user.groups.all())
custom_user.user_permissions.add(*user.user_permissions.all())
class Migration(migrations.Migration):
dependencies = [
('note', '0003_auto_20150305_0955'),
]
operations = [
migrations.RunPython(copy_old_users)
]
|
<commit_before><commit_msg>Migrate to a custom User class (4)
step4: create an empty migration script
python manage.py makemigrations --empty note
Then edit the script and add a function that will copy the users from
the old users table to the new one.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, transaction
import django.utils.timezone
# the function that will be used by the migration operation
@transaction.atomic
def copy_old_users(apps, schema_editor):
# the default user class
User = apps.get_model("auth", "User")
# my custom user class
MyUser = apps.get_model("note", "MyUser")
# the fields I want to copy from User to MyUser
fields = ['id', 'username', 'email', 'first_name', 'last_name',
'is_staff', 'is_active', 'date_joined', 'is_superuser',
'last_login']
# for loop to copy all users from one class to the other
for user in User.objects.all():
custom_user = MyUser()
for field in fields:
setattr(custom_user, field, getattr(user, field))
custom_user.save()
# also, groups and permissions should be copied
custom_user.groups.add(*user.groups.all())
custom_user.user_permissions.add(*user.user_permissions.all())
class Migration(migrations.Migration):
dependencies = [
('note', '0003_auto_20150305_0955'),
]
operations = [
migrations.RunPython(copy_old_users)
]
|
Migrate to a custom User class (4)
step4: create an empty migration script
python manage.py makemigrations --empty note
Then edit the script and add a function that will copy the users from
the old users table to the new one.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, transaction
import django.utils.timezone
# the function that will be used by the migration operation
@transaction.atomic
def copy_old_users(apps, schema_editor):
# the default user class
User = apps.get_model("auth", "User")
# my custom user class
MyUser = apps.get_model("note", "MyUser")
# the fields I want to copy from User to MyUser
fields = ['id', 'username', 'email', 'first_name', 'last_name',
'is_staff', 'is_active', 'date_joined', 'is_superuser',
'last_login']
# for loop to copy all users from one class to the other
for user in User.objects.all():
custom_user = MyUser()
for field in fields:
setattr(custom_user, field, getattr(user, field))
custom_user.save()
# also, groups and permissions should be copied
custom_user.groups.add(*user.groups.all())
custom_user.user_permissions.add(*user.user_permissions.all())
class Migration(migrations.Migration):
dependencies = [
('note', '0003_auto_20150305_0955'),
]
operations = [
migrations.RunPython(copy_old_users)
]
|
<commit_before><commit_msg>Migrate to a custom User class (4)
step4: create an empty migration script
python manage.py makemigrations --empty note
Then edit the script and add a function that will copy the users from
the old users table to the new one.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations, transaction
import django.utils.timezone
# the function that will be used by the migration operation
@transaction.atomic
def copy_old_users(apps, schema_editor):
# the default user class
User = apps.get_model("auth", "User")
# my custom user class
MyUser = apps.get_model("note", "MyUser")
# the fields I want to copy from User to MyUser
fields = ['id', 'username', 'email', 'first_name', 'last_name',
'is_staff', 'is_active', 'date_joined', 'is_superuser',
'last_login']
# for loop to copy all users from one class to the other
for user in User.objects.all():
custom_user = MyUser()
for field in fields:
setattr(custom_user, field, getattr(user, field))
custom_user.save()
# also, groups and permissions should be copied
custom_user.groups.add(*user.groups.all())
custom_user.user_permissions.add(*user.user_permissions.all())
class Migration(migrations.Migration):
dependencies = [
('note', '0003_auto_20150305_0955'),
]
operations = [
migrations.RunPython(copy_old_users)
]
|
|
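One refinement worth noting for the data migration above: migrations.RunPython also accepts a reverse callable, which lets ./manage.py migrate roll the step back instead of failing as irreversible. For a copy-style migration a no-op reverse is often acceptable (a sketch, assuming the copied rows may simply be left in place on rollback):

def reverse_noop(apps, schema_editor):
    # Intentionally does nothing; copied users are left as-is on rollback.
    pass

# operations = [migrations.RunPython(copy_old_users, reverse_noop)]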
b4a35dc750ca1b9defd955c239fb43cb9a322732
|
print_xsl.py
|
print_xsl.py
|
import logging
import os
from settings import CONVERSIONS, XSL_PATH
logger = logging.getLogger(__name__)
def print_xsl_files():
for parts in CONVERSIONS:
file_path = os.path.join(XSL_PATH, parts[0])
print(file_path)
if '__main__' == __name__:
logging.basicConfig(level=logging.DEBUG)
print_xsl_files()
|
Add print XSL files script
|
Add print XSL files script
|
Python
|
mit
|
AustralianAntarcticDataCentre/metadata_xml_convert,AustralianAntarcticDataCentre/metadata_xml_convert
|
Add print XSL files script
|
import logging
import os
from settings import CONVERSIONS, XSL_PATH
logger = logging.getLogger(__name__)
def print_xsl_files():
for parts in CONVERSIONS:
file_path = os.path.join(XSL_PATH, parts[0])
print(file_path)
if '__main__' == __name__:
logging.basicConfig(level=logging.DEBUG)
print_xsl_files()
|
<commit_before><commit_msg>Add print XSL files script<commit_after>
|
import logging
import os
from settings import CONVERSIONS, XSL_PATH
logger = logging.getLogger(__name__)
def print_xsl_files():
for parts in CONVERSIONS:
file_path = os.path.join(XSL_PATH, parts[0])
print(file_path)
if '__main__' == __name__:
logging.basicConfig(level=logging.DEBUG)
print_xsl_files()
|
Add print XSL files scriptimport logging
import os
from settings import CONVERSIONS, XSL_PATH
logger = logging.getLogger(__name__)
def print_xsl_files():
for parts in CONVERSIONS:
file_path = os.path.join(XSL_PATH, parts[0])
print(file_path)
if '__main__' == __name__:
logging.basicConfig(level=logging.DEBUG)
print_xsl_files()
|
<commit_before><commit_msg>Add print XSL files script<commit_after>import logging
import os
from settings import CONVERSIONS, XSL_PATH
logger = logging.getLogger(__name__)
def print_xsl_files():
for parts in CONVERSIONS:
file_path = os.path.join(XSL_PATH, parts[0])
print(file_path)
if '__main__' == __name__:
logging.basicConfig(level=logging.DEBUG)
print_xsl_files()
|
|
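The script above assumes CONVERSIONS is an iterable whose entries begin with an XSL file name relative to XSL_PATH. A minimal stand-in settings module for running it locally (file names are placeholders, not the project's real configuration):

# settings.py (illustrative only)
XSL_PATH = "xsl"
CONVERSIONS = [
    ("a_to_b.xsl", "a", "b"),  # only parts[0] is used by print_xsl_files
    ("b_to_a.xsl", "b", "a"),
]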
368ade3641c017d534bb42b8c448a9bbdbb39631
|
quilt/pop.py
|
quilt/pop.py
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.command import Command
from quilt.db import Db, Series
from quilt.error import NoAppliedPatch
from quilt.utils import Directory, RollbackPatch, File
class Pop(Command):
def __init__(self, cwd, quilt_pc, quilt_patches):
super(Pop, self).__init__(cwd)
self.quilt_pc = quilt_pc
self.db = Db(quilt_pc)
self.series = Series(quilt_patches)
def _check(self):
if not self.db.exists() or not self.db.patches():
raise NoAppliedPatch(self.db)
def _unapply_patch(self, patch_name):
prefix = os.path.join(self.quilt_pc, patch_name)
timestamp = File(os.path.join(prefix, ".timestamp"))
timestamp.delete_if_exists()
patch = RollbackPatch(self.cwd, prefix)
patch.rollback()
patch.delete_backup()
self.db.remove_patch(patch_name)
refresh = File(prefix + "~refresh")
refresh.delete_if_exists()
def unapply_patch(self, patch_name):
""" Unapply patches up to patch_name. patch_name will end up as top
patch """
self._check()
patches = self.db.patches_after(patch_name)
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
def unapply_top_patch(self):
""" Unapply top patch """
self._check()
patch = self.db.top_patch()
self._unapply_patch(patch)
self.db.save()
def unapply_all(self):
""" Unapply all patches in series file """
self._check()
for patch in reversed(self.db.patches()):
self._unapply_patch(patch)
self.db.save()
|
Implement a Pop class to unapply patches
|
Implement a Pop class to unapply patches
|
Python
|
mit
|
vadmium/python-quilt,bjoernricks/python-quilt
|
Implement a Pop class to unapply patches
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.command import Command
from quilt.db import Db, Series
from quilt.error import NoAppliedPatch
from quilt.utils import Directory, RollbackPatch, File
class Pop(Command):
def __init__(self, cwd, quilt_pc, quilt_patches):
super(Pop, self).__init__(cwd)
self.quilt_pc = quilt_pc
self.db = Db(quilt_pc)
self.series = Series(quilt_patches)
def _check(self):
if not self.db.exists() or not self.db.patches():
raise NoAppliedPatch(self.db)
def _unapply_patch(self, patch_name):
prefix = os.path.join(self.quilt_pc, patch_name)
timestamp = File(os.path.join(prefix, ".timestamp"))
timestamp.delete_if_exists()
patch = RollbackPatch(self.cwd, prefix)
patch.rollback()
patch.delete_backup()
self.db.remove_patch(patch_name)
refresh = File(prefix + "~refresh")
refresh.delete_if_exists()
def unapply_patch(self, patch_name):
""" Unapply patches up to patch_name. patch_name will end up as top
patch """
self._check()
patches = self.db.patches_after(patch_name)
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
def unapply_top_patch(self):
""" Unapply top patch """
self._check()
patch = self.db.top_patch()
self._unapply_patch(patch)
self.db.save()
def unapply_all(self):
""" Unapply all patches in series file """
self._check()
for patch in reversed(self.db.patches()):
self._unapply_patch(patch)
self.db.save()
|
<commit_before><commit_msg>Implement a Pop class to unapply patches<commit_after>
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.command import Command
from quilt.db import Db, Series
from quilt.error import NoAppliedPatch
from quilt.utils import Directory, RollbackPatch, File
class Pop(Command):
def __init__(self, cwd, quilt_pc, quilt_patches):
super(Pop, self).__init__(cwd)
self.quilt_pc = quilt_pc
self.db = Db(quilt_pc)
self.series = Series(quilt_patches)
def _check(self):
if not self.db.exists() or not self.db.patches():
raise NoAppliedPatch(self.db)
def _unapply_patch(self, patch_name):
prefix = os.path.join(self.quilt_pc, patch_name)
timestamp = File(os.path.join(prefix, ".timestamp"))
timestamp.delete_if_exists()
patch = RollbackPatch(self.cwd, prefix)
patch.rollback()
patch.delete_backup()
self.db.remove_patch(patch_name)
refresh = File(prefix + "~refresh")
refresh.delete_if_exists()
def unapply_patch(self, patch_name):
""" Unapply patches up to patch_name. patch_name will end up as top
patch """
self._check()
patches = self.db.patches_after(patch_name)
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
def unapply_top_patch(self):
""" Unapply top patch """
self._check()
patch = self.db.top_patch()
self._unapply_patch(patch)
self.db.save()
def unapply_all(self):
""" Unapply all patches in series file """
self._check()
for patch in reversed(self.db.patches()):
self._unapply_patch(patch)
self.db.save()
|
Implement a Pop class to unapply patches# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.command import Command
from quilt.db import Db, Series
from quilt.error import NoAppliedPatch
from quilt.utils import Directory, RollbackPatch, File
class Pop(Command):
def __init__(self, cwd, quilt_pc, quilt_patches):
super(Pop, self).__init__(cwd)
self.quilt_pc = quilt_pc
self.db = Db(quilt_pc)
self.series = Series(quilt_patches)
def _check(self):
if not self.db.exists() or not self.db.patches():
raise NoAppliedPatch(self.db)
def _unapply_patch(self, patch_name):
prefix = os.path.join(self.quilt_pc, patch_name)
timestamp = File(os.path.join(prefix, ".timestamp"))
timestamp.delete_if_exists()
patch = RollbackPatch(self.cwd, prefix)
patch.rollback()
patch.delete_backup()
self.db.remove_patch(patch_name)
refresh = File(prefix + "~refresh")
refresh.delete_if_exists()
def unapply_patch(self, patch_name):
""" Unapply patches up to patch_name. patch_name will end up as top
patch """
self._check()
patches = self.db.patches_after(patch_name)
for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
def unapply_top_patch(self):
""" Unapply top patch """
self._check()
        patch = self.db.top_patch()
self._unapply_patch(patch)
self.db.save()
def unapply_all(self):
""" Unapply all patches in series file """
self._check()
        for patch in reversed(self.db.patches()):
self._unapply_patch(patch)
self.db.save()
|
<commit_before><commit_msg>Implement a Pop class to unapply patches<commit_after># vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.command import Command
from quilt.db import Db, Series
from quilt.error import NoAppliedPatch
from quilt.utils import Directory, RollbackPatch, File
class Pop(Command):
def __init__(self, cwd, quilt_pc, quilt_patches):
super(Pop, self).__init__(cwd)
self.quilt_pc = quilt_pc
self.db = Db(quilt_pc)
self.series = Series(quilt_patches)
def _check(self):
if not self.db.exists() or not self.db.patches():
raise NoAppliedPatch(self.db)
def _unapply_patch(self, patch_name):
        prefix = os.path.join(self.quilt_pc, patch_name)
timestamp = File(os.path.join(prefix, ".timestamp"))
timestamp.delete_if_exists()
        patch = RollbackPatch(self.cwd, prefix)
patch.rollback()
patch.delete_backup()
self.db.remove_patch(patch_name)
refresh = File(prefix + "~refresh")
refresh.delete_if_exists()
def unapply_patch(self, patch_name):
""" Unapply patches up to patch_name. patch_name will end up as top
patch """
self._check()
patches = self.db.patches_after(patch_name)
        for patch in reversed(patches):
self._unapply_patch(patch)
self.db.save()
def unapply_top_patch(self):
""" Unapply top patch """
self._check()
        patch = self.db.top_patch()
self._unapply_patch(patch)
self.db.save()
def unapply_all(self):
""" Unapply all patches in series file """
self._check()
        for patch in reversed(self.db.patches()):
self._unapply_patch(patch)
self.db.save()
|
|
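A minimal driver sketch for the Pop class above (the import path, directory names, and the ".pc"/"patches" layout are assumptions based on quilt conventions, not taken from this commit):
import os
from quilt.pop import Pop  # assumed import path for the class defined above
cwd = os.getcwd()
pop = Pop(cwd, os.path.join(cwd, ".pc"), os.path.join(cwd, "patches"))
pop.unapply_top_patch()  # roll back only the top applied patch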
ea4294761482d5cf1c6f7c5aeab452f43bfcd1fa
|
tools/detect_stuff.py
|
tools/detect_stuff.py
|
# Ported From: http://docs.opencv.org/3.1.0/d7/d8b/tutorial_py_face_detection.html
import os
import cv2
cascade = cv2.CascadeClassifier('/home/matt/Projects/opencv-junk/classifier/run_two/cascade_xmls/cascade.xml')
img_dir = '/mnt/jam-gui/smb-share:server=jamstation,share=gopro/2017-02-17/HERO4 Session 1/testing_frames'
img_files = [i for i in os.listdir(img_dir) if os.path.splitext(i)[1] == '.jpg']
## Single Detection
# img_file = 'GOPR0195_64000.jpg'
## Double Detection
# img_file = 'GP010195_200.jpg'
## Test that the rec will detect a large positive image used in training the cascade.
# img_dir = '/home/matt/Projects/opencv-sharrow-images/positives'
## LARGE TRAINING POSITIVE, WILL DETECT IN CURRENT CASCADE
# img_file = '10.jpg'
## SMALL TRAINING POSITIVE, WON'T DETECT IN CURRENT CASCADE
# img_file = 'GP010194_71100.jpg'
for img_file in img_files:
img = cv2.imread(os.path.join(img_dir, img_file))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) > 0:
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
roi_gray = gray[y:y + h, x:x + w]
roi_color = img[y:y + h, x:x + w]
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
Add simple script to display positive detections
|
Add simple script to display positive detections
|
Python
|
mit
|
mattmakesmaps/opencv-junk
|
Add simple script to display positive detections
|
# Ported From: http://docs.opencv.org/3.1.0/d7/d8b/tutorial_py_face_detection.html
import os
import cv2
cascade = cv2.CascadeClassifier('/home/matt/Projects/opencv-junk/classifier/run_two/cascade_xmls/cascade.xml')
img_dir = '/mnt/jam-gui/smb-share:server=jamstation,share=gopro/2017-02-17/HERO4 Session 1/testing_frames'
img_files = [i for i in os.listdir(img_dir) if os.path.splitext(i)[1] == '.jpg']
## Single Detection
# img_file = 'GOPR0195_64000.jpg'
## Double Detection
# img_file = 'GP010195_200.jpg'
## Test that the rec will detect a large positive image used in training the cascade.
# img_dir = '/home/matt/Projects/opencv-sharrow-images/positives'
## LARGE TRAINING POSITIVE, WILL DETECT IN CURRENT CASCADE
# img_file = '10.jpg'
## SMALL TRAINING POSITIVE, WON'T DETECT IN CURRENT CASCADE
# img_file = 'GP010194_71100.jpg'
for img_file in img_files:
img = cv2.imread(os.path.join(img_dir, img_file))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) > 0:
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
roi_gray = gray[y:y + h, x:x + w]
roi_color = img[y:y + h, x:x + w]
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add simple script to display positive detections<commit_after>
|
# Ported From: http://docs.opencv.org/3.1.0/d7/d8b/tutorial_py_face_detection.html
import os
import cv2
cascade = cv2.CascadeClassifier('/home/matt/Projects/opencv-junk/classifier/run_two/cascade_xmls/cascade.xml')
img_dir = '/mnt/jam-gui/smb-share:server=jamstation,share=gopro/2017-02-17/HERO4 Session 1/testing_frames'
img_files = [i for i in os.listdir(img_dir) if os.path.splitext(i)[1] == '.jpg']
## Single Detection
# img_file = 'GOPR0195_64000.jpg'
## Double Detection
# img_file = 'GP010195_200.jpg'
## Test that the rec will detect a large positive image used in training the cascade.
# img_dir = '/home/matt/Projects/opencv-sharrow-images/positives'
## LARGE TRAINING POSITIVE, WILL DETECT IN CURRENT CASCADE
# img_file = '10.jpg'
## SMALL TRAINING POSITIVE, WON'T DETECT IN CURRENT CASCADE
# img_file = 'GP010194_71100.jpg'
for img_file in img_files:
img = cv2.imread(os.path.join(img_dir, img_file))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) > 0:
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
roi_gray = gray[y:y + h, x:x + w]
roi_color = img[y:y + h, x:x + w]
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
Add simple script to display positive detections# Ported From: http://docs.opencv.org/3.1.0/d7/d8b/tutorial_py_face_detection.html
import os
import cv2
cascade = cv2.CascadeClassifier('/home/matt/Projects/opencv-junk/classifier/run_two/cascade_xmls/cascade.xml')
img_dir = '/mnt/jam-gui/smb-share:server=jamstation,share=gopro/2017-02-17/HERO4 Session 1/testing_frames'
img_files = [i for i in os.listdir(img_dir) if os.path.splitext(i)[1] == '.jpg']
## Single Detection
# img_file = 'GOPR0195_64000.jpg'
## Double Detection
# img_file = 'GP010195_200.jpg'
## Test that the rec will detect a large positive image used in training the cascade.
# img_dir = '/home/matt/Projects/opencv-sharrow-images/positives'
## LARGE TRAINING POSITIVE, WILL DETECT IN CURRENT CASCADE
# img_file = '10.jpg'
## SMALL TRAINING POSITIVE, WON'T DETECT IN CURRENT CASCADE
# img_file = 'GP010194_71100.jpg'
for img_file in img_files:
img = cv2.imread(os.path.join(img_dir, img_file))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) > 0:
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
roi_gray = gray[y:y + h, x:x + w]
roi_color = img[y:y + h, x:x + w]
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add simple script to display positive detections<commit_after># Ported From: http://docs.opencv.org/3.1.0/d7/d8b/tutorial_py_face_detection.html
import os
import cv2
cascade = cv2.CascadeClassifier('/home/matt/Projects/opencv-junk/classifier/run_two/cascade_xmls/cascade.xml')
img_dir = '/mnt/jam-gui/smb-share:server=jamstation,share=gopro/2017-02-17/HERO4 Session 1/testing_frames'
img_files = [i for i in os.listdir(img_dir) if os.path.splitext(i)[1] == '.jpg']
## Single Detection
# img_file = 'GOPR0195_64000.jpg'
## Double Detection
# img_file = 'GP010195_200.jpg'
## Test that the rec will detect a large positive image used in training the cascade.
# img_dir = '/home/matt/Projects/opencv-sharrow-images/positives'
## LARGE TRAINING POSITIVE, WILL DETECT IN CURRENT CASCADE
# img_file = '10.jpg'
## SMALL TRAINING POSITIVE, WON'T DETECT IN CURRENT CASCADE
# img_file = 'GP010194_71100.jpg'
for img_file in img_files:
img = cv2.imread(os.path.join(img_dir, img_file))
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = cascade.detectMultiScale(gray, 1.3, 5)
if len(faces) > 0:
for (x, y, w, h) in faces:
cv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 3)
roi_gray = gray[y:y + h, x:x + w]
roi_color = img[y:y + h, x:x + w]
cv2.imshow('img', img)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
|
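The positional 1.3 and 5 passed to detectMultiScale above are scaleFactor and minNeighbors; a hedged sketch with the parameters named explicitly (paths and values are illustrative only):
import cv2
cascade = cv2.CascadeClassifier('cascade.xml')  # illustrative path
gray = cv2.imread('frame.jpg', cv2.IMREAD_GRAYSCALE)
# A smaller scaleFactor scans more pyramid levels (slower, more candidate hits);
# a larger minNeighbors requires more overlapping detections, cutting false positives.
hits = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=8)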
6a2313efdf440e0c73d4c40898e9d36c5949d044
|
museum_site/migrations/0003_auto_20211028_1858.py
|
museum_site/migrations/0003_auto_20211028_1858.py
|
# Generated by Django 3.2.7 on 2021-10-28 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0002_upload_announced'),
]
operations = [
migrations.AlterField(
model_name='file',
name='author',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='file',
name='genre',
field=models.CharField(blank=True, default='', max_length=255),
),
]
|
Increase max length of author and genre fields
|
Increase max length of author and genre fields
|
Python
|
mit
|
DrDos0016/z2,DrDos0016/z2,DrDos0016/z2
|
Increase max length of author and genre fields
|
# Generated by Django 3.2.7 on 2021-10-28 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0002_upload_announced'),
]
operations = [
migrations.AlterField(
model_name='file',
name='author',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='file',
name='genre',
field=models.CharField(blank=True, default='', max_length=255),
),
]
|
<commit_before><commit_msg>Increase max length of author and genre fields<commit_after>
|
# Generated by Django 3.2.7 on 2021-10-28 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0002_upload_announced'),
]
operations = [
migrations.AlterField(
model_name='file',
name='author',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='file',
name='genre',
field=models.CharField(blank=True, default='', max_length=255),
),
]
|
Increase max length of author and genre fields# Generated by Django 3.2.7 on 2021-10-28 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0002_upload_announced'),
]
operations = [
migrations.AlterField(
model_name='file',
name='author',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='file',
name='genre',
field=models.CharField(blank=True, default='', max_length=255),
),
]
|
<commit_before><commit_msg>Increase max length of author and genre fields<commit_after># Generated by Django 3.2.7 on 2021-10-28 18:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('museum_site', '0002_upload_announced'),
]
operations = [
migrations.AlterField(
model_name='file',
name='author',
field=models.CharField(max_length=255),
),
migrations.AlterField(
model_name='file',
name='genre',
field=models.CharField(blank=True, default='', max_length=255),
),
]
|
|
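For context, a model change along these lines would generate the two AlterField operations above (field names come from the migration; the rest of the model body is an assumption):
from django.db import models
class File(models.Model):
    # max_length raised to 255, matching the AlterField operations
    author = models.CharField(max_length=255)
    genre = models.CharField(max_length=255, blank=True, default='')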
b36eb09bb85bb4eee0db9669745e0c1adc244980
|
pavement.py
|
pavement.py
|
from paver.easy import *
config = """# replace pass with values you would like to overwrite from DefaultConfig in
# default_config.py. Values you do not explicitly overwrite will be inherited
# from DefaultConfig. At the very least, you must set secret_key and
# tmdb_api_key.
from default_config import DefaultConfig
class Config(DefaultConfig):
pass
"""
@task
def setup():
"""Writes a default config to config.py"""
print('Writing default config.')
f = open('config.py', 'w')
f.write(config)
f.close()
|
Write default config with paver.
|
Write default config with paver.
|
Python
|
mit
|
simon-andrews/movieman2,simon-andrews/movieman2
|
Write default config with paver.
|
from paver.easy import *
config = """# replace pass with values you would like to overwrite from DefaultConfig in
# default_config.py. Values you do not explicitly overwrite will be inherited
# from DefaultConfig. At the very least, you must set secret_key and
# tmdb_api_key.
from default_config import DefaultConfig
class Config(DefaultConfig):
pass
"""
@task
def setup():
"""Writes a default config to config.py"""
print('Writing default config.')
f = open('config.py', 'w')
f.write(config)
f.close()
|
<commit_before><commit_msg>Write default config with paver.<commit_after>
|
from paver.easy import *
config = """# replace pass with values you would like to overwrite from DefaultConfig in
# default_config.py. Values you do not explicitly overwrite will be inherited
# from DefaultConfig. At the very least, you must set secret_key and
# tmdb_api_key.
from default_config import DefaultConfig
class Config(DefaultConfig):
pass
"""
@task
def setup():
"""Writes a default config to config.py"""
print('Writing default config.')
f = open('config.py', 'w')
f.write(config)
f.close()
|
Write default config with paver.from paver.easy import *
config = """# replace pass with values you would like to overwrite from DefaultConfig in
# default_config.py. Values you do not explicitly overwrite will be inherited
# from DefaultConfig. At the very least, you must set secret_key and
# tmdb_api_key.
from default_config import DefaultConfig
class Config(DefaultConfig):
pass
"""
@task
def setup():
"""Writes a default config to config.py"""
print('Writing default config.')
f = open('config.py', 'w')
f.write(config)
f.close()
|
<commit_before><commit_msg>Write default config with paver.<commit_after>from paver.easy import *
config = """# replace pass with values you would like to overwrite from DefaultConfig in
# default_config.py. Values you do not explicitly overwrite will be inherited
# from DefaultConfig. At the very least, you must set secret_key and
# tmdb_api_key.
from default_config import DefaultConfig
class Config(DefaultConfig):
pass
"""
@task
def setup():
"""Writes a default config to config.py"""
print('Writing default config.')
f = open('config.py', 'w')
f.write(config)
f.close()
|
|
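The task above is invoked as "paver setup" from the directory holding pavement.py; a hedged check of its effect (assumes the task has already been run):
import os
# setup() writes config.py next to pavement.py, overwriting any existing file.
assert os.path.exists('config.py')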
460db2ca2fb55adc5ae67516f0e1af4c42898080
|
tests/test_frames.py
|
tests/test_frames.py
|
from . import TheInternetTestCase
from helium.api import click, Text
class FramesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/frames"
def test_nested_frames(self):
click("Nested Frames")
self.assertTrue(Text("LEFT").exists())
self.assertTrue(Text("MIDDLE").exists())
self.assertTrue(Text("RIGHT").exists())
self.assertTrue(Text("BOTTOM").exists())
def test_iframe(self):
click("iFrame")
self.assertTrue(Text("Your content goes here.").exists())
|
Add test case for frames.
|
Add test case for frames.
|
Python
|
mit
|
bugfree-software/the-internet-solution-python
|
Add test case for frames.
|
from . import TheInternetTestCase
from helium.api import click, Text
class FramesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/frames"
def test_nested_frames(self):
click("Nested Frames")
self.assertTrue(Text("LEFT").exists())
self.assertTrue(Text("MIDDLE").exists())
self.assertTrue(Text("RIGHT").exists())
self.assertTrue(Text("BOTTOM").exists())
def test_iframe(self):
click("iFrame")
self.assertTrue(Text("Your content goes here.").exists())
|
<commit_before><commit_msg>Add test case for frames.<commit_after>
|
from . import TheInternetTestCase
from helium.api import click, Text
class FramesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/frames"
def test_nested_frames(self):
click("Nested Frames")
self.assertTrue(Text("LEFT").exists())
self.assertTrue(Text("MIDDLE").exists())
self.assertTrue(Text("RIGHT").exists())
self.assertTrue(Text("BOTTOM").exists())
def test_iframe(self):
click("iFrame")
self.assertTrue(Text("Your content goes here.").exists())
|
Add test case for frames.from . import TheInternetTestCase
from helium.api import click, Text
class FramesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/frames"
def test_nested_frames(self):
click("Nested Frames")
self.assertTrue(Text("LEFT").exists())
self.assertTrue(Text("MIDDLE").exists())
self.assertTrue(Text("RIGHT").exists())
self.assertTrue(Text("BOTTOM").exists())
def test_iframe(self):
click("iFrame")
self.assertTrue(Text("Your content goes here.").exists())
|
<commit_before><commit_msg>Add test case for frames.<commit_after>from . import TheInternetTestCase
from helium.api import click, Text
class FramesTest(TheInternetTestCase):
def get_page(self):
return "http://the-internet.herokuapp.com/frames"
def test_nested_frames(self):
click("Nested Frames")
self.assertTrue(Text("LEFT").exists())
self.assertTrue(Text("MIDDLE").exists())
self.assertTrue(Text("RIGHT").exists())
self.assertTrue(Text("BOTTOM").exists())
def test_iframe(self):
click("iFrame")
self.assertTrue(Text("Your content goes here.").exists())
|
|
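TheInternetTestCase itself is not shown in this record; a hedged reconstruction of the contract these tests rely on (helium opens the page returned by get_page in setUp and searches frames automatically):
from unittest import TestCase
from helium.api import start_chrome, kill_browser
class TheInternetTestCase(TestCase):
    """Hypothetical sketch of the imported base class, not the repo's actual code."""
    def setUp(self):
        start_chrome(self.get_page())  # open the page under test
    def tearDown(self):
        kill_browser()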
a5f380db22e20265b4d543827f052300b2fb3fa4
|
tests/test_choose.py
|
tests/test_choose.py
|
from tests.base import IntegrationTest
from time import sleep
class TestChooseProject(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('2gg')
sleep(1)
self.command("TaskWikiChooseProject")
sleep(1)
self.client.normal('5gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(1)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == "Home"
class TestChooseProjectUnset(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == None
assert self.tasks[1]['project'] == None
class TestChooseProjectCanceled(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("q")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == None
|
Add tests for TaskWikiChooseProject command
|
tests: Add tests for TaskWikiChooseProject command
|
Python
|
mit
|
Spirotot/taskwiki,phha/taskwiki
|
tests: Add tests for TaskWikiChooseProject command
|
from tests.base import IntegrationTest
from time import sleep
class TestChooseProject(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('2gg')
sleep(1)
self.command("TaskWikiChooseProject")
sleep(1)
self.client.normal('5gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(1)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == "Home"
class TestChooseProjectUnset(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == None
assert self.tasks[1]['project'] == None
class TestChooseProjectCanceled(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("q")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == None
|
<commit_before><commit_msg>tests: Add tests for TaskWikiChooseProject command<commit_after>
|
from tests.base import IntegrationTest
from time import sleep
class TestChooseProject(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('2gg')
sleep(1)
self.command("TaskWikiChooseProject")
sleep(1)
self.client.normal('5gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(1)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == "Home"
class TestChooseProjectUnset(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == None
assert self.tasks[1]['project'] == None
class TestChooseProjectCanceled(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("q")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == None
|
tests: Add tests for TaskWikiChooseProject commandfrom tests.base import IntegrationTest
from time import sleep
class TestChooseProject(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('2gg')
sleep(1)
self.command("TaskWikiChooseProject")
sleep(1)
self.client.normal('5gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(1)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == "Home"
class TestChooseProjectUnset(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == None
assert self.tasks[1]['project'] == None
class TestChooseProjectCanceled(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("q")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == None
|
<commit_before><commit_msg>tests: Add tests for TaskWikiChooseProject command<commit_after>from tests.base import IntegrationTest
from time import sleep
class TestChooseProject(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('2gg')
sleep(1)
self.command("TaskWikiChooseProject")
sleep(1)
self.client.normal('5gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(1)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == "Home"
class TestChooseProjectUnset(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("\\<CR>")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == None
assert self.tasks[1]['project'] == None
class TestChooseProjectCanceled(IntegrationTest):
viminput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
vimoutput = """
* [ ] test task 1 #{uuid}
* [ ] test task 2 #{uuid}
"""
tasks = [
dict(description="test task 1", project="Home"),
dict(description="test task 2"),
]
def execute(self):
self.client.normal('1gg')
sleep(0.5)
self.command("TaskWikiChooseProject")
sleep(0.5)
self.client.normal('4gg')
sleep(0.5)
self.client.feedkeys("q")
sleep(0.5)
for task in self.tasks:
task.refresh()
assert self.tasks[0]['project'] == "Home"
assert self.tasks[1]['project'] == None
|
|
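The fixed sleep() calls above make the tests timing-sensitive; a hedged sketch of a poll-until helper that could replace them (not part of the commit):
import time
def wait_until(predicate, timeout=5.0, interval=0.1):
    """Poll predicate until it returns a truthy value or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False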
b30644799afb03ba6985b3e6d135d08e0db3d697
|
register.py
|
register.py
|
# -*- coding: utf-8 -*-
#
# register.py
#
# purpose: Automagically creates a Rst README.txt
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 10-Apr-2014
# modified: Fri 11 Apr 2014 12:10:43 AM BRT
#
# obs: https://coderwall.com/p/qawuyq
#
import os
import pandoc
home = os.path.expanduser("~")
pandoc.core.PANDOC_PATH = os.path.join(home, 'bin', 'pandoc')
doc = pandoc.Document()
doc.markdown = open('README.md').read()
with open('README.txt', 'w+') as f:
f.write(doc.rst)
# Some modifications are needed to README.txt before registering. Rendering this
# part useless...
if False:
os.system("python2 setup.py register")
os.remove('README.txt')
|
Convert README from Markdown to rst.
|
Convert README from Markdown to rst.
|
Python
|
bsd-3-clause
|
pyoceans/python-oceans,ocefpaf/python-oceans
|
Convert README from Markdown to rst.
|
# -*- coding: utf-8 -*-
#
# register.py
#
# purpose: Automagically creates a Rst README.txt
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 10-Apr-2014
# modified: Fri 11 Apr 2014 12:10:43 AM BRT
#
# obs: https://coderwall.com/p/qawuyq
#
import os
import pandoc
home = os.path.expanduser("~")
pandoc.core.PANDOC_PATH = os.path.join(home, 'bin', 'pandoc')
doc = pandoc.Document()
doc.markdown = open('README.md').read()
with open('README.txt', 'w+') as f:
f.write(doc.rst)
# Some modifications are needed to README.txt before registering. Rendering this
# part useless...
if False:
os.system("python2 setup.py register")
os.remove('README.txt')
|
<commit_before><commit_msg>Convert README from Markdown to rst.<commit_after>
|
# -*- coding: utf-8 -*-
#
# register.py
#
# purpose: Automagically creates a Rst README.txt
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 10-Apr-2014
# modified: Fri 11 Apr 2014 12:10:43 AM BRT
#
# obs: https://coderwall.com/p/qawuyq
#
import os
import pandoc
home = os.path.expanduser("~")
pandoc.core.PANDOC_PATH = os.path.join(home, 'bin', 'pandoc')
doc = pandoc.Document()
doc.markdown = open('README.md').read()
with open('README.txt', 'w+') as f:
f.write(doc.rst)
# Some modifications are needed to README.txt before registering. Rendering this
# part useless...
if False:
os.system("python2 setup.py register")
os.remove('README.txt')
|
Convert README from Markdown to rst.# -*- coding: utf-8 -*-
#
# register.py
#
# purpose: Automagically creates a Rst README.txt
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 10-Apr-2014
# modified: Fri 11 Apr 2014 12:10:43 AM BRT
#
# obs: https://coderwall.com/p/qawuyq
#
import os
import pandoc
home = os.path.expanduser("~")
pandoc.core.PANDOC_PATH = os.path.join(home, 'bin', 'pandoc')
doc = pandoc.Document()
doc.markdown = open('README.md').read()
with open('README.txt', 'w+') as f:
f.write(doc.rst)
# Some modifications are needed to README.txt before registering. Rendering this
# part useless...
if False:
os.system("python2 setup.py register")
os.remove('README.txt')
|
<commit_before><commit_msg>Convert README from Markdown to rst.<commit_after># -*- coding: utf-8 -*-
#
# register.py
#
# purpose: Automagically creates a Rst README.txt
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 10-Apr-2014
# modified: Fri 11 Apr 2014 12:10:43 AM BRT
#
# obs: https://coderwall.com/p/qawuyq
#
import os
import pandoc
home = os.path.expanduser("~")
pandoc.core.PANDOC_PATH = os.path.join(home, 'bin', 'pandoc')
doc = pandoc.Document()
doc.markdown = open('README.md').read()
with open('README.txt', 'w+') as f:
f.write(doc.rst)
# Some modifications are needed to README.txt before registering. Rendering this
# part useless...
if False:
os.system("python2 setup.py register")
os.remove('README.txt')
|
|
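A present-day equivalent usually reaches for pypandoc instead of pointing pyandoc at the binary by hand; a hedged sketch (assumes pypandoc is installed and can locate pandoc):
import pypandoc
# Convert the Markdown README to reST in one call; pypandoc finds pandoc itself.
rst = pypandoc.convert_file('README.md', 'rst')
with open('README.txt', 'w') as f:
    f.write(rst)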
5a59094e58e3389bd2f182b080e065c4a709f8f9
|
tests/test_turing.py
|
tests/test_turing.py
|
from unittest import TestCase, expectedFailure
class TuringTests(TestCase):
def setUp(self):
from chatterbot import ChatBot
self.chatbot = ChatBot('Agent Jr.')
@expectedFailure
def test_ask_name(self):
response = self.chatbot.get_response(
'What is your name?'
)
self.assertIn('Agent', response.text)
@expectedFailure
def test_repeat_information(self):
"""
Test if we can detect any repeat responses from the agent.
"""
self.fail('Condition not met.')
@expectedFailure
def test_repeat_input(self):
"""
Test what the responses are like if we keep giving the same input.
"""
self.fail('Condition not met.')
@expectedFailure
def test_contradicting_responses(self):
"""
Test if we can get the agent to contradict themselves.
"""
self.fail('Condition not met.')
@expectedFailure
def test_mathematical_ability(self):
"""
The math questions inherently suggest that the agent
should get some math problems wrong in order to seem
more human. My view on this is that it is more useful
to have a bot that is good at math, which could just
as easily be a human.
"""
self.fail('Condition not met.')
@expectedFailure
def test_response_time(self):
"""
Does the agent respond in a realistic amount of time?
"""
self.fail('Condition not met.')
|
Add very basic turing tests
|
Add very basic turing tests
|
Python
|
bsd-3-clause
|
vkosuri/ChatterBot,gunthercox/ChatterBot
|
Add very basic turing tests
|
from unittest import TestCase, expectedFailure
class TuringTests(TestCase):
def setUp(self):
from chatterbot import ChatBot
self.chatbot = ChatBot('Agent Jr.')
@expectedFailure
def test_ask_name(self):
response = self.chatbot.get_response(
'What is your name?'
)
self.assertIn('Agent', response.text)
@expectedFailure
def test_repeat_information(self):
"""
Test if we can detect any repeat responses from the agent.
"""
self.fail('Condition not met.')
@expectedFailure
def test_repeat_input(self):
"""
Test what the responses are like if we keep giving the same input.
"""
self.fail('Condition not met.')
@expectedFailure
def test_contradicting_responses(self):
"""
Test if we can get the agent to contradict themselves.
"""
self.fail('Condition not met.')
@expectedFailure
def test_mathematical_ability(self):
"""
The math questions inherently suggest that the agent
should get some math problems wrong in order to seem
more human. My view on this is that it is more useful
to have a bot that is good at math, which could just
as easily be a human.
"""
self.fail('Condition not met.')
@expectedFailure
def test_response_time(self):
"""
Does the agent respond in a realistic amount of time?
"""
self.fail('Condition not met.')
|
<commit_before><commit_msg>Add very basic turing tests<commit_after>
|
from unittest import TestCase, expectedFailure
class TuringTests(TestCase):
def setUp(self):
from chatterbot import ChatBot
self.chatbot = ChatBot('Agent Jr.')
@expectedFailure
def test_ask_name(self):
response = self.chatbot.get_response(
'What is your name?'
)
self.assertIn('Agent', response.text)
@expectedFailure
def test_repeat_information(self):
"""
Test if we can detect any repeat responses from the agent.
"""
self.fail('Condition not met.')
@expectedFailure
def test_repeat_input(self):
"""
Test what the responses are like if we keep giving the same input.
"""
self.fail('Condition not met.')
@expectedFailure
def test_contradicting_responses(self):
"""
Test if we can get the agent to contradict themselves.
"""
self.fail('Condition not met.')
@expectedFailure
def test_mathematical_ability(self):
"""
The math questions inherently suggest that the agent
should get some math problems wrong in order to seem
more human. My view on this is that it is more useful
to have a bot that is good at math, which could just
as easily be a human.
"""
self.fail('Condition not met.')
@expectedFailure
def test_response_time(self):
"""
Does the agent respond in a realistic amount of time?
"""
self.fail('Condition not met.')
|
Add very basic turing testsfrom unittest import TestCase, expectedFailure
class TuringTests(TestCase):
def setUp(self):
from chatterbot import ChatBot
self.chatbot = ChatBot('Agent Jr.')
@expectedFailure
def test_ask_name(self):
response = self.chatbot.get_response(
'What is your name?'
)
self.assertIn('Agent', response.text)
@expectedFailure
def test_repeat_information(self):
"""
Test if we can detect any repeat responses from the agent.
"""
self.fail('Condition not met.')
@expectedFailure
def test_repeat_input(self):
"""
Test what the responses are like if we keep giving the same input.
"""
self.fail('Condition not met.')
@expectedFailure
def test_contradicting_responses(self):
"""
Test if we can get the agent to contradict themselves.
"""
self.fail('Condition not met.')
@expectedFailure
def test_mathematical_ability(self):
"""
The math questions inherently suggest that the agent
should get some math problems wrong in order to seem
more human. My view on this is that it is more useful
to have a bot that is good at math, which could just
as easily be a human.
"""
self.fail('Condition not met.')
@expectedFailure
def test_response_time(self):
"""
Does the agent respond in a realistic amount of time?
"""
self.fail('Condition not met.')
|
<commit_before><commit_msg>Add very basic turing tests<commit_after>from unittest import TestCase, expectedFailure
class TuringTests(TestCase):
def setUp(self):
from chatterbot import ChatBot
self.chatbot = ChatBot('Agent Jr.')
@expectedFailure
def test_ask_name(self):
response = self.chatbot.get_response(
'What is your name?'
)
self.assertIn('Agent', response.text)
@expectedFailure
def test_repeat_information(self):
"""
Test if we can detect any repeat responses from the agent.
"""
self.fail('Condition not met.')
@expectedFailure
def test_repeat_input(self):
"""
Test what the responses are like if we keep giving the same input.
"""
self.fail('Condition not met.')
@expectedFailure
def test_contradicting_responses(self):
"""
Test if we can get the agent to contradict themselves.
"""
self.fail('Condition not met.')
@expectedFailure
def test_mathematical_ability(self):
"""
The math questions inherently suggest that the agent
should get some math problems wrong in order to seem
more human. My view on this is that it is more useful
to have a bot that is good at math, which could just
as easily be a human.
"""
self.fail('Condition not met.')
@expectedFailure
def test_response_time(self):
"""
Does the agent respond in a realistic amount of time?
"""
self.fail('Condition not met.')
|
|
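unittest's expectedFailure decorator, used throughout the tests above, inverts the outcome: a failing body is reported as an expected failure rather than an error; a minimal demonstration:
import unittest
class Demo(unittest.TestCase):
    @unittest.expectedFailure
    def test_condition_not_met_yet(self):
        self.fail('Condition not met.')  # reported as "expected failure", not FAILED
if __name__ == '__main__':
    unittest.main()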
dceae6725d10a5d1af6287e1b684c651683d1750
|
runtests.py
|
runtests.py
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
import django
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'django.db.backends.postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'django.db.backends.sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASES=dict(default=dict(ENGINE=db_engine, NAME=db_name)),
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.utils import get_runner
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
Allow tests to be run on 1.4
|
Allow tests to be run on 1.4
|
Python
|
mit
|
jayfk/django-generic-m2m,jayfk/django-generic-m2m,coleifer/django-generic-m2m,coleifer/django-generic-m2m,coleifer/django-generic-m2m
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
Allow tests to be run on 1.4
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
import django
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'django.db.backends.postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'django.db.backends.sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASES=dict(default=dict(ENGINE=db_engine, NAME=db_name)),
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.utils import get_runner
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
<commit_before>#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
<commit_msg>Allow tests to be run on 1.4<commit_after>
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
import django
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'django.db.backends.postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'django.db.backends.sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASES=dict(default=dict(ENGINE=db_engine, NAME=db_name)),
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.utils import get_runner
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
Allow tests to be run on 1.4#!/usr/bin/env python
import sys
from os.path import dirname, abspath
import django
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'django.db.backends.postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'django.db.backends.sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASES=dict(default=dict(ENGINE=db_engine, NAME=db_name)),
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.utils import get_runner
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
<commit_before>#!/usr/bin/env python
import sys
from os.path import dirname, abspath
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASE_ENGINE = db_engine,
DATABASE_NAME = db_name,
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.simple import run_tests
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
failures = run_tests(test_args, verbosity=1, interactive=True)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
<commit_msg>Allow tests to be run on 1.4<commit_after>#!/usr/bin/env python
import sys
from os.path import dirname, abspath
import django
from django.conf import settings
if len(sys.argv) > 1 and 'postgres' in sys.argv:
sys.argv.remove('postgres')
db_engine = 'django.db.backends.postgresql_psycopg2'
db_name = 'test_main'
else:
db_engine = 'django.db.backends.sqlite3'
db_name = ''
if not settings.configured:
settings.configure(
DATABASES=dict(default=dict(ENGINE=db_engine, NAME=db_name)),
INSTALLED_APPS = [
'django.contrib.contenttypes',
'genericm2m',
'genericm2m.genericm2m_tests',
],
)
from django.test.utils import get_runner
def runtests(*test_args):
if not test_args:
test_args = ['genericm2m_tests']
parent = dirname(abspath(__file__))
sys.path.insert(0, parent)
TestRunner = get_runner(settings)
test_runner = TestRunner(verbosity=1, interactive=True)
failures = test_runner.run_tests(test_args)
sys.exit(failures)
if __name__ == '__main__':
runtests(*sys.argv[1:])
|
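On Django 1.7 and later the same pattern additionally needs django.setup() after configure() to populate the app registry; a hedged fragment (database and app list are illustrative):
import django
from django.conf import settings
settings.configure(
    DATABASES={'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'}},
    INSTALLED_APPS=['django.contrib.contenttypes'],
)
django.setup()  # required from Django 1.7 onward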
08fe04425cfb92a65bfededc85ed372188c6042e
|
python/tests/test_ctypes.py
|
python/tests/test_ctypes.py
|
from ctypes import CDLL, sizeof, create_string_buffer
def test_hello_world(workspace):
workspace.src('greeting.c', r"""
#include <stdio.h>
void greet(char *somebody) {
printf("Hello, %s!\n", somebody);
}
""")
workspace.src('hello.py', r"""
import ctypes
lib = ctypes.CDLL('./greeting.so') # leading ./ is required
lib.greet(b'World')
""")
# -fPIC: Position Independent Code, -shared: shared object (so)
workspace.run('gcc -fPIC -shared -o greeting.so greeting.c')
r = workspace.run('python hello.py')
assert r.out == 'Hello, World!'
def test_mutable_buffer(workspace):
workspace.src('mylib.c', r"""\
#include <ctype.h>
void upper(char *chars, int len) {
for (int i = 0; i <= len; i++)
*(chars + i) = toupper(*(chars + i));
}
""")
workspace.run('gcc -fPIC -shared -o mylib.so mylib.c')
chars = b'abc123'
buffer = create_string_buffer(chars)
assert sizeof(buffer) == 7 # len(chars) + 1 (NUL-terminated)
assert buffer.raw == b'abc123\x00' # raw: memory block content
assert buffer.value == b'abc123' # value: as NUL-terminated string
lib = CDLL('./mylib.so')
lib.upper(buffer, len(chars))
assert buffer.value == b'ABC123' # changed in-place
assert chars == b'abc123' # unchanged
|
Call functions in C library, mutable buffer
|
[python] Call functions in C library, mutable buffer
|
Python
|
mit
|
imsardine/learning,imsardine/learning,imsardine/learning,imsardine/learning,imsardine/learning,imsardine/learning,imsardine/learning
|
[python] Call functions in C library, mutable buffer
|
from ctypes import CDLL, sizeof, create_string_buffer
def test_hello_world(workspace):
workspace.src('greeting.c', r"""
#include <stdio.h>
void greet(char *somebody) {
printf("Hello, %s!\n", somebody);
}
""")
workspace.src('hello.py', r"""
import ctypes
lib = ctypes.CDLL('./greeting.so') # leading ./ is required
lib.greet(b'World')
""")
# -fPIC: Position Independent Code, -shared: shared object (so)
workspace.run('gcc -fPIC -shared -o greeting.so greeting.c')
r = workspace.run('python hello.py')
assert r.out == 'Hello, World!'
def test_mutable_buffer(workspace):
workspace.src('mylib.c', r"""\
#include <ctype.h>
void upper(char *chars, int len) {
for (int i = 0; i <= len; i++)
*(chars + i) = toupper(*(chars + i));
}
""")
workspace.run('gcc -fPIC -shared -o mylib.so mylib.c')
chars = b'abc123'
buffer = create_string_buffer(chars)
assert sizeof(buffer) == 7 # len(chars) + 1 (NUL-terminated)
assert buffer.raw == b'abc123\x00' # raw: memory block content
assert buffer.value == b'abc123' # value: as NUL-terminated string
lib = CDLL('./mylib.so')
lib.upper(buffer, len(chars))
assert buffer.value == b'ABC123' # changed in-place
assert chars == b'abc123' # unchanged
|
<commit_before><commit_msg>[python] Call functions in C library, mutable buffer<commit_after>
|
from ctypes import CDLL, sizeof, create_string_buffer
def test_hello_world(workspace):
workspace.src('greeting.c', r"""
#include <stdio.h>
void greet(char *somebody) {
printf("Hello, %s!\n", somebody);
}
""")
workspace.src('hello.py', r"""
import ctypes
lib = ctypes.CDLL('./greeting.so') # leading ./ is required
lib.greet(b'World')
""")
# -fPIC: Position Independent Code, -shared: shared object (so)
workspace.run('gcc -fPIC -shared -o greeting.so greeting.c')
r = workspace.run('python hello.py')
assert r.out == 'Hello, World!'
def test_mutable_buffer(workspace):
workspace.src('mylib.c', r"""\
#include <ctype.h>
void upper(char *chars, int len) {
for (int i = 0; i <= len; i++)
*(chars + i) = toupper(*(chars + i));
}
""")
workspace.run('gcc -fPIC -shared -o mylib.so mylib.c')
chars = b'abc123'
buffer = create_string_buffer(chars)
assert sizeof(buffer) == 7 # len(chars) + 1 (NUL-terminated)
assert buffer.raw == b'abc123\x00' # raw: memory block content
assert buffer.value == b'abc123' # value: as NUL-terminated string
lib = CDLL('./mylib.so')
lib.upper(buffer, len(chars))
assert buffer.value == b'ABC123' # changed in-place
assert chars == b'abc123' # unchanged
|
[python] Call functions in C library, mutable bufferfrom ctypes import CDLL, sizeof, create_string_buffer
def test_hello_world(workspace):
workspace.src('greeting.c', r"""
#include <stdio.h>
void greet(char *somebody) {
printf("Hello, %s!\n", somebody);
}
""")
workspace.src('hello.py', r"""
import ctypes
lib = ctypes.CDLL('./greeting.so') # leading ./ is required
lib.greet(b'World')
""")
# -fPIC: Position Independent Code, -shared: shared object (so)
workspace.run('gcc -fPIC -shared -o greeting.so greeting.c')
r = workspace.run('python hello.py')
assert r.out == 'Hello, World!'
def test_mutable_buffer(workspace):
workspace.src('mylib.c', r"""\
#include <ctype.h>
void upper(char *chars, int len) {
for (int i = 0; i <= len; i++)
*(chars + i) = toupper(*(chars + i));
}
""")
workspace.run('gcc -fPIC -shared -o mylib.so mylib.c')
chars = b'abc123'
buffer = create_string_buffer(chars)
assert sizeof(buffer) == 7 # len(chars) + 1 (NUL-terminated)
assert buffer.raw == b'abc123\x00' # raw: memory block content
assert buffer.value == b'abc123' # value: as NUL-terminated string
lib = CDLL('./mylib.so')
lib.upper(buffer, len(chars))
assert buffer.value == b'ABC123' # changed in-place
assert chars == b'abc123' # unchanged
|
<commit_before><commit_msg>[python] Call functions in C library, mutable buffer<commit_after>from ctypes import CDLL, sizeof, create_string_buffer
def test_hello_world(workspace):
workspace.src('greeting.c', r"""
#include <stdio.h>
void greet(char *somebody) {
printf("Hello, %s!\n", somebody);
}
""")
workspace.src('hello.py', r"""
import ctypes
lib = ctypes.CDLL('./greeting.so') # leading ./ is required
lib.greet(b'World')
""")
# -fPIC: Position Independent Code, -shared: shared object (so)
workspace.run('gcc -fPIC -shared -o greeting.so greeting.c')
r = workspace.run('python hello.py')
assert r.out == 'Hello, World!'
def test_mutable_buffer(workspace):
workspace.src('mylib.c', r"""\
#include <ctype.h>
void upper(char *chars, int len) {
for (int i = 0; i <= len; i++)
*(chars + i) = toupper(*(chars + i));
}
""")
workspace.run('gcc -fPIC -shared -o mylib.so mylib.c')
chars = b'abc123'
buffer = create_string_buffer(chars)
assert sizeof(buffer) == 7 # len(chars) + 1 (NUL-terminated)
assert buffer.raw == b'abc123\x00' # raw: memory block content
assert buffer.value == b'abc123' # value: as NUL-terminated string
lib = CDLL('./mylib.so')
lib.upper(buffer, len(chars))
assert buffer.value == b'ABC123' # changed in-place
assert chars == b'abc123' # unchanged
|
|
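Declaring argtypes and restype makes the upper() call above type-checked by ctypes; a hedged fragment extending the same test (reuses the mylib.so built above):
from ctypes import CDLL, c_char_p, c_int, create_string_buffer
lib = CDLL('./mylib.so')
lib.upper.argtypes = [c_char_p, c_int]  # ctypes now validates the argument types
lib.upper.restype = None                # the C function returns void
buf = create_string_buffer(b'abc')
lib.upper(buf, 3)
assert buf.value == b'ABC'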
97f0326bc5ab5ce5601b72eb3e2196dd85588705
|
19/Solution.py
|
19/Solution.py
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
count = 0
node = head
while node is not None:
count += 1
node = node.next
if count - n == 0:
return head.next
prev = count - n - 1
node = head
while prev > 0:
node = node.next
prev -= 1
node.next = node.next.next
return head
|
Add my two pass solution
|
Add my two pass solution
|
Python
|
mit
|
xliiauo/leetcode,xiao0720/leetcode,xliiauo/leetcode,xiao0720/leetcode,xliiauo/leetcode
|
Add my two pass solution
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
count = 0
node = head
while node is not None:
count += 1
node = node.next
if count - n == 0:
return head.next
prev = count - n - 1
node = head
while prev > 0:
node = node.next
prev -= 1
node.next = node.next.next
return head
|
<commit_before><commit_msg>Add my two pass solution<commit_after>
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
count = 0
node = head
while node is not None:
count += 1
node = node.next
if count - n == 0:
return head.next
prev = count - n - 1
node = head
while prev > 0:
node = node.next
prev -= 1
node.next = node.next.next
return head
|
Add my two pass solutionclass ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
count = 0
node = head
while node is not None:
count += 1
node = node.next
if count - n == 0:
return head.next
prev = count - n - 1
node = head
while prev > 0:
node = node.next
prev -= 1
node.next = node.next.next
return head
|
<commit_before><commit_msg>Add my two pass solution<commit_after>class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def removeNthFromEnd(self, head, n):
"""
:type head: ListNode
:type n: int
:rtype: ListNode
"""
count = 0
node = head
while node is not None:
count += 1
node = node.next
if count - n == 0:
return head.next
prev = count - n - 1
node = head
while prev > 0:
node = node.next
prev -= 1
node.next = node.next.next
return head
|
|
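A common one-pass alternative to the two-pass solution above keeps two pointers n nodes apart (sketch; reuses the ListNode class defined above):
def remove_nth_from_end(head, n):
    dummy = ListNode(0)   # dummy node simplifies removing the head itself
    dummy.next = head
    lead = trail = dummy
    for _ in range(n):    # put lead n nodes ahead of trail
        lead = lead.next
    while lead.next:      # advance both until lead reaches the last node
        lead = lead.next
        trail = trail.next
    trail.next = trail.next.next  # unlink the nth node from the end
    return dummy.next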
040a86941e20db4976850c3cfb046c58ff48d559
|
examples/pywapi-example.py
|
examples/pywapi-example.py
|
#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + string.lower(weather_com_result['current_conditions']['text']) + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York.\n\n"
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
|
#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + weather_com_result['current_conditions']['text'].lower() + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York."
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
|
Fix error in example script
|
Fix error in example script
|
Python
|
mit
|
nmbryant/python-weather-api,bethany1/python-weather-api,lorenzosaino/python-weather-api,littleboss/python-weather-api,tmw25/python-weather-api,ExtraordinaryBen/python-weather-api,n0012/python-weather-api,tectronics/python-weather-api,dubwoc/python-weather-api,prasadsidda107/python-weather-api,hdiwan/python-weather-api
|
#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + string.lower(weather_com_result['current_conditions']['text']) + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York.\n\n"
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
Fix error in example script
|
#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + weather_com_result['current_conditions']['text'].lower() + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York."
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
|
<commit_before>#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + string.lower(weather_com_result['current_conditions']['text']) + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York.\n\n"
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
<commit_msg>Fix error in example script<commit_after>
|
#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + weather_com_result['current_conditions']['text'].lower() + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York."
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
|
#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + string.lower(weather_com_result['current_conditions']['text']) + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York.\n\n"
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
Fix error in example script#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + weather_com_result['current_conditions']['text'].lower() + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York."
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
|
<commit_before>#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + string.lower(weather_com_result['current_conditions']['text']) + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York.\n\n"
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
<commit_msg>Fix error in example script<commit_after>#!/usr/bin/env python
import pywapi
weather_com_result = pywapi.get_weather_from_weather_com('10001')
yahoo_result = pywapi.get_weather_from_yahoo('10001')
noaa_result = pywapi.get_weather_from_noaa('KJFK')
print "Weather.com says: It is " + weather_com_result['current_conditions']['text'].lower() + " and " + weather_com_result['current_conditions']['temperature'] + "C now in New York."
print("Yahoo says: It is " + yahoo_result['condition']['text'].lower() + " and " + yahoo_result['condition']['temp'] + "C now in New York.")
print("NOAA says: It is " + noaa_result['weather'].lower() + " and " + noaa_result['temp_c'] + "C now in New York.")
|
d4789ddbfbcd889d80690cb0d4f735a7d094141c
|
experimental/directshow.py
|
experimental/directshow.py
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
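The in-code comment hints at replacing the sleep-poll with the IMediaEvent interface. An untested sketch of that idea follows; the method name and out-parameter handling are assumptions about what the comtypes-generated quartz wrapper exposes, and EC_COMPLETE (0x01) is the DirectShow completion event code from evcode.h:
EC_COMPLETE = 0x01  # end-of-stream event code
media_event = filter_graph.QueryInterface(quartz.IMediaEvent)
# WaitForCompletion(-1) should block until playback finishes; comtypes
# returns the [out] event code as the call's result.
ev_code = media_event.WaitForCompletion(-1)
assert ev_code == EC_COMPLETE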
|
Move win32 audio experiment to trunk.
|
Move win32 audio experiment to trunk.
|
Python
|
bsd-3-clause
|
google-code-export/pyglet,odyaka341/pyglet,cledio66/pyglet,cledio66/pyglet,shaileshgoogler/pyglet,odyaka341/pyglet,gdkar/pyglet,Alwnikrotikz/pyglet,Austin503/pyglet,google-code-export/pyglet,mpasternak/pyglet-fix-issue-552,Alwnikrotikz/pyglet,gdkar/pyglet,odyaka341/pyglet,kmonsoor/pyglet,Alwnikrotikz/pyglet,mpasternak/pyglet-fix-issue-552,mpasternak/michaldtz-fixes-518-522,shaileshgoogler/pyglet,Austin503/pyglet,xshotD/pyglet,cledio66/pyglet,mpasternak/pyglet-fix-issue-552,kmonsoor/pyglet,mpasternak/michaldtz-fix-552,mpasternak/michaldtz-fixes-518-522,arifgursel/pyglet,qbektrix/pyglet,mpasternak/michaldtz-fix-552,mpasternak/michaldtz-fixes-518-522,cledio66/pyglet,qbektrix/pyglet,arifgursel/pyglet,Austin503/pyglet,odyaka341/pyglet,mpasternak/pyglet-fix-issue-518-522,Austin503/pyglet,arifgursel/pyglet,mpasternak/pyglet-fix-issue-518-522,kmonsoor/pyglet,qbektrix/pyglet,Alwnikrotikz/pyglet,qbektrix/pyglet,google-code-export/pyglet,arifgursel/pyglet,mpasternak/michaldtz-fix-552,kmonsoor/pyglet,shaileshgoogler/pyglet,xshotD/pyglet,xshotD/pyglet,google-code-export/pyglet,Alwnikrotikz/pyglet,arifgursel/pyglet,shaileshgoogler/pyglet,mpasternak/pyglet-fix-issue-518-522,kmonsoor/pyglet,cledio66/pyglet,google-code-export/pyglet,qbektrix/pyglet,mpasternak/pyglet-fix-issue-552,gdkar/pyglet,mpasternak/michaldtz-fixes-518-522,xshotD/pyglet,gdkar/pyglet,odyaka341/pyglet,Austin503/pyglet,shaileshgoogler/pyglet,mpasternak/pyglet-fix-issue-518-522,xshotD/pyglet,mpasternak/michaldtz-fix-552,gdkar/pyglet
|
Move win32 audio experiment to trunk.
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.<commit_after>
|
#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
Move win32 audio experiment to trunk.#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
<commit_before><commit_msg>Move win32 audio experiment to trunk.<commit_after>#!/usr/bin/python
# $Id:$
# Play an audio file with DirectShow. Tested ok with MP3, WMA, MID, WAV, AU.
# Caveats:
# - Requires a filename (not from memory or stream yet). Looks like we need
# to manually implement a filter which provides an output IPin. Lot of
# work.
# - Theoretically can traverse the graph to get the output filter, which by
# default is supposed to implement IDirectSound3DBuffer, for positioned
# sounds. Untested.
# - Requires comtypes. Can work around this in future by implementing the
# small subset of comtypes ourselves (or including a snapshot of comtypes in
# pyglet).
import ctypes
from comtypes import client
import sys
import time
filename = sys.argv[1]
qedit = client.GetModule('qedit.dll') # DexterLib
quartz = client.GetModule('quartz.dll') #
CLSID_FilterGraph = '{e436ebb3-524f-11ce-9f53-0020af0ba770}'
filter_graph = client.CreateObject(CLSID_FilterGraph,
interface=qedit.IFilterGraph)
filter_builder = filter_graph.QueryInterface(qedit.IGraphBuilder)
filter_builder.RenderFile(filename, None)
media_control = filter_graph.QueryInterface(quartz.IMediaControl)
media_control.Run()
try:
# Look at IMediaEvent interface for EOS notification
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
# Need these because finalisers don't have enough context to clean up after
# themselves when script exits.
del media_control
del filter_builder
del filter_graph
|
|
365d61ee5620f0743ffcdeb9c6b09f2b4d66940c
|
grab.py
|
grab.py
|
#!/usr/bin/python3
import json
import requests
import argparse
from typing import Tuple
from os.path import exists
BASE_URL = 'https://leetcode.com/problems/'
GRAPHQL_API_URL = 'https://leetcode.com/graphql'
QUERY = '''query questionData($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionId
questionFrontendId
boundTopicId
title
titleSlug
content
translatedTitle
translatedContent
difficulty
exampleTestcases
codeSnippets {
lang
langSlug
code
}
}
}'''
def get_url() -> str:
parser = argparse.ArgumentParser(description='Grab leetcode problem')
parser.add_argument(
'slug', metavar='slug', type=str, nargs='+',
help='Slug of the leetcode problem e.g.: two-sum',
)
parser.add_argument(
'--force', action='store_true',
help='Overwrite the file if it already exists',
)
args = parser.parse_args()
return args.slug[0], args.force
def get_data(slug: str) -> Tuple[str, int, str, str, str]:
resp = requests.post(GRAPHQL_API_URL, json={
'query': QUERY,
'variables': {
'titleSlug': slug,
}
})
question = json.loads(resp.text)['data']['question']
difficulty = question['difficulty'].lower()
nr = question['questionId']
title = question['title']
title_slug = question['titleSlug']
url = f'{BASE_URL}{title_slug}/'
code = ''
for snippet in question['codeSnippets']:
if snippet['langSlug'] == 'python3':
code = snippet['code']
return difficulty, nr, title, url, code
def create_file(difficulty: str, nr: int, title: str, url: str, code: str, *, force: bool) -> None:
filename = f'{difficulty}/{nr}.py'
if exists(filename) and not force:
print(f'\n{filename} already exists! Use --force to overwrite.\n')
return
with open(filename, 'w') as f:
f.write('"""\n')
f.write(f'{nr}. {title}\n')
f.write('\n')
f.write(f'{url}\n')
f.write('"""\n')
f.write('\n\n')
f.write(code)
f.write('...\n')
f.write((
'\n\n'
'def main():\n'
' s = Solution()\n'
' print(s.xxx())\n'
'\n\n'
"if __name__ == '__main__':\n"
' raise(SystemExit(main()))'
))
def main() -> int:
slug, force = get_url()
difficulty, nr, title, url, code = get_data(slug)
create_file(difficulty, nr, title, url, code, force=force)
return 0
if __name__ == '__main__':
raise SystemExit(main())
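Typical use would be something like "python3 grab.py two-sum" (add --force to overwrite an existing file). One small inconsistency worth flagging: get_url is annotated -> str but actually returns the (slug, force) tuple; an annotation matching the behavior would be:
def get_url() -> Tuple[str, bool]:   # returns (slug, force), matching the body above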
|
Create program that fetches code from leetcode.
|
Create program that fetches code from leetcode.
|
Python
|
mit
|
pisskidney/leetcode
|
Create program that fetches code from leetcode.
|
#!/usr/bin/python3
import json
import requests
import argparse
from typing import Tuple
from os.path import exists
BASE_URL = 'https://leetcode.com/problems/'
GRAPHQL_API_URL = 'https://leetcode.com/graphql'
QUERY = '''query questionData($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionId
questionFrontendId
boundTopicId
title
titleSlug
content
translatedTitle
translatedContent
difficulty
exampleTestcases
codeSnippets {
lang
langSlug
code
}
}
}'''
def get_url() -> str:
parser = argparse.ArgumentParser(description='Grab leetcode problem')
parser.add_argument(
'slug', metavar='slug', type=str, nargs='+',
help='Slug of the leetcode problem e.g.: two-sum',
)
parser.add_argument(
'--force', action='store_true',
help='Overwrite the file if it already exists',
)
args = parser.parse_args()
return args.slug[0], args.force
def get_data(slug: str) -> Tuple[str, int, str, str, str]:
resp = requests.post(GRAPHQL_API_URL, json={
'query': QUERY,
'variables': {
'titleSlug': slug,
}
})
question = json.loads(resp.text)['data']['question']
difficulty = question['difficulty'].lower()
nr = question['questionId']
title = question['title']
title_slug = question['titleSlug']
url = f'{BASE_URL}{title_slug}/'
code = ''
for snippet in question['codeSnippets']:
if snippet['langSlug'] == 'python3':
code = snippet['code']
return difficulty, nr, title, url, code
def create_file(difficulty: str, nr: int, title: str, url: str, code: str, *, force: bool) -> None:
filename = f'{difficulty}/{nr}.py'
if exists(filename) and not force:
print(f'\n{filename} already exists! Use --force to overwrite.\n')
return
with open(filename, 'w') as f:
f.write('"""\n')
f.write(f'{nr}. {title}\n')
f.write('\n')
f.write(f'{url}\n')
f.write('"""\n')
f.write('\n\n')
f.write(code)
f.write('...\n')
f.write((
'\n\n'
'def main():\n'
' s = Solution()\n'
' print(s.xxx())\n'
'\n\n'
"if __name__ == '__main__':\n"
' raise(SystemExit(main()))'
))
def main() -> int:
slug, force = get_url()
difficulty, nr, title, url, code = get_data(slug)
create_file(difficulty, nr, title, url, code, force=force)
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
<commit_before><commit_msg>Create program that fetches code from leetcode.<commit_after>
|
#!/usr/bin/python3
import json
import requests
import argparse
from typing import Tuple
from os.path import exists
BASE_URL = 'https://leetcode.com/problems/'
GRAPHQL_API_URL = 'https://leetcode.com/graphql'
QUERY = '''query questionData($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionId
questionFrontendId
boundTopicId
title
titleSlug
content
translatedTitle
translatedContent
difficulty
exampleTestcases
codeSnippets {
lang
langSlug
code
}
}
}'''
def get_url() -> str:
parser = argparse.ArgumentParser(description='Grab leetcode problem')
parser.add_argument(
'slug', metavar='slug', type=str, nargs='+',
help='Slug of the leetcode problem e.g.: two-sum',
)
parser.add_argument(
'--force', action='store_true',
help='Overwrite the file if it already exists',
)
args = parser.parse_args()
return args.slug[0], args.force
def get_data(slug: str) -> Tuple[str, int, str, str, str]:
resp = requests.post(GRAPHQL_API_URL, json={
'query': QUERY,
'variables': {
'titleSlug': slug,
}
})
question = json.loads(resp.text)['data']['question']
difficulty = question['difficulty'].lower()
nr = question['questionId']
title = question['title']
title_slug = question['titleSlug']
url = f'{BASE_URL}{title_slug}/'
code = ''
for snippet in question['codeSnippets']:
if snippet['langSlug'] == 'python3':
code = snippet['code']
return difficulty, nr, title, url, code
def create_file(difficulty: str, nr: int, title: str, url: str, code: str, *, force: bool) -> None:
filename = f'{difficulty}/{nr}.py'
if exists(filename) and not force:
print(f'\n{filename} already exists! Use --force to overwrite.\n')
return
with open(filename, 'w') as f:
f.write('"""\n')
f.write(f'{nr}. {title}\n')
f.write('\n')
f.write(f'{url}\n')
f.write('"""\n')
f.write('\n\n')
f.write(code)
f.write('...\n')
f.write((
'\n\n'
'def main():\n'
' s = Solution()\n'
' print(s.xxx())\n'
'\n\n'
"if __name__ == '__main__':\n"
' raise(SystemExit(main()))'
))
def main() -> int:
slug, force = get_url()
difficulty, nr, title, url, code = get_data(slug)
create_file(difficulty, nr, title, url, code, force=force)
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
Create program that fetches code from leetcode.#!/usr/bin/python3
import json
import requests
import argparse
from typing import Tuple
from os.path import exists
BASE_URL = 'https://leetcode.com/problems/'
GRAPHQL_API_URL = 'https://leetcode.com/graphql'
QUERY = '''query questionData($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionId
questionFrontendId
boundTopicId
title
titleSlug
content
translatedTitle
translatedContent
difficulty
exampleTestcases
codeSnippets {
lang
langSlug
code
}
}
}'''
def get_url() -> str:
parser = argparse.ArgumentParser(description='Grab leetcode problem')
parser.add_argument(
'slug', metavar='slug', type=str, nargs='+',
help='Slug of the leetcode problem e.g.: two-sum',
)
parser.add_argument(
'--force', action='store_true',
help='Overwrite the file if it already exists',
)
args = parser.parse_args()
return args.slug[0], args.force
def get_data(slug: str) -> Tuple[str, int, str, str, str]:
resp = requests.post(GRAPHQL_API_URL, json={
'query': QUERY,
'variables': {
'titleSlug': slug,
}
})
question = json.loads(resp.text)['data']['question']
difficulty = question['difficulty'].lower()
nr = question['questionId']
title = question['title']
title_slug = question['titleSlug']
url = f'{BASE_URL}{title_slug}/'
code = ''
for snippet in question['codeSnippets']:
if snippet['langSlug'] == 'python3':
code = snippet['code']
return difficulty, nr, title, url, code
def create_file(difficulty: str, nr: int, title: str, url: str, code: str, *, force: bool) -> None:
filename = f'{difficulty}/{nr}.py'
if exists(filename) and not force:
print(f'\n{filename} already exists! Use --force to overwrite.\n')
return
with open(filename, 'w') as f:
f.write('"""\n')
f.write(f'{nr}. {title}\n')
f.write('\n')
f.write(f'{url}\n')
f.write('"""\n')
f.write('\n\n')
f.write(code)
f.write('...\n')
f.write((
'\n\n'
'def main():\n'
' s = Solution()\n'
' print(s.xxx())\n'
'\n\n'
"if __name__ == '__main__':\n"
' raise(SystemExit(main()))'
))
def main() -> int:
slug, force = get_url()
difficulty, nr, title, url, code = get_data(slug)
create_file(difficulty, nr, title, url, code, force=force)
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
<commit_before><commit_msg>Create program that fetches code from leetcode.<commit_after>#!/usr/bin/python3
import json
import requests
import argparse
from typing import Tuple
from os.path import exists
BASE_URL = 'https://leetcode.com/problems/'
GRAPHQL_API_URL = 'https://leetcode.com/graphql'
QUERY = '''query questionData($titleSlug: String!) {
question(titleSlug: $titleSlug) {
questionId
questionFrontendId
boundTopicId
title
titleSlug
content
translatedTitle
translatedContent
difficulty
exampleTestcases
codeSnippets {
lang
langSlug
code
}
}
}'''
def get_url() -> str:
parser = argparse.ArgumentParser(description='Grab leetcode problem')
parser.add_argument(
'slug', metavar='slug', type=str, nargs='+',
help='Slug of the leetcode problem e.g.: two-sum',
)
parser.add_argument(
'--force', action='store_true',
help='Overwrite the file if it already exists',
)
args = parser.parse_args()
return args.slug[0], args.force
def get_data(slug: str) -> Tuple[str, int, str, str, str]:
resp = requests.post(GRAPHQL_API_URL, json={
'query': QUERY,
'variables': {
'titleSlug': slug,
}
})
question = json.loads(resp.text)['data']['question']
difficulty = question['difficulty'].lower()
nr = question['questionId']
title = question['title']
title_slug = question['titleSlug']
url = f'{BASE_URL}{title_slug}/'
code = ''
for snippet in question['codeSnippets']:
if snippet['langSlug'] == 'python3':
code = snippet['code']
return difficulty, nr, title, url, code
def create_file(difficulty: str, nr: int, title: str, url: str, code: str, *, force: bool) -> None:
filename = f'{difficulty}/{nr}.py'
if exists(filename) and not force:
print(f'\n{filename} already exists! Use --force to overwrite.\n')
return
with open(filename, 'w') as f:
f.write('"""\n')
f.write(f'{nr}. {title}\n')
f.write('\n')
f.write(f'{url}\n')
f.write('"""\n')
f.write('\n\n')
f.write(code)
f.write('...\n')
f.write((
'\n\n'
'def main():\n'
' s = Solution()\n'
' print(s.xxx())\n'
'\n\n'
"if __name__ == '__main__':\n"
' raise(SystemExit(main()))'
))
def main() -> int:
slug, force = get_url()
difficulty, nr, title, url, code = get_data(slug)
create_file(difficulty, nr, title, url, code, force=force)
return 0
if __name__ == '__main__':
raise SystemExit(main())
|
|
afc5b02f2520382fc0ebb3370538ca2baeb04dd4
|
planetstack/core/models/__init__.py
|
planetstack/core/models/__init__.py
|
from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
|
from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .credential import UserCredential,SiteCredential,SliceCredential
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
|
Add credentials module to core list
|
Add credentials module to core list
|
Python
|
apache-2.0
|
cboling/xos,zdw/xos,open-cloud/xos,open-cloud/xos,open-cloud/xos,zdw/xos,cboling/xos,opencord/xos,opencord/xos,cboling/xos,zdw/xos,cboling/xos,zdw/xos,cboling/xos,opencord/xos
|
from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
Add credentials module to core list
|
from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .credential import UserCredential,SiteCredential,SliceCredential
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
|
<commit_before>from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
<commit_msg>Add credentials module to core list<commit_after>
|
from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .credential import UserCredential,SiteCredential,SliceCredential
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
|
from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
Add credentials module to core listfrom .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .credential import UserCredential,SiteCredential,SliceCredential
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
|
<commit_before>from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
<commit_msg>Add credentials module to core list<commit_after>from .plcorebase import PlCoreBase
from .planetstack import PlanetStack
from .project import Project
from .singletonmodel import SingletonModel
from .service import Service
from .service import ServiceAttribute
from .tag import Tag
from .role import Role
from .site import Site,Deployment, DeploymentRole, DeploymentPrivilege, SiteDeployments
from .dashboard import DashboardView
from .user import User, UserDashboardView
from .serviceclass import ServiceClass
from .slice import Slice, SliceDeployments
from .site import SitePrivilege, SiteDeployments
from .userdeployments import UserDeployments
from .image import Image, ImageDeployments
from .node import Node
from .serviceresource import ServiceResource
from .slice import SliceRole
from .slice import SlicePrivilege
from .credential import UserCredential,SiteCredential,SliceCredential
from .site import SiteRole
from .site import SitePrivilege
from .planetstack import PlanetStackRole
from .planetstack import PlanetStackPrivilege
from .slicetag import SliceTag
from .flavor import Flavor
from .sliver import Sliver
from .reservation import ReservedResource
from .reservation import Reservation
from .network import Network, NetworkParameterType, NetworkParameter, NetworkSliver, NetworkTemplate, Router, NetworkSlice, NetworkDeployments
from .billing import Account, Invoice, Charge, UsableObject, Payment
|
9f91a62afb313684215987a54ddb043bdbc46fde
|
mots_vides/constants.py
|
mots_vides/constants.py
|
"""
Constants for mots-vides
"""
import os
DATA_DIRECTORY = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'datas/'
)
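A hypothetical consumer (module path and filename here are assumptions for illustration) would join the constant with a specific data file:
import os
from mots_vides.constants import DATA_DIRECTORY
path = os.path.join(DATA_DIRECTORY, 'french.txt')   # hypothetical stop-word file
Since os.path.join handles separators itself, the trailing slash in 'datas/' is harmless but not required.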
|
Create constant module with DATA_DIRECTORY
|
Create constant module with DATA_DIRECTORY
|
Python
|
bsd-3-clause
|
Fantomas42/mots-vides,Fantomas42/mots-vides
|
Create constant module with DATA_DIRECTORY
|
"""
Constants for mots-vides
"""
import os
DATA_DIRECTORY = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'datas/'
)
|
<commit_before><commit_msg>Create constant module with DATA_DIRECTORY<commit_after>
|
"""
Constants for mots-vides
"""
import os
DATA_DIRECTORY = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'datas/'
)
|
Create constant module with DATA_DIRECTORY"""
Constants for mots-vides
"""
import os
DATA_DIRECTORY = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'datas/'
)
|
<commit_before><commit_msg>Create constant module with DATA_DIRECTORY<commit_after>"""
Constants for mots-vides
"""
import os
DATA_DIRECTORY = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
'datas/'
)
|
|
2a59a06880e8e382aee3452be19a9ac2b193df8e
|
tests/api.py
|
tests/api.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
from requests.structures import CaseInsensitiveDict
from gyazo.api import Api
from gyazo.error import GyazoError
class TestApi(unittest.TestCase):
def setUp(self):
self.api = Api()
def test_parse_and_check_success(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"height": 320,
"provider_name": "Gyazo",
"provider_url": "https://gyazo.com",
"type": "photo",
"url": "https://bot.gyazo.com/e72675b15a56b1.png",
"version": "1.0",
"width": 640
}
mock_response.json.return_value = data
mock_response.status_code = 200
actual_headers, actual_data = self.api._parse_and_check(mock_response)
self.assertEqual(actual_headers, headers)
self.assertDictEqual(actual_data, data)
def test_parse_and_check_error_exception(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
mock_response.json.side_effect = ValueError
mock_response.status_code = 200
with self.assertRaises(ValueError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"message": "image not found."
}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code_without_message(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
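The four cases pin down the contract of Api._parse_and_check without showing its body. A reconstruction consistent with the assertions — an inference, not the library's actual source — would be:
def _parse_and_check(self, response):
    data = response.json()                     # a ValueError here propagates (second test)
    if response.status_code >= 400:
        # 'message' may be absent (fourth test), so fall back to a default
        raise GyazoError(data.get('message', 'unknown error'))
    return response.headers, data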
|
Add more unit test cases
|
Add more unit test cases
|
Python
|
mit
|
ymyzk/python-gyazo
|
Add more unit test cases
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
from requests.structures import CaseInsensitiveDict
from gyazo.api import Api
from gyazo.error import GyazoError
class TestApi(unittest.TestCase):
def setUp(self):
self.api = Api()
def test_parse_and_check_success(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"height": 320,
"provider_name": "Gyazo",
"provider_url": "https://gyazo.com",
"type": "photo",
"url": "https://bot.gyazo.com/e72675b15a56b1.png",
"version": "1.0",
"width": 640
}
mock_response.json.return_value = data
mock_response.status_code = 200
actual_headers, actual_data = self.api._parse_and_check(mock_response)
self.assertEqual(actual_headers, headers)
self.assertDictEqual(actual_data, data)
def test_parse_and_check_error_exception(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
mock_response.json.side_effect = ValueError
mock_response.status_code = 200
with self.assertRaises(ValueError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"message": "image not found."
}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code_without_message(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
|
<commit_before><commit_msg>Add more unit test cases<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
from requests.structures import CaseInsensitiveDict
from gyazo.api import Api
from gyazo.error import GyazoError
class TestApi(unittest.TestCase):
def setUp(self):
self.api = Api()
def test_parse_and_check_success(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"height": 320,
"provider_name": "Gyazo",
"provider_url": "https://gyazo.com",
"type": "photo",
"url": "https://bot.gyazo.com/e72675b15a56b1.png",
"version": "1.0",
"width": 640
}
mock_response.json.return_value = data
mock_response.status_code = 200
actual_headers, actual_data = self.api._parse_and_check(mock_response)
self.assertEqual(actual_headers, headers)
self.assertDictEqual(actual_data, data)
def test_parse_and_check_error_exception(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
mock_response.json.side_effect = ValueError
mock_response.status_code = 200
with self.assertRaises(ValueError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"message": "image not found."
}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code_without_message(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
|
Add more unit test cases# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
from requests.structures import CaseInsensitiveDict
from gyazo.api import Api
from gyazo.error import GyazoError
class TestApi(unittest.TestCase):
def setUp(self):
self.api = Api()
def test_parse_and_check_success(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"height": 320,
"provider_name": "Gyazo",
"provider_url": "https://gyazo.com",
"type": "photo",
"url": "https://bot.gyazo.com/e72675b15a56b1.png",
"version": "1.0",
"width": 640
}
mock_response.json.return_value = data
mock_response.status_code = 200
actual_headers, actual_data = self.api._parse_and_check(mock_response)
self.assertEqual(actual_headers, headers)
self.assertDictEqual(actual_data, data)
def test_parse_and_check_error_exception(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
mock_response.json.side_effect = ValueError
mock_response.status_code = 200
with self.assertRaises(ValueError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"message": "image not found."
}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code_without_message(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
|
<commit_before><commit_msg>Add more unit test cases<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
import unittest
try:
from unittest import mock
except ImportError:
import mock
from requests.structures import CaseInsensitiveDict
from gyazo.api import Api
from gyazo.error import GyazoError
class TestApi(unittest.TestCase):
def setUp(self):
self.api = Api()
def test_parse_and_check_success(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"height": 320,
"provider_name": "Gyazo",
"provider_url": "https://gyazo.com",
"type": "photo",
"url": "https://bot.gyazo.com/e72675b15a56b1.png",
"version": "1.0",
"width": 640
}
mock_response.json.return_value = data
mock_response.status_code = 200
actual_headers, actual_data = self.api._parse_and_check(mock_response)
self.assertEqual(actual_headers, headers)
self.assertDictEqual(actual_data, data)
def test_parse_and_check_error_exception(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
mock_response.json.side_effect = ValueError
mock_response.status_code = 200
with self.assertRaises(ValueError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {
"message": "image not found."
}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
def test_parse_and_check_error_status_code_without_message(self):
mock_response = mock.MagicMock()
headers = CaseInsensitiveDict()
headers["X-Runtime"] = "0.008495"
mock_response.headers = headers
data = {}
mock_response.json.return_value = data
mock_response.status_code = 404
with self.assertRaises(GyazoError):
self.api._parse_and_check(mock_response)
|
|
e50490d9bcce4604a4b30212611f1550da2604e1
|
elpiwear/pycon.py
|
elpiwear/pycon.py
|
import time
import Edison.i2c as I2C
import Edison.gpio as GPIO
import Edison.spi as SPI
import ads1015
import sharp2y0a21
import screen
import ILI9341 as TFT
import watchout_screen
import proximity_warning
import twitter_screen
import tag_screen
import gplus_screen
class pycon():
def __init__(self):
self.init_display()
self.init_screens()
while 1:
self.screens[self.current_screen].update()
self.disp.display(self.screens[self.current_screen].img)
time.sleep(60*1)
self.current_screen = (self.current_screen + 1) % len(self.screens)
def init_display(self):
dc = GPIO.gpio(4, GPIO.OUT)
rst = GPIO.gpio(13, GPIO.OUT)
self.disp = TFT.ILI9341(dc, rst=rst, spi=SPI.spi(5, 0, speed=5000000))
self.disp.begin()
def init_screens(self):
self.tag = tag_screen.tag_screen()
self.watchout = watchout_screen.watchout_screen()
self.twitter = twitter_screen.twitter_screen()
self.gplus = gplus_screen.gplus_screen()
self.screens = [self.tag, self.watchout, self.twitter, self.gplus]
self.current_screen = 0
if __name__ == "__main__":
main = pycon()
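One design note: because the while loop lives in __init__, constructing pycon() never returns. A common refactor (a sketch, not the commit's code) keeps construction cheap and moves the loop into a run() method:
class pycon():
    def __init__(self):
        self.init_display()
        self.init_screens()
    def run(self):
        while True:
            screen = self.screens[self.current_screen]
            screen.update()
            self.disp.display(screen.img)
            time.sleep(60)
            self.current_screen = (self.current_screen + 1) % len(self.screens)
if __name__ == "__main__":
    pycon().run()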
|
Add the basic main program for the Pycon
|
Add the basic main program for the Pycon
|
Python
|
mit
|
fjacob21/pycon2015
|
Add the basic main program for the Pycon
|
import time
import Edison.i2c as I2C
import Edison.gpio as GPIO
import Edison.spi as SPI
import ads1015
import sharp2y0a21
import screen
import ILI9341 as TFT
import watchout_screen
import proximity_warning
import twitter_screen
import tag_screen
import gplus_screen
class pycon():
def __init__(self):
self.init_display()
self.init_screens()
while 1:
self.screens[self.current_screen].update()
self.disp.display(self.screens[self.current_screen].img)
time.sleep(60*1)
self.current_screen = (self.current_screen + 1) % len(self.screens)
def init_display(self):
dc = GPIO.gpio(4, GPIO.OUT)
rst = GPIO.gpio(13, GPIO.OUT)
self.disp = TFT.ILI9341(dc, rst=rst, spi=SPI.spi(5, 0, speed=5000000))
self.disp.begin()
def init_screens(self):
self.tag = tag_screen.tag_screen()
self.watchout = watchout_screen.watchout_screen()
self.twitter = twitter_screen.twitter_screen()
self.gplus = gplus_screen.gplus_screen()
self.screens = [self.tag, self.watchout, self.twitter, self.gplus]
self.current_screen = 0
if __name__ == "__main__":
main = pycon()
|
<commit_before><commit_msg>Add the basic main program for the Pycon<commit_after>
|
import time
import Edison.i2c as I2C
import Edison.gpio as GPIO
import Edison.spi as SPI
import ads1015
import sharp2y0a21
import screen
import ILI9341 as TFT
import watchout_screen
import proximity_warning
import twitter_screen
import tag_screen
import gplus_screen
class pycon():
def __init__(self):
self.init_display()
self.init_screens()
while 1:
self.screens[self.current_screen].update()
self.disp.display(self.screens[self.current_screen].img)
time.sleep(60*1)
self.current_screen = (self.current_screen + 1) % len(self.screens)
def init_display(self):
dc = GPIO.gpio(4, GPIO.OUT)
rst = GPIO.gpio(13, GPIO.OUT)
self.disp = TFT.ILI9341(dc, rst=rst, spi=SPI.spi(5, 0, speed=5000000))
self.disp.begin()
def init_screens(self):
self.tag = tag_screen.tag_screen()
self.watchout = watchout_screen.watchout_screen()
self.twitter = twitter_screen.twitter_screen()
self.gplus = gplus_screen.gplus_screen()
self.screens = [self.tag, self.watchout, self.twitter, self.gplus]
self.current_screen = 0
if __name__ == "__main__":
main = pycon()
|
Add the basic main program for the Pyconimport time
import Edison.i2c as I2C
import Edison.gpio as GPIO
import Edison.spi as SPI
import ads1015
import sharp2y0a21
import screen
import ILI9341 as TFT
import watchout_screen
import proximity_warning
import twitter_screen
import tag_screen
import gplus_screen
class pycon():
def __init__(self):
self.init_display()
self.init_screens()
while 1:
self.screens[self.current_screen].update()
self.disp.display(self.screens[self.current_screen].img)
time.sleep(60*1)
self.current_screen = (self.current_screen + 1) % len(self.screens)
def init_display(self):
dc = GPIO.gpio(4, GPIO.OUT)
rst = GPIO.gpio(13, GPIO.OUT)
self.disp = TFT.ILI9341(dc, rst=rst, spi=SPI.spi(5, 0, speed=5000000))
self.disp.begin()
def init_screens(self):
self.tag = tag_screen.tag_screen()
self.watchout = watchout_screen.watchout_screen()
self.twitter = twitter_screen.twitter_screen()
self.gplus = gplus_screen.gplus_screen()
self.screens = [self.tag, self.watchout, self.twitter, self.gplus]
self.current_screen = 0
if __name__ == "__main__":
main = pycon()
|
<commit_before><commit_msg>Add the basic main program for the Pycon<commit_after>import time
import Edison.i2c as I2C
import Edison.gpio as GPIO
import Edison.spi as SPI
import ads1015
import sharp2y0a21
import screen
import ILI9341 as TFT
import watchout_screen
import proximity_warning
import twitter_screen
import tag_screen
import gplus_screen
class pycon():
def __init__(self):
self.init_display()
self.init_screens()
while 1:
self.screens[self.current_screen].update()
self.disp.display(self.screens[self.current_screen].img)
time.sleep(60*1)
self.current_screen = (self.current_screen + 1) % len(self.screens)
def init_display(self):
dc = GPIO.gpio(4, GPIO.OUT)
rst = GPIO.gpio(13, GPIO.OUT)
self.disp = TFT.ILI9341(dc, rst=rst, spi=SPI.spi(5, 0, speed=5000000))
self.disp.begin()
def init_screens(self):
self.tag = tag_screen.tag_screen()
self.watchout = watchout_screen.watchout_screen()
self.twitter = twitter_screen.twitter_screen()
self.gplus = gplus_screen.gplus_screen()
self.screens = [self.tag, self.watchout, self.twitter, self.gplus]
self.current_screen = 0
if __name__ == "__main__":
main = pycon()
|
|
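The rotation loop in this record cycles screens round-robin with modulo arithmetic; the same pattern can be sketched without the Edison hardware stack (Screen and the display callable below are hypothetical stand-ins for the record's screen classes and ILI9341 driver):

import time
from itertools import cycle

class Screen(object):
    """Stand-in for tag_screen, watchout_screen, and friends."""
    def __init__(self, name):
        self.name = name
        self.img = None
    def update(self):
        self.img = 'rendered %s' % self.name

def run(screens, display, interval=60):
    for screen in cycle(screens):      # endless round-robin, like the modulo above
        screen.update()
        display(screen.img)            # stands in for disp.display(screen.img)
        time.sleep(interval)

# run([Screen('tag'), Screen('twitter')], print, interval=1)
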
9af2c07a21e426d52abccb6cc92afcd8cef9e340
|
glowing-lines2.py
|
glowing-lines2.py
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well
|
Python
|
mit
|
redpig2/pilhacks
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
<commit_before><commit_msg>Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well<commit_after>
|
from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
Add better glowing line script; uses alpha to create the line out of solid green; handles intersections wellfrom PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
<commit_before><commit_msg>Add better glowing line script; uses alpha to create the line out of solid green; handles intersections well<commit_after>from PIL import Image, ImageDraw
import random
W = 500
im = Image.new('RGB', (W, W))
NCOLORS = 19
NLINES = 15
def make_line_mask(im):
mask = Image.new('L', im.size, color=0)
grays = []
v = 255.0
for i in range(NCOLORS):
grays.append(int(v))
v *= 0.91
grays.reverse()
draw=ImageDraw.Draw(mask)
y = im.size[1]/2
for i in range(NCOLORS):
draw.line((0,y, im.size[0], y), fill=grays[i], width=NCOLORS-i)
mask.save('mask.png')
return mask
def make_master_line():
'''Make an image with alpha to be pasted for all lines'''
im = Image.new('RGB', (W, W), color=(0, 255, 0))
mask = make_line_mask(im)
im.putalpha(mask)
im.save('mline.png')
return im
def add_line(im0, im1):
x = random.randint(-W/2, W/2)
y = random.randint(-W/2, W/2)
r1 = im1.rotate(random.randint(5, 145))
im0.paste(r1, (x, y), r1)
def make_image():
im = Image.new('RGB', (W, W), color=(0, 0, 0))
ml = make_master_line()
for i in range(NLINES):
add_line(im, ml)
im.save('f.png')
make_image()
|
|
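The key PIL mechanism in this record is that paste() accepts the pasted image itself as the mask once putalpha() has attached one, so the faded gray ramp controls per-pixel blending and line intersections add up cleanly. A reduced sketch of just that mechanism:

from PIL import Image, ImageDraw

canvas = Image.new('RGB', (100, 100), (0, 0, 0))
line = Image.new('RGB', (100, 100), (0, 255, 0))   # solid green, like the record
mask = Image.new('L', (100, 100), 0)
ImageDraw.Draw(mask).line((0, 50, 100, 50), fill=255, width=3)
line.putalpha(mask)               # alpha channel taken from the mask
canvas.paste(line, (0, 0), line)  # RGBA image doubles as its own paste mask
canvas.save('demo.png')
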
2d617ac87df76a6191f393e77c7f2330948cb0cc
|
tests/test_twitter.py
|
tests/test_twitter.py
|
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation import settings, tweeter, tweepy
from tweepy import TweepError
from google.appengine.ext import testbed
class TestTwitter(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_logservice_stub()
def tearDown(self):
self.testbed.deactivate()
    # Assert that fetching timeline tweets from twitter does not throw an exception
def test_get_tweets(self):
t = tweeter.TwitterStatusProcessor(**settings.TWITTER['AUTH'])
tweets = []
try:
tweets = t._get_tweets(None)
except TweepError, e:
pass
self.assertGreater(len(tweets), 0)
|
Test fetching tweets from twitter works.
|
Test fetching tweets from twitter works.
|
Python
|
mit
|
chriskuehl/kloudless-status,chriskuehl/kloudless-status,balanced/status.balancedpayments.com,chriskuehl/kloudless-status,balanced/status.balancedpayments.com,balanced/status.balancedpayments.com
|
Test fetching tweets from twitter works.
|
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation import settings, tweeter, tweepy
from tweepy import TweepError
from google.appengine.ext import testbed
class TestTwitter(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_logservice_stub()
def tearDown(self):
self.testbed.deactivate()
    # Assert that fetching timeline tweets from twitter does not throw an exception
def test_get_tweets(self):
t = tweeter.TwitterStatusProcessor(**settings.TWITTER['AUTH'])
tweets = []
try:
tweets = t._get_tweets(None)
except TweepError, e:
pass
self.assertGreater(len(tweets), 0)
|
<commit_before><commit_msg>Test fetching tweets from twitter works.<commit_after>
|
import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation import settings, tweeter, tweepy
from tweepy import TweepError
from google.appengine.ext import testbed
class TestTwitter(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_logservice_stub()
def tearDown(self):
self.testbed.deactivate()
    # Assert that fetching timeline tweets from twitter does not throw an exception
def test_get_tweets(self):
t = tweeter.TwitterStatusProcessor(**settings.TWITTER['AUTH'])
tweets = []
try:
tweets = t._get_tweets(None)
except TweepError, e:
pass
self.assertGreater(len(tweets), 0)
|
Test fetching tweets from twitter works.import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation import settings, tweeter, tweepy
from tweepy import TweepError
from google.appengine.ext import testbed
class TestTwitter(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_logservice_stub()
def tearDown(self):
self.testbed.deactivate()
    # Assert that fetching timeline tweets from twitter does not throw an exception
def test_get_tweets(self):
t = tweeter.TwitterStatusProcessor(**settings.TWITTER['AUTH'])
tweets = []
try:
tweets = t._get_tweets(None)
except TweepError, e:
pass
self.assertGreater(len(tweets), 0)
|
<commit_before><commit_msg>Test fetching tweets from twitter works.<commit_after>import os
import sys
import unittest
sys.path.insert(0, os.path.abspath('./situation'))
sys.path.insert(0, os.path.abspath('./'))
from situation import settings, tweeter, tweepy
from tweepy import TweepError
from google.appengine.ext import testbed
class TestTwitter(unittest.TestCase):
def setUp(self):
# First, create an instance of the Testbed class.
self.testbed = testbed.Testbed()
# Then activate the testbed, which prepares the service stubs for use.
self.testbed.activate()
# Next, declare which service stubs you want to use.
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
self.testbed.init_logservice_stub()
def tearDown(self):
self.testbed.deactivate()
    # Assert that fetching timeline tweets from twitter does not throw an exception
def test_get_tweets(self):
t = tweeter.TwitterStatusProcessor(**settings.TWITTER['AUTH'])
tweets = []
try:
tweets = t._get_tweets(None)
except TweepError, e:
pass
self.assertGreater(len(tweets), 0)
|
|
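As written, test_get_tweets exercises the live Twitter API and merely swallows TweepError. A network-free variant could stub the call with unittest.mock (a sketch in Python 3 syntax, unlike the record's Python 2/App Engine setup; tweeter and settings are the record's own modules):

from unittest import mock

def test_get_tweets_stubbed():
    t = tweeter.TwitterStatusProcessor(**settings.TWITTER['AUTH'])
    with mock.patch.object(t, '_get_tweets', return_value=['a tweet']):
        assert len(t._get_tweets(None)) > 0
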
f2a049f918c9753e88699e0cb1574e7b10c0cb82
|
tests/test_unicode.py
|
tests/test_unicode.py
|
# -*- coding: utf-8 -*-
from dynmen import Menu, MenuResult
from functools import partial
import unittest
class TestFirstItem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.head = Menu(('head', '-n1'))
def assertMenuResultEqual(self, menu_result, selected, value=None, returncode=0):
self.assertIsInstance(menu_result, MenuResult)
self.assertEqual(menu_result.selected, selected)
self.assertEqual(menu_result.value, value)
self.assertEqual(menu_result.returncode, returncode)
def test_ascii(self):
res = self.head(['a', 'b', 'c'])
self.assertMenuResultEqual(res, 'a')
def test_unicode(self):
res = self.head([u'ä', u'ü', u'π'])
self.assertMenuResultEqual(res, u'ä')
def test_notexplicit(self):
res = self.head(['π', 'ü', 'ä'])
self.assertMenuResultEqual(res, 'π')
def test_dict_notexplicit(self):
d = {
'a': 'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', 'äää')
def test_dict_unicode_val(self):
d = {
'a': u'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', u'äää')
|
Add some unicode tests for Menu()
|
Add some unicode tests for Menu()
|
Python
|
mit
|
frostidaho/dynmen
|
Add some unicode tests for Menu()
|
# -*- coding: utf-8 -*-
from dynmen import Menu, MenuResult
from functools import partial
import unittest
class TestFirstItem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.head = Menu(('head', '-n1'))
def assertMenuResultEqual(self, menu_result, selected, value=None, returncode=0):
self.assertIsInstance(menu_result, MenuResult)
self.assertEqual(menu_result.selected, selected)
self.assertEqual(menu_result.value, value)
self.assertEqual(menu_result.returncode, returncode)
def test_ascii(self):
res = self.head(['a', 'b', 'c'])
self.assertMenuResultEqual(res, 'a')
def test_unicode(self):
res = self.head([u'ä', u'ü', u'π'])
self.assertMenuResultEqual(res, u'ä')
def test_notexplicit(self):
res = self.head(['π', 'ü', 'ä'])
self.assertMenuResultEqual(res, 'π')
def test_dict_notexplicit(self):
d = {
'a': 'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', 'äää')
def test_dict_unicode_val(self):
d = {
'a': u'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', u'äää')
|
<commit_before><commit_msg>Add some unicode tests for Menu()<commit_after>
|
# -*- coding: utf-8 -*-
from dynmen import Menu, MenuResult
from functools import partial
import unittest
class TestFirstItem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.head = Menu(('head', '-n1'))
def assertMenuResultEqual(self, menu_result, selected, value=None, returncode=0):
self.assertIsInstance(menu_result, MenuResult)
self.assertEqual(menu_result.selected, selected)
self.assertEqual(menu_result.value, value)
self.assertEqual(menu_result.returncode, returncode)
def test_ascii(self):
res = self.head(['a', 'b', 'c'])
self.assertMenuResultEqual(res, 'a')
def test_unicode(self):
res = self.head([u'ä', u'ü', u'π'])
self.assertMenuResultEqual(res, u'ä')
def test_notexplicit(self):
res = self.head(['π', 'ü', 'ä'])
self.assertMenuResultEqual(res, 'π')
def test_dict_notexplicit(self):
d = {
'a': 'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', 'äää')
def test_dict_unicode_val(self):
d = {
'a': u'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', u'äää')
|
Add some unicode tests for Menu()# -*- coding: utf-8 -*-
from dynmen import Menu, MenuResult
from functools import partial
import unittest
class TestFirstItem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.head = Menu(('head', '-n1'))
def assertMenuResultEqual(self, menu_result, selected, value=None, returncode=0):
self.assertIsInstance(menu_result, MenuResult)
self.assertEqual(menu_result.selected, selected)
self.assertEqual(menu_result.value, value)
self.assertEqual(menu_result.returncode, returncode)
def test_ascii(self):
res = self.head(['a', 'b', 'c'])
self.assertMenuResultEqual(res, 'a')
def test_unicode(self):
res = self.head([u'ä', u'ü', u'π'])
self.assertMenuResultEqual(res, u'ä')
def test_notexplicit(self):
res = self.head(['π', 'ü', 'ä'])
self.assertMenuResultEqual(res, 'π')
def test_dict_notexplicit(self):
d = {
'a': 'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', 'äää')
def test_dict_unicode_val(self):
d = {
'a': u'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', u'äää')
|
<commit_before><commit_msg>Add some unicode tests for Menu()<commit_after># -*- coding: utf-8 -*-
from dynmen import Menu, MenuResult
from functools import partial
import unittest
class TestFirstItem(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.head = Menu(('head', '-n1'))
def assertMenuResultEqual(self, menu_result, selected, value=None, returncode=0):
self.assertIsInstance(menu_result, MenuResult)
self.assertEqual(menu_result.selected, selected)
self.assertEqual(menu_result.value, value)
self.assertEqual(menu_result.returncode, returncode)
def test_ascii(self):
res = self.head(['a', 'b', 'c'])
self.assertMenuResultEqual(res, 'a')
def test_unicode(self):
res = self.head([u'ä', u'ü', u'π'])
self.assertMenuResultEqual(res, u'ä')
def test_notexplicit(self):
res = self.head(['π', 'ü', 'ä'])
self.assertMenuResultEqual(res, 'π')
def test_dict_notexplicit(self):
d = {
'a': 'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', 'äää')
def test_dict_unicode_val(self):
d = {
'a': u'äää',
}
res = self.head(d)
self.assertMenuResultEqual(res, 'a', u'äää')
|
|
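Menu(('head', '-n1')) works in these tests because the entries are written to the command's stdin one per line and its stdout comes back as the selection, so head -n1 always returns the first entry. The plumbing can be sketched with subprocess (a simplification of what dynmen presumably does internally):

import subprocess

def select(entries, cmd=('head', '-n1')):
    out = subprocess.run(cmd, input='\n'.join(entries),
                         capture_output=True, text=True, check=True)
    return out.stdout.rstrip('\n')

assert select(['a', 'b', 'c']) == 'a'
assert select(['ä', 'ü', 'π']) == 'ä'  # UTF-8 survives the round trip
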
de1cc61e9b88d50b4b54a2ec81607ab4c33b3053
|
core/migrations/0002_auto_20170423_0441.py
|
core/migrations/0002_auto_20170423_0441.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='task',
options={'ordering': ['timesheet', '-id']},
),
migrations.AlterModelOptions(
name='timesheet',
options={'ordering': ['-id']},
),
migrations.AlterField(
model_name='entry',
name='duration',
field=models.DurationField(blank=True),
),
]
|
Update meta data for core models
|
Update meta data for core models
|
Python
|
bsd-2-clause
|
Leahelisabeth/timestrap,Leahelisabeth/timestrap,overshard/timestrap,Leahelisabeth/timestrap,muhleder/timestrap,Leahelisabeth/timestrap,cdubz/timestrap,overshard/timestrap,overshard/timestrap,muhleder/timestrap,muhleder/timestrap,cdubz/timestrap,cdubz/timestrap
|
Update meta data for core models
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='task',
options={'ordering': ['timesheet', '-id']},
),
migrations.AlterModelOptions(
name='timesheet',
options={'ordering': ['-id']},
),
migrations.AlterField(
model_name='entry',
name='duration',
field=models.DurationField(blank=True),
),
]
|
<commit_before><commit_msg>Update meta data for core models<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='task',
options={'ordering': ['timesheet', '-id']},
),
migrations.AlterModelOptions(
name='timesheet',
options={'ordering': ['-id']},
),
migrations.AlterField(
model_name='entry',
name='duration',
field=models.DurationField(blank=True),
),
]
|
Update meta data for core models# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='task',
options={'ordering': ['timesheet', '-id']},
),
migrations.AlterModelOptions(
name='timesheet',
options={'ordering': ['-id']},
),
migrations.AlterField(
model_name='entry',
name='duration',
field=models.DurationField(blank=True),
),
]
|
<commit_before><commit_msg>Update meta data for core models<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-23 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='entry',
options={'ordering': ['-date', '-id'], 'verbose_name_plural': 'Entries'},
),
migrations.AlterModelOptions(
name='task',
options={'ordering': ['timesheet', '-id']},
),
migrations.AlterModelOptions(
name='timesheet',
options={'ordering': ['-id']},
),
migrations.AlterField(
model_name='entry',
name='duration',
field=models.DurationField(blank=True),
),
]
|
|
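The operations above imply Meta options on the core models roughly like the following (a reconstruction from the migration alone, not the project's actual models.py; the ForeignKey is assumed from the ordering on 'timesheet'):

from django.db import models

class Entry(models.Model):
    date = models.DateField()
    duration = models.DurationField(blank=True)

    class Meta:
        ordering = ['-date', '-id']
        verbose_name_plural = 'Entries'

class Timesheet(models.Model):
    class Meta:
        ordering = ['-id']

class Task(models.Model):
    timesheet = models.ForeignKey(Timesheet, on_delete=models.CASCADE)

    class Meta:
        ordering = ['timesheet', '-id']
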
d24e6930966d66939583be44e33dc4e75f35a6bf
|
utest/controller/test_backup.py
|
utest/controller/test_backup.py
|
import unittest
from robotide.controller.chiefcontroller import Backup
class BackupTestCase(unittest.TestCase):
def setUp(self):
self._backupper = _MyBackup()
def test_backup_is_restored_when_save_raises_exception(self):
try:
with self._backupper:
raise AssertionError('expected')
self.fail('should not get here')
except AssertionError:
self.assertTrue(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
def test_backup_is_not_restored_when_save_passes(self):
with self._backupper:
pass
self.assertFalse(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
class _MyBackup(Backup):
def __init__(self):
self._path = object()
self._backup = object()
self.restored = False
def _make_backup(self):
self._backup = object()
def _restore_backup(self):
if not self._backup:
raise AssertionError('No backup')
self.restored = True
def _remove_backup(self):
self._backup = None
if __name__ == '__main__':
unittest.main()
|
Add tests for backup mechanism
|
Add tests for backup mechanism
|
Python
|
apache-2.0
|
HelioGuilherme66/RIDE,fingeronthebutton/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE,HelioGuilherme66/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,robotframework/RIDE,fingeronthebutton/RIDE,robotframework/RIDE,robotframework/RIDE,caio2k/RIDE,caio2k/RIDE,HelioGuilherme66/RIDE
|
Add tests for backup mechanism
|
import unittest
from robotide.controller.chiefcontroller import Backup
class BackupTestCase(unittest.TestCase):
def setUp(self):
self._backupper = _MyBackup()
def test_backup_is_restored_when_save_raises_exception(self):
try:
with self._backupper:
raise AssertionError('expected')
self.fail('should not get here')
except AssertionError:
self.assertTrue(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
def test_backup_is_not_restored_when_save_passes(self):
with self._backupper:
pass
self.assertFalse(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
class _MyBackup(Backup):
def __init__(self):
self._path = object()
self._backup = object()
self.restored = False
def _make_backup(self):
self._backup = object()
def _restore_backup(self):
if not self._backup:
raise AssertionError('No backup')
self.restored = True
def _remove_backup(self):
self._backup = None
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for backup mechanism<commit_after>
|
import unittest
from robotide.controller.chiefcontroller import Backup
class BackupTestCase(unittest.TestCase):
def setUp(self):
self._backupper = _MyBackup()
def test_backup_is_restored_when_save_raises_exception(self):
try:
with self._backupper:
raise AssertionError('expected')
self.fail('should not get here')
except AssertionError:
self.assertTrue(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
def test_backup_is_not_restored_when_save_passes(self):
with self._backupper:
pass
self.assertFalse(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
class _MyBackup(Backup):
def __init__(self):
self._path = object()
self._backup = object()
self.restored = False
def _make_backup(self):
self._backup = object()
def _restore_backup(self):
if not self._backup:
raise AssertionError('No backup')
self.restored = True
def _remove_backup(self):
self._backup = None
if __name__ == '__main__':
unittest.main()
|
Add tests for backup mechanismimport unittest
from robotide.controller.chiefcontroller import Backup
class BackupTestCase(unittest.TestCase):
def setUp(self):
self._backupper = _MyBackup()
def test_backup_is_restored_when_save_raises_exception(self):
try:
with self._backupper:
raise AssertionError('expected')
self.fail('should not get here')
except AssertionError:
self.assertTrue(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
def test_backup_is_not_restored_when_save_passes(self):
with self._backupper:
pass
self.assertFalse(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
class _MyBackup(Backup):
def __init__(self):
self._path = object()
self._backup = object()
self.restored = False
def _make_backup(self):
self._backup = object()
def _restore_backup(self):
if not self._backup:
raise AssertionError('No backup')
self.restored = True
def _remove_backup(self):
self._backup = None
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for backup mechanism<commit_after>import unittest
from robotide.controller.chiefcontroller import Backup
class BackupTestCase(unittest.TestCase):
def setUp(self):
self._backupper = _MyBackup()
def test_backup_is_restored_when_save_raises_exception(self):
try:
with self._backupper:
raise AssertionError('expected')
self.fail('should not get here')
except AssertionError:
self.assertTrue(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
def test_backup_is_not_restored_when_save_passes(self):
with self._backupper:
pass
self.assertFalse(self._backupper.restored)
self.assertEqual(None, self._backupper._backup)
class _MyBackup(Backup):
def __init__(self):
self._path = object()
self._backup = object()
self.restored = False
def _make_backup(self):
self._backup = object()
def _restore_backup(self):
if not self._backup:
raise AssertionError('No backup')
self.restored = True
def _remove_backup(self):
self._backup = None
if __name__ == '__main__':
unittest.main()
|
|
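Together the two tests pin down Backup's context-manager contract: take a backup on entry, restore it only when the body raises, and discard it either way. A minimal implementation that satisfies them (a sketch, not RIDE's actual code):

class Backup(object):
    def __enter__(self):
        self._make_backup()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is not None:
            self._restore_backup()
        self._remove_backup()
        return False  # let the body's exception propagate, as the first test expects
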
aef89cffbb3be71206e5822d079993feafdbf96a
|
callbacks.py
|
callbacks.py
|
from __future__ import print_function
import time
import numpy as np
from keras.callbacks import Callback
class BatchTiming(Callback):
"""
    It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.
For each epoch it prints median batch time and total epoch time.
After training it prints overall median batch time and median epoch time.
Usage: model.fit(X_train, Y_train, callbacks=[BatchTiming()])
All times are in seconds.
More info: https://keras.io/callbacks/
"""
def on_train_begin(self, logs={}):
self.all_batch_times = []
self.all_epoch_times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_batch_times = []
def on_batch_begin(self, batch, logs={}):
self.start_time = time.time()
def on_batch_end(self, batch, logs={}):
end_time = time.time()
elapsed_time = end_time - self.start_time
self.epoch_batch_times.append(elapsed_time)
self.all_batch_times.append(elapsed_time)
def on_epoch_end(self, epoch, logs={}):
epoch_time = np.sum(self.epoch_batch_times)
self.all_epoch_times.append(epoch_time)
median_batch_time = np.median(self.epoch_batch_times)
print('Epoch timing - batch (median): %0.5f, epoch: %0.5f (sec)' % \
(median_batch_time, epoch_time))
def on_train_end(self, logs={}):
median_batch_time = np.median(self.all_batch_times)
median_epoch_time = np.median(self.all_epoch_times)
print('Overall - batch (median): %0.5f, epoch (median): %0.5f (sec)' % \
(median_batch_time, median_epoch_time))
|
Implement a BatchTiming callback for Keras.
|
Implement a BatchTiming callback for Keras.
It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.
|
Python
|
mit
|
rossumai/keras-multi-gpu,rossumai/keras-multi-gpu
|
Implement a BatchTiming callback for Keras.
It measure robust stats for timing of batches and epochs.
Useful for measuring the training process.
|
from __future__ import print_function
import time
import numpy as np
from keras.callbacks import Callback
class BatchTiming(Callback):
"""
    It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.
For each epoch it prints median batch time and total epoch time.
After training it prints overall median batch time and median epoch time.
Usage: model.fit(X_train, Y_train, callbacks=[BatchTiming()])
All times are in seconds.
More info: https://keras.io/callbacks/
"""
def on_train_begin(self, logs={}):
self.all_batch_times = []
self.all_epoch_times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_batch_times = []
def on_batch_begin(self, batch, logs={}):
self.start_time = time.time()
def on_batch_end(self, batch, logs={}):
end_time = time.time()
elapsed_time = end_time - self.start_time
self.epoch_batch_times.append(elapsed_time)
self.all_batch_times.append(elapsed_time)
def on_epoch_end(self, epoch, logs={}):
epoch_time = np.sum(self.epoch_batch_times)
self.all_epoch_times.append(epoch_time)
median_batch_time = np.median(self.epoch_batch_times)
print('Epoch timing - batch (median): %0.5f, epoch: %0.5f (sec)' % \
(median_batch_time, epoch_time))
def on_train_end(self, logs={}):
median_batch_time = np.median(self.all_batch_times)
median_epoch_time = np.median(self.all_epoch_times)
print('Overall - batch (median): %0.5f, epoch (median): %0.5f (sec)' % \
(median_batch_time, median_epoch_time))
|
<commit_before><commit_msg>Implement a BatchTiming callback for Keras.
It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.<commit_after>
|
from __future__ import print_function
import time
import numpy as np
from keras.callbacks import Callback
class BatchTiming(Callback):
"""
    It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.
For each epoch it prints median batch time and total epoch time.
After training it prints overall median batch time and median epoch time.
Usage: model.fit(X_train, Y_train, callbacks=[BatchTiming()])
All times are in seconds.
More info: https://keras.io/callbacks/
"""
def on_train_begin(self, logs={}):
self.all_batch_times = []
self.all_epoch_times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_batch_times = []
def on_batch_begin(self, batch, logs={}):
self.start_time = time.time()
def on_batch_end(self, batch, logs={}):
end_time = time.time()
elapsed_time = end_time - self.start_time
self.epoch_batch_times.append(elapsed_time)
self.all_batch_times.append(elapsed_time)
def on_epoch_end(self, epoch, logs={}):
epoch_time = np.sum(self.epoch_batch_times)
self.all_epoch_times.append(epoch_time)
median_batch_time = np.median(self.epoch_batch_times)
print('Epoch timing - batch (median): %0.5f, epoch: %0.5f (sec)' % \
(median_batch_time, epoch_time))
def on_train_end(self, logs={}):
median_batch_time = np.median(self.all_batch_times)
median_epoch_time = np.median(self.all_epoch_times)
print('Overall - batch (median): %0.5f, epoch (median): %0.5f (sec)' % \
(median_batch_time, median_epoch_time))
|
Implement a BatchTiming callback for Keras.
It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.from __future__ import print_function
import time
import numpy as np
from keras.callbacks import Callback
class BatchTiming(Callback):
"""
    It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.
For each epoch it prints median batch time and total epoch time.
After training it prints overall median batch time and median epoch time.
Usage: model.fit(X_train, Y_train, callbacks=[BatchTiming()])
All times are in seconds.
More info: https://keras.io/callbacks/
"""
def on_train_begin(self, logs={}):
self.all_batch_times = []
self.all_epoch_times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_batch_times = []
def on_batch_begin(self, batch, logs={}):
self.start_time = time.time()
def on_batch_end(self, batch, logs={}):
end_time = time.time()
elapsed_time = end_time - self.start_time
self.epoch_batch_times.append(elapsed_time)
self.all_batch_times.append(elapsed_time)
def on_epoch_end(self, epoch, logs={}):
epoch_time = np.sum(self.epoch_batch_times)
self.all_epoch_times.append(epoch_time)
median_batch_time = np.median(self.epoch_batch_times)
print('Epoch timing - batch (median): %0.5f, epoch: %0.5f (sec)' % \
(median_batch_time, epoch_time))
def on_train_end(self, logs={}):
median_batch_time = np.median(self.all_batch_times)
median_epoch_time = np.median(self.all_epoch_times)
print('Overall - batch (median): %0.5f, epoch (median): %0.5f (sec)' % \
(median_batch_time, median_epoch_time))
|
<commit_before><commit_msg>Implement a BatchTiming callback for Keras.
It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.<commit_after>from __future__ import print_function
import time
import numpy as np
from keras.callbacks import Callback
class BatchTiming(Callback):
"""
    It measures robust stats for timing of batches and epochs.
Useful for measuring the training process.
For each epoch it prints median batch time and total epoch time.
After training it prints overall median batch time and median epoch time.
Usage: model.fit(X_train, Y_train, callbacks=[BatchTiming()])
All times are in seconds.
More info: https://keras.io/callbacks/
"""
def on_train_begin(self, logs={}):
self.all_batch_times = []
self.all_epoch_times = []
def on_epoch_begin(self, batch, logs={}):
self.epoch_batch_times = []
def on_batch_begin(self, batch, logs={}):
self.start_time = time.time()
def on_batch_end(self, batch, logs={}):
end_time = time.time()
elapsed_time = end_time - self.start_time
self.epoch_batch_times.append(elapsed_time)
self.all_batch_times.append(elapsed_time)
def on_epoch_end(self, epoch, logs={}):
epoch_time = np.sum(self.epoch_batch_times)
self.all_epoch_times.append(epoch_time)
median_batch_time = np.median(self.epoch_batch_times)
print('Epoch timing - batch (median): %0.5f, epoch: %0.5f (sec)' % \
(median_batch_time, epoch_time))
def on_train_end(self, logs={}):
median_batch_time = np.median(self.all_batch_times)
median_epoch_time = np.median(self.all_epoch_times)
print('Overall - batch (median): %0.5f, epoch (median): %0.5f (sec)' % \
(median_batch_time, median_epoch_time))
|
|
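Typical wiring for the callback, per its docstring (toy model and random data; shapes and hyperparameters are placeholders):

import numpy as np
from keras.models import Sequential
from keras.layers import Dense

X = np.random.rand(256, 10)
Y = np.random.rand(256, 1)

model = Sequential([Dense(1, input_shape=(10,))])
model.compile(optimizer='sgd', loss='mse')
model.fit(X, Y, epochs=3, batch_size=32, callbacks=[BatchTiming()])
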
aa7c14f5c93cc03137c4939d348a21ee2255ea00
|
scripts/cmt/deform/np_mesh.py
|
scripts/cmt/deform/np_mesh.py
|
"""Efficient mesh processing using numpy"""
import numpy as np
import json
import maya.api.OpenMaya as OpenMaya
import cmt.shortcuts as shortcuts
class Mesh(object):
@classmethod
def from_obj(cls, file_path):
with open(file_path, "r") as fh:
lines = fh.readlines()
points = []
read_vertices = False
for line in lines:
if line.startswith("v "):
read_vertices = True
v = line.split()[1:]
points.append([float(v[0]), float(v[1]), float(v[2])])
elif read_vertices:
break
points = np.array(points)
return Mesh(points)
@classmethod
def from_maya_mesh(cls, mesh):
points = shortcuts.get_points(mesh)
points = np.array([[p.x, p.y, p.z] for p in points])
return Mesh(points)
def __init__(self, points):
self.points = points
def mask_points(self, base, mask):
points = base.points + ((self.points - base.points).T * mask.values).T
return Mesh(points)
def to_maya_mesh(self, mesh):
points = OpenMaya.MPointArray()
for p in self.points:
points.append(OpenMaya.MPoint(p[0], p[1], p[2]))
shortcuts.set_points(mesh, points)
class Mask(object):
"""1D array of float values."""
@classmethod
def from_file(cls, file_path):
with open(file_path, "r") as fh:
data = json.load(fh)
values = np.array(data)
return Mask(values)
def __init__(self, values):
self.values = values
|
Add efficient mesh processing class using numpy
|
Add efficient mesh processing class using numpy
|
Python
|
mit
|
chadmv/cmt,chadmv/cmt,chadmv/cmt
|
Add efficient mesh processing class using numpy
|
"""Efficient mesh processing using numpy"""
import numpy as np
import json
import maya.api.OpenMaya as OpenMaya
import cmt.shortcuts as shortcuts
class Mesh(object):
@classmethod
def from_obj(cls, file_path):
with open(file_path, "r") as fh:
lines = fh.readlines()
points = []
read_vertices = False
for line in lines:
if line.startswith("v "):
read_vertices = True
v = line.split()[1:]
points.append([float(v[0]), float(v[1]), float(v[2])])
elif read_vertices:
break
points = np.array(points)
return Mesh(points)
@classmethod
def from_maya_mesh(cls, mesh):
points = shortcuts.get_points(mesh)
points = np.array([[p.x, p.y, p.z] for p in points])
return Mesh(points)
def __init__(self, points):
self.points = points
def mask_points(self, base, mask):
points = base.points + ((self.points - base.points).T * mask.values).T
return Mesh(points)
def to_maya_mesh(self, mesh):
points = OpenMaya.MPointArray()
for p in self.points:
points.append(OpenMaya.MPoint(p[0], p[1], p[2]))
shortcuts.set_points(mesh, points)
class Mask(object):
"""1D array of float values."""
@classmethod
def from_file(cls, file_path):
with open(file_path, "r") as fh:
data = json.load(fh)
values = np.array(data)
return Mask(values)
def __init__(self, values):
self.values = values
|
<commit_before><commit_msg>Add efficient mesh processing class using numpy<commit_after>
|
"""Efficient mesh processing using numpy"""
import numpy as np
import json
import maya.api.OpenMaya as OpenMaya
import cmt.shortcuts as shortcuts
class Mesh(object):
@classmethod
def from_obj(cls, file_path):
with open(file_path, "r") as fh:
lines = fh.readlines()
points = []
read_vertices = False
for line in lines:
if line.startswith("v "):
read_vertices = True
v = line.split()[1:]
points.append([float(v[0]), float(v[1]), float(v[2])])
elif read_vertices:
break
points = np.array(points)
return Mesh(points)
@classmethod
def from_maya_mesh(cls, mesh):
points = shortcuts.get_points(mesh)
points = np.array([[p.x, p.y, p.z] for p in points])
return Mesh(points)
def __init__(self, points):
self.points = points
def mask_points(self, base, mask):
points = base.points + ((self.points - base.points).T * mask.values).T
return Mesh(points)
def to_maya_mesh(self, mesh):
points = OpenMaya.MPointArray()
for p in self.points:
points.append(OpenMaya.MPoint(p[0], p[1], p[2]))
shortcuts.set_points(mesh, points)
class Mask(object):
"""1D array of float values."""
@classmethod
def from_file(cls, file_path):
with open(file_path, "r") as fh:
data = json.load(fh)
values = np.array(data)
return Mask(values)
def __init__(self, values):
self.values = values
|
Add efficient mesh processing class using numpy"""Efficient mesh processing using numpy"""
import numpy as np
import json
import maya.api.OpenMaya as OpenMaya
import cmt.shortcuts as shortcuts
class Mesh(object):
@classmethod
def from_obj(cls, file_path):
with open(file_path, "r") as fh:
lines = fh.readlines()
points = []
read_vertices = False
for line in lines:
if line.startswith("v "):
read_vertices = True
v = line.split()[1:]
points.append([float(v[0]), float(v[1]), float(v[2])])
elif read_vertices:
break
points = np.array(points)
return Mesh(points)
@classmethod
def from_maya_mesh(cls, mesh):
points = shortcuts.get_points(mesh)
points = np.array([[p.x, p.y, p.z] for p in points])
return Mesh(points)
def __init__(self, points):
self.points = points
def mask_points(self, base, mask):
points = base.points + ((self.points - base.points).T * mask.values).T
return Mesh(points)
def to_maya_mesh(self, mesh):
points = OpenMaya.MPointArray()
for p in self.points:
points.append(OpenMaya.MPoint(p[0], p[1], p[2]))
shortcuts.set_points(mesh, points)
class Mask(object):
"""1D array of float values."""
@classmethod
def from_file(cls, file_path):
with open(file_path, "r") as fh:
data = json.load(fh)
values = np.array(data)
return Mask(values)
def __init__(self, values):
self.values = values
|
<commit_before><commit_msg>Add efficient mesh processing class using numpy<commit_after>"""Efficient mesh processing using numpy"""
import numpy as np
import json
import maya.api.OpenMaya as OpenMaya
import cmt.shortcuts as shortcuts
class Mesh(object):
@classmethod
def from_obj(cls, file_path):
with open(file_path, "r") as fh:
lines = fh.readlines()
points = []
read_vertices = False
for line in lines:
if line.startswith("v "):
read_vertices = True
v = line.split()[1:]
points.append([float(v[0]), float(v[1]), float(v[2])])
elif read_vertices:
break
points = np.array(points)
return Mesh(points)
@classmethod
def from_maya_mesh(cls, mesh):
points = shortcuts.get_points(mesh)
points = np.array([[p.x, p.y, p.z] for p in points])
return Mesh(points)
def __init__(self, points):
self.points = points
def mask_points(self, base, mask):
points = base.points + ((self.points - base.points).T * mask.values).T
return Mesh(points)
def to_maya_mesh(self, mesh):
points = OpenMaya.MPointArray()
for p in self.points:
points.append(OpenMaya.MPoint(p[0], p[1], p[2]))
shortcuts.set_points(mesh, points)
class Mask(object):
"""1D array of float values."""
@classmethod
def from_file(cls, file_path):
with open(file_path, "r") as fh:
data = json.load(fh)
values = np.array(data)
return Mask(values)
def __init__(self, values):
self.values = values
|
|
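Mesh.mask_points is per-vertex linear interpolation; the double transpose just broadcasts the 1D mask across the xyz columns. On toy arrays, with no Maya in sight (values illustrative):

import numpy as np

base = np.zeros((3, 3))           # three vertices at the origin
target = np.ones((3, 3))          # fully deformed shape
mask = np.array([0.0, 0.5, 1.0])  # per-vertex weights

blended = base + ((target - base).T * mask).T
print(blended)  # vertex 0 stays put, vertex 1 is halfway, vertex 2 reaches target
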
3cde2060825449ae91e2e7172bde8c47680c42c7
|
tests/baseapi/test_query.py
|
tests/baseapi/test_query.py
|
import pytest
from pypuppetdb.errors import APIError, ExperimentalDisabledError
pytestmark = pytest.mark.unit
def test_query_endpoint(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes', body='[]')
response = api2._query('nodes')
assert response == []
def test_query_endpoint_path(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1', body='[]')
response = api2._query('nodes', path='host1')
assert response == []
def test_query_endpoint_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes?query=', body='[]')
response = api2._query('nodes', query='')
assert response == []
def test_query_endpoint_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', query='["=", "name", "host1"]')
assert response == []
def test_query_endpoint_path_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1?query=', body='[]')
response = api2._query('nodes', path='host1', query='')
assert response == []
def test_query_endpoint_path_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes/host1?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', path='host1',
query='["=", "name", "host1"]')
assert response == []
def test_endpoint_experimental(api2e, stub_get):
stub_get('http://localhost:8080/experimental/reports', body='[]')
response = api2e._query('reports')
assert response == []
|
Add tests for _query in baseapi.
|
tests: Add tests for _query in baseapi.
These tests check that _query ends up generating and querying the right
url depending on the different options passed to it.
|
Python
|
apache-2.0
|
jcastillocano/pypuppetdb,jorik041/pypuppetdb,voxpupuli/pypuppetdb,amwilson/pypuppetdb,jcastillocano/pypuppetdb,puppet-community/pypuppetdb,dforste/pypuppetdb,vicinus/pypuppetdb
|
tests: Add tests for _query in baseapi.
These tests check that _query ends up generating and querying the right
url depending on the different options passed to it.
|
import pytest
from pypuppetdb.errors import APIError, ExperimentalDisabledError
pytestmark = pytest.mark.unit
def test_query_endpoint(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes', body='[]')
response = api2._query('nodes')
assert response == []
def test_query_endpoint_path(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1', body='[]')
response = api2._query('nodes', path='host1')
assert response == []
def test_query_endpoint_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes?query=', body='[]')
response = api2._query('nodes', query='')
assert response == []
def test_query_endpoint_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', query='["=", "name", "host1"]')
assert response == []
def test_query_endpoint_path_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1?query=', body='[]')
response = api2._query('nodes', path='host1', query='')
assert response == []
def test_query_endpoint_path_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes/host1?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', path='host1',
query='["=", "name", "host1"]')
assert response == []
def test_endpoint_experimental(api2e, stub_get):
stub_get('http://localhost:8080/experimental/reports', body='[]')
response = api2e._query('reports')
assert response == []
|
<commit_before><commit_msg>tests: Add tests for _query in baseapi.
These tests check that _query ends up generating and querying the right
url depending on the different options passed to it.<commit_after>
|
import pytest
from pypuppetdb.errors import APIError, ExperimentalDisabledError
pytestmark = pytest.mark.unit
def test_query_endpoint(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes', body='[]')
response = api2._query('nodes')
assert response == []
def test_query_endpoint_path(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1', body='[]')
response = api2._query('nodes', path='host1')
assert response == []
def test_query_endpoint_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes?query=', body='[]')
response = api2._query('nodes', query='')
assert response == []
def test_query_endpoint_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', query='["=", "name", "host1"]')
assert response == []
def test_query_endpoint_path_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1?query=', body='[]')
response = api2._query('nodes', path='host1', query='')
assert response == []
def test_query_endpoint_path_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes/host1?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', path='host1',
query='["=", "name", "host1"]')
assert response == []
def test_endpoint_experimental(api2e, stub_get):
stub_get('http://localhost:8080/experimental/reports', body='[]')
response = api2e._query('reports')
assert response == []
|
tests: Add tests for _query in baseapi.
These tests check that _query ends up generating and querying the right
url depending on the different options passed to it.import pytest
from pypuppetdb.errors import APIError, ExperimentalDisabledError
pytestmark = pytest.mark.unit
def test_query_endpoint(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes', body='[]')
response = api2._query('nodes')
assert response == []
def test_query_endpoint_path(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1', body='[]')
response = api2._query('nodes', path='host1')
assert response == []
def test_query_endpoint_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes?query=', body='[]')
response = api2._query('nodes', query='')
assert response == []
def test_query_endpoint_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', query='["=", "name", "host1"]')
assert response == []
def test_query_endpoint_path_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1?query=', body='[]')
response = api2._query('nodes', path='host1', query='')
assert response == []
def test_query_endpoint_path_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes/host1?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', path='host1',
query='["=", "name", "host1"]')
assert response == []
def test_endpoint_experimental(api2e, stub_get):
stub_get('http://localhost:8080/experimental/reports', body='[]')
response = api2e._query('reports')
assert response == []
|
<commit_before><commit_msg>tests: Add tests for _query in baseapi.
These tests check that _query ends up generating and querying the right
url depending on the different options passed to it.<commit_after>import pytest
from pypuppetdb.errors import APIError, ExperimentalDisabledError
pytestmark = pytest.mark.unit
def test_query_endpoint(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes', body='[]')
response = api2._query('nodes')
assert response == []
def test_query_endpoint_path(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1', body='[]')
response = api2._query('nodes', path='host1')
assert response == []
def test_query_endpoint_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes?query=', body='[]')
response = api2._query('nodes', query='')
assert response == []
def test_query_endpoint_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', query='["=", "name", "host1"]')
assert response == []
def test_query_endpoint_path_empty_query(api2, stub_get):
stub_get('http://localhost:8080/v2/nodes/host1?query=', body='[]')
response = api2._query('nodes', path='host1', query='')
assert response == []
def test_query_endpoint_path_query(api2, stub_get):
url = ('http://localhost:8080/v2/nodes/host1?query=%5B%22%3D%22%2C'
'+%22name%22%2C+%22host1%22%5D')
stub_get('{0}'.format(url), body='[]')
response = api2._query('nodes', path='host1',
query='["=", "name", "host1"]')
assert response == []
def test_endpoint_experimental(api2e, stub_get):
stub_get('http://localhost:8080/experimental/reports', body='[]')
response = api2e._query('reports')
assert response == []
|
|
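The percent-encoded fragments being stubbed are ordinary urlencoded query strings; they can be regenerated directly (Python 3 spelling shown; the record itself targets Python 2):

from urllib.parse import urlencode

print(urlencode({'query': '["=", "name", "host1"]'}))
# query=%5B%22%3D%22%2C+%22name%22%2C+%22host1%22%5D
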
29f7dcbe4f3469d051d92344963821a4199644cb
|
examples/on_startup.py
|
examples/on_startup.py
|
"""Provides an example of attaching an action on hug server startup"""
import hug
data = []
@hug.startup()
def add_data(api):
'''Adds initial data to the api on startup'''
data.append("It's working")
@hug.startup()
def add_more_data(api):
'''Adds initial data to the api on startup'''
data.append("Even subsequent calls")
@hug.get()
def test():
'''Returns all stored data'''
return data
|
Add an example of the intended implementation of the feature
|
Add an example of the intended implementation of the feature
|
Python
|
mit
|
timothycrosley/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug
|
Add an example of the intended implementation of the feature
|
"""Provides an example of attaching an action on hug server startup"""
import hug
data = []
@hug.startup()
def add_data(api):
'''Adds initial data to the api on startup'''
data.append("It's working")
@hug.startup()
def add_more_data(api):
'''Adds initial data to the api on startup'''
data.append("Even subsequent calls")
@hug.get()
def test():
'''Returns all stored data'''
return data
|
<commit_before><commit_msg>Add an example of the intended implementation of the feature<commit_after>
|
"""Provides an example of attaching an action on hug server startup"""
import hug
data = []
@hug.startup()
def add_data(api):
'''Adds initial data to the api on startup'''
data.append("It's working")
@hug.startup()
def add_more_data(api):
'''Adds initial data to the api on startup'''
data.append("Even subsequent calls")
@hug.get()
def test():
'''Returns all stored data'''
return data
|
Add an example of the intended implementation of the feature"""Provides an example of attaching an action on hug server startup"""
import hug
data = []
@hug.startup()
def add_data(api):
'''Adds initial data to the api on startup'''
data.append("It's working")
@hug.startup()
def add_more_data(api):
'''Adds initial data to the api on startup'''
data.append("Even subsequent calls")
@hug.get()
def test():
'''Returns all stored data'''
return data
|
<commit_before><commit_msg>Add an example of the intended implementation of the feature<commit_after>"""Provides an example of attaching an action on hug server startup"""
import hug
data = []
@hug.startup()
def add_data(api):
'''Adds initial data to the api on startup'''
data.append("It's working")
@hug.startup()
def add_more_data(api):
'''Adds initial data to the api on startup'''
data.append("Even subsequent calls")
@hug.get()
def test():
'''Returns all stored data'''
return data
|
|
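Stripped of hug itself, @hug.startup() is deferred hook registration: decorated functions are collected and called once with the API when the server starts. A self-contained sketch of that mechanism (not hug's internals):

_hooks = []
data = []

def startup():
    def register(fn):
        _hooks.append(fn)
        return fn
    return register

@startup()
def add_data(api):
    data.append("It's working")

for hook in _hooks:   # what the framework would do at serve time
    hook(api=None)
print(data)           # ["It's working"]
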
0ee9372ee9034314853d6215d6b3c0be48796ca6
|
scripts/response_time_dist.py
|
scripts/response_time_dist.py
|
import click
import pandas
from scipy.stats import lognorm
@click.command()
@click.argument('filename')
@click.option('--column', default='Total_Trav', help='Column to identify shape, location, and scale from.')
def response_time_dist(filename, column):
"""
Returns the lognormal distribution fit of travel times.
"""
dt = pandas.read_csv(filename)
response = lognorm.fit(dt[column])
click.echo(response)
return response
if __name__ == '__main__':
response_time_dist()
|
Add script to generate lognormal fits of GIS response data.
|
Add script to generate lognormal fits of GIS response data.
|
Python
|
mit
|
FireCARES/fire-risk,FireCARES/fire-risk
|
Add script to generate lognormal fits of GIS response data.
|
import click
import pandas
from scipy.stats import lognorm
@click.command()
@click.argument('filename')
@click.option('--column', default='Total_Trav', help='Column to identify shape, location, and scale from.')
def response_time_dist(filename, column):
"""
Returns the lognormal distribution fit of travel times.
"""
dt = pandas.read_csv(filename)
response = lognorm.fit(dt[column])
click.echo(response)
return response
if __name__ == '__main__':
response_time_dist()
|
<commit_before><commit_msg>Add script to generate lognormal fits of GIS response data.<commit_after>
|
import click
import pandas
from scipy.stats import lognorm
@click.command()
@click.argument('filename')
@click.option('--column', default='Total_Trav', help='Column to identify shape, location, and scale from.')
def response_time_dist(filename, column):
"""
Returns the lognormal distribution fit of travel times.
"""
dt = pandas.read_csv(filename)
response = lognorm.fit(dt[column])
click.echo(response)
return response
if __name__ == '__main__':
response_time_dist()
|
Add script to generate lognormal fits of GIS response data.import click
import pandas
from scipy.stats import lognorm
@click.command()
@click.argument('filename')
@click.option('--column', default='Total_Trav', help='Column to identify shape, location, and scale from.')
def response_time_dist(filename, column):
"""
Returns the lognormal distribution fit of travel times.
"""
dt = pandas.read_csv(filename)
response = lognorm.fit(dt[column])
click.echo(response)
return response
if __name__ == '__main__':
response_time_dist()
|
<commit_before><commit_msg>Add script to generate lognormal fits of GIS response data.<commit_after>import click
import pandas
from scipy.stats import lognorm
@click.command()
@click.argument('filename')
@click.option('--column', default='Total_Trav', help='Column to identify shape, location, and scale from.')
def response_time_dist(filename, column):
"""
Returns the lognormal distribution fit of travel times.
"""
dt = pandas.read_csv(filename)
response = lognorm.fit(dt[column])
click.echo(response)
return response
if __name__ == '__main__':
response_time_dist()
|
|
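lognorm.fit returns a (shape, loc, scale) triple, which is exactly what the script echoes. Round-tripping on synthetic travel times shows the idea (parameter values illustrative):

from scipy.stats import lognorm

samples = lognorm.rvs(s=0.5, loc=0, scale=240, size=10000)
shape, loc, scale = lognorm.fit(samples)
print(shape, loc, scale)  # roughly (0.5, 0, 240)
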
86b710e87666ff20be8cfd78648eb67a9637dfaf
|
test/runtest/testargv.py
|
test/runtest/testargv.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test subdir args for runtest.py, for example:
python runtest.py test/subdir
"""
import os
import TestRuntest
test = TestRuntest.TestRuntest()
test.subdir('test', ['test', 'subdir'])
files = {}
files['pythonstring'] = TestRuntest.pythonstring
files['one'] = os.path.join('test/subdir', 'test_one.py')
files['two'] = os.path.join('test/subdir', 'two.py')
files['three'] = os.path.join('test', 'test_three.py')
test.write_passing_test(files['one'])
test.write_passing_test(files['two'])
test.write_passing_test(files['three'])
expect_stdout = """\
%(pythonstring)s -tt %(one)s
PASSING TEST STDOUT
%(pythonstring)s -tt %(two)s
PASSING TEST STDOUT
""" % files
expect_stderr = """\
PASSING TEST STDERR
PASSING TEST STDERR
"""
test.run(arguments = '--no-progress test/subdir',
status = 0,
stdout = expect_stdout,
stderr = expect_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Add test for "runtest test/somedir" case
|
Add test for "runtest test/somedir" case
|
Python
|
mit
|
timj/scons,timj/scons,timj/scons,timj/scons,timj/scons,timj/scons,timj/scons,timj/scons,timj/scons
|
Add test for "runtest test/somedir" case
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test subdir args for runtest.py, for example:
python runtest.py test/subdir
"""
import os
import TestRuntest
test = TestRuntest.TestRuntest()
test.subdir('test', ['test', 'subdir'])
files = {}
files['pythonstring'] = TestRuntest.pythonstring
files['one'] = os.path.join('test/subdir', 'test_one.py')
files['two'] = os.path.join('test/subdir', 'two.py')
files['three'] = os.path.join('test', 'test_three.py')
test.write_passing_test(files['one'])
test.write_passing_test(files['two'])
test.write_passing_test(files['three'])
expect_stdout = """\
%(pythonstring)s -tt %(one)s
PASSING TEST STDOUT
%(pythonstring)s -tt %(two)s
PASSING TEST STDOUT
""" % files
expect_stderr = """\
PASSING TEST STDERR
PASSING TEST STDERR
"""
test.run(arguments = '--no-progress test/subdir',
status = 0,
stdout = expect_stdout,
stderr = expect_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
<commit_before><commit_msg>Add test for "runtest test/somedir" case<commit_after>
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test subdir args for runtest.py, for example:
python runtest.py test/subdir
"""
import os
import TestRuntest
test = TestRuntest.TestRuntest()
test.subdir('test', ['test', 'subdir'])
files = {}
files['pythonstring'] = TestRuntest.pythonstring
files['one'] = os.path.join('test/subdir', 'test_one.py')
files['two'] = os.path.join('test/subdir', 'two.py')
files['three'] = os.path.join('test', 'test_three.py')
test.write_passing_test(files['one'])
test.write_passing_test(files['two'])
test.write_passing_test(files['three'])
expect_stdout = """\
%(pythonstring)s -tt %(one)s
PASSING TEST STDOUT
%(pythonstring)s -tt %(two)s
PASSING TEST STDOUT
""" % files
expect_stderr = """\
PASSING TEST STDERR
PASSING TEST STDERR
"""
test.run(arguments = '--no-progress test/subdir',
status = 0,
stdout = expect_stdout,
stderr = expect_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
Add test for "runtest test/somedir" case#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test subdir args for runtest.py, for example:
python runtest.py test/subdir
"""
import os
import TestRuntest
test = TestRuntest.TestRuntest()
test.subdir('test', ['test', 'subdir'])
files = {}
files['pythonstring'] = TestRuntest.pythonstring
files['one'] = os.path.join('test/subdir', 'test_one.py')
files['two'] = os.path.join('test/subdir', 'two.py')
files['three'] = os.path.join('test', 'test_three.py')
test.write_passing_test(files['one'])
test.write_passing_test(files['two'])
test.write_passing_test(files['three'])
expect_stdout = """\
%(pythonstring)s -tt %(one)s
PASSING TEST STDOUT
%(pythonstring)s -tt %(two)s
PASSING TEST STDOUT
""" % files
expect_stderr = """\
PASSING TEST STDERR
PASSING TEST STDERR
"""
test.run(arguments = '--no-progress test/subdir',
status = 0,
stdout = expect_stdout,
stderr = expect_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
<commit_before><commit_msg>Add test for "runtest test/somedir" case<commit_after>#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test subdir args for runtest.py, for example:
python runtest.py test/subdir
"""
import os
import TestRuntest
test = TestRuntest.TestRuntest()
test.subdir('test', ['test', 'subdir'])
files = {}
files['pythonstring'] = TestRuntest.pythonstring
files['one'] = os.path.join('test/subdir', 'test_one.py')
files['two'] = os.path.join('test/subdir', 'two.py')
files['three'] = os.path.join('test', 'test_three.py')
test.write_passing_test(files['one'])
test.write_passing_test(files['two'])
test.write_passing_test(files['three'])
expect_stdout = """\
%(pythonstring)s -tt %(one)s
PASSING TEST STDOUT
%(pythonstring)s -tt %(two)s
PASSING TEST STDOUT
""" % files
expect_stderr = """\
PASSING TEST STDERR
PASSING TEST STDERR
"""
test.run(arguments = '--no-progress test/subdir',
status = 0,
stdout = expect_stdout,
stderr = expect_stderr)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
|
35b9153d1eefb1312d12f69d821cefd0814059d0
|
resolwe_bio/migrations/0014_star_index2.py
|
resolwe_bio/migrations/0014_star_index2.py
|
from django.db import migrations
from resolwe.flow.utils.iterators import iterate_schema
def migrate_star_downstream_processes(apps, schema_editor):
"""Migrate schemas of processes that use alignment-star as input."""
Process = apps.get_model("flow", "Process")
for process in Process.objects.all():
for schema, _, _ in iterate_schema({}, process.input_schema):
if schema["type"] == "data:genomeindex:star:":
schema["type"] = "data:index:star:"
process.save()
class Migration(migrations.Migration):
"""Migrate schemas of processes that use alignment-star as input."""
dependencies = [
("resolwe_bio", "0013_star_index"),
]
operations = [
migrations.RunPython(migrate_star_downstream_processes),
]
|
Migrate inputs of all star-index related processes
|
Migrate inputs of all star-index related processes
|
Python
|
apache-2.0
|
genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio
|
Migrate inputs of all star-index related processes
|
from django.db import migrations
from resolwe.flow.utils.iterators import iterate_schema
def migrate_star_downstream_processes(apps, schema_editor):
"""Migrate schemas of processes that use alignment-star as input."""
Process = apps.get_model("flow", "Process")
for process in Process.objects.all():
for schema, _, _ in iterate_schema({}, process.input_schema):
if schema["type"] == "data:genomeindex:star:":
schema["type"] = "data:index:star:"
process.save()
class Migration(migrations.Migration):
"""Migrate schemas of processes that use alignment-star as input."""
dependencies = [
("resolwe_bio", "0013_star_index"),
]
operations = [
migrations.RunPython(migrate_star_downstream_processes),
]
|
<commit_before><commit_msg>Migrate inputs of all star-index related processes<commit_after>
|
from django.db import migrations
from resolwe.flow.utils.iterators import iterate_schema
def migrate_star_downstream_processes(apps, schema_editor):
"""Migrate schemas of processes that use alignment-star as input."""
Process = apps.get_model("flow", "Process")
for process in Process.objects.all():
for schema, _, _ in iterate_schema({}, process.input_schema):
if schema["type"] == "data:genomeindex:star:":
schema["type"] = "data:index:star:"
process.save()
class Migration(migrations.Migration):
"""Migrate schemas of processes that use alignment-star as input."""
dependencies = [
("resolwe_bio", "0013_star_index"),
]
operations = [
migrations.RunPython(migrate_star_downstream_processes),
]
|
Migrate inputs of all star-index related processesfrom django.db import migrations
from resolwe.flow.utils.iterators import iterate_schema
def migrate_star_downstream_processes(apps, schema_editor):
"""Migrate schemas of processes that use alignment-star as input."""
Process = apps.get_model("flow", "Process")
for process in Process.objects.all():
for schema, _, _ in iterate_schema({}, process.input_schema):
if schema["type"] == "data:genomeindex:star:":
schema["type"] = "data:index:star:"
process.save()
class Migration(migrations.Migration):
"""Migrate schemas of processes that use alignment-star as input."""
dependencies = [
("resolwe_bio", "0013_star_index"),
]
operations = [
migrations.RunPython(migrate_star_downstream_processes),
]
|
<commit_before><commit_msg>Migrate inputs of all star-index related processes<commit_after>from django.db import migrations
from resolwe.flow.utils.iterators import iterate_schema
def migrate_star_downstream_processes(apps, schema_editor):
"""Migrate schemas of processes that use alignment-star as input."""
Process = apps.get_model("flow", "Process")
for process in Process.objects.all():
for schema, _, _ in iterate_schema({}, process.input_schema):
if schema["type"] == "data:genomeindex:star:":
schema["type"] = "data:index:star:"
process.save()
class Migration(migrations.Migration):
"""Migrate schemas of processes that use alignment-star as input."""
dependencies = [
("resolwe_bio", "0013_star_index"),
]
operations = [
migrations.RunPython(migrate_star_downstream_processes),
]
|
|
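For readers unfamiliar with resolwe, the migration above rewrites the 'type' of every STAR-index field it finds while walking each process's input schema. A hypothetical stand-in for iterate_schema (not resolwe's actual implementation) showing the idea:
def walk_fields(schema):
    # Yield every field dict in a schema, descending into nested groups.
    for field in schema:
        if 'group' in field:
            for sub in walk_fields(field['group']):
                yield sub
        else:
            yield field
schema = [{'name': 'genome', 'type': 'data:genomeindex:star:'}]
for field in walk_fields(schema):
    if field['type'] == 'data:genomeindex:star:':
        field['type'] = 'data:index:star:'
assert schema[0]['type'] == 'data:index:star:'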
fcba965521f601d862bfb4968bedad7e2c56123c
|
models/params.py
|
models/params.py
|
"""
Parameters and parameterizable objects.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
__all__ = []
class Parameterized(object):
def __init__(self):
self.nhyper = 0
self.__names = []
self.__hypers = []
self.__transforms = []
self.__priors = []
def __repr__(self):
substrings = []
for name, hyper in zip(self.__names, self.__hypers):
value = np.array2string(hyper, separator=',')
substrings.append('{:s}={:s}'.format(name, value))
return self.__class__.__name__ + '(' + ', '.join(substrings) + ')'
def _link_hyper(self, name, hyper, transform=None, prior=None):
# FIXME: check C-order, etc?
# FIXME: add properties of some sort?
if name in self.__names:
raise ValueError('hyperparameter names must be unique')
self.nhyper += hyper.size
self.__names.append(name)
self.__hypers.append(hyper)
self.__transforms.append(transform)
self.__priors.append(prior)
def set_hyper(self, hyper):
hyper = np.array(hyper, dtype=float, copy=False, ndmin=1)
if hyper.shape != (self.nhyper,):
raise ValueError('incorrect number of hyperparameters')
offset = 0
for hyper_ in self.__hypers:
hyper_.flat[:] = hyper[offset:offset+hyper_.size]
offset += hyper_.size
def get_hyper(self):
if self.nhyper == 0:
return np.array([])
else:
return np.hstack(_ for _ in self.__hypers)
|
Add a base parameterized object class.
|
Add a base parameterized object class.
|
Python
|
bsd-2-clause
|
mwhoffman/reggie
|
Add a base parameterized object class.
|
"""
Parameters and parameterizable objects.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
__all__ = []
class Parameterized(object):
def __init__(self):
self.nhyper = 0
self.__names = []
self.__hypers = []
self.__transforms = []
self.__priors = []
def __repr__(self):
substrings = []
for name, hyper in zip(self.__names, self.__hypers):
value = np.array2string(hyper, separator=',')
substrings.append('{:s}={:s}'.format(name, value))
return self.__class__.__name__ + '(' + ', '.join(substrings) + ')'
def _link_hyper(self, name, hyper, transform=None, prior=None):
# FIXME: check C-order, etc?
# FIXME: add properties of some sort?
if name in self.__names:
raise ValueError('hyperparameter names must be unique')
self.nhyper += hyper.size
self.__names.append(name)
self.__hypers.append(hyper)
self.__transforms.append(transform)
self.__priors.append(prior)
def set_hyper(self, hyper):
hyper = np.array(hyper, dtype=float, copy=False, ndmin=1)
if hyper.shape != (self.nhyper,):
raise ValueError('incorrect number of hyperparameters')
offset = 0
for hyper_ in self.__hypers:
hyper_.flat[:] = hyper[offset:offset+hyper_.size]
offset += hyper_.size
def get_hyper(self):
if self.nhyper == 0:
return np.array([])
else:
return np.hstack(_ for _ in self.__hypers)
|
<commit_before><commit_msg>Add a base parameterized object class.<commit_after>
|
"""
Parameters and parameterizable objects.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
__all__ = []
class Parameterized(object):
def __init__(self):
self.nhyper = 0
self.__names = []
self.__hypers = []
self.__transforms = []
self.__priors = []
def __repr__(self):
substrings = []
for name, hyper in zip(self.__names, self.__hypers):
value = np.array2string(hyper, separator=',')
substrings.append('{:s}={:s}'.format(name, value))
return self.__class__.__name__ + '(' + ', '.join(substrings) + ')'
def _link_hyper(self, name, hyper, transform=None, prior=None):
# FIXME: check C-order, etc?
# FIXME: add properties of some sort?
if name in self.__names:
raise ValueError('hyperparameter names must be unique')
self.nhyper += hyper.size
self.__names.append(name)
self.__hypers.append(hyper)
self.__transforms.append(transform)
self.__priors.append(prior)
def set_hyper(self, hyper):
hyper = np.array(hyper, dtype=float, copy=False, ndmin=1)
if hyper.shape != (self.nhyper,):
raise ValueError('incorrect number of hyperparameters')
offset = 0
for hyper_ in self.__hypers:
hyper_.flat[:] = hyper[offset:offset+hyper_.size]
offset += hyper_.size
def get_hyper(self):
if self.nhyper == 0:
return np.array([])
else:
return np.hstack(_ for _ in self.__hypers)
|
Add a base parameterized object class."""
Parameters and parameterizable objects.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
__all__ = []
class Parameterized(object):
def __init__(self):
self.nhyper = 0
self.__names = []
self.__hypers = []
self.__transforms = []
self.__priors = []
def __repr__(self):
substrings = []
for name, hyper in zip(self.__names, self.__hypers):
value = np.array2string(hyper, separator=',')
substrings.append('{:s}={:s}'.format(name, value))
return self.__class__.__name__ + '(' + ', '.join(substrings) + ')'
def _link_hyper(self, name, hyper, transform=None, prior=None):
# FIXME: check C-order, etc?
# FIXME: add properties of some sort?
if name in self.__names:
raise ValueError('hyperparameter names must be unique')
self.nhyper += hyper.size
self.__names.append(name)
self.__hypers.append(hyper)
self.__transforms.append(transform)
self.__priors.append(prior)
def set_hyper(self, hyper):
hyper = np.array(hyper, dtype=float, copy=False, ndmin=1)
if hyper.shape != (self.nhyper,):
raise ValueError('incorrect number of hyperparameters')
offset = 0
for hyper_ in self.__hypers:
hyper_.flat[:] = hyper[offset:offset+hyper_.size]
offset += hyper_.size
def get_hyper(self):
if self.nhyper == 0:
return np.array([])
else:
return np.hstack(_ for _ in self.__hypers)
|
<commit_before><commit_msg>Add a base parameterized object class.<commit_after>"""
Parameters and parameterizable objects.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
__all__ = []
class Parameterized(object):
def __init__(self):
self.nhyper = 0
self.__names = []
self.__hypers = []
self.__transforms = []
self.__priors = []
def __repr__(self):
substrings = []
for name, hyper in zip(self.__names, self.__hypers):
value = np.array2string(hyper, separator=',')
substrings.append('{:s}={:s}'.format(name, value))
return self.__class__.__name__ + '(' + ', '.join(substrings) + ')'
def _link_hyper(self, name, hyper, transform=None, prior=None):
# FIXME: check C-order, etc?
# FIXME: add properties of some sort?
if name in self.__names:
raise ValueError('hyperparameter names must be unique')
self.nhyper += hyper.size
self.__names.append(name)
self.__hypers.append(hyper)
self.__transforms.append(transform)
self.__priors.append(prior)
def set_hyper(self, hyper):
hyper = np.array(hyper, dtype=float, copy=False, ndmin=1)
if hyper.shape != (self.nhyper,):
raise ValueError('incorrect number of hyperparameters')
offset = 0
for hyper_ in self.__hypers:
hyper_.flat[:] = hyper[offset:offset+hyper_.size]
offset += hyper_.size
def get_hyper(self):
if self.nhyper == 0:
return np.array([])
else:
return np.hstack(_ for _ in self.__hypers)
|
|
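The Parameterized base class above is easiest to read through a concrete subclass. A minimal sketch with the Parameterized class in scope; the Kernel name and lengthscale hyperparameter are illustrative, not from the source:
import numpy as np
class Kernel(Parameterized):
    def __init__(self, lengthscale):
        super(Kernel, self).__init__()
        # Keep a reference to the linked array; set_hyper writes into it in place.
        self._lengthscale = np.array(lengthscale, dtype=float, ndmin=1)
        self._link_hyper('lengthscale', self._lengthscale)
k = Kernel([1.0, 2.0])
print(k)  # e.g. Kernel(lengthscale=[1.,2.])
k.set_hyper([0.5, 0.25])
assert np.allclose(k.get_hyper(), [0.5, 0.25])
assert np.allclose(k._lengthscale, [0.5, 0.25])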
caf25fab4495e116303a83d52601da164b13638f
|
angkot/route/management/commands/export_geojson.py
|
angkot/route/management/commands/export_geojson.py
|
import sys
import os
import json
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Export Route(s) as GeoJSON'
option_list = BaseCommand.option_list + (
make_option('-o', dest='output_directory'),
)
def handle(self, *args, **kwargs):
if len(args) == 0:
raise CommandError('Please specify transportation id')
output = kwargs.get('output_directory')
tid = args[0]
self._export(tid, output)
def _export(self, tid, output=None):
t = self._get_route_or_fail(tid)
self._write(t, output)
def _get_route_or_fail(self, tid):
from angkot.route.models import Transportation
t = Transportation.objects.filter(pk=tid, active=True)
if len(t) == 0:
raise CommandError('Transportation id not found: {}'.format(tid))
return t[0]
def _write(self, t, output=None):
data = t.to_geojson()
data['properties']['legal'] = dict(
license='ODbL 1.0',
copyright='© AngkotWebId Contributors')
geojson = json.dumps(data, indent=4)
out = self._get_output(t, output)
out.write(geojson)
out.close()
def _get_output(self, t, output=None):
if output is None:
return sys.stdout
fname = '{} - {} - {} - {} - {}.json'.format(t.id, t.province, t.city, t.company, t.number)
path = os.path.join(output, fname)
return open(path, 'w')
|
Add script to export route to GeoJSON data
|
Add script to export route to GeoJSON data
|
Python
|
agpl-3.0
|
angkot/angkot,angkot/angkot,angkot/angkot,angkot/angkot
|
Add script to export route to GeoJSON data
|
import sys
import os
import json
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Export Route(s) as GeoJSON'
option_list = BaseCommand.option_list + (
make_option('-o', dest='output_directory'),
)
def handle(self, *args, **kwargs):
if len(args) == 0:
raise CommandError('Please specify transportation id')
output = kwargs.get('output_directory')
tid = args[0]
self._export(tid, output)
def _export(self, tid, output=None):
t = self._get_route_or_fail(tid)
self._write(t, output)
def _get_route_or_fail(self, tid):
from angkot.route.models import Transportation
t = Transportation.objects.filter(pk=tid, active=True)
if len(t) == 0:
raise CommandError('Transportation id not found: {}'.format(tid))
return t[0]
def _write(self, t, output=None):
data = t.to_geojson()
data['properties']['legal'] = dict(
license='ODbL 1.0',
copyright='© AngkotWebId Contributors')
geojson = json.dumps(data, indent=4)
out = self._get_output(t, output)
out.write(geojson)
out.close()
def _get_output(self, t, output=None):
if output is None:
return sys.stdout
fname = '{} - {} - {} - {} - {}.json'.format(t.id, t.province, t.city, t.company, t.number)
path = os.path.join(output, fname)
return open(path, 'w')
|
<commit_before><commit_msg>Add script to export route to GeoJSON data<commit_after>
|
import sys
import os
import json
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Export Route(s) as GeoJSON'
option_list = BaseCommand.option_list + (
make_option('-o', dest='output_directory'),
)
def handle(self, *args, **kwargs):
if len(args) == 0:
raise CommandError('Please specify transportation id')
output = kwargs.get('output_directory')
tid = args[0]
self._export(tid, output)
def _export(self, tid, output=None):
t = self._get_route_or_fail(tid)
self._write(t, output)
def _get_route_or_fail(self, tid):
from angkot.route.models import Transportation
t = Transportation.objects.filter(pk=tid, active=True)
if len(t) == 0:
raise CommandError('Transportation id not found: {}'.format(tid))
return t[0]
def _write(self, t, output=None):
data = t.to_geojson()
data['properties']['legal'] = dict(
license='ODbL 1.0',
copyright='© AngkotWebId Contributors')
geojson = json.dumps(data, indent=4)
out = self._get_output(t, output)
out.write(geojson)
out.close()
def _get_output(self, t, output=None):
if output is None:
return sys.stdout
fname = '{} - {} - {} - {} - {}.json'.format(t.id, t.province, t.city, t.company, t.number)
path = os.path.join(output, fname)
return open(path, 'w')
|
Add script to export route to GeoJSON dataimport sys
import os
import json
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Export Route(s) as GeoJSON'
option_list = BaseCommand.option_list + (
make_option('-o', dest='output_directory'),
)
def handle(self, *args, **kwargs):
if len(args) == 0:
raise CommandError('Please specify transportation id')
output = kwargs.get('output_directory')
tid = args[0]
self._export(tid, output)
def _export(self, tid, output=None):
t = self._get_route_or_fail(tid)
self._write(t, output)
def _get_route_or_fail(self, tid):
from angkot.route.models import Transportation
t = Transportation.objects.filter(pk=tid, active=True)
if len(t) == 0:
raise CommandError('Transportation id not found: {}'.format(tid))
return t[0]
def _write(self, t, output=None):
data = t.to_geojson()
data['properties']['legal'] = dict(
license='ODbL 1.0',
copyright='© AngkotWebId Contributors')
geojson = json.dumps(data, indent=4)
out = self._get_output(t, output)
out.write(geojson)
out.close()
def _get_output(self, t, output=None):
if output is None:
return sys.stdout
fname = '{} - {} - {} - {} - {}.json'.format(t.id, t.province, t.city, t.company, t.number)
path = os.path.join(output, fname)
return open(path, 'w')
|
<commit_before><commit_msg>Add script to export route to GeoJSON data<commit_after>import sys
import os
import json
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Export Route(s) as GeoJSON'
option_list = BaseCommand.option_list + (
make_option('-o', dest='output_directory'),
)
def handle(self, *args, **kwargs):
if len(args) == 0:
raise CommandError('Please specify transportation id')
output = kwargs.get('output_directory')
tid = args[0]
self._export(tid, output)
def _export(self, tid, output=None):
t = self._get_route_or_fail(tid)
self._write(t, output)
def _get_route_or_fail(self, tid):
from angkot.route.models import Transportation
t = Transportation.objects.filter(pk=tid, active=True)
if len(t) == 0:
raise CommandError('Transportation id not found: {}'.format(tid))
return t[0]
def _write(self, t, output=None):
data = t.to_geojson()
data['properties']['legal'] = dict(
license='ODbL 1.0',
copyright='© AngkotWebId Contributors')
geojson = json.dumps(data, indent=4)
out = self._get_output(t, output)
out.write(geojson)
out.close()
def _get_output(self, t, output=None):
if output is None:
return sys.stdout
fname = '{} - {} - {} - {} - {}.json'.format(t.id, t.province, t.city, t.company, t.number)
path = os.path.join(output, fname)
return open(path, 'w')
|
|
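The management command above can be driven from the shell as python manage.py export_geojson <id> -o <dir>, or programmatically through Django's call_command; a minimal sketch, where the transportation id 42 and the output directory are made up for the example:
from django.core.management import call_command
call_command('export_geojson', '42')                           # GeoJSON to stdout
call_command('export_geojson', '42', output_directory='/tmp')  # writes '<id> - ... .json' under /tmp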
fbf6e40eee370ff5bc61e79334c76e76cd2032e4
|
fix_int16.py
|
fix_int16.py
|
from os import listdir, path
import numpy as np
from patch_headers import fix_int16_images, compare_data_in_fits
from triage_fits_files import ImageFileCollection
fix_vol = '/Volumes/FULL BACKUP/processed'
#fix_vol = 'foo'
raw_vol = '/Volumes/feder_data_originals/ast390/raw'
dirs_to_fix = listdir(fix_vol)
for current_dir in dirs_to_fix:
current_proc = path.join(fix_vol, current_dir)
current_raw = path.join(raw_vol, current_dir)
print '----------------- processing directory %s -----------------------' % current_proc
try:
test = open(path.join(current_proc,'FIXED_YAY'),'rb')
print ' >>>>>>>>>>>>>>>>>>skipping this directory, already done.'
continue
except IOError:
pass
fix_int16_images(current_proc)
files_to_check = ImageFileCollection(current_proc, keywords=['imagetyp'])
files_to_check = files_to_check.summary_info['file']
files_match = np.zeros(len(files_to_check), dtype=np.bool)
for nfil, fil in enumerate(files_to_check):
fixed_name = path.join(current_proc, fil)
raw_name = path.join(current_raw, fil)
files_match[nfil] = compare_data_in_fits(fixed_name, raw_name)
if not files_match[nfil]:
print '****************-------> FAIL on file %s' % fixed_name
if not files_match.all():
print '================>>>>>> One or more failures in directory %s' % current_proc
crap = open(path.join(current_proc,'FAILED'),'wb')
else:
print '++++++++++++++++++ success in directory %s ++++++++++++++++++++++++' % current_proc
crap = open(path.join(current_proc,'FIXED_YAY'),'wb')
crap.close()
|
Add script to fix int16 files.
|
Add script to fix int16 files.
|
Python
|
bsd-3-clause
|
mwcraig/msumastro
|
Add script to fix int16 files.
|
from os import listdir, path
import numpy as np
from patch_headers import fix_int16_images, compare_data_in_fits
from triage_fits_files import ImageFileCollection
fix_vol = '/Volumes/FULL BACKUP/processed'
#fix_vol = 'foo'
raw_vol = '/Volumes/feder_data_originals/ast390/raw'
dirs_to_fix = listdir(fix_vol)
for current_dir in dirs_to_fix:
current_proc = path.join(fix_vol, current_dir)
current_raw = path.join(raw_vol, current_dir)
print '----------------- processing directory %s -----------------------' % current_proc
try:
test = open(path.join(current_proc,'FIXED_YAY'),'rb')
print ' >>>>>>>>>>>>>>>>>>skipping this directory, already done.'
continue
except IOError:
pass
fix_int16_images(current_proc)
files_to_check = ImageFileCollection(current_proc, keywords=['imagetyp'])
files_to_check = files_to_check.summary_info['file']
files_match = np.zeros(len(files_to_check), dtype=np.bool)
for nfil, fil in enumerate(files_to_check):
fixed_name = path.join(current_proc, fil)
raw_name = path.join(current_raw, fil)
files_match[nfil] = compare_data_in_fits(fixed_name, raw_name)
if not files_match[nfil]:
print '****************-------> FAIL on file %s' % fixed_name
if not files_match.all():
print '================>>>>>> One or more failures in directory %s' % current_proc
crap = open(path.join(current_proc,'FAILED'),'wb')
else:
print '++++++++++++++++++ success in directory %s ++++++++++++++++++++++++' % current_proc
crap = open(path.join(current_proc,'FIXED_YAY'),'wb')
crap.close()
|
<commit_before><commit_msg>Add script to fix int16 files.<commit_after>
|
from os import listdir, path
import numpy as np
from patch_headers import fix_int16_images, compare_data_in_fits
from triage_fits_files import ImageFileCollection
fix_vol = '/Volumes/FULL BACKUP/processed'
#fix_vol = 'foo'
raw_vol = '/Volumes/feder_data_originals/ast390/raw'
dirs_to_fix = listdir(fix_vol)
for current_dir in dirs_to_fix:
current_proc = path.join(fix_vol, current_dir)
current_raw = path.join(raw_vol, current_dir)
print '----------------- processing directory %s -----------------------' % current_proc
try:
test = open(path.join(current_proc,'FIXED_YAY'),'rb')
print ' >>>>>>>>>>>>>>>>>>skipping this directory, already done.'
continue
except IOError:
pass
fix_int16_images(current_proc)
files_to_check = ImageFileCollection(current_proc, keywords=['imagetyp'])
files_to_check = files_to_check.summary_info['file']
files_match = np.zeros(len(files_to_check), dtype=np.bool)
for nfil, fil in enumerate(files_to_check):
fixed_name = path.join(current_proc, fil)
raw_name = path.join(current_raw, fil)
files_match[nfil] = compare_data_in_fits(fixed_name, raw_name)
if not files_match[nfil]:
print '****************-------> FAIL on file %s' % fixed_name
if not files_match.all():
print '================>>>>>> One or more failures in directory %s' % current_proc
crap = open(path.join(current_proc,'FAILED'),'wb')
else:
print '++++++++++++++++++ success in directory %s ++++++++++++++++++++++++' % current_proc
crap = open(path.join(current_proc,'FIXED_YAY'),'wb')
crap.close()
|
Add script to fix int16 files.from os import listdir, path
import numpy as np
from patch_headers import fix_int16_images, compare_data_in_fits
from triage_fits_files import ImageFileCollection
fix_vol = '/Volumes/FULL BACKUP/processed'
#fix_vol = 'foo'
raw_vol = '/Volumes/feder_data_originals/ast390/raw'
dirs_to_fix = listdir(fix_vol)
for current_dir in dirs_to_fix:
current_proc = path.join(fix_vol, current_dir)
current_raw = path.join(raw_vol, current_dir)
print '----------------- processing directory %s -----------------------' % current_proc
try:
test = open(path.join(current_proc,'FIXED_YAY'),'rb')
print ' >>>>>>>>>>>>>>>>>>skipping this directory, already done.'
continue
except IOError:
pass
fix_int16_images(current_proc)
files_to_check = ImageFileCollection(current_proc, keywords=['imagetyp'])
files_to_check = files_to_check.summary_info['file']
files_match = np.zeros(len(files_to_check), dtype=np.bool)
for nfil, fil in enumerate(files_to_check):
fixed_name = path.join(current_proc, fil)
raw_name = path.join(current_raw, fil)
files_match[nfil] = compare_data_in_fits(fixed_name, raw_name)
if not files_match[nfil]:
print '****************-------> FAIL on file %s' % fixed_name
if not files_match.all():
print '================>>>>>> One or more failures in directory %s' % current_proc
crap = open(path.join(current_proc,'FAILED'),'wb')
else:
print '++++++++++++++++++ success in directory %s ++++++++++++++++++++++++' % current_proc
crap = open(path.join(current_proc,'FIXED_YAY'),'wb')
crap.close()
|
<commit_before><commit_msg>Add script to fix int16 files.<commit_after>from os import listdir, path
import numpy as np
from patch_headers import fix_int16_images, compare_data_in_fits
from triage_fits_files import ImageFileCollection
fix_vol = '/Volumes/FULL BACKUP/processed'
#fix_vol = 'foo'
raw_vol = '/Volumes/feder_data_originals/ast390/raw'
dirs_to_fix = listdir(fix_vol)
for current_dir in dirs_to_fix:
current_proc = path.join(fix_vol, current_dir)
current_raw = path.join(raw_vol, current_dir)
print '----------------- processing directory %s -----------------------' % current_proc
try:
test = open(path.join(current_proc,'FIXED_YAY'),'rb')
print ' >>>>>>>>>>>>>>>>>>skipping this directory, already done.'
continue
except IOError:
pass
fix_int16_images(current_proc)
files_to_check = ImageFileCollection(current_proc, keywords=['imagetyp'])
files_to_check = files_to_check.summary_info['file']
files_match = np.zeros(len(files_to_check), dtype=np.bool)
for nfil, fil in enumerate(files_to_check):
fixed_name = path.join(current_proc, fil)
raw_name = path.join(current_raw, fil)
files_match[nfil] = compare_data_in_fits(fixed_name, raw_name)
if not files_match[nfil]:
print '****************-------> FAIL on file %s' % fixed_name
if not files_match.all():
print '================>>>>>> One or more failures in directory %s' % current_proc
crap = open(path.join(current_proc,'FAILED'),'wb')
else:
print '++++++++++++++++++ success in directory %s ++++++++++++++++++++++++' % current_proc
crap = open(path.join(current_proc,'FIXED_YAY'),'wb')
crap.close()
|
|
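One readability note on the script above: the try/open probe for the FIXED_YAY marker can be expressed directly with os.path.exists; an equivalent sketch:
from os import path
def already_fixed(directory):
    # Same check as the try/except IOError block above, without opening the file.
    return path.exists(path.join(directory, 'FIXED_YAY'))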
bb103cd384702b68e36b4588e679f86568565551
|
snapshot/upload-dump-to-s3.py
|
snapshot/upload-dump-to-s3.py
|
#!/usr/bin/env python
import os
import sys
import json
import requests
from requests.exceptions import HTTPError
def upload_dump_to_s3():
s3_post_data_url = json.loads(os.environ['S3_POST_URL_DATA'])
dump_file = os.environ['DUMP_FILE']
url = s3_post_data_url['url']
fields = s3_post_data_url['fields']
files = { "file": open(dump_file, 'r') }
response = requests.post(url, data=fields, files=files)
try:
response.raise_for_status()
except HTTPError as e:
print("Error uploading {} to {}: {}".format(dump_file, url, e.args[0]))
sys.exit(1)
else:
print('Successfully uploaded {} to {}'.format(dump_file, url))
if __name__ == "__main__":
upload_dump_to_s3()
|
Add script to upload dump to s3
|
Add script to upload dump to s3
This is a simple python script which uses the signed url previously
generated to upload the encrypted dump to s3 using the requests library.
If the response from s3 is not successful, an error is raised and we
print the error message. It also sets a non-zero exit code.
|
Python
|
mit
|
alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws,alphagov/digitalmarketplace-aws
|
Add script to upload dump to s3
This is a simple python script which uses the signed url previously
generated to upload the encrypted dump to s3 using the requests library.
If the response from s3 is not successful, an error is raised and we
print the error message. It also sets a non-zero exit code.
|
#!/usr/bin/env python
import os
import sys
import json
import requests
from requests.exceptions import HTTPError
def upload_dump_to_s3():
s3_post_data_url = json.loads(os.environ['S3_POST_URL_DATA'])
dump_file = os.environ['DUMP_FILE']
url = s3_post_data_url['url']
fields = s3_post_data_url['fields']
files = { "file": open(dump_file, 'r') }
response = requests.post(url, data=fields, files=files)
try:
response.raise_for_status()
except HTTPError as e:
print("Error uploading {} to {}: {}".format(dump_file, url, e.args[0]))
sys.exit(1)
else:
print('Successfully uploaded {} to {}'.format(dump_file, url))
if __name__ == "__main__":
upload_dump_to_s3()
|
<commit_before><commit_msg>Add script to upload dump to s3
This is a simple python script which uses the signed url previously
generated to upload the encrypted dump to s3 using the requests library.
If the response from s3 is not successful, an error is raised and we
print the error message. It also sets a non-zero exit code.<commit_after>
|
#!/usr/bin/env python
import os
import sys
import json
import requests
from requests.exceptions import HTTPError
def upload_dump_to_s3():
s3_post_data_url = json.loads(os.environ['S3_POST_URL_DATA'])
dump_file = os.environ['DUMP_FILE']
url = s3_post_data_url['url']
fields = s3_post_data_url['fields']
files = { "file": open(dump_file, 'r') }
response = requests.post(url, data=fields, files=files)
try:
response.raise_for_status()
except HTTPError as e:
print("Error uploading {} to {}: {}".format(dump_file, url, e.args[0]))
sys.exit(1)
else:
print('Successfully uploaded {} to {}'.format(dump_file, url))
if __name__ == "__main__":
upload_dump_to_s3()
|
Add script to upload dump to s3
This is a simple python script which uses the signed url previously
generated to upload the encrypted dump to s3 using the requests library.
If the response from s3 is not successful, an error is raised and we
print the error message. It also sets a non-zero exit code.#!/usr/bin/env python
import os
import sys
import json
import requests
from requests.exceptions import HTTPError
def upload_dump_to_s3():
s3_post_data_url = json.loads(os.environ['S3_POST_URL_DATA'])
dump_file = os.environ['DUMP_FILE']
url = s3_post_data_url['url']
fields = s3_post_data_url['fields']
files = { "file": open(dump_file, 'r') }
response = requests.post(url, data=fields, files=files)
try:
response.raise_for_status()
except HTTPError as e:
print("Error uploading {} to {}: {}".format(dump_file, url, e.args[0]))
sys.exit(1)
else:
print('Successfully uploaded {} to {}'.format(dump_file, url))
if __name__ == "__main__":
upload_dump_to_s3()
|
<commit_before><commit_msg>Add script to upload dump to s3
This is a simple python script which uses the signed url previously
generated to upload the encrypted dump to s3 using the requests library.
If the response from s3 is not successful, an error is raised and we
print the error message. It also sets a non-zero exit code.<commit_after>#!/usr/bin/env python
import os
import sys
import json
import requests
from requests.exceptions import HTTPError
def upload_dump_to_s3():
s3_post_data_url = json.loads(os.environ['S3_POST_URL_DATA'])
dump_file = os.environ['DUMP_FILE']
url = s3_post_data_url['url']
fields = s3_post_data_url['fields']
files = { "file": open(dump_file, 'r') }
response = requests.post(url, data=fields, files=files)
try:
response.raise_for_status()
except HTTPError as e:
print("Error uploading {} to {}: {}".format(dump_file, url, e.args[0]))
sys.exit(1)
else:
print('Successfully uploaded {} to {}'.format(dump_file, url))
if __name__ == "__main__":
upload_dump_to_s3()
|
|
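For context on where S3_POST_URL_DATA comes from: an S3 presigned POST is a {'url': ..., 'fields': {...}} payload, the exact shape the uploader above reads from its environment. A hypothetical producer side using boto3, with a made-up bucket and key:
import json
import boto3
s3 = boto3.client('s3')
post = s3.generate_presigned_post(Bucket='my-dump-bucket',
                                  Key='dumps/db.dump.gpg')
# post == {'url': ..., 'fields': {...}}; serialise it for the uploader, e.g.
# os.environ['S3_POST_URL_DATA'] = json.dumps(post)
print(json.dumps(post))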
8c05529f93dfe4fb3fffbf1d5f46b1d38adc6fce
|
social_core/tests/backends/test_chatwork.py
|
social_core/tests/backends/test_chatwork.py
|
import json
from .oauth import OAuth2Test
class ChatworkOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.chatwork.ChatworkOAuth2'
user_data_url = 'https://api.chatwork.com/v2/me'
expected_username = 'hogehoge'
access_token_body = json.dumps({
"access_token": "pyopyopyopyopyopyopyopyopyopyo",
"token_type": "Bearer",
"expires_in": "1501138041000",
"refresh_token": "pyopyopyopyopyopyo",
"scope": "rooms.all:read_write"
})
user_data_body = json.dumps({
"account_id": 123,
"room_id": 322,
"name": "Foo Bar",
"chatwork_id": "hogehoge",
"organization_id": 101,
"organization_name": "Foo foobar",
"department": "Support",
"title": "CMO",
"url": "http://www.example.com",
"introduction": "",
"mail": "hogehoge@example.com",
"tel_organization": "",
"tel_extension": "",
"tel_mobile": "",
"skype": "",
"facebook": "",
"twitter": "",
"avatar_image_url": "https://www.example.com/hogehoge.jpg",
"login_mail": "hogehoge@example.com"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
Add test for ChatworkOAuth2 backend
|
Add test for ChatworkOAuth2 backend
|
Python
|
bsd-3-clause
|
python-social-auth/social-core,python-social-auth/social-core
|
Add test for ChatworkOAuth2 backend
|
import json
from .oauth import OAuth2Test
class ChatworkOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.chatwork.ChatworkOAuth2'
user_data_url = 'https://api.chatwork.com/v2/me'
expected_username = 'hogehoge'
access_token_body = json.dumps({
"access_token": "pyopyopyopyopyopyopyopyopyopyo",
"token_type": "Bearer",
"expires_in": "1501138041000",
"refresh_token": "pyopyopyopyopyopyo",
"scope": "rooms.all:read_write"
})
user_data_body = json.dumps({
"account_id": 123,
"room_id": 322,
"name": "Foo Bar",
"chatwork_id": "hogehoge",
"organization_id": 101,
"organization_name": "Foo foobar",
"department": "Support",
"title": "CMO",
"url": "http://www.example.com",
"introduction": "",
"mail": "hogehoge@example.com",
"tel_organization": "",
"tel_extension": "",
"tel_mobile": "",
"skype": "",
"facebook": "",
"twitter": "",
"avatar_image_url": "https://www.example.com/hogehoge.jpg",
"login_mail": "hogehoge@example.com"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
<commit_before><commit_msg>Add test for ChatworkOAuth2 backend<commit_after>
|
import json
from .oauth import OAuth2Test
class ChatworkOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.chatwork.ChatworkOAuth2'
user_data_url = 'https://api.chatwork.com/v2/me'
expected_username = 'hogehoge'
access_token_body = json.dumps({
"access_token": "pyopyopyopyopyopyopyopyopyopyo",
"token_type": "Bearer",
"expires_in": "1501138041000",
"refresh_token": "pyopyopyopyopyopyo",
"scope": "rooms.all:read_write"
})
user_data_body = json.dumps({
"account_id": 123,
"room_id": 322,
"name": "Foo Bar",
"chatwork_id": "hogehoge",
"organization_id": 101,
"organization_name": "Foo foobar",
"department": "Support",
"title": "CMO",
"url": "http://www.example.com",
"introduction": "",
"mail": "hogehoge@example.com",
"tel_organization": "",
"tel_extension": "",
"tel_mobile": "",
"skype": "",
"facebook": "",
"twitter": "",
"avatar_image_url": "https://www.example.com/hogehoge.jpg",
"login_mail": "hogehoge@example.com"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
Add test for ChatworkOAuth2 backendimport json
from .oauth import OAuth2Test
class ChatworkOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.chatwork.ChatworkOAuth2'
user_data_url = 'https://api.chatwork.com/v2/me'
expected_username = 'hogehoge'
access_token_body = json.dumps({
"access_token": "pyopyopyopyopyopyopyopyopyopyo",
"token_type": "Bearer",
"expires_in": "1501138041000",
"refresh_token": "pyopyopyopyopyopyo",
"scope": "rooms.all:read_write"
})
user_data_body = json.dumps({
"account_id": 123,
"room_id": 322,
"name": "Foo Bar",
"chatwork_id": "hogehoge",
"organization_id": 101,
"organization_name": "Foo foobar",
"department": "Support",
"title": "CMO",
"url": "http://www.example.com",
"introduction": "",
"mail": "hogehoge@example.com",
"tel_organization": "",
"tel_extension": "",
"tel_mobile": "",
"skype": "",
"facebook": "",
"twitter": "",
"avatar_image_url": "https://www.example.com/hogehoge.jpg",
"login_mail": "hogehoge@example.com"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
<commit_before><commit_msg>Add test for ChatworkOAuth2 backend<commit_after>import json
from .oauth import OAuth2Test
class ChatworkOAuth2Test(OAuth2Test):
backend_path = 'social_core.backends.chatwork.ChatworkOAuth2'
user_data_url = 'https://api.chatwork.com/v2/me'
expected_username = 'hogehoge'
access_token_body = json.dumps({
"access_token": "pyopyopyopyopyopyopyopyopyopyo",
"token_type": "Bearer",
"expires_in": "1501138041000",
"refresh_token": "pyopyopyopyopyopyo",
"scope": "rooms.all:read_write"
})
user_data_body = json.dumps({
"account_id": 123,
"room_id": 322,
"name": "Foo Bar",
"chatwork_id": "hogehoge",
"organization_id": 101,
"organization_name": "Foo foobar",
"department": "Support",
"title": "CMO",
"url": "http://www.example.com",
"introduction": "",
"mail": "hogehoge@example.com",
"tel_organization": "",
"tel_extension": "",
"tel_mobile": "",
"skype": "",
"facebook": "",
"twitter": "",
"avatar_image_url": "https://www.example.com/hogehoge.jpg",
"login_mail": "hogehoge@example.com"
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
|
0aafd10663f42c02d290949453946afe9f1e2f88
|
py/fraction-addition-and-subtraction.py
|
py/fraction-addition-and-subtraction.py
|
import re
class Solution(object):
def fractionAddition(self, expression):
"""
:type expression: str
:rtype: str
"""
expression = expression.replace('-+', '-')
expression = expression.replace('+-', '-')
matches = re.findall(r'([+-]?)(\d+)/(\d+)', expression)
ans = (0, 1)
for m in matches:
if m[0] == '-':
v = -int(m[1]), int(m[2])
else:
v = int(m[1]), int(m[2])
ans = ans[0] * v[1] + ans[1] * v[0], ans[1] * v[1]
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
neg = 1
if ans[0] * ans[1] < 0:
neg = -1
g = gcd(abs(ans[0]), abs(ans[1]))
return '{}/{}'.format(neg * abs(ans[0]) / g, abs(ans[1]) / g)
|
Add py solution for 592. Fraction Addition and Subtraction
|
Add py solution for 592. Fraction Addition and Subtraction
592. Fraction Addition and Subtraction: https://leetcode.com/problems/fraction-addition-and-subtraction/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 592. Fraction Addition and Subtraction
592. Fraction Addition and Subtraction: https://leetcode.com/problems/fraction-addition-and-subtraction/
|
import re
class Solution(object):
def fractionAddition(self, expression):
"""
:type expression: str
:rtype: str
"""
expression = expression.replace('-+', '-')
expression = expression.replace('+-', '-')
matches = re.findall(r'([+-]?)(\d+)/(\d+)', expression)
ans = (0, 1)
for m in matches:
if m[0] == '-':
v = -int(m[1]), int(m[2])
else:
v = int(m[1]), int(m[2])
ans = ans[0] * v[1] + ans[1] * v[0], ans[1] * v[1]
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
neg = 1
if ans[0] * ans[1] < 0:
neg = -1
g = gcd(abs(ans[0]), abs(ans[1]))
return '{}/{}'.format(neg * abs(ans[0]) / g, abs(ans[1]) / g)
|
<commit_before><commit_msg>Add py solution for 592. Fraction Addition and Subtraction
592. Fraction Addition and Subtraction: https://leetcode.com/problems/fraction-addition-and-subtraction/<commit_after>
|
import re
class Solution(object):
def fractionAddition(self, expression):
"""
:type expression: str
:rtype: str
"""
expression = expression.replace('-+', '-')
expression = expression.replace('+-', '-')
matches = re.findall(r'([+-]?)(\d+)/(\d+)', expression)
ans = (0, 1)
for m in matches:
if m[0] == '-':
v = -int(m[1]), int(m[2])
else:
v = int(m[1]), int(m[2])
ans = ans[0] * v[1] + ans[1] * v[0], ans[1] * v[1]
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
neg = 1
if ans[0] * ans[1] < 0:
neg = -1
g = gcd(abs(ans[0]), abs(ans[1]))
return '{}/{}'.format(neg * abs(ans[0]) / g, abs(ans[1]) / g)
|
Add py solution for 592. Fraction Addition and Subtraction
592. Fraction Addition and Subtraction: https://leetcode.com/problems/fraction-addition-and-subtraction/import re
class Solution(object):
def fractionAddition(self, expression):
"""
:type expression: str
:rtype: str
"""
expression = expression.replace('-+', '-')
expression = expression.replace('+-', '-')
matches = re.findall(r'([+-]?)(\d+)/(\d+)', expression)
ans = (0, 1)
for m in matches:
if m[0] == '-':
v = -int(m[1]), int(m[2])
else:
v = int(m[1]), int(m[2])
ans = ans[0] * v[1] + ans[1] * v[0], ans[1] * v[1]
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
neg = 1
if ans[0] * ans[1] < 0:
neg = -1
g = gcd(abs(ans[0]), abs(ans[1]))
return '{}/{}'.format(neg * abs(ans[0]) / g, abs(ans[1]) / g)
|
<commit_before><commit_msg>Add py solution for 592. Fraction Addition and Subtraction
592. Fraction Addition and Subtraction: https://leetcode.com/problems/fraction-addition-and-subtraction/<commit_after>import re
class Solution(object):
def fractionAddition(self, expression):
"""
:type expression: str
:rtype: str
"""
expression = expression.replace('-+', '-')
expression = expression.replace('+-', '-')
matches = re.findall(r'([+-]?)(\d+)/(\d+)', expression)
ans = (0, 1)
for m in matches:
if m[0] == '-':
v = -int(m[1]), int(m[2])
else:
v = int(m[1]), int(m[2])
ans = ans[0] * v[1] + ans[1] * v[0], ans[1] * v[1]
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
neg = 1
if ans[0] * ans[1] < 0:
neg = -1
g = gcd(abs(ans[0]), abs(ans[1]))
return '{}/{}'.format(neg * abs(ans[0]) / g, abs(ans[1]) / g)
|
|
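The solution above hand-rolls rational arithmetic with a gcd; the standard library's fractions.Fraction gives a compact reference implementation to cross-check it against:
import re
from fractions import Fraction
def fraction_addition_reference(expression):
    # Fraction accepts signed 'p/q' strings and reduces automatically.
    total = sum(Fraction(m) for m in re.findall(r'[+-]?\d+/\d+', expression))
    return '{}/{}'.format(total.numerator, total.denominator)
assert fraction_addition_reference('-1/2+1/2') == '0/1'
assert fraction_addition_reference('-1/2+1/2+1/3') == '1/3'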
5d2246cac222cd035924d2e68acaddca3a726fb7
|
list_comprehensions.py
|
list_comprehensions.py
|
"""Show how to use list comprehensions and zip"""
sunday_temps = [76, 78, 86, 54, 88, 77, 66, 55, 44, 57, 58, 58, 78, 79, 69, 65]
monday_temps = [68, 67, 68, 76, 77, 66, 61, 81, 73, 61, 83, 67, 89, 78, 67, 85]
tuesday_temps = [78, 79, 70, 76, 75, 74, 73, 72, 63, 64, 65, 58, 59, 85, 59, 85]
def show_temp_tuples():
for item in zip(sunday_temps, monday_temps):
print(item)
for sunday, monday in zip(sunday_temps, monday_temps):
print("Sunday: {}, Monday: {}, Average:{}".format(sunday, monday, (sunday + monday) / 2))
for temps in zip(sunday_temps, monday_temps, tuesday_temps):
print("min={:4.1f}, max={:4.1f}, average={:4.1f}"
.format(min(temps), max(temps), sum(temps) / len(temps)))
if __name__ == '__main__':
show_temp_tuples()
|
Add list comprehensions with zips
|
Add list comprehensions with zips
|
Python
|
mit
|
kentoj/python-fundamentals
|
Add list comprehensions with zips
|
"""Show how to use list comprehensions and zip"""
sunday_temps = [76, 78, 86, 54, 88, 77, 66, 55, 44, 57, 58, 58, 78, 79, 69, 65]
monday_temps = [68, 67, 68, 76, 77, 66, 61, 81, 73, 61, 83, 67, 89, 78, 67, 85]
tuesday_temps = [78, 79, 70, 76, 75, 74, 73, 72, 63, 64, 65, 58, 59, 85, 59, 85]
def show_temp_tuples():
for item in zip(sunday_temps, monday_temps):
print(item)
for sunday, monday in zip(sunday_temps, monday_temps):
print("Sunday: {}, Monday: {}, Average:{}".format(sunday, monday, (sunday + monday) / 2))
for temps in zip(sunday_temps, monday_temps, tuesday_temps):
print("min={:4.1f}, max={:4.1f}, average={:4.1f}"
.format(min(temps), max(temps), sum(temps) / len(temps)))
if __name__ == '__main__':
show_temp_tuples()
|
<commit_before><commit_msg>Add list comprehensions with zips<commit_after>
|
"""Show how to use list comprehensions and zip"""
sunday_temps = [76, 78, 86, 54, 88, 77, 66, 55, 44, 57, 58, 58, 78, 79, 69, 65]
monday_temps = [68, 67, 68, 76, 77, 66, 61, 81, 73, 61, 83, 67, 89, 78, 67, 85]
tuesday_temps = [78, 79, 70, 76, 75, 74, 73, 72, 63, 64, 65, 58, 59, 85, 59, 85]
def show_temp_tuples():
for item in zip(sunday_temps, monday_temps):
print(item)
for sunday, monday in zip(sunday_temps, monday_temps):
print("Sunday: {}, Monday: {}, Average:{}".format(sunday, monday, (sunday + monday) / 2))
for temps in zip(sunday_temps, monday_temps, tuesday_temps):
print("min={:4.1f}, max={:4.1f}, average={:4.1f}"
.format(min(temps), max(temps), sum(temps) / len(temps)))
if __name__ == '__main__':
show_temp_tuples()
|
Add list comprehensions with zips"""Show how to use list comprehensions and zip"""
sunday_temps = [76, 78, 86, 54, 88, 77, 66, 55, 44, 57, 58, 58, 78, 79, 69, 65]
monday_temps = [68, 67, 68, 76, 77, 66, 61, 81, 73, 61, 83, 67, 89, 78, 67, 85]
tuesday_temps = [78, 79, 70, 76, 75, 74, 73, 72, 63, 64, 65, 58, 59, 85, 59, 85]
def show_temp_tuples():
for item in zip(sunday_temps, monday_temps):
print(item)
for sunday, monday in zip(sunday_temps, monday_temps):
print("Sunday: {}, Monday: {}, Average:{}".format(sunday, monday, (sunday + monday) / 2))
for temps in zip(sunday_temps, monday_temps, tuesday_temps):
print("min={:4.1f}, max={:4.1f}, average={:4.1f}"
.format(min(temps), max(temps), sum(temps) / len(temps)))
if __name__ == '__main__':
show_temp_tuples()
|
<commit_before><commit_msg>Add list comprehensions with zips<commit_after>"""Show how to use list comprehensions and zip"""
sunday_temps = [76, 78, 86, 54, 88, 77, 66, 55, 44, 57, 58, 58, 78, 79, 69, 65]
monday_temps = [68, 67, 68, 76, 77, 66, 61, 81, 73, 61, 83, 67, 89, 78, 67, 85]
tuesday_temps = [78, 79, 70, 76, 75, 74, 73, 72, 63, 64, 65, 58, 59, 85, 59, 85]
def show_temp_tuples():
for item in zip(sunday_temps, monday_temps):
print(item)
for sunday, monday in zip(sunday_temps, monday_temps):
print("Sunday: {}, Monday: {}, Average:{}".format(sunday, monday, (sunday + monday) / 2))
for temps in zip(sunday_temps, monday_temps, tuesday_temps):
print("min={:4.1f}, max={:4.1f}, average={:4.1f}"
.format(min(temps), max(temps), sum(temps) / len(temps)))
if __name__ == '__main__':
show_temp_tuples()
|
|
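Despite the commit subject, the snippet above only uses zip inside plain for loops; the list-comprehension form of the per-hour averages it points toward would be:
averages = [sum(temps) / len(temps)
            for temps in zip(sunday_temps, monday_temps, tuesday_temps)]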
c81d1534084673680ae313296bfae64c13899bcc
|
crop_logo.py
|
crop_logo.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
TRAIN_DIR = 'flickr_logos_27_dataset'
TRAIN_IMAGE_DIR = os.path.join(TRAIN_DIR, 'flickr_logos_27_dataset_images')
def main():
annot_train = np.loadtxt(
os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_training_set_annotation.txt'),
dtype=np.str)
print('train_annotation: %d, %d ' % (annot_train.shape))
if __name__ == '__main__':
main()
|
Add a script for cropping logo
|
Add a script for cropping logo
|
Python
|
mit
|
satojkovic/DeepLogo
|
Add a script for cropping logo
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
TRAIN_DIR = 'flickr_logos_27_dataset'
TRAIN_IMAGE_DIR = os.path.join(TRAIN_DIR, 'flickr_logos_27_dataset_images')
def main():
annot_train = np.loadtxt(
os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_training_set_annotation.txt'),
dtype=np.str)
print('train_annotation: %d, %d ' % (annot_train.shape))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for cropping logo<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
TRAIN_DIR = 'flickr_logos_27_dataset'
TRAIN_IMAGE_DIR = os.path.join(TRAIN_DIR, 'flickr_logos_27_dataset_images')
def main():
annot_train = np.loadtxt(
os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_training_set_annotation.txt'),
dtype=np.str)
print('train_annotation: %d, %d ' % (annot_train.shape))
if __name__ == '__main__':
main()
|
Add a script for cropping logo#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
TRAIN_DIR = 'flickr_logos_27_dataset'
TRAIN_IMAGE_DIR = os.path.join(TRAIN_DIR, 'flickr_logos_27_dataset_images')
def main():
annot_train = np.loadtxt(
os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_training_set_annotation.txt'),
dtype=np.str)
print('train_annotation: %d, %d ' % (annot_train.shape))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for cropping logo<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
TRAIN_DIR = 'flickr_logos_27_dataset'
TRAIN_IMAGE_DIR = os.path.join(TRAIN_DIR, 'flickr_logos_27_dataset_images')
def main():
annot_train = np.loadtxt(
os.path.join(TRAIN_DIR,
'flickr_logos_27_dataset_training_set_annotation.txt'),
dtype=np.str)
print('train_annotation: %d, %d ' % (annot_train.shape))
if __name__ == '__main__':
main()
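The commit only loads the annotation table; the cropping itself would be a later step. A rough sketch of that step, reusing os and TRAIN_IMAGE_DIR from the script above, and assuming Pillow is available and that the flickr_logos_27 annotation columns are (filename, class, subset, x1, y1, x2, y2) -- both assumptions, not part of the commit:

# Hypothetical cropping step; the column layout and Pillow use are assumptions.
from PIL import Image

def crop_logo(row, out_dir='cropped'):
    fname, cls = row[0], row[1]
    x1, y1, x2, y2 = (int(v) for v in row[3:7])
    img = Image.open(os.path.join(TRAIN_IMAGE_DIR, fname))
    os.makedirs(out_dir, exist_ok=True)
    img.crop((x1, y1, x2, y2)).save(os.path.join(out_dir, cls + '_' + fname))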
|
|
07624ba907d66e734c9098149b62d2ea8c76392a
|
tests/test_coord.py
|
tests/test_coord.py
|
import unittest
from coral import coord
class TestCoord(unittest.TestCase):
def test_simplify(self):
poly = [(-10, -10), (-10, 10), (0, 15), (10, 10), (10, -10), (0, -5)]
poly = coord.simplify(poly, 10)
simple = [(-1, -1), (-1, 1), (1, 1), (1, -1)]
self.assertEqual(poly, simple)
if __name__ == "__main__":
unittest.main()
|
Add a test for coord.simplify().
|
Add a test for coord.simplify().
|
Python
|
mit
|
lecram/coral
|
Add a test for coord.simplify().
|
import unittest
from coral import coord
class TestCoord(unittest.TestCase):
def test_simplify(self):
poly = [(-10, -10), (-10, 10), (0, 15), (10, 10), (10, -10), (0, -5)]
poly = coord.simplify(poly, 10)
simple = [(-1, -1), (-1, 1), (1, 1), (1, -1)]
self.assertEqual(poly, simple)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a test for coord.simplify().<commit_after>
|
import unittest
from coral import coord
class TestCoord(unittest.TestCase):
def test_simplify(self):
poly = [(-10, -10), (-10, 10), (0, 15), (10, 10), (10, -10), (0, -5)]
poly = coord.simplify(poly, 10)
simple = [(-1, -1), (-1, 1), (1, 1), (1, -1)]
self.assertEqual(poly, simple)
if __name__ == "__main__":
unittest.main()
|
Add a test for coord.simplify().import unittest
from coral import coord
class TestCoord(unittest.TestCase):
def test_simplify(self):
poly = [(-10, -10), (-10, 10), (0, 15), (10, 10), (10, -10), (0, -5)]
poly = coord.simplify(poly, 10)
simple = [(-1, -1), (-1, 1), (1, 1), (1, -1)]
self.assertEqual(poly, simple)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add a test for coord.simplify().<commit_after>import unittest
from coral import coord
class TestCoord(unittest.TestCase):
def test_simplify(self):
poly = [(-10, -10), (-10, 10), (0, 15), (10, 10), (10, -10), (0, -5)]
poly = coord.simplify(poly, 10)
simple = [(-1, -1), (-1, 1), (1, 1), (1, -1)]
self.assertEqual(poly, simple)
if __name__ == "__main__":
unittest.main()
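The expected values suggest coord.simplify() appears to both drop near-collinear vertices and rescale coordinates by the tolerance, which is worth keeping in mind when reading the assertion. From the repository root the test runs with the standard runner:

python -m unittest tests.test_coord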
|
|
412dc5aad04bfa3d3ece140e711b45fafe9c1b64
|
codefights/arcade_py/68.CalkinWilf.py
|
codefights/arcade_py/68.CalkinWilf.py
|
# The Calkin-Wilf tree is a tree in which the vertices correspond 1-for-1 to the positive rational numbers. The tree is rooted at the number 1, and any rational number expressed in simplest terms as the fraction a / b has as its two children the numbers a / (a + b) and (a + b) / b. Every positive rational number appears exactly once in the tree. Here's what it looks like:
# The Calkin-Wilf sequence is the sequence of rational numbers generated by a breadth-first traversal of the Calkin-Wilf tree, where the vertices of the same level are traversed from left to right (as displayed in the image above). The sequence thus also contains each rational number exactly once, and can be represented as follows:
# Given a rational number, your task is to return its 0-based index in the Calkin-Wilf sequence.
# Example
# For number = [1, 3], the output should be
# calkinWilfSequence(number) = 3.
# As you can see in the image above, 1 / 3 is the 3rd 0-based number in the sequence.
# Input/Output
# [time limit] 4000ms (py)
# [input] array.integer number
def calkinWilfSequence(number):
def fractions():
n = d = 1
while True:
yield [n, d]
n, d = d, 2 * (n // d) * d + d - n
gen = fractions()
res = 0
while next(gen) != number:
res += 1
return res
|
Add 68 Calkin Wilf Sequence from CodeFights
|
Add 68 Calkin Wilf Sequence from CodeFights
|
Python
|
mit
|
aenon/OnlineJudge,aenon/OnlineJudge
|
Add 68 Calkin Wilf Sequence from CodeFights
|
# The Calkin-Wilf tree is a tree in which the vertices correspond 1-for-1 to the positive rational numbers. The tree is rooted at the number 1, and any rational number expressed in simplest terms as the fraction a / b has as its two children the numbers a / (a + b) and (a + b) / b. Every positive rational number appears exactly once in the tree. Here's what it looks like:
# The Calkin-Wilf sequence is the sequence of rational numbers generated by a breadth-first traversal of the Calkin-Wilf tree, where the vertices of the same level are traversed from left to right (as displayed in the image above). The sequence thus also contains each rational number exactly once, and can be represented as follows:
# Given a rational number, your task is to return its 0-based index in the Calkin-Wilf sequence.
# Example
# For number = [1, 3], the output should be
# calkinWilfSequence(number) = 3.
# As you can see in the image above, 1 / 3 is the 3rd 0-based number in the sequence.
# Input/Output
# [time limit] 4000ms (py)
# [input] array.integer number
def calkinWilfSequence(number):
def fractions():
n = d = 1
while True:
yield [n, d]
n, d = d, 2 * (n // d) * d + d - n
gen = fractions()
res = 0
while next(gen) != number:
res += 1
return res
|
<commit_before><commit_msg>Add 68 Calkin Wilf Sequence from CodeFights<commit_after>
|
# The Calkin-Wilf tree is a tree in which the vertices correspond 1-for-1 to the positive rational numbers. The tree is rooted at the number 1, and any rational number expressed in simplest terms as the fraction a / b has as its two children the numbers a / (a + b) and (a + b) / b. Every positive rational number appears exactly once in the tree. Here's what it looks like:
# The Calkin-Wilf sequence is the sequence of rational numbers generated by a breadth-first traversal of the Calkin-Wilf tree, where the vertices of the same level are traversed from left to right (as displayed in the image above). The sequence thus also contains each rational number exactly once, and can be represented as follows:
# Given a rational number, your task is to return its 0-based index in the Calkin-Wilf sequence.
# Example
# For number = [1, 3], the output should be
# calkinWilfSequence(number) = 3.
# As you can see in the image above, 1 / 3 is the 3rd 0-based number in the sequence.
# Input/Output
# [time limit] 4000ms (py)
# [input] array.integer number
def calkinWilfSequence(number):
def fractions():
n = d = 1
while True:
yield [n, d]
n, d = d, 2 * (n // d) * d + d - n
gen = fractions()
res = 0
while next(gen) != number:
res += 1
return res
|
Add 68 Calkin Wilf Sequence from CodeFights# The Calkin-Wilf tree is a tree in which the vertices correspond 1-for-1 to the positive rational numbers. The tree is rooted at the number 1, and any rational number expressed in simplest terms as the fraction a / b has as its two children the numbers a / (a + b) and (a + b) / b. Every positive rational number appears exactly once in the tree. Here's what it looks like:
# The Calkin-Wilf sequence is the sequence of rational numbers generated by a breadth-first traversal of the Calkin-Wilf tree, where the vertices of the same level are traversed from left to right (as displayed in the image above). The sequence thus also contains each rational number exactly once, and can be represented as follows:
# Given a rational number, your task is to return its 0-based index in the Calkin-Wilf sequence.
# Example
# For number = [1, 3], the output should be
# calkinWilfSequence(number) = 3.
# As you can see in the image above, 1 / 3 is the 3rd 0-based number in the sequence.
# Input/Output
# [time limit] 4000ms (py)
# [input] array.integer number
def calkinWilfSequence(number):
def fractions():
n = d = 1
while True:
yield [n, d]
n, d = d, 2 * (n // d) * d + d - n
gen = fractions()
res = 0
while next(gen) != number:
res += 1
return res
|
<commit_before><commit_msg>Add 68 Calkin Wilf Sequence from CodeFights<commit_after># The Calkin-Wilf tree is a tree in which the vertices correspond 1-for-1 to the positive rational numbers. The tree is rooted at the number 1, and any rational number expressed in simplest terms as the fraction a / b has as its two children the numbers a / (a + b) and (a + b) / b. Every positive rational number appears exactly once in the tree. Here's what it looks like:
# The Calkin-Wilf sequence is the sequence of rational numbers generated by a breadth-first traversal of the Calkin-Wilf tree, where the vertices of the same level are traversed from left to right (as displayed in the image above). The sequence thus also contains each rational number exactly once, and can be represented as follows:
# Given a rational number, your task is to return its 0-based index in the Calkin-Wilf sequence.
# Example
# For number = [1, 3], the output should be
# calkinWilfSequence(number) = 3.
# As you can see in the image above, 1 / 3 is the 3rd 0-based number in the sequence.
# Input/Output
# [time limit] 4000ms (py)
# [input] array.integer number
def calkinWilfSequence(number):
def fractions():
n = d = 1
while True:
yield [n, d]
n, d = d, 2 * (n // d) * d + d - n
gen = fractions()
res = 0
while next(gen) != number:
res += 1
return res
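Scanning the generator is linear in the index. The index can also be read off directly: walking from a/b up to the root 1/1 yields one bit per step (0 for a left child a/(a+b), 1 for a right child (a+b)/b), and those bits, after prepending a leading 1 for the root, are the binary form of the 1-based breadth-first position. A sketch of that O(log) alternative (not part of the submission):

def calkin_wilf_index(number):
    a, b = number
    bits = []
    while (a, b) != (1, 1):
        if a < b:            # left child: parent is a / (b - a)
            bits.append(0)
            b -= a
        else:                # right child: parent is (a - b) / b
            bits.append(1)
            a -= b
    index = 1                # leading 1 bit for the root
    for bit in reversed(bits):
        index = index * 2 + bit
    return index - 1         # convert 1-based position to 0-based index

For number = [1, 3] the bits collected are 0, 0; with the leading 1 that is binary 100 = 4, so the 0-based index is 3, matching the example.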
|
|
cba19fbadf8d69b567255a8282974749aff57835
|
tools/game_utils.py
|
tools/game_utils.py
|
import scipy.misc
import scipy.special
def get_num_hole_card_combinations(game):
num_players = game.get_num_players()
num_hole_cards = game.get_num_hole_cards()
num_cards = game.get_num_suits() * game.get_num_ranks()
num_total_hole_cards = num_players * num_hole_cards
return scipy.misc.comb(num_cards, num_total_hole_cards) * scipy.special.perm(num_total_hole_cards, num_total_hole_cards)
|
Add method to calculate hole card combinations in game
|
Add method to calculate hole card combinations in game
|
Python
|
mit
|
JakubPetriska/poker-cfr,JakubPetriska/poker-cfr
|
Add method to calculate hole card combinations in game
|
import scipy.misc
import scipy.special
def get_num_hole_card_combinations(game):
num_players = game.get_num_players()
num_hole_cards = game.get_num_hole_cards()
num_cards = game.get_num_suits() * game.get_num_ranks()
num_total_hole_cards = num_players * num_hole_cards
return scipy.misc.comb(num_cards, num_total_hole_cards) * scipy.special.perm(num_total_hole_cards, num_total_hole_cards)
|
<commit_before><commit_msg>Add method to calculate hole card combinations in game<commit_after>
|
import scipy.misc
import scipy.special
def get_num_hole_card_combinations(game):
num_players = game.get_num_players()
num_hole_cards = game.get_num_hole_cards()
num_cards = game.get_num_suits() * game.get_num_ranks()
num_total_hole_cards = num_players * num_hole_cards
return scipy.misc.comb(num_cards, num_total_hole_cards) * scipy.special.perm(num_total_hole_cards, num_total_hole_cards)
|
Add method to calculate hole card combinations in gameimport scipy.misc
import scipy.special
def get_num_hole_card_combinations(game):
num_players = game.get_num_players()
num_hole_cards = game.get_num_hole_cards()
num_cards = game.get_num_suits() * game.get_num_ranks()
num_total_hole_cards = num_players * num_hole_cards
return scipy.misc.comb(num_cards, num_total_hole_cards) * scipy.special.perm(num_total_hole_cards, num_total_hole_cards)
|
<commit_before><commit_msg>Add method to calculate hole card combinations in game<commit_after>import scipy.misc
import scipy.special
def get_num_hole_card_combinations(game):
num_players = game.get_num_players()
num_hole_cards = game.get_num_hole_cards()
num_cards = game.get_num_suits() * game.get_num_ranks()
num_total_hole_cards = num_players * num_hole_cards
return scipy.misc.comb(num_cards, num_total_hole_cards) * scipy.special.perm(num_total_hole_cards, num_total_hole_cards)
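Since C(n, k) * P(k, k) = n! / (n - k)!, the whole product is just the falling factorial P(n, k); scipy.misc.comb was also deprecated and later removed from SciPy. An equivalent standard-library sketch (Python 3.8+; returns an int rather than scipy's default float):

import math

def get_num_hole_card_combinations(game):
    num_cards = game.get_num_suits() * game.get_num_ranks()
    k = game.get_num_players() * game.get_num_hole_cards()
    return math.perm(num_cards, k)  # n! / (n - k)!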
|
|
3f1353d48b688c65f9dec0b7e3b753b5c3cfc9bb
|
export_puid.py
|
export_puid.py
|
#!/usr/bin/env python3
# coding: utf-8
from wxpy import *
'''
Use a cache to store the login session, and log in via the console
'''
bot = Bot('bot.pkl', console_qr=False)
'''
Enable PUID for subsequent control
'''
bot.enable_puid('wxpy_puid.pkl')
friends = bot.friends()
groups = bot.groups()
output = open('data', 'w')
output.write("-----Friends-------\n")
for i in friends:
output.write(i.nick_name + " ---> " + i.puid + "\n")
pass
output.write("-----Groups-------\n")
for i in groups:
output.write(i.name + " ---> " + i.puid + "\n")
pass
|
ADD EXPORT_PUID.PY * USER CAN USE IT TO GET PUID
|
ADD EXPORT_PUID.PY
* USER CAN USE IT TO GET PUID
|
Python
|
mit
|
robot527/LCBot,Yinr/LCBot,LCTT/LCBot
|
ADD EXPORT_PUID.PY
* USER CAN USE IT TO GET PUID
|
#!/usr/bin/env python3
# coding: utf-8
from wxpy import *
'''
Use a cache to store the login session, and log in via the console
'''
bot = Bot('bot.pkl', console_qr=False)
'''
Enable PUID for subsequent control
'''
bot.enable_puid('wxpy_puid.pkl')
friends = bot.friends()
groups = bot.groups()
output = open('data', 'w')
output.write("-----Friends-------\n")
for i in friends:
output.write(i.nick_name + " ---> " + i.puid + "\n")
pass
output.write("-----Groups-------\n")
for i in groups:
output.write(i.name + " ---> " + i.puid + "\n")
pass
|
<commit_before><commit_msg>ADD EXPORT_PUID.PY
* USER CAN USE IT TO GET PUID<commit_after>
|
#!/usr/bin/env python3
# coding: utf-8
from wxpy import *
'''
Use a cache to store the login session, and log in via the console
'''
bot = Bot('bot.pkl', console_qr=False)
'''
Enable PUID for subsequent control
'''
bot.enable_puid('wxpy_puid.pkl')
friends = bot.friends()
groups = bot.groups()
output = open('data', 'w')
output.write("-----Friends-------\n")
for i in friends:
output.write(i.nick_name + " ---> " + i.puid + "\n")
pass
output.write("-----Groups-------\n")
for i in groups:
output.write(i.name + " ---> " + i.puid + "\n")
pass
|
ADD EXPORT_PUID.PY
* USER CAN USE IT TO GET PUID#!/usr/bin/env python3
# coding: utf-8
from wxpy import *
'''
Use a cache to store the login session, and log in via the console
'''
bot = Bot('bot.pkl', console_qr=False)
'''
Enable PUID for subsequent control
'''
bot.enable_puid('wxpy_puid.pkl')
friends = bot.friends()
groups = bot.groups()
output = open('data', 'w')
output.write("-----Friends-------\n")
for i in friends:
output.write(i.nick_name + " ---> " + i.puid + "\n")
pass
output.write("-----Groups-------\n")
for i in groups:
output.write(i.name + " ---> " + i.puid + "\n")
pass
|
<commit_before><commit_msg>ADD EXPORT_PUID.PY
* USER CAN USE IT TO GET PUID<commit_after>#!/usr/bin/env python3
# coding: utf-8
from wxpy import *
'''
Use a cache to store the login session, and log in via the console
'''
bot = Bot('bot.pkl', console_qr=False)
'''
Enable PUID for subsequent control
'''
bot.enable_puid('wxpy_puid.pkl')
friends = bot.friends()
groups = bot.groups()
output = open('data', 'w')
output.write("-----Friends-------\n")
for i in friends:
output.write(i.nick_name + " ---> " + i.puid + "\n")
pass
output.write("-----Groups-------\n")
for i in groups:
output.write(i.name + " ---> " + i.puid + "\n")
pass
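The script never closes output; a context manager makes the writes deterministic. A minimal reworking with the same behavior, assuming the wxpy Bot above is logged in with PUID enabled:

# Same export with deterministic file closing.
with open('data', 'w') as output:
    output.write("-----Friends-------\n")
    for friend in bot.friends():
        output.write(friend.nick_name + " ---> " + friend.puid + "\n")
    output.write("-----Groups-------\n")
    for group in bot.groups():
        output.write(group.name + " ---> " + group.puid + "\n")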
|
|
fc89664fd75f787b03953d8eac3ec99b6fdf19de
|
lesson5/exceptions_except.py
|
lesson5/exceptions_except.py
|
def take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
|
Add y.a. script for showing except working
|
Add y.a. script for showing except working
|
Python
|
bsd-2-clause
|
drednout/letspython,drednout/letspython
|
Add y.a. script for showing except working
|
def take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
|
<commit_before><commit_msg>Add y.a. script for showing except working<commit_after>
|
def take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
|
Add y.a. script for showing except workingdef take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
|
<commit_before><commit_msg>Add y.a. script for showing except working<commit_after>def take_beer(fridge, number=1):
if "beer" not in fridge:
raise Exception("No beer at all:(")
if number > fridge["beer"]:
raise Exception("Not enough beer:(")
fridge["beer"] -= number
if __name__ == "__main__":
fridge = {
"beer": 2,
"milk": 1,
"meat": 3,
}
print("I wanna drink 1 bottle of beer...")
take_beer(fridge)
print("Oooh, great!")
print("I wanna drink 2 bottle of beer...")
try:
take_beer(fridge, 2)
except Exception as e:
print("Error: {}. Let's continue".format(e))
print("Fallback. Try to take 1 bottle of beer...")
take_beer(fridge, 1)
print("Oooh, awesome!")
|
|
ebf39da6ee59e2fb55226e5c256553ce1093006c
|
nettests/core/http_invalid_requests.py
|
nettests/core/http_invalid_requests.py
|
# -*- encoding: utf-8 -*-
from twisted.python import usage
from ooni.utils import randomStr
from ooni.templates import tcpt
class UsageOptions(usage.Options):
optParameters = [['backend', 'b', '127.0.0.1:57002',
'The OONI backend that runs a TCP echo server (must be on port 80)']]
optFlags = [['nopayloadmatch', 'n',
"Don't match the payload of the response. This option is used when you don't have a TCP echo server running"]]
class HTTPInvalidRequests(tcpt.TCPTest):
name = "HTTP Invalid Requests"
version = "0.1.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['backend']
def setUp(self):
try:
self.address, self.port = self.localOptions['backend'].split(":")
self.port = int(self.port)
except:
raise usage.UsageError("Invalid backend address specified (must be address:port)")
def test_random_invalid_request(self):
"""
We test sending data to a TCP echo server, if what we get back is not
what we have sent then there is tampering going on.
This is for example what squid will return when performing such
request:
HTTP/1.0 400 Bad Request
Server: squid/2.6.STABLE21
Date: Sat, 23 Jul 2011 02:22:44 GMT
Content-Type: text/html
Content-Length: 1178
Expires: Sat, 23 Jul 2011 02:22:44 GMT
X-Squid-Error: ERR_INVALID_REQ 0
X-Cache: MISS from cache_server
X-Cache-Lookup: NONE from cache_server:3128
Via: 1.0 cache_server:3128 (squid/2.6.STABLE21)
Proxy-Connection: close
"""
payload = randomStr(10) + "\n\r"
def got_all_data(received_array):
if not self.localOptions['nopayloadmatch']:
first = received_array[0]
if first != payload:
self.report['tampering'] = True
else:
self.report['tampering'] = 'unknown'
d = self.sendPayload(payload)
d.addCallback(got_all_data)
return d
|
Add test that generates a random invalid HTTP request
|
Add test that generates a random invalid HTTP request
|
Python
|
bsd-2-clause
|
Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,juga0/ooni-probe,kdmurray91/ooni-probe,kdmurray91/ooni-probe,0xPoly/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,Karthikeyan-kkk/ooni-probe,juga0/ooni-probe,juga0/ooni-probe,Karthikeyan-kkk/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,lordappsec/ooni-probe,0xPoly/ooni-probe,kdmurray91/ooni-probe,lordappsec/ooni-probe
|
Add test that generates a random invalid HTTP request
|
# -*- encoding: utf-8 -*-
from twisted.python import usage
from ooni.utils import randomStr
from ooni.templates import tcpt
class UsageOptions(usage.Options):
optParameters = [['backend', 'b', '127.0.0.1:57002',
'The OONI backend that runs a TCP echo server (must be on port 80)']]
optFlags = [['nopayloadmatch', 'n',
"Don't match the payload of the response. This option is used when you don't have a TCP echo server running"]]
class HTTPInvalidRequests(tcpt.TCPTest):
name = "HTTP Invalid Requests"
version = "0.1.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['backend']
def setUp(self):
try:
self.address, self.port = self.localOptions['backend'].split(":")
self.port = int(self.port)
except:
raise usage.UsageError("Invalid backend address specified (must be address:port)")
def test_random_invalid_request(self):
"""
We test sending data to a TCP echo server, if what we get back is not
what we have sent then there is tampering going on.
This is for example what squid will return when performing such
request:
HTTP/1.0 400 Bad Request
Server: squid/2.6.STABLE21
Date: Sat, 23 Jul 2011 02:22:44 GMT
Content-Type: text/html
Content-Length: 1178
Expires: Sat, 23 Jul 2011 02:22:44 GMT
X-Squid-Error: ERR_INVALID_REQ 0
X-Cache: MISS from cache_server
X-Cache-Lookup: NONE from cache_server:3128
Via: 1.0 cache_server:3128 (squid/2.6.STABLE21)
Proxy-Connection: close
"""
payload = randomStr(10) + "\n\r"
def got_all_data(received_array):
if not self.localOptions['nopayloadmatch']:
first = received_array[0]
if first != payload:
self.report['tampering'] = True
else:
self.report['tampering'] = 'unknown'
d = self.sendPayload(payload)
d.addCallback(got_all_data)
return d
|
<commit_before><commit_msg>Add test that generates a random invalid HTTP request<commit_after>
|
# -*- encoding: utf-8 -*-
from twisted.python import usage
from ooni.utils import randomStr
from ooni.templates import tcpt
class UsageOptions(usage.Options):
optParameters = [['backend', 'b', '127.0.0.1:57002',
'The OONI backend that runs a TCP echo server (must be on port 80)']]
optFlags = [['nopayloadmatch', 'n',
"Don't match the payload of the response. This option is used when you don't have a TCP echo server running"]]
class HTTPInvalidRequests(tcpt.TCPTest):
name = "HTTP Invalid Requests"
version = "0.1.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['backend']
def setUp(self):
try:
self.address, self.port = self.localOptions['backend'].split(":")
self.port = int(self.port)
except:
raise usage.UsageError("Invalid backend address specified (must be address:port)")
def test_random_invalid_request(self):
"""
We test sending data to a TCP echo server, if what we get back is not
what we have sent then there is tampering going on.
This is for example what squid will return when performing such
request:
HTTP/1.0 400 Bad Request
Server: squid/2.6.STABLE21
Date: Sat, 23 Jul 2011 02:22:44 GMT
Content-Type: text/html
Content-Length: 1178
Expires: Sat, 23 Jul 2011 02:22:44 GMT
X-Squid-Error: ERR_INVALID_REQ 0
X-Cache: MISS from cache_server
X-Cache-Lookup: NONE from cache_server:3128
Via: 1.0 cache_server:3128 (squid/2.6.STABLE21)
Proxy-Connection: close
"""
payload = randomStr(10) + "\n\r"
def got_all_data(received_array):
if not self.localOptions['nopayloadmatch']:
first = received_array[0]
if first != payload:
self.report['tampering'] = True
else:
self.report['tampering'] = 'unknown'
d = self.sendPayload(payload)
d.addCallback(got_all_data)
return d
|
Add test that generates a random invalid HTTP request# -*- encoding: utf-8 -*-
from twisted.python import usage
from ooni.utils import randomStr
from ooni.templates import tcpt
class UsageOptions(usage.Options):
optParameters = [['backend', 'b', '127.0.0.1:57002',
'The OONI backend that runs a TCP echo server (must be on port 80)']]
optFlags = [['nopayloadmatch', 'n',
"Don't match the payload of the response. This option is used when you don't have a TCP echo server running"]]
class HTTPInvalidRequests(tcpt.TCPTest):
name = "HTTP Invalid Requests"
version = "0.1.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['backend']
def setUp(self):
try:
self.address, self.port = self.localOptions['backend'].split(":")
self.port = int(self.port)
except:
raise usage.UsageError("Invalid backend address specified (must be address:port)")
def test_random_invalid_request(self):
"""
We test sending data to a TCP echo server, if what we get back is not
what we have sent then there is tampering going on.
This is for example what squid will return when performing such
request:
HTTP/1.0 400 Bad Request
Server: squid/2.6.STABLE21
Date: Sat, 23 Jul 2011 02:22:44 GMT
Content-Type: text/html
Content-Length: 1178
Expires: Sat, 23 Jul 2011 02:22:44 GMT
X-Squid-Error: ERR_INVALID_REQ 0
X-Cache: MISS from cache_server
X-Cache-Lookup: NONE from cache_server:3128
Via: 1.0 cache_server:3128 (squid/2.6.STABLE21)
Proxy-Connection: close
"""
payload = randomStr(10) + "\n\r"
def got_all_data(received_array):
if not self.localOptions['nopayloadmatch']:
first = received_array[0]
if first != payload:
self.report['tampering'] = True
else:
self.report['tampering'] = 'unknown'
d = self.sendPayload(payload)
d.addCallback(got_all_data)
return d
|
<commit_before><commit_msg>Add test that generates a random invalid HTTP request<commit_after># -*- encoding: utf-8 -*-
from twisted.python import usage
from ooni.utils import randomStr
from ooni.templates import tcpt
class UsageOptions(usage.Options):
optParameters = [['backend', 'b', '127.0.0.1:57002',
'The OONI backend that runs a TCP echo server (must be on port 80)']]
optFlags = [['nopayloadmatch', 'n',
"Don't match the payload of the response. This option is used when you don't have a TCP echo server running"]]
class HTTPInvalidRequests(tcpt.TCPTest):
name = "HTTP Invalid Requests"
version = "0.1.1"
authors = "Arturo Filastò"
inputFile = ['file', 'f', None,
'Input file of list of hostnames to attempt to resolve']
usageOptions = UsageOptions
requiredOptions = ['backend']
def setUp(self):
try:
self.address, self.port = self.localOptions['backend'].split(":")
self.port = int(self.port)
except:
raise usage.UsageError("Invalid backend address specified (must be address:port)")
def test_random_invalid_request(self):
"""
We test sending data to a TCP echo server, if what we get back is not
what we have sent then there is tampering going on.
This is for example what squid will return when performing such
request:
HTTP/1.0 400 Bad Request
Server: squid/2.6.STABLE21
Date: Sat, 23 Jul 2011 02:22:44 GMT
Content-Type: text/html
Content-Length: 1178
Expires: Sat, 23 Jul 2011 02:22:44 GMT
X-Squid-Error: ERR_INVALID_REQ 0
X-Cache: MISS from cache_server
X-Cache-Lookup: NONE from cache_server:3128
Via: 1.0 cache_server:3128 (squid/2.6.STABLE21)
Proxy-Connection: close
"""
payload = randomStr(10) + "\n\r"
def got_all_data(received_array):
if not self.localOptions['nopayloadmatch']:
first = received_array[0]
if first != payload:
self.report['tampering'] = True
else:
self.report['tampering'] = 'unknown'
d = self.sendPayload(payload)
d.addCallback(got_all_data)
return d
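The core comparison can be illustrated outside the OONI harness with plain sockets, assuming a TCP echo server is listening on the test's default backend (127.0.0.1:57002):

# Standalone sketch of the tampering check, not the OONI test itself.
import os
import socket

payload = os.urandom(5).hex() + "\n\r"   # junk line, deliberately not CRLF
s = socket.create_connection(("127.0.0.1", 57002), timeout=5)
s.sendall(payload.encode())
echoed = s.recv(4096).decode()
s.close()
print("tampering:", echoed != payload)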
|
|
a586973dc63f56dfc180add0f0ae41b6c0475641
|
tools/print_descriptor.py
|
tools/print_descriptor.py
|
#!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from google.protobuf import descriptor_pb2
# Take a path to file with binary protobuf descriptor as CLI argument and print
# it in textproto format.
#
# Example usage:
# tools/print_descriptor.py path/to/file.descriptor
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_file',
type=str,
help='File name with binary proto descriptor to print')
args = parser.parse_args()
descriptor = descriptor_pb2.FileDescriptorSet()
with open(args.input_file, 'rb') as f:
contents = f.read()
descriptor.MergeFromString(contents)
print(descriptor)
if __name__ == "__main__":
main()
|
Implement simple tool to print proto descriptor files
|
Implement simple tool to print proto descriptor files
Change-Id: I6b2a2d399bd490a85668e4a53c84a87abe828a46
|
Python
|
apache-2.0
|
google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto,google/perfetto
|
Implement simple tool to print proto descriptor files
Change-Id: I6b2a2d399bd490a85668e4a53c84a87abe828a46
|
#!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from google.protobuf import descriptor_pb2
# Take a path to file with binary protobuf descriptor as CLI argument and print
# it in textproto format.
#
# Example usage:
# tools/print_descriptor.py path/to/file.descriptor
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_file',
type=str,
help='File name with binary proto descriptor to print')
args = parser.parse_args()
descriptor = descriptor_pb2.FileDescriptorSet()
with open(args.input_file, 'rb') as f:
contents = f.read()
descriptor.MergeFromString(contents)
print(descriptor)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Implement simple tool to print proto descriptor files
Change-Id: I6b2a2d399bd490a85668e4a53c84a87abe828a46<commit_after>
|
#!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from google.protobuf import descriptor_pb2
# Take a path to file with binary protobuf descriptor as CLI argument and print
# it in textproto format.
#
# Example usage:
# tools/print_descriptor.py path/to/file.descriptor
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_file',
type=str,
help='File name with binary proto descriptor to print')
args = parser.parse_args()
descriptor = descriptor_pb2.FileDescriptorSet()
with open(args.input_file, 'rb') as f:
contents = f.read()
descriptor.MergeFromString(contents)
print(descriptor)
if __name__ == "__main__":
main()
|
Implement simple tool to print proto descriptor files
Change-Id: I6b2a2d399bd490a85668e4a53c84a87abe828a46#!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from google.protobuf import descriptor_pb2
# Take a path to file with binary protobuf descriptor as CLI argument and print
# it in textproto format.
#
# Example usage:
# tools/print_descriptor.py path/to/file.descriptor
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_file',
type=str,
help='File name with binary proto descriptor to print')
args = parser.parse_args()
descriptor = descriptor_pb2.FileDescriptorSet()
with open(args.input_file, 'rb') as f:
contents = f.read()
descriptor.MergeFromString(contents)
print(descriptor)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Implement simple tool to print proto descriptor files
Change-Id: I6b2a2d399bd490a85668e4a53c84a87abe828a46<commit_after>#!/usr/bin/env python3
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from google.protobuf import descriptor_pb2
# Take a path to file with binary protobuf descriptor as CLI argument and print
# it in textproto format.
#
# Example usage:
# tools/print_descriptor.py path/to/file.descriptor
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'input_file',
type=str,
help='File name with binary proto descriptor to print')
args = parser.parse_args()
descriptor = descriptor_pb2.FileDescriptorSet()
with open(args.input_file, 'rb') as f:
contents = f.read()
descriptor.MergeFromString(contents)
print(descriptor)
if __name__ == "__main__":
main()
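Descriptor files of the kind this tool reads are typically produced with protoc, e.g. (paths are placeholders):

protoc --include_imports --descriptor_set_out=file.descriptor path/to/file.proto
tools/print_descriptor.py file.descriptor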
|
|
dc0ff0fba6a83daefb35db2e7c93b474aad1928b
|
scripts/add-migration.py
|
scripts/add-migration.py
|
#!/usr/bin/python
# This script should add a template migration to the migrations.php
# file. If you provide a git commit ID it uses the commit date from
# that commit for the timestamp. Otherwise, it uses the current time.
import sys
import os
import subprocess
import datetime
import dateutil.parser
import pytz
import re
import tempfile
import shutil
top_level = os.path.join(os.path.dirname(sys.path[0]))
migrations_filename = os.path.join(top_level,'inc','migrations.php')
def usage():
print("Usage: {0} DESCRIPTION [GIT-COMMIT]".format(sys.argv[0]))
if len(sys.argv) == 2:
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
elif len(sys.argv) == 3:
git_commit = sys.argv[2]
local_timestamp = subprocess.check_output(['git',
'show',
'-s',
'--format=%ci',
git_commit]).strip()
parsed_local_timestamp = dateutil.parser.parse(local_timestamp)
timestamp = parsed_local_timestamp.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%S')
else:
usage()
sys.exit(1)
description = sys.argv[1]
def quote_string_for_php(s):
return "'" + s.replace('\\','\\\\').replace("'", "\\'") + "'"
migration_text = ''' {0} => new Migration(
{1},
<<<EOMIGRATION
[Put your migration here.]
EOMIGRATION
),
'''.format(quote_string_for_php(timestamp),
quote_string_for_php(description))
tmp = tempfile.NamedTemporaryFile(delete=False)
with open(migrations_filename) as finput:
with open(tmp.name, 'w') as foutput:
for line in finput:
if re.search('INSERT NEW MIGRATIONS HERE', line):
foutput.write(migration_text)
foutput.write(line)
shutil.copymode(migrations_filename, tmp.name)
shutil.move(tmp.name, migrations_filename)
|
Add a helper script to add a template migration to the migrations.php file
|
Add a helper script to add a template migration to the migrations.php file
|
Python
|
agpl-3.0
|
htem/CATMAID,fzadow/CATMAID,fzadow/CATMAID,htem/CATMAID,fzadow/CATMAID,htem/CATMAID,htem/CATMAID,fzadow/CATMAID
|
Add a helper script to add a template migration to the migrations.php file
|
#!/usr/bin/python
# This script should add a template migration to the migrations.php
# file. If you provide a git commit ID it uses the commit date from
# that commit for the timestamp. Otherwise, it uses the current time.
import sys
import os
import subprocess
import datetime
import dateutil.parser
import pytz
import re
import tempfile
import shutil
top_level = os.path.join(os.path.dirname(sys.path[0]))
migrations_filename = os.path.join(top_level,'inc','migrations.php')
def usage():
print("Usage: {0} DESCRIPTION [GIT-COMMIT]".format(sys.argv[0]))
if len(sys.argv) == 2:
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
elif len(sys.argv) == 3:
git_commit = sys.argv[2]
local_timestamp = subprocess.check_output(['git',
'show',
'-s',
'--format=%ci',
git_commit]).strip()
parsed_local_timestamp = dateutil.parser.parse(local_timestamp)
timestamp = parsed_local_timestamp.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%S')
else:
usage()
sys.exit(1)
description = sys.argv[1]
def quote_string_for_php(s):
return "'" + s.replace('\\','\\\\').replace("'", "\\'") + "'"
migration_text = ''' {0} => new Migration(
{1},
<<<EOMIGRATION
[Put your migration here.]
EOMIGRATION
),
'''.format(quote_string_for_php(timestamp),
quote_string_for_php(description))
tmp = tempfile.NamedTemporaryFile(delete=False)
with open(migrations_filename) as finput:
with open(tmp.name, 'w') as foutput:
for line in finput:
if re.search('INSERT NEW MIGRATIONS HERE', line):
foutput.write(migration_text)
foutput.write(line)
shutil.copymode(migrations_filename, tmp.name)
shutil.move(tmp.name, migrations_filename)
|
<commit_before><commit_msg>Add a helper script to add a template migration to the migrations.php file<commit_after>
|
#!/usr/bin/python
# This script should add a template migration to the migrations.php
# file. If you provide a git commit ID it uses the commit date from
# that commit for the timestamp. Otherwise, it uses the current time.
import sys
import os
import subprocess
import datetime
import dateutil.parser
import pytz
import re
import tempfile
import shutil
top_level = os.path.join(os.path.dirname(sys.path[0]))
migrations_filename = os.path.join(top_level,'inc','migrations.php')
def usage():
print("Usage: {0} DESCRIPTION [GIT-COMMIT]".format(sys.argv[0]))
if len(sys.argv) == 2:
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
elif len(sys.argv) == 3:
git_commit = sys.argv[2]
local_timestamp = subprocess.check_output(['git',
'show',
'-s',
'--format=%ci',
git_commit]).strip()
parsed_local_timestamp = dateutil.parser.parse(local_timestamp)
timestamp = parsed_local_timestamp.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%S')
else:
usage()
sys.exit(1)
description = sys.argv[1]
def quote_string_for_php(s):
return "'" + s.replace('\\','\\\\').replace("'", "\\'") + "'"
migration_text = ''' {0} => new Migration(
{1},
<<<EOMIGRATION
[Put your migration here.]
EOMIGRATION
),
'''.format(quote_string_for_php(timestamp),
quote_string_for_php(description))
tmp = tempfile.NamedTemporaryFile(delete=False)
with open(migrations_filename) as finput:
with open(tmp.name, 'w') as foutput:
for line in finput:
if re.search('INSERT NEW MIGRATIONS HERE', line):
foutput.write(migration_text)
foutput.write(line)
shutil.copymode(migrations_filename, tmp.name)
shutil.move(tmp.name, migrations_filename)
|
Add a helper script to add a template migration to the migrations.php file#!/usr/bin/python
# This script should add a template migration to the migrations.php
# file. If you provide a git commit ID it uses the commit date from
# that commit for the timestamp. Otherwise, it uses the current time.
import sys
import os
import subprocess
import datetime
import dateutil.parser
import pytz
import re
import tempfile
import shutil
top_level = os.path.join(os.path.dirname(sys.path[0]))
migrations_filename = os.path.join(top_level,'inc','migrations.php')
def usage():
print("Usage: {0} DESCRIPTION [GIT-COMMIT]".format(sys.argv[0]))
if len(sys.argv) == 2:
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
elif len(sys.argv) == 3:
git_commit = sys.argv[2]
local_timestamp = subprocess.check_output(['git',
'show',
'-s',
'--format=%ci',
git_commit]).strip()
parsed_local_timestamp = dateutil.parser.parse(local_timestamp)
timestamp = parsed_local_timestamp.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%S')
else:
usage()
sys.exit(1)
description = sys.argv[1]
def quote_string_for_php(s):
return "'" + s.replace('\\','\\\\').replace("'", "\\'") + "'"
migration_text = ''' {0} => new Migration(
{1},
<<<EOMIGRATION
[Put your migration here.]
EOMIGRATION
),
'''.format(quote_string_for_php(timestamp),
quote_string_for_php(description))
tmp = tempfile.NamedTemporaryFile(delete=False)
with open(migrations_filename) as finput:
with open(tmp.name, 'w') as foutput:
for line in finput:
if re.search('INSERT NEW MIGRATIONS HERE', line):
foutput.write(migration_text)
foutput.write(line)
shutil.copymode(migrations_filename, tmp.name)
shutil.move(tmp.name, migrations_filename)
|
<commit_before><commit_msg>Add a helper script to add a template migration to the migrations.php file<commit_after>#!/usr/bin/python
# This script should add a template migration to the migrations.php
# file. If you provide a git commit ID it uses the commit date from
# that commit for the timestamp. Otherwise, it uses the current time.
import sys
import os
import subprocess
import datetime
import dateutil.parser
import pytz
import re
import tempfile
import shutil
top_level = os.path.join(os.path.dirname(sys.path[0]))
migrations_filename = os.path.join(top_level,'inc','migrations.php')
def usage():
print("Usage: {0} DESCRIPTION [GIT-COMMIT]".format(sys.argv[0]))
if len(sys.argv) == 2:
timestamp = datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S')
elif len(sys.argv) == 3:
git_commit = sys.argv[2]
local_timestamp = subprocess.check_output(['git',
'show',
'-s',
'--format=%ci',
git_commit]).strip()
parsed_local_timestamp = dateutil.parser.parse(local_timestamp)
timestamp = parsed_local_timestamp.astimezone(pytz.utc).strftime('%Y-%m-%dT%H:%M:%S')
else:
usage()
sys.exit(1)
description = sys.argv[1]
def quote_string_for_php(s):
return "'" + s.replace('\\','\\\\').replace("'", "\\'") + "'"
migration_text = ''' {0} => new Migration(
{1},
<<<EOMIGRATION
[Put your migration here.]
EOMIGRATION
),
'''.format(quote_string_for_php(timestamp),
quote_string_for_php(description))
tmp = tempfile.NamedTemporaryFile(delete=False)
with open(migrations_filename) as finput:
with open(tmp.name, 'w') as foutput:
for line in finput:
if re.search('INSERT NEW MIGRATIONS HERE', line):
foutput.write(migration_text)
foutput.write(line)
shutil.copymode(migrations_filename, tmp.name)
shutil.move(tmp.name, migrations_filename)
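Typical invocations, matching the usage() string above (the description and commit ID are placeholders):

scripts/add-migration.py "Add an index on treenode"           # timestamp = now
scripts/add-migration.py "Add an index on treenode" 1a2b3c4   # timestamp from that commit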
|
|
94ff998fbda79e3760f8ead366a88654512597b6
|
get_ideogram_annots.py
|
get_ideogram_annots.py
|
import sys
import os
import subprocess
import json
from scipy import stats
from argparse import ArgumentParser
parser = ArgumentParser(description="Get Ideogram.js annotations for an SRR")
parser.add_argument("--acc", required=True, help="SRR accession")
args = parser.parse_args()
acc = args.acc
out = acc + "_counts"
os.chdir("counting")
subprocess.call(["python", "counter.py", "--inp", acc, "--out", out])
os.chdir("../formatter")
subprocess.call([
"python", "formatter.py", "--type", "srr", "--lookup", "gene_lookup_GRCh37.tsv",
"--inp", "../counting/" + out + "_norm.tsv", "--out", acc + ".json"
])
|
Add draft of wrapper script
|
Add draft of wrapper script
|
Python
|
cc0-1.0
|
NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview,NCBI-Hackathons/rnaseqview
|
Add draft of wrapper script
|
import sys
import os
import subprocess
import json
from scipy import stats
from argparse import ArgumentParser
parser = ArgumentParser(description="Get Ideogram.js annotations for an SRR")
parser.add_argument("--acc", required=True, help="SRR accession")
args = parser.parse_args()
acc = args.acc
out = acc + "_counts"
os.chdir("counting")
subprocess.call(["python", "counter.py", "--inp", acc, "--out", out])
os.chdir("../formatter")
subprocess.call([
"python", "formatter.py", "--type", "srr", "--lookup", "gene_lookup_GRCh37.tsv",
"--inp", "../counting/" + out + "_norm.tsv", "--out", acc + ".json"
])
|
<commit_before><commit_msg>Add draft of wrapper script<commit_after>
|
import sys
import os
import subprocess
import json
from scipy import stats
from argparse import ArgumentParser
parser = ArgumentParser(description="Get Ideogram.js annotations for an SRR")
parser.add_argument("--acc", required=True, help="SRR accession")
args = parser.parse_args()
acc = args.acc
out = acc + "_counts"
os.chdir("counting")
subprocess.call(["python", "counter.py", "--inp", acc, "--out", out])
os.chdir("../formatter")
subprocess.call([
"python", "formatter.py", "--type", "srr", "--lookup", "gene_lookup_GRCh37.tsv",
"--inp", "../counting/" + out + "_norm.tsv", "--out", acc + ".json"
])
|
Add draft of wrapper scriptimport sys
import os
import subprocess
import json
from scipy import stats
from argparse import ArgumentParser
parser = ArgumentParser(description="Get Ideogram.js annotations for an SRR")
parser.add_argument("--acc", required=True, help="SRR accession")
args = parser.parse_args()
acc = args.acc
out = acc + "_counts"
os.chdir("counting")
subprocess.call(["python", "counter.py", "--inp", acc, "--out", out])
os.chdir("../formatter")
subprocess.call([
"python", "formatter.py", "--type", "srr", "--lookup", "gene_lookup_GRCh37.tsv",
"--inp", "../counting/" + out + "_norm.tsv", "--out", acc + ".json"
])
|
<commit_before><commit_msg>Add draft of wrapper script<commit_after>import sys
import os
import subprocess
import json
from scipy import stats
from argparse import ArgumentParser
parser = ArgumentParser(description="Get Ideogram.js annotations for an SRR")
parser.add_argument("--acc", required=True, help="SRR accession")
args = parser.parse_args()
acc = args.acc
out = acc + "_counts"
os.chdir("counting")
subprocess.call(["python", "counter.py", "--inp", acc, "--out", out])
os.chdir("../formatter")
subprocess.call([
"python", "formatter.py", "--type", "srr", "--lookup", "gene_lookup_GRCh37.tsv",
"--inp", "../counting/" + out + "_norm.tsv", "--out", acc + ".json"
])
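sys, json and scipy.stats are imported but unused in this draft, presumably placeholders for later steps. A typical invocation (the accession is a placeholder):

python get_ideogram_annots.py --acc SRR1234567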
|
|
51fdbfed4f96d696847de962906467f1762e789e
|
scripts/import-lastfm-bio.py
|
scripts/import-lastfm-bio.py
|
#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class ImportLastfmBio:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def importAll(self):
"""Imports descriptions for all artists who don't currently have one"""
self.cursor.execute("SELECT * FROM artist WHERE bio_summary IS NULL AND bio_content IS NULL")
for artist in self.cursor.fetchall():
name = artist[0]
url = "http://ws.audioscrobbler.com/2.0/artist/%s/info.xml" % urllib.quote(name)
print "\nFetching %s..." % name
try:
xml = urllib2.urlopen(url)
self.parse(xml, name, "http://www.last.fm/music/%s" % urllib.quote(name))
except urllib2.HTTPError:
print "Failed."
def parse(self, xml, name, source):
for event, elem in ElementTree.iterparse(xml):
if elem.tag == "bio":
for bio_e in elem.getchildren():
if bio_e.tag == "summary":
summary = bio_e.text
elif bio_e.tag == "content":
content = bio_e.text
if summary:
summary.strip()
summary = self.fixUrls(summary)
if content:
content.strip()
content = self.fixUrls(content)
if summary != None or content != None:
self.cursor.execute("UPDATE artist SET bio_summary = %s, bio_content = %s, bio_source = %s WHERE name = %s", (summary, content, source, name))
self.conn.commit()
print "Imported!"
else:
print "No Bio"
def fixUrls(self, text):
text.replace("http://www.last.fm/tag/", "/tag/")
text.replace("http://last.fm/tag/", "/tag/")
text.replace("http://www.last.fm/music/", "/artist/")
text.replace("http://last.fm/music/", "/artist/")
return text
if __name__ == '__main__':
importer = ImportLastfmBio()
importer.importAll()
|
Add simple script for importing last.fm bio information (which is licensed under CC-BY-SA and GFDL)
|
Add simple script for importing last.fm bio information (which is licensed under CC-BY-SA and GFDL)
|
Python
|
agpl-3.0
|
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
|
Add simple script for importing last.fm bio information (which is licensed under CC-BY-SA and GFDL)
|
#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class ImportLastfmBio:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def importAll(self):
"""Imports descriptions for all artists who don't currently have one"""
self.cursor.execute("SELECT * FROM artist WHERE bio_summary IS NULL AND bio_content IS NULL")
for artist in self.cursor.fetchall():
name = artist[0]
url = "http://ws.audioscrobbler.com/2.0/artist/%s/info.xml" % urllib.quote(name)
print "\nFetching %s..." % name
try:
xml = urllib2.urlopen(url)
self.parse(xml, name, "http://www.last.fm/music/%s" % urllib.quote(name))
except urllib2.HTTPError:
print "Failed."
def parse(self, xml, name, source):
for event, elem in ElementTree.iterparse(xml):
if elem.tag == "bio":
for bio_e in elem.getchildren():
if bio_e.tag == "summary":
summary = bio_e.text
elif bio_e.tag == "content":
content = bio_e.text
if summary:
summary.strip()
summary = self.fixUrls(summary)
if content:
content.strip()
content = self.fixUrls(content)
if summary != None or content != None:
self.cursor.execute("UPDATE artist SET bio_summary = %s, bio_content = %s, bio_source = %s WHERE name = %s", (summary, content, source, name))
self.conn.commit()
print "Imported!"
else:
print "No Bio"
def fixUrls(self, text):
text.replace("http://www.last.fm/tag/", "/tag/")
text.replace("http://last.fm/tag/", "/tag/")
text.replace("http://www.last.fm/music/", "/artist/")
text.replace("http://last.fm/music/", "/artist/")
return text
if __name__ == '__main__':
importer = ImportLastfmBio()
importer.importAll()
|
<commit_before><commit_msg>Add simple script for importing last.fm bio information (which is licensed under CC-BY-SA and GFDL)<commit_after>
|
#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class ImportLastfmBio:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def importAll(self):
"""Imports descriptions for all artists who don't currently have one"""
self.cursor.execute("SELECT * FROM artist WHERE bio_summary IS NULL AND bio_content IS NULL")
for artist in self.cursor.fetchall():
name = artist[0]
url = "http://ws.audioscrobbler.com/2.0/artist/%s/info.xml" % urllib.quote(name)
print "\nFetching %s..." % name
try:
xml = urllib2.urlopen(url)
self.parse(xml, name, "http://www.last.fm/music/%s" % urllib.quote(name))
except urllib2.HTTPError:
print "Failed."
def parse(self, xml, name, source):
for event, elem in ElementTree.iterparse(xml):
if elem.tag == "bio":
for bio_e in elem.getchildren():
if bio_e.tag == "summary":
summary = bio_e.text
elif bio_e.tag == "content":
content = bio_e.text
if summary:
summary.strip()
summary = self.fixUrls(summary)
if content:
content.strip()
content = self.fixUrls(content)
if summary != None or content != None:
self.cursor.execute("UPDATE artist SET bio_summary = %s, bio_content = %s, bio_source = %s WHERE name = %s", (summary, content, source, name))
self.conn.commit()
print "Imported!"
else:
print "No Bio"
def fixUrls(self, text):
text.replace("http://www.last.fm/tag/", "/tag/")
text.replace("http://last.fm/tag/", "/tag/")
text.replace("http://www.last.fm/music/", "/artist/")
text.replace("http://last.fm/music/", "/artist/")
return text
if __name__ == '__main__':
importer = ImportLastfmBio()
importer.importAll()
|
Add simple script for importing last.fm bio information (which is licensed under CC-BY-SA and GFDL)#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class ImportLastfmBio:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def importAll(self):
"""Imports descriptions for all artists who don't currently have one"""
self.cursor.execute("SELECT * FROM artist WHERE bio_summary IS NULL AND bio_content IS NULL")
for artist in self.cursor.fetchall():
name = artist[0]
url = "http://ws.audioscrobbler.com/2.0/artist/%s/info.xml" % urllib.quote(name)
print "\nFetching %s..." % name
try:
xml = urllib2.urlopen(url)
self.parse(xml, name, "http://www.last.fm/music/%s" % urllib.quote(name))
except urllib2.HTTPError:
print "Failed."
def parse(self, xml, name, source):
for event, elem in ElementTree.iterparse(xml):
if elem.tag == "bio":
for bio_e in elem.getchildren():
if bio_e.tag == "summary":
summary = bio_e.text
elif bio_e.tag == "content":
content = bio_e.text
if summary:
summary.strip()
summary = self.fixUrls(summary)
if content:
content.strip()
content = self.fixUrls(content)
if summary != None or content != None:
self.cursor.execute("UPDATE artist SET bio_summary = %s, bio_content = %s, bio_source = %s WHERE name = %s", (summary, content, source, name))
self.conn.commit()
print "Imported!"
else:
print "No Bio"
def fixUrls(self, text):
text.replace("http://www.last.fm/tag/", "/tag/")
text.replace("http://last.fm/tag/", "/tag/")
text.replace("http://www.last.fm/music/", "/artist/")
text.replace("http://last.fm/music/", "/artist/")
return text
if __name__ == '__main__':
importer = ImportLastfmBio()
importer.importAll()
|
<commit_before><commit_msg>Add simple script for importing last.fm bio information (which is licensed under CC-BY-SA and GFDL)<commit_after>#!/usr/bin/env python
import psycopg2 as ordbms
import urllib, urllib2
import xml.etree.cElementTree as ElementTree
class ImportLastfmBio:
def __init__(self):
self.conn = ordbms.connect ("dbname='librefm'")
self.cursor = self.conn.cursor()
def importAll(self):
"""Imports descriptions for all artists who don't currently have one"""
self.cursor.execute("SELECT * FROM artist WHERE bio_summary IS NULL AND bio_content IS NULL")
for artist in self.cursor.fetchall():
name = artist[0]
url = "http://ws.audioscrobbler.com/2.0/artist/%s/info.xml" % urllib.quote(name)
print "\nFetching %s..." % name
try:
xml = urllib2.urlopen(url)
self.parse(xml, name, "http://www.last.fm/music/%s" % urllib.quote(name))
except urllib2.HTTPError:
print "Failed."
def parse(self, xml, name, source):
for event, elem in ElementTree.iterparse(xml):
if elem.tag == "bio":
for bio_e in elem.getchildren():
if bio_e.tag == "summary":
summary = bio_e.text
elif bio_e.tag == "content":
content = bio_e.text
if summary:
summary = self.fixUrls(summary.strip())
if content:
content = self.fixUrls(content.strip())
if summary != None or content != None:
self.cursor.execute("UPDATE artist SET bio_summary = %s, bio_content = %s, bio_source = %s WHERE name = %s", (summary, content, source, name))
self.conn.commit()
print "Imported!"
else:
print "No Bio"
def fixUrls(self, text):
# str.replace returns a new string, so the result must be reassigned
text = text.replace("http://www.last.fm/tag/", "/tag/")
text = text.replace("http://last.fm/tag/", "/tag/")
text = text.replace("http://www.last.fm/music/", "/artist/")
text = text.replace("http://last.fm/music/", "/artist/")
return text
if __name__ == '__main__':
importer = ImportLastfmBio()
importer.importAll()
|
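A side note on the commit above: the four hard-coded rewrites in fixUrls can be driven by a table, which keeps future additions to one line each. A minimal sketch (the REWRITES list and rewrite_urls helper are illustrative, not part of the commit):

REWRITES = [
    ("http://www.last.fm/tag/", "/tag/"),
    ("http://last.fm/tag/", "/tag/"),
    ("http://www.last.fm/music/", "/artist/"),
    ("http://last.fm/music/", "/artist/"),
]

def rewrite_urls(text):
    # str.replace returns a new string, so reassign on every step.
    for old, new in REWRITES:
        text = text.replace(old, new)
    return text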
|
98ee8824fcf8a9136b6a48a56108cc5c175f5d00
|
nysa-path.py
|
nysa-path.py
|
#! /usr/bin/python
import os
import json
import site
PATH = os.path.abspath(os.path.dirname(__file__))
NYSA_NAME = "nysa"
PATH_DICT_NAME = "path.json"
PATH_ENTRY_NAME = "nysa-verilog"
SITE_NYSA = os.path.abspath(os.path.join(site.getuserbase(), NYSA_NAME))
SITE_PATH = os.path.join(SITE_NYSA, PATH_DICT_NAME)
if __name__ == "__main__":
if not os.path.exists(SITE_NYSA):
os.makedirs(SITE_NYSA)
if not os.path.exists(SITE_PATH):
f = open(SITE_PATH, "w")
f.write("{}")
f.close()
print "Openning %s" % SITE_PATH
f = open(SITE_PATH, "r")
path_dict = json.load(f)
f.close()
pentry = PATH
if "nysa-verilog" in path_dict and type(path_dict["nysa-verilog"]) is list:
if pentry not in path_dict["nysa-verilog"]:
path_dict["nysa-verilog"].insert(0, pentry)
else:
path_dict["nysa-verilog"] = [PATH]
f = open(SITE_PATH, "w")
f.write(json.dumps(path_dict))
f.close()
|
Add a script to update the nysa-verilog path
|
Add a script to update the nysa-verilog path
This path is used to tell Nysa where to look for verilog modules
|
Python
|
mit
|
CospanDesign/nysa-verilog,CospanDesign/nysa-verilog
|
Add a script to update the nysa-verilog path
This path is used to tell Nysa where to look for verilog modules
|
#! /usr/bin/python
import os
import json
import site
PATH = os.path.abspath(os.path.dirname(__file__))
NYSA_NAME = "nysa"
PATH_DICT_NAME = "path.json"
PATH_ENTRY_NAME = "nysa-verilog"
SITE_NYSA = os.path.abspath(os.path.join(site.getuserbase(), NYSA_NAME))
SITE_PATH = os.path.join(SITE_NYSA, PATH_DICT_NAME)
if __name__ == "__main__":
if not os.path.exists(SITE_NYSA):
os.makedirs(SITE_NYSA)
if not os.path.exists(SITE_PATH):
f = open(SITE_PATH, "w")
f.write("{}")
f.close()
print "Openning %s" % SITE_PATH
f = open(SITE_PATH, "r")
path_dict = json.load(f)
f.close()
pentry = PATH
if "nysa-verilog" in path_dict and type(path_dict["nysa-verilog"]) is list:
if pentry not in path_dict["nysa-verilog"]:
path_dict["nysa-verilog"].insert(0, pentry)
else:
path_dict["nysa-verilog"] = [PATH]
f = open(SITE_PATH, "w")
f.write(json.dumps(path_dict))
f.close()
|
<commit_before><commit_msg>Add a script to update the nysa-verilog path
This path is used to tell Nysa where to look for verilog modules<commit_after>
|
#! /usr/bin/python
import os
import json
import site
PATH = os.path.abspath(os.path.dirname(__file__))
NYSA_NAME = "nysa"
PATH_DICT_NAME = "path.json"
PATH_ENTRY_NAME = "nysa-verilog"
SITE_NYSA = os.path.abspath(os.path.join(site.getuserbase(), NYSA_NAME))
SITE_PATH = os.path.join(SITE_NYSA, PATH_DICT_NAME)
if __name__ == "__main__":
if not os.path.exists(SITE_NYSA):
os.makedirs(SITE_NYSA)
if not os.path.exists(SITE_PATH):
f = open(SITE_PATH, "w")
f.write("{}")
f.close()
print "Openning %s" % SITE_PATH
f = open(SITE_PATH, "r")
path_dict = json.load(f)
f.close()
pentry = PATH
if "nysa-verilog" in path_dict and type(path_dict["nysa-verilog"]) is list:
if pentry not in path_dict["nysa-verilog"]:
path_dict["nysa-verilog"].insert(0, pentry)
else:
path_dict["nysa-verilog"] = [PATH]
f = open(SITE_PATH, "w")
f.write(json.dumps(path_dict))
f.close()
|
Add a script to update the nysa-verilog path
This path is used to tell Nysa where to look for verilog modules#! /usr/bin/python
import os
import json
import site
PATH = os.path.abspath(os.path.dirname(__file__))
NYSA_NAME = "nysa"
PATH_DICT_NAME = "path.json"
PATH_ENTRY_NAME = "nysa-verilog"
SITE_NYSA = os.path.abspath(os.path.join(site.getuserbase(), NYSA_NAME))
SITE_PATH = os.path.join(SITE_NYSA, PATH_DICT_NAME)
if __name__ == "__main__":
if not os.path.exists(SITE_NYSA):
os.makedirs(SITE_NYSA)
if not os.path.exists(SITE_PATH):
f = open(SITE_PATH, "w")
f.write("{}")
f.close()
print "Openning %s" % SITE_PATH
f = open(SITE_PATH, "r")
path_dict = json.load(f)
f.close()
pentry = PATH
if "nysa-verilog" in path_dict and type(path_dict["nysa-verilog"]) is list:
if pentry not in path_dict["nysa-verilog"]:
path_dict["nysa-verilog"].insert(0, pentry)
else:
path_dict["nysa-verilog"] = [PATH]
f = open(SITE_PATH, "w")
f.write(json.dumps(path_dict))
f.close()
|
<commit_before><commit_msg>Add a script to update the nysa-verilog path
This path is used to tell Nysa where to look for verilog modules<commit_after>#! /usr/bin/python
import os
import json
import site
PATH = os.path.abspath(os.path.dirname(__file__))
NYSA_NAME = "nysa"
PATH_DICT_NAME = "path.json"
PATH_ENTRY_NAME = "nysa-verilog"
SITE_NYSA = os.path.abspath(os.path.join(site.getuserbase(), NYSA_NAME))
SITE_PATH = os.path.join(SITE_NYSA, PATH_DICT_NAME)
if __name__ == "__main__":
if not os.path.exists(SITE_NYSA):
os.makedirs(SITE_NYSA)
if not os.path.exists(SITE_PATH):
f = open(SITE_PATH, "w")
f.write("{}")
f.close()
print "Openning %s" % SITE_PATH
f = open(SITE_PATH, "r")
path_dict = json.load(f)
f.close()
pentry = PATH
if "nysa-verilog" in path_dict and type(path_dict["nysa-verilog"]) is list:
if pentry not in path_dict["nysa-verilog"]:
path_dict["nysa-verilog"].insert(0, pentry)
else:
path_dict["nysa-verilog"] = [PATH]
f = open(SITE_PATH, "w")
f.write(json.dumps(path_dict))
f.close()
|
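For context on the commit above: the script registers this checkout in a per-user JSON file so Nysa can locate verilog modules. A consumer of that registry might read it back like this (read_verilog_paths is an illustrative helper, not part of the commit):

import json
import os
import site

def read_verilog_paths():
    """Return the registered nysa-verilog paths, or an empty list."""
    registry = os.path.join(site.getuserbase(), "nysa", "path.json")
    if not os.path.exists(registry):
        return []
    with open(registry) as f:
        return json.load(f).get("nysa-verilog", [])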
|
f331780f48d9f053ba770cade487417537cc2a93
|
data_structures/graphs/adjacency_list.py
|
data_structures/graphs/adjacency_list.py
|
# -*- coding: utf-8 -*-
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from helpers.display import Section
from pprint import pprint as ppr
class AbstractGraphList(object):
def __init__(self):
# We're using a dict since the vertices are labeled, but the lists
# are contained within: "a collection of unordered lists."
self.nodes = {}
class AdjacencyList(AbstractGraphList):
"""
[Wikipedia]
"In graph theory and computer science, an adjacency list representation
of a graph is a collection of unordered lists, one for each vertex
in the graph. Each list describes the set of neighbors of its vertex.
See "Storing a sparse matrix" for an alternative approach." """
def __str__(self):
divider = '-' * 40
print(divider)
for node, adjacent in self.nodes.iteritems():
print('{} is adjacent to {} '.format(node, ', '.join(adjacent)))
print(divider)
return ''
def __setitem__(self, node, neighbors):
self.nodes[node] = neighbors
def __getitem__(self, node):
return self.nodes[node]
def report(self, vertex):
return self.__getitem__(vertex)
if __name__ == '__main__':
with Section('Adjacency list'):
AList = AdjacencyList()
AList['A'] = ['B', 'C', 'D']
AList['B'] = ['A', 'C', 'D']
AList['C'] = ['A', 'B', 'D']
AList['D'] = ['A', 'B', 'C']
print(AList)
ppr(AList.nodes)
print(AList.report('B'))
|
Add adjacency list data structure
|
Add adjacency list data structure
|
Python
|
apache-2.0
|
christabor/MoAL,christabor/MoAL,christabor/MoAL,christabor/MoAL,christabor/MoAL
|
Add adjacency list data structure
|
# -*- coding: utf-8 -*-
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from helpers.display import Section
from pprint import pprint as ppr
class AbstractGraphList(object):
def __init__(self):
# We're using a dict since the vertices are labeled, but the lists
# are contained within: "a collection of unordered lists."
self.nodes = {}
class AdjacencyList(AbstractGraphList):
"""
[Wikipedia]
"In graph theory and computer science, an adjacency list representation
of a graph is a collection of unordered lists, one for each vertex
in the graph. Each list describes the set of neighbors of its vertex.
See "Storing a sparse matrix" for an alternative approach." """
def __str__(self):
divider = '-' * 40
print(divider)
for node, adjacent in self.nodes.iteritems():
print('{} is adjacent to {} '.format(node, ', '.join(adjacent)))
print(divider)
return ''
def __setitem__(self, node, neighbors):
self.nodes[node] = neighbors
def __getitem__(self, node):
return self.nodes[node]
def report(self, vertex):
return self.__getitem__(vertex)
if __name__ == '__main__':
with Section('Adjacency list'):
AList = AdjacencyList()
AList['A'] = ['B', 'C', 'D']
AList['B'] = ['A', 'C', 'D']
AList['C'] = ['A', 'B', 'D']
AList['D'] = ['A', 'B', 'C']
print(AList)
ppr(AList.nodes)
print(AList.report('B'))
|
<commit_before><commit_msg>Add adjacency list data structure<commit_after>
|
# -*- coding: utf-8 -*-
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from helpers.display import Section
from pprint import pprint as ppr
class AbstractGraphList(object):
def __init__(self):
# We're using a dict since the vertices are labeled, but the lists
# are contained within: "a collection of unordered lists."
self.nodes = {}
class AdjacencyList(AbstractGraphList):
"""
[Wikipedia]
"In graph theory and computer science, an adjacency list representation
of a graph is a collection of unordered lists, one for each vertex
in the graph. Each list describes the set of neighbors of its vertex.
See "Storing a sparse matrix" for an alternative approach." """
def __str__(self):
divider = '-' * 40
print(divider)
for node, adjacent in self.nodes.iteritems():
print('{} is adjacent to {} '.format(node, ', '.join(adjacent)))
print(divider)
return ''
def __setitem__(self, node, neighbors):
self.nodes[node] = neighbors
def __getitem__(self, node):
return self.nodes[node]
def report(self, vertex):
return self.__getitem__(vertex)
if __name__ == '__main__':
with Section('Adjacency list'):
AList = AdjacencyList()
AList['A'] = ['B', 'C', 'D']
AList['B'] = ['A', 'C', 'D']
AList['C'] = ['A', 'B', 'D']
AList['D'] = ['A', 'B', 'C']
print(AList)
ppr(AList.nodes)
print(AList.report('B'))
|
Add adjacency list data structure# -*- coding: utf-8 -*-
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from helpers.display import Section
from pprint import pprint as ppr
class AbstractGraphList(object):
def __init__(self):
# We're using a dict since the vertices are labeled, but the lists
# are contained within: "a collection of unordered lists."
self.nodes = {}
class AdjacencyList(AbstractGraphList):
"""
[Wikipedia]
"In graph theory and computer science, an adjacency list representation
of a graph is a collection of unordered lists, one for each vertex
in the graph. Each list describes the set of neighbors of its vertex.
See "Storing a sparse matrix" for an alternative approach." """
def __str__(self):
divider = '-' * 40
print(divider)
for node, adjacent in self.nodes.iteritems():
print('{} is adjacent to {} '.format(node, ', '.join(adjacent)))
print(divider)
return ''
def __setitem__(self, node, neighbors):
self.nodes[node] = neighbors
def __getitem__(self, node):
return self.nodes[node]
def report(self, vertex):
return self.__getitem__(vertex)
if __name__ == '__main__':
with Section('Adjacency list'):
AList = AdjacencyList()
AList['A'] = ['B', 'C', 'D']
AList['B'] = ['A', 'C', 'D']
AList['C'] = ['A', 'B', 'D']
AList['D'] = ['A', 'B', 'C']
print(AList)
ppr(AList.nodes)
print(AList.report('B'))
|
<commit_before><commit_msg>Add adjacency list data structure<commit_after># -*- coding: utf-8 -*-
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from helpers.display import Section
from pprint import pprint as ppr
class AbstractGraphList(object):
def __init__(self):
# We're using a dict since the vertices are labeled, but the lists
# are contained within: "a collection of unordered lists."
self.nodes = {}
class AdjacencyList(AbstractGraphList):
"""
[Wikipedia]
"In graph theory and computer science, an adjacency list representation
of a graph is a collection of unordered lists, one for each vertex
in the graph. Each list describes the set of neighbors of its vertex.
See "Storing a sparse matrix" for an alternative approach." """
def __str__(self):
divider = '-' * 40
print(divider)
for node, adjacent in self.nodes.iteritems():
print('{} is adjacent to {} '.format(node, ', '.join(adjacent)))
print(divider)
return ''
def __setitem__(self, node, neighbors):
self.nodes[node] = neighbors
def __getitem__(self, node):
return self.nodes[node]
def report(self, vertex):
return self.__getitem__(vertex)
if __name__ == '__main__':
with Section('Adjacency list'):
AList = AdjacencyList()
AList['A'] = ['B', 'C', 'D']
AList['B'] = ['A', 'C', 'D']
AList['C'] = ['A', 'B', 'D']
AList['D'] = ['A', 'B', 'C']
print(AList)
ppr(AList.nodes)
print(AList.report('B'))
|
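Because AdjacencyList implements __getitem__, it can be fed straight into standard graph algorithms. A breadth-first traversal sketch (bfs is illustrative, not part of the commit):

from collections import deque

def bfs(graph, start):
    """Return vertices reachable from start in breadth-first order."""
    seen = {start}
    order = []
    queue = deque([start])
    while queue:
        node = queue.popleft()
        order.append(node)
        for neighbor in graph[node]:
            if neighbor not in seen:
                seen.add(neighbor)
                queue.append(neighbor)
    return order

With the four-vertex example above, bfs(AList, 'A') yields ['A', 'B', 'C', 'D'].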
|
d59439960bed2abe706aa159c1c257a80ae7f7ca
|
misc/split-mirax.py
|
misc/split-mirax.py
|
#!/usr/bin/python
import struct, sys, os
def rr(f):
return struct.unpack("<i", f.read(4))[0]
filename = sys.argv[1]
f = open(filename)
dir = os.path.dirname(filename)
HEADER_OFFSET = 37
f.seek(HEADER_OFFSET)
filesize = os.stat(sys.argv[1]).st_size
num_items = (filesize - HEADER_OFFSET) / 4
# read first pointer
top = rr(f)
# seek
f.seek(top)
# read table pointers
table = []
while True:
ptr = rr(f)
if ptr == 0:
break
table.append(ptr)
print "table: " + str(table)
# read each item
for ptr in table:
print ptr
f.seek(ptr)
rr(f) # 0
newptr = rr(f)
# seek
f.seek(newptr)
while True:
# read page stuff
pages = rr(f)
nextptr = rr(f)
while pages != 0:
tileno = rr(f)
fileoffset = rr(f)
filelen = rr(f)
fileno = rr(f)
pages = pages - 1
# open file and get data
filename = os.path.join(dir, "Data%0.4d.dat" % (fileno))
ff = open(filename)
ff.seek(fileoffset)
data = ff.read(filelen)
ff.close()
# write it
outfilename = "Data%0.4d_%0.10d.dat" % (fileno, tileno)
of = open(outfilename, "w")
of.write(data)
of.close()
if nextptr == 0:
break
|
Add utility to split mirax data files
|
Add utility to split mirax data files
|
Python
|
lgpl-2.1
|
openslide/openslide,openslide/openslide,openslide/openslide,openslide/openslide
|
Add utility to split mirax data files
|
#!/usr/bin/python
import struct, sys, os
def rr(f):
return struct.unpack("<i", f.read(4))[0]
filename = sys.argv[1]
f = open(filename)
dir = os.path.dirname(filename)
HEADER_OFFSET = 37
f.seek(HEADER_OFFSET)
filesize = os.stat(sys.argv[1]).st_size
num_items = (filesize - HEADER_OFFSET) / 4
# read first pointer
top = rr(f)
# seek
f.seek(top)
# read table pointers
table = []
while True:
ptr = rr(f)
if ptr == 0:
break
table.append(ptr)
print "table: " + str(table)
# read each item
for ptr in table:
print ptr
f.seek(ptr)
rr(f) # 0
newptr = rr(f)
# seek
f.seek(newptr)
while True:
# read page stuff
pages = rr(f)
nextptr = rr(f)
while pages != 0:
tileno = rr(f)
fileoffset = rr(f)
filelen = rr(f)
fileno = rr(f)
pages = pages - 1
# open file and get data
filename = os.path.join(dir, "Data%0.4d.dat" % (fileno))
ff = open(filename)
ff.seek(fileoffset)
data = ff.read(filelen)
ff.close()
# write it
outfilename = "Data%0.4d_%0.10d.dat" % (fileno, tileno)
of = open(outfilename, "w")
of.write(data)
of.close()
if nextptr == 0:
break
|
<commit_before><commit_msg>Add utility to split mirax data files<commit_after>
|
#!/usr/bin/python
import struct, sys, os
def rr(f):
return struct.unpack("<i", f.read(4))[0]
filename = sys.argv[1]
f = open(filename)
dir = os.path.dirname(filename)
HEADER_OFFSET = 37
f.seek(HEADER_OFFSET)
filesize = os.stat(sys.argv[1]).st_size
num_items = (filesize - HEADER_OFFSET) / 4
# read first pointer
top = rr(f)
# seek
f.seek(top)
# read table pointers
table = []
while True:
ptr = rr(f)
if ptr == 0:
break
table.append(ptr)
print "table: " + str(table)
# read each item
for ptr in table:
print ptr
f.seek(ptr)
rr(f) # 0
newptr = rr(f)
# seek
f.seek(newptr)
while True:
# read page stuff
pages = rr(f)
nextptr = rr(f)
while pages != 0:
tileno = rr(f)
fileoffset = rr(f)
filelen = rr(f)
fileno = rr(f)
pages = pages - 1
# open file and get data
filename = os.path.join(dir, "Data%0.4d.dat" % (fileno))
ff = open(filename)
ff.seek(fileoffset)
data = ff.read(filelen)
ff.close()
# write it
outfilename = "Data%0.4d_%0.10d.dat" % (fileno, tileno)
of = open(outfilename, "w")
of.write(data)
of.close()
if nextptr == 0:
break
|
Add utility to split mirax data files#!/usr/bin/python
import struct, sys, os
def rr(f):
return struct.unpack("<i", f.read(4))[0]
filename = sys.argv[1]
f = open(filename)
dir = os.path.dirname(filename)
HEADER_OFFSET = 37
f.seek(HEADER_OFFSET)
filesize = os.stat(sys.argv[1]).st_size
num_items = (filesize - HEADER_OFFSET) / 4
# read first pointer
top = rr(f)
# seek
f.seek(top)
# read table pointers
table = []
while True:
ptr = rr(f)
if ptr == 0:
break
table.append(ptr)
print "table: " + str(table)
# read each item
for ptr in table:
print ptr
f.seek(ptr)
rr(f) # 0
newptr = rr(f)
# seek
f.seek(newptr)
while True:
# read page stuff
pages = rr(f)
nextptr = rr(f)
while pages != 0:
tileno = rr(f)
fileoffset = rr(f)
filelen = rr(f)
fileno = rr(f)
pages = pages - 1
# open file and get data
filename = os.path.join(dir, "Data%0.4d.dat" % (fileno))
ff = open(filename)
ff.seek(fileoffset)
data = ff.read(filelen)
ff.close()
# write it
outfilename = "Data%0.4d_%0.10d.dat" % (fileno, tileno)
of = open(outfilename, "w")
of.write(data)
of.close()
if nextptr == 0:
break
|
<commit_before><commit_msg>Add utility to split mirax data files<commit_after>#!/usr/bin/python
import struct, sys, os
def rr(f):
return struct.unpack("<i", f.read(4))[0]
filename = sys.argv[1]
f = open(filename)
dir = os.path.dirname(filename)
HEADER_OFFSET = 37
f.seek(HEADER_OFFSET)
filesize = os.stat(sys.argv[1]).st_size
num_items = (filesize - HEADER_OFFSET) / 4
# read first pointer
top = rr(f)
# seek
f.seek(top)
# read table pointers
table = []
while True:
ptr = rr(f)
if ptr == 0:
break
table.append(ptr)
print "table: " + str(table)
# read each item
for ptr in table:
print ptr
f.seek(ptr)
rr(f) # 0
newptr = rr(f)
# seek
f.seek(newptr)
while True:
# read page stuff
pages = rr(f)
nextptr = rr(f)
while pages != 0:
tileno = rr(f)
fileoffset = rr(f)
filelen = rr(f)
fileno = rr(f)
pages = pages - 1
# open file and get data
filename = os.path.join(dir, "Data%0.4d.dat" % (fileno))
ff = open(filename)
ff.seek(fileoffset)
data = ff.read(filelen)
ff.close()
# write it
outfilename = "Data%0.4d_%0.10d.dat" % (fileno, tileno)
of = open(outfilename, "w")
of.write(data)
of.close()
if nextptr == 0:
break
|
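One fragility in the splitter above: rr() assumes every f.read(4) returns four bytes, so a truncated index file surfaces only as a struct.error mid-stream. A guarded variant (read_i32 is an illustrative replacement, not part of the commit):

import struct

def read_i32(f):
    """Read one little-endian int32, failing loudly on a short read."""
    data = f.read(4)
    if len(data) != 4:
        raise EOFError("unexpected end of file while reading int32")
    return struct.unpack("<i", data)[0]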
|
71f7f9b344f6475dc86adf00757f265455112aa5
|
web/Aovek/migrations/0002_video_image.py
|
web/Aovek/migrations/0002_video_image.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-19 14:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('Aovek', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='image',
field=models.ImageField(default=django.utils.timezone.now, upload_to=''),
preserve_default=False,
),
]
|
Add migrations for image field in model
|
Add migrations for image field in model
|
Python
|
mit
|
nikolaystanishev/traffic-sign-recognition
|
Add migrations for image field in model
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-19 14:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('Aovek', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='image',
field=models.ImageField(default=django.utils.timezone.now, upload_to=''),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migrations for image field in model<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-19 14:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('Aovek', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='image',
field=models.ImageField(default=django.utils.timezone.now, upload_to=''),
preserve_default=False,
),
]
|
Add migrations for image field in model# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-19 14:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('Aovek', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='image',
field=models.ImageField(default=django.utils.timezone.now, upload_to=''),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migrations for image field in model<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-02-19 14:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('Aovek', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='video',
name='image',
field=models.ImageField(default=django.utils.timezone.now, upload_to=''),
preserve_default=False,
),
]
|
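A caveat on the migration above: default=django.utils.timezone.now gives the ImageField a datetime callable as its one-off default, almost certainly an artifact of answering the makemigrations prompt. A file-backed field is usually defaulted to an empty string instead; a corrected sketch (not the committed migration):

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [('Aovek', '0001_initial')]
    operations = [
        migrations.AddField(
            model_name='video',
            name='image',
            # An empty-string default suits a file-backed field.
            field=models.ImageField(default='', upload_to=''),
            preserve_default=False,
        ),
    ]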
|
51536bfdd41c4814f51049fcb91e59327d1515c9
|
control/tests/ctrlutil_test.py
|
control/tests/ctrlutil_test.py
|
import unittest
import numpy as np
from control.ctrlutil import *
class TestUtils(unittest.TestCase):
def setUp(self):
self.mag = np.array([1, 10, 100, 2, 0.1, 0.01])
self.db = np.array([0, 20, 40, 6.0206, -20, -40])
def check_unwrap_array(self, angle, period=None):
if period is None:
angle_mod = angle % (2 * np.pi)
angle_unwrap = unwrap(angle_mod)
else:
angle_mod = angle % period
angle_unwrap = unwrap(angle_mod, period)
np.testing.assert_array_almost_equal(angle_unwrap, angle)
def test_unwrap_increasing(self):
angle = np.linspace(0, 20, 50)
self.check_unwrap_array(angle)
def test_unwrap_decreasing(self):
angle = np.linspace(0, -20, 50)
self.check_unwrap_array(angle)
def test_unwrap_inc_degrees(self):
angle = np.linspace(0, 720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_dec_degrees(self):
angle = np.linspace(0, -720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_large_skips(self):
angle = np.array([0., 4 * np.pi, -2 * np.pi])
np.testing.assert_array_almost_equal(unwrap(angle), [0., 0., 0.])
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(TestUtils)
if __name__ == "__main__":
unittest.main()
|
Add tests showing problems with ctrlutil.unwrap
|
Add tests showing problems with ctrlutil.unwrap
The routine ctrlutil.unwrap fails if there are large jumps in phase.
|
Python
|
bsd-3-clause
|
murrayrm/python-control,roryyorke/python-control,python-control/python-control
|
Add tests showing problems with ctrlutil.unwrap
The routine ctrlutil.unwrap fails if there are large jumps in phase.
|
import unittest
import numpy as np
from control.ctrlutil import *
class TestUtils(unittest.TestCase):
def setUp(self):
self.mag = np.array([1, 10, 100, 2, 0.1, 0.01])
self.db = np.array([0, 20, 40, 6.0206, -20, -40])
def check_unwrap_array(self, angle, period=None):
if period is None:
angle_mod = angle % (2 * np.pi)
angle_unwrap = unwrap(angle_mod)
else:
angle_mod = angle % period
angle_unwrap = unwrap(angle_mod, period)
np.testing.assert_array_almost_equal(angle_unwrap, angle)
def test_unwrap_increasing(self):
angle = np.linspace(0, 20, 50)
self.check_unwrap_array(angle)
def test_unwrap_decreasing(self):
angle = np.linspace(0, -20, 50)
self.check_unwrap_array(angle)
def test_unwrap_inc_degrees(self):
angle = np.linspace(0, 720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_dec_degrees(self):
angle = np.linspace(0, -720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_large_skips(self):
angle = np.array([0., 4 * np.pi, -2 * np.pi])
np.testing.assert_array_almost_equal(unwrap(angle), [0., 0., 0.])
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(TestUtils)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests showing problems with ctrlutil.unwrap
The routine ctrlutil.unwrap fails if there are large jumps in phase.<commit_after>
|
import unittest
import numpy as np
from control.ctrlutil import *
class TestUtils(unittest.TestCase):
def setUp(self):
self.mag = np.array([1, 10, 100, 2, 0.1, 0.01])
self.db = np.array([0, 20, 40, 6.0206, -20, -40])
def check_unwrap_array(self, angle, period=None):
if period is None:
angle_mod = angle % (2 * np.pi)
angle_unwrap = unwrap(angle_mod)
else:
angle_mod = angle % period
angle_unwrap = unwrap(angle_mod, period)
np.testing.assert_array_almost_equal(angle_unwrap, angle)
def test_unwrap_increasing(self):
angle = np.linspace(0, 20, 50)
self.check_unwrap_array(angle)
def test_unwrap_decreasing(self):
angle = np.linspace(0, -20, 50)
self.check_unwrap_array(angle)
def test_unwrap_inc_degrees(self):
angle = np.linspace(0, 720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_dec_degrees(self):
angle = np.linspace(0, -720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_large_skips(self):
angle = np.array([0., 4 * np.pi, -2 * np.pi])
np.testing.assert_array_almost_equal(unwrap(angle), [0., 0., 0.])
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(TestUtils)
if __name__ == "__main__":
unittest.main()
|
Add tests showing problems with ctrlutil.unwrap
The routine ctrlutil.unwrap fails if there are large jumps in phase.import unittest
import numpy as np
from control.ctrlutil import *
class TestUtils(unittest.TestCase):
def setUp(self):
self.mag = np.array([1, 10, 100, 2, 0.1, 0.01])
self.db = np.array([0, 20, 40, 6.0206, -20, -40])
def check_unwrap_array(self, angle, period=None):
if period is None:
angle_mod = angle % (2 * np.pi)
angle_unwrap = unwrap(angle_mod)
else:
angle_mod = angle % period
angle_unwrap = unwrap(angle_mod, period)
np.testing.assert_array_almost_equal(angle_unwrap, angle)
def test_unwrap_increasing(self):
angle = np.linspace(0, 20, 50)
self.check_unwrap_array(angle)
def test_unwrap_decreasing(self):
angle = np.linspace(0, -20, 50)
self.check_unwrap_array(angle)
def test_unwrap_inc_degrees(self):
angle = np.linspace(0, 720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_dec_degrees(self):
angle = np.linspace(0, -720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_large_skips(self):
angle = np.array([0., 4 * np.pi, -2 * np.pi])
np.testing.assert_array_almost_equal(unwrap(angle), [0., 0., 0.])
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(TestUtils)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests showing problems with ctrlutil.unwrap
The routine ctrlutil.unwrap fails if there are large jumps in phase.<commit_after>import unittest
import numpy as np
from control.ctrlutil import *
class TestUtils(unittest.TestCase):
def setUp(self):
self.mag = np.array([1, 10, 100, 2, 0.1, 0.01])
self.db = np.array([0, 20, 40, 6.0206, -20, -40])
def check_unwrap_array(self, angle, period=None):
if period is None:
angle_mod = angle % (2 * np.pi)
angle_unwrap = unwrap(angle_mod)
else:
angle_mod = angle % period
angle_unwrap = unwrap(angle_mod, period)
np.testing.assert_array_almost_equal(angle_unwrap, angle)
def test_unwrap_increasing(self):
angle = np.linspace(0, 20, 50)
self.check_unwrap_array(angle)
def test_unwrap_decreasing(self):
angle = np.linspace(0, -20, 50)
self.check_unwrap_array(angle)
def test_unwrap_inc_degrees(self):
angle = np.linspace(0, 720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_dec_degrees(self):
angle = np.linspace(0, -720, 50)
self.check_unwrap_array(angle, 360)
def test_unwrap_large_skips(self):
angle = np.array([0., 4 * np.pi, -2 * np.pi])
np.testing.assert_array_almost_equal(unwrap(angle), [0., 0., 0.])
def test_suite():
return unittest.TestLoader().loadTestsFromTestCase(TestUtils)
if __name__ == "__main__":
unittest.main()
|
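The test_unwrap_large_skips case pins down the failure mode named in the commit message: jumps larger than one full cycle. numpy's own unwrap already collapses such jumps, which gives a quick sanity check of the expected values (sketch, radians only):

import numpy as np

angle = np.array([0.0, 4 * np.pi, -2 * np.pi])
# Whole-cycle jumps are removed, so all three samples coincide at zero:
print(np.unwrap(angle))  # [0. 0. 0.]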
|
548fa6380fc0f76708e414a1a3165dde13a663d9
|
pyqode/python/code_edit.py
|
pyqode/python/code_edit.py
|
"""
Deprecated; the PyCodeEdit class has been moved into pyqode.python.widgets.
This file will be removed in the next minor version (2.2).
"""
from .widgets.code_edit import PyCodeEdit
__all__ = [
'PyCodeEdit'
]
|
Fix backward incompatibility. This module is deprecated and will be removed in version 2.2
|
Fix backward incompatibility. This module is deprecated and will be removed in version 2.2
|
Python
|
mit
|
pyQode/pyqode.python,zwadar/pyqode.python,pyQode/pyqode.python,mmolero/pyqode.python
|
Fix backward incompatibility. This module is deprecated and will be removed in version 2.2
|
"""
Deprecated; the PyCodeEdit class has been moved into pyqode.python.widgets.
This file will be removed in the next minor version (2.2).
"""
from .widgets.code_edit import PyCodeEdit
__all__ = [
'PyCodeEdit'
]
|
<commit_before><commit_msg>Fix backward incompatibility. This module is deprecated and will be removed in version 2.2<commit_after>
|
"""
Deprecated; the PyCodeEdit class has been moved into pyqode.python.widgets.
This file will be removed in the next minor version (2.2).
"""
from .widgets.code_edit import PyCodeEdit
__all__ = [
'PyCodeEdit'
]
|
Fix backward incompatibility. This module is deprecated and will be removed in version 2.2"""
Deprecated; the PyCodeEdit class has been moved into pyqode.python.widgets.
This file will be removed in the next minor version (2.2).
"""
from .widgets.code_edit import PyCodeEdit
__all__ = [
'PyCodeEdit'
]
|
<commit_before><commit_msg>Fix backward incompatibility. This module is deprecated and will be removed in version 2.2<commit_after>"""
Deprecated; the PyCodeEdit class has been moved into pyqode.python.widgets.
This file will be removed in the next minor version (2.2).
"""
from .widgets.code_edit import PyCodeEdit
__all__ = [
'PyCodeEdit'
]
|
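Deprecation shims like the module above often emit a warning so downstream imports surface during test runs. A sketch of what that could look like (the warning is not in the commit):

import warnings

warnings.warn(
    "pyqode.python.code_edit is deprecated; import PyCodeEdit from "
    "pyqode.python.widgets instead (removal planned for 2.2)",
    DeprecationWarning,
    stacklevel=2,
)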
|
620a58856a3051cb6522b94ef68900cdfbdac3b6
|
tests/fields/base.py
|
tests/fields/base.py
|
import steel
import unittest
class FieldTests(unittest.TestCase):
def test_auto_label(self):
# One word
field = steel.Bytes(size=1)
field.set_name('byte')
self.assertEqual(field.label, 'byte')
# Two words
field = steel.Bytes(size=1)
field.set_name('two_bytes')
self.assertEqual(field.label, 'two bytes')
def test_manual_label(self):
field = steel.Bytes(size=1, label='explicit')
field.set_name('field')
self.assertEqual(field.label, 'explicit')
|
Add tests for label management
|
Add tests for label management
|
Python
|
bsd-3-clause
|
gulopine/steel-experiment
|
Add tests for label management
|
import steel
import unittest
class FieldTests(unittest.TestCase):
def test_auto_label(self):
# One word
field = steel.Bytes(size=1)
field.set_name('byte')
self.assertEqual(field.label, 'byte')
# Two words
field = steel.Bytes(size=1)
field.set_name('two_bytes')
self.assertEqual(field.label, 'two bytes')
def test_manual_label(self):
field = steel.Bytes(size=1, label='explicit')
field.set_name('field')
self.assertEqual(field.label, 'explicit')
|
<commit_before><commit_msg>Add tests for label management<commit_after>
|
import steel
import unittest
class FieldTests(unittest.TestCase):
def test_auto_label(self):
# One word
field = steel.Bytes(size=1)
field.set_name('byte')
self.assertEqual(field.label, 'byte')
# Two words
field = steel.Bytes(size=1)
field.set_name('two_bytes')
self.assertEqual(field.label, 'two bytes')
def test_manual_label(self):
field = steel.Bytes(size=1, label='explicit')
field.set_name('field')
self.assertEqual(field.label, 'explicit')
|
Add tests for label managementimport steel
import unittest
class FieldTests(unittest.TestCase):
def test_auto_label(self):
# One word
field = steel.Bytes(size=1)
field.set_name('byte')
self.assertEqual(field.label, 'byte')
# Two words
field = steel.Bytes(size=1)
field.set_name('two_bytes')
self.assertEqual(field.label, 'two bytes')
def test_manual_label(self):
field = steel.Bytes(size=1, label='explicit')
field.set_name('field')
self.assertEqual(field.label, 'explicit')
|
<commit_before><commit_msg>Add tests for label management<commit_after>import steel
import unittest
class FieldTests(unittest.TestCase):
def test_auto_label(self):
# One word
field = steel.Bytes(size=1)
field.set_name('byte')
self.assertEqual(field.label, 'byte')
# Two words
field = steel.Bytes(size=1)
field.set_name('two_bytes')
self.assertEqual(field.label, 'two bytes')
def test_manual_label(self):
field = steel.Bytes(size=1, label='explicit')
field.set_name('field')
self.assertEqual(field.label, 'explicit')
|
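The two tests above imply the labelling rule: an explicit label wins, otherwise set_name derives one by swapping underscores for spaces. A minimal sketch of a field honouring that contract (hypothetical; the real steel implementation may differ):

class LabelledField(object):
    def __init__(self, label=None):
        self.label = label

    def set_name(self, name):
        self.name = name
        if self.label is None:
            # Derive a human-readable label from the attribute name.
            self.label = name.replace('_', ' ')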
|
abe81ab36ec3468cc12243d16fe3d43d5d2752a4
|
tests/test_upload.py
|
tests/test_upload.py
|
import pytest
from twine.commands import upload
def test_find_dists_expands_globs():
files = sorted(upload.find_dists(['twine/__*.py']))
expected = ['twine/__init__.py', 'twine/__main__.py']
assert expected == files
def test_find_dists_errors_on_invalid_globs():
with pytest.raises(ValueError):
upload.find_dists(['twine/*.rb'])
def test_find_dists_handles_real_files():
expected = ['twine/__init__.py', 'twine/__main__.py', 'twine/cli.py',
'twine/utils.py', 'twine/wheel.py']
files = upload.find_dists(expected)
assert expected == files
|
Add tests for new twine-upload functionality
|
Add tests for new twine-upload functionality
|
Python
|
apache-2.0
|
pypa/twine,warner/twine,reinout/twine,sigmavirus24/twine,beni55/twine,dstufft/twine,jamesblunt/twine,mhils/twine
|
Add tests for new twine-upload functionality
|
import pytest
from twine.commands import upload
def test_find_dists_expands_globs():
files = sorted(upload.find_dists(['twine/__*.py']))
expected = ['twine/__init__.py', 'twine/__main__.py']
assert expected == files
def test_find_dists_errors_on_invalid_globs():
with pytest.raises(ValueError):
upload.find_dists(['twine/*.rb'])
def test_find_dists_handles_real_files():
expected = ['twine/__init__.py', 'twine/__main__.py', 'twine/cli.py',
'twine/utils.py', 'twine/wheel.py']
files = upload.find_dists(expected)
assert expected == files
|
<commit_before><commit_msg>Add tests for new twine-upload functionality<commit_after>
|
import pytest
from twine.commands import upload
def test_find_dists_expands_globs():
files = sorted(upload.find_dists(['twine/__*.py']))
expected = ['twine/__init__.py', 'twine/__main__.py']
assert expected == files
def test_find_dists_errors_on_invalid_globs():
with pytest.raises(ValueError):
upload.find_dists(['twine/*.rb'])
def test_find_dists_handles_real_files():
expected = ['twine/__init__.py', 'twine/__main__.py', 'twine/cli.py',
'twine/utils.py', 'twine/wheel.py']
files = upload.find_dists(expected)
assert expected == files
|
Add tests for new twine-upload functionalityimport pytest
from twine.commands import upload
def test_find_dists_expands_globs():
files = sorted(upload.find_dists(['twine/__*.py']))
expected = ['twine/__init__.py', 'twine/__main__.py']
assert expected == files
def test_find_dists_errors_on_invalid_globs():
with pytest.raises(ValueError):
upload.find_dists(['twine/*.rb'])
def test_find_dists_handles_real_files():
expected = ['twine/__init__.py', 'twine/__main__.py', 'twine/cli.py',
'twine/utils.py', 'twine/wheel.py']
files = upload.find_dists(expected)
assert expected == files
|
<commit_before><commit_msg>Add tests for new twine-upload functionality<commit_after>import pytest
from twine.commands import upload
def test_find_dists_expands_globs():
files = sorted(upload.find_dists(['twine/__*.py']))
expected = ['twine/__init__.py', 'twine/__main__.py']
assert expected == files
def test_find_dists_errors_on_invalid_globs():
with pytest.raises(ValueError):
upload.find_dists(['twine/*.rb'])
def test_find_dists_handles_real_files():
expected = ['twine/__init__.py', 'twine/__main__.py', 'twine/cli.py',
'twine/utils.py', 'twine/wheel.py']
files = upload.find_dists(expected)
assert expected == files
|
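Read together, the three tests above pin down find_dists: expand globs, raise ValueError when a pattern matches nothing, and pass real paths through untouched. A behaviourally equivalent sketch (hypothetical, not twine's actual implementation):

import glob
import os

def find_dists(patterns):
    dists = []
    for pattern in patterns:
        if os.path.exists(pattern):
            # A literal path is kept as-is.
            dists.append(pattern)
            continue
        matches = glob.glob(pattern)
        if not matches:
            raise ValueError("Cannot find file or expand pattern: %r" % pattern)
        dists.extend(matches)
    return dists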
|
20745f81d89efd48b83d448bef60bf809999d32e
|
testing/test_storm_f.py
|
testing/test_storm_f.py
|
#! /usr/bin/env python
#
# Tests for the Fortran version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Globals
data_dir = os.path.join(os.getcwd(), 'testing', 'data')
f_dir = os.path.join(os.getcwd(), 'f')
input_file = 'wind.in'
output_files = ('WDIR.DATA', 'WINDX.DATA', 'WINDY.DATA')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('Fortran tests:')
os.chdir(f_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'clean'])
for fn in output_files: os.remove(fn)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), f_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the three named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
Add unit tests for Fortran version of `storm`
|
Add unit tests for Fortran version of `storm`
I chose to use `nose` for testing because it's convenient, and
I'm treating the original version of `storm` as a black box.
|
Python
|
mit
|
mdpiper/storm,csdms-contrib/storm,csdms-contrib/storm,mdpiper/storm
|
Add unit tests for Fortran version of `storm`
I chose to use `nose` for testing because it's convenient, and
I'm treating the original version of `storm` as a black box.
|
#! /usr/bin/env python
#
# Tests for the Fortran version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Globals
data_dir = os.path.join(os.getcwd(), 'testing', 'data')
f_dir = os.path.join(os.getcwd(), 'f')
input_file = 'wind.in'
output_files = ('WDIR.DATA', 'WINDX.DATA', 'WINDY.DATA')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('Fortran tests:')
os.chdir(f_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'clean'])
for fn in output_files: os.remove(fn)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), f_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the three named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
<commit_before><commit_msg>Add unit tests for Fortran version of `storm`
I chose to use `nose` for testing because it's convenient, and
I'm treating the original version of `storm` as a black box.<commit_after>
|
#! /usr/bin/env python
#
# Tests for the Fortran version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Globals
data_dir = os.path.join(os.getcwd(), 'testing', 'data')
f_dir = os.path.join(os.getcwd(), 'f')
input_file = 'wind.in'
output_files = ('WDIR.DATA', 'WINDX.DATA', 'WINDY.DATA')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('Fortran tests:')
os.chdir(f_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'clean'])
for fn in output_files: os.remove(fn)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), f_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the three named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
Add unit tests for Fortran version of `storm`
I chose to use `nose` for testing because it's convenient, and
I'm treating the original version of `storm` as a black box.#! /usr/bin/env python
#
# Tests for the Fortran version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Globals
data_dir = os.path.join(os.getcwd(), 'testing', 'data')
f_dir = os.path.join(os.getcwd(), 'f')
input_file = 'wind.in'
output_files = ('WDIR.DATA', 'WINDX.DATA', 'WINDY.DATA')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('Fortran tests:')
os.chdir(f_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'clean'])
for fn in output_files: os.remove(fn)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), f_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the three named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
<commit_before><commit_msg>Add unit tests for Fortran version of `storm`
I chose to use `nose` for testing because it's convenient, and
I'm treating the original version of `storm` as a black box.<commit_after>#! /usr/bin/env python
#
# Tests for the Fortran version of `storm`.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import shutil
from subprocess import call
# Globals
data_dir = os.path.join(os.getcwd(), 'testing', 'data')
f_dir = os.path.join(os.getcwd(), 'f')
input_file = 'wind.in'
output_files = ('WDIR.DATA', 'WINDX.DATA', 'WINDY.DATA')
output_file_lengths = (100, 104, 104)
# Fixtures -------------------------------------------------------------
def setup_module():
'''
Called before any tests are performed.
'''
print('Fortran tests:')
os.chdir(f_dir)
def teardown_module():
'''
Called after all tests have completed.
'''
call(['make', 'clean'])
for fn in output_files: os.remove(fn)
def setup():
'''
Called at start of any test using it @with_setup()
'''
shutil.copy(os.path.join(data_dir, input_file), f_dir)
def teardown():
'''
Called at end of any test using it @with_setup()
'''
os.remove(input_file)
# Tests ----------------------------------------------------------------
def test_compile():
'''
Test whether `storm` compiles
'''
call(['make'])
def test_no_input_file():
'''
Check that storm fails without input file
'''
r = call(['./storm'])
assert_not_equal(r, 0)
@with_setup(setup, teardown)
def test_output_files_exist():
'''
Test for creation of the three named output files
'''
r = call(['./storm'])
for fname in output_files:
assert_true(os.path.exists(fname))
@with_setup(setup, teardown)
def test_output_file_lengths():
'''
Test the lengths of the three named output files
'''
r = call(['./storm'])
for i in range(len(output_files)):
n_lines = sum(1 for line in open(output_files[i]))
assert_equal(n_lines, output_file_lengths[i])
|
|
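The @with_setup pairs above are nose-specific; under pytest the same copy-in/remove-after dance becomes a fixture. A sketch reusing the module globals (data_dir, input_file, f_dir, output_files) and imports defined above (not part of the commit):

import pytest

@pytest.fixture
def wind_input():
    """Provide wind.in beside the binary for one test, then clean up."""
    shutil.copy(os.path.join(data_dir, input_file), f_dir)
    yield
    os.remove(input_file)

def test_output_files_exist(wind_input):
    call(['./storm'])
    for fname in output_files:
        assert os.path.exists(fname)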
8006a9afbffc1636702802cf5613ba0aaf1c71ec
|
qcfractal/alembic/versions/469ece903d76_migrate_provenance_to_not_null.py
|
qcfractal/alembic/versions/469ece903d76_migrate_provenance_to_not_null.py
|
"""migrate provenance to not null
Revision ID: 469ece903d76
Revises: 6b07e9a3589d
Create Date: 2021-05-02 09:48:57.061825
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
# revision identifiers, used by Alembic.
revision = "469ece903d76"
down_revision = "6b07e9a3589d"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(sa.text("UPDATE molecule SET connectivity = null where connectivity::text = '[]'"))
conn.execute(
sa.text(
"UPDATE result SET properties = properties::jsonb - 'mp2_total_correlation_energy' || jsonb_build_object('mp2_correlation_energy', properties->'mp2_total_correlation_energy') WHERE properties::jsonb ? 'mp2_total_correlation_energy'"
)
)
def downgrade():
pass
|
Make fields of provenance not null, and fix other validation issues
|
Make fields of provenance not null, and fix other validation issues
|
Python
|
bsd-3-clause
|
psi4/mongo_qcdb,psi4/mongo_qcdb
|
Make fields of provenance not null, and fix other validation issues
|
"""migrate provenance to not null
Revision ID: 469ece903d76
Revises: 6b07e9a3589d
Create Date: 2021-05-02 09:48:57.061825
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
# revision identifiers, used by Alembic.
revision = "469ece903d76"
down_revision = "6b07e9a3589d"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(sa.text("UPDATE molecule SET connectivity = null where connectivity::text = '[]'"))
conn.execute(
sa.text(
"UPDATE result SET properties = properties::jsonb - 'mp2_total_correlation_energy' || jsonb_build_object('mp2_correlation_energy', properties->'mp2_total_correlation_energy') WHERE properties::jsonb ? 'mp2_total_correlation_energy'"
)
)
def downgrade():
pass
|
<commit_before><commit_msg>Make fields of provenance not null, and fix other validation issues<commit_after>
|
"""migrate provenance to not null
Revision ID: 469ece903d76
Revises: 6b07e9a3589d
Create Date: 2021-05-02 09:48:57.061825
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
# revision identifiers, used by Alembic.
revision = "469ece903d76"
down_revision = "6b07e9a3589d"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(sa.text("UPDATE molecule SET connectivity = null where connectivity::text = '[]'"))
conn.execute(
sa.text(
"UPDATE result SET properties = properties::jsonb - 'mp2_total_correlation_energy' || jsonb_build_object('mp2_correlation_energy', properties->'mp2_total_correlation_energy') WHERE properties::jsonb ? 'mp2_total_correlation_energy'"
)
)
def downgrade():
pass
|
Make fields of provenance not null, and fix other validation issues"""migrate provenance to not null
Revision ID: 469ece903d76
Revises: 6b07e9a3589d
Create Date: 2021-05-02 09:48:57.061825
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
# revision identifiers, used by Alembic.
revision = "469ece903d76"
down_revision = "6b07e9a3589d"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(sa.text("UPDATE molecule SET connectivity = null where connectivity::text = '[]'"))
conn.execute(
sa.text(
"UPDATE result SET properties = properties::jsonb - 'mp2_total_correlation_energy' || jsonb_build_object('mp2_correlation_energy', properties->'mp2_total_correlation_energy') WHERE properties::jsonb ? 'mp2_total_correlation_energy'"
)
)
def downgrade():
pass
|
<commit_before><commit_msg>Make fields of provenance not null, and fix other validation issues<commit_after>"""migrate provenance to not null
Revision ID: 469ece903d76
Revises: 6b07e9a3589d
Create Date: 2021-05-02 09:48:57.061825
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm.session import Session
# revision identifiers, used by Alembic.
revision = "469ece903d76"
down_revision = "6b07e9a3589d"
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE base_result SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"creator\":\"\"}' where (provenance->'creator')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"routine\":\"\"}' where (provenance->'routine')::text = 'null'"
)
)
conn.execute(
sa.text(
"UPDATE molecule SET provenance = provenance::jsonb || '{\"version\":\"\"}' where (provenance->'version')::text = 'null'"
)
)
conn.execute(sa.text("UPDATE molecule SET connectivity = null where connectivity::text = '[]'"))
conn.execute(
sa.text(
"UPDATE result SET properties = properties::jsonb - 'mp2_total_correlation_energy' || jsonb_build_object('mp2_correlation_energy', properties->'mp2_total_correlation_energy') WHERE properties::jsonb ? 'mp2_total_correlation_energy'"
)
)
def downgrade():
pass
|
|
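The provenance::jsonb || '{"creator":""}' pattern above overwrites a JSON null by merging a one-key object. PostgreSQL's jsonb_set expresses the same update a bit more directly; an equivalent formulation for one of the statements (sketch only):

conn.execute(
    sa.text(
        "UPDATE base_result "
        "SET provenance = jsonb_set(provenance::jsonb, '{creator}', '\"\"') "
        "WHERE provenance->'creator' = 'null'::jsonb"
    )
)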
e804e2258183d9986f5756327f875735c8234924
|
apps/uploads/forms.py
|
apps/uploads/forms.py
|
#
# Copyright (C) 2017 Maha Farhat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Any useful django forms. For now just the testing form.
"""
from django.forms import Form
from .fields import UploadField
class TestUploadForm(Form):
test_files = UploadField(
extensions=['.vcf'],
)
|
Add a test form object for testing
|
Add a test form object for testing
|
Python
|
agpl-3.0
|
IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site,IQSS/gentb-site
|
Add a test form object for testing
|
#
# Copyright (C) 2017 Maha Farhat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Any useful Django forms. For now, just the testing form.
"""
from django.forms import Form
from .fields import UploadField
class TestUploadForm(Form):
test_files = UploadField(
extensions=['.vcf'],
)
|
<commit_before><commit_msg>Add a test form object for testing<commit_after>
|
#
# Copyright (C) 2017 Maha Farhat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Any useful Django forms. For now, just the testing form.
"""
from django.forms import Form
from .fields import UploadField
class TestUploadForm(Form):
test_files = UploadField(
extensions=['.vcf'],
)
|
Add a test form object for testing#
# Copyright (C) 2017 Maha Farhat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Any useful Django forms. For now, just the testing form.
"""
from django.forms import Form
from .fields import UploadField
class TestUploadForm(Form):
test_files = UploadField(
extensions=['.vcf'],
)
|
<commit_before><commit_msg>Add a test form object for testing<commit_after>#
# Copyright (C) 2017 Maha Farhat
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Any useful django forms. Forn now just the testing form.
"""
from django.forms import Form
from .fields import UploadField
class TestUploadForm(Form):
test_files = UploadField(
extensions=['.vcf'],
)
|
|
ba92b42f0729b68648aa485e02314e2a7a7997cb
|
vigir_ltl_specification/src/vigir_ltl_specification/test_activation_completion.py
|
vigir_ltl_specification/src/vigir_ltl_specification/test_activation_completion.py
|
#!/usr/bin/env python
from activation_outcomes import *
import unittest
class FormulaGenerationTests(unittest.TestCase):
# =========================================================================
# Test the generation of ActivationOutcomes formulas
# =========================================================================
def setUp(self):
"""Gets called before every test case."""
self.sys_props = ['dance', 'sleep']
self.outcomes = ['completed', 'failed', 'preempted']
print("Setting up a new formula test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.sys_props
del self.outcomes
def test_base_class(self):
"""..."""
formula = ActivationOutcomesFormula(self.sys_props, self.outcomes)
# Test whether the obvious things are working as expected
self.assertSetEqual(set(self.outcomes), set(formula.outcomes))
self.assertSetEqual(set(self.sys_props), set(formula.sys_props))
self.assertSetEqual(set(), set(formula.env_props))
self.assertSetEqual(set(), set(formula.formulas))
# Test whether the activation propositions are generated correctly
expected_act_props = ['dance_a', 'sleep_a']
self.assertSetEqual(set(expected_act_props), set(formula.activation))
# Test whether the outcome propositions are generated correctly
expected_out_props = {'dance': ['dance_c', 'dance_f', 'dance_p'],
'sleep': ['sleep_c', 'sleep_f', 'sleep_p']}
self.assertDictEqual(expected_out_props, formula.outcome_props)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
Add test suite for a-o formulas
|
[vigir_ltl_specification] Add test suite for a-o formulas
|
Python
|
bsd-3-clause
|
team-vigir/vigir_behavior_synthesis,team-vigir/vigir_behavior_synthesis
|
[vigir_ltl_specification] Add test suite for a-o formulas
|
#!/usr/bin/env python
from activation_outcomes import *
import unittest
class FormulaGenerationTests(unittest.TestCase):
# =========================================================================
# Test the generation of ActivationOutcomes formulas
# =========================================================================
def setUp(self):
"""Gets called before every test case."""
self.sys_props = ['dance', 'sleep']
self.outcomes = ['completed', 'failed', 'preempted']
print("Setting up a new formula test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.sys_props
del self.outcomes
def test_base_class(self):
"""..."""
formula = ActivationOutcomesFormula(self.sys_props, self.outcomes)
# Test whether the obvious things are working as expected
self.assertSetEqual(set(self.outcomes), set(formula.outcomes))
self.assertSetEqual(set(self.sys_props), set(formula.sys_props))
self.assertSetEqual(set(), set(formula.env_props))
self.assertSetEqual(set(), set(formula.formulas))
# Test whether the activation propositions are generated correctly
expected_act_props = ['dance_a', 'sleep_a']
self.assertSetEqual(set(expected_act_props), set(formula.activation))
# Test whether the outcome propositions are generated correctly
expected_out_props = {'dance': ['dance_c', 'dance_f', 'dance_p'],
'sleep': ['sleep_c', 'sleep_f', 'sleep_p']}
self.assertDictEqual(expected_out_props, formula.outcome_props)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
<commit_before><commit_msg>[vigir_ltl_specification] Add test suite for a-o formulas<commit_after>
|
#!/usr/bin/env python
from activation_outcomes import *
import unittest
class FormulaGenerationTests(unittest.TestCase):
# =========================================================================
# Test the generation of ActivationOutcomes formulas
# =========================================================================
def setUp(self):
"""Gets called before every test case."""
self.sys_props = ['dance', 'sleep']
self.outcomes = ['completed', 'failed', 'preempted']
print("Setting up a new formula test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.sys_props
del self.outcomes
def test_base_class(self):
"""..."""
formula = ActivationOutcomesFormula(self.sys_props, self.outcomes)
# Test whether the obvious things are working as expected
self.assertSetEqual(set(self.outcomes), set(formula.outcomes))
self.assertSetEqual(set(self.sys_props), set(formula.sys_props))
self.assertSetEqual(set(), set(formula.env_props))
self.assertSetEqual(set(), set(formula.formulas))
# Test whether the activation propositions are generated correctly
expected_act_props = ['dance_a', 'sleep_a']
self.assertSetEqual(set(expected_act_props), set(formula.activation))
# Test whether the outcome propositions are generated correctly
expected_out_props = {'dance': ['dance_c', 'dance_f', 'dance_p'],
'sleep': ['sleep_c', 'sleep_f', 'sleep_p']}
self.assertDictEqual(expected_out_props, formula.outcome_props)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
[vigir_ltl_specification] Add test suite for a-o formulas#!/usr/bin/env python
from activation_outcomes import *
import unittest
class FormulaGenerationTests(unittest.TestCase):
# =========================================================================
# Test the generation of ActivationOutcomes formulas
# =========================================================================
def setUp(self):
"""Gets called before every test case."""
self.sys_props = ['dance', 'sleep']
self.outcomes = ['completed', 'failed', 'preempted']
print("Setting up a new formula test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.sys_props
del self.outcomes
def test_base_class(self):
"""..."""
formula = ActivationOutcomesFormula(self.sys_props, self.outcomes)
# Test whether the obvious things are working as expected
self.assertSetEqual(set(self.outcomes), set(formula.outcomes))
self.assertSetEqual(set(self.sys_props), set(formula.sys_props))
self.assertSetEqual(set(), set(formula.env_props))
self.assertSetEqual(set(), set(formula.formulas))
# Test whether the activation propositions are generated correctly
expected_act_props = ['dance_a', 'sleep_a']
self.assertSetEqual(set(expected_act_props), set(formula.activation))
# Test whether the outcome propositions are generated correctly
expected_out_props = {'dance': ['dance_c', 'dance_f', 'dance_p'],
'sleep': ['sleep_c', 'sleep_f', 'sleep_p']}
self.assertDictEqual(expected_out_props, formula.outcome_props)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
|
<commit_before><commit_msg>[vigir_ltl_specification] Add test suite for a-o formulas<commit_after>#!/usr/bin/env python
from activation_outcomes import *
import unittest
class FormulaGenerationTests(unittest.TestCase):
# =========================================================================
# Test the generation of ActivationOutcomes formulas
# =========================================================================
def setUp(self):
"""Gets called before every test case."""
self.sys_props = ['dance', 'sleep']
self.outcomes = ['completed', 'failed', 'preempted']
print("Setting up a new formula test.")
def tearDown(self):
"""Gets called after every test case."""
print("Cleaning up after latest test ...")
del self.sys_props
del self.outcomes
def test_base_class(self):
"""..."""
formula = ActivationOutcomesFormula(self.sys_props, self.outcomes)
# Test whether the obvious things are working as expected
self.assertSetEqual(set(self.outcomes), set(formula.outcomes))
self.assertSetEqual(set(self.sys_props), set(formula.sys_props))
self.assertSetEqual(set(), set(formula.env_props))
self.assertSetEqual(set(), set(formula.formulas))
# Test whether the activation propositions are generated correctly
expected_act_props = ['dance_a', 'sleep_a']
self.assertSetEqual(set(expected_act_props), set(formula.activation))
# Test whether the outcome propositions are generated correctly
expected_out_props = {'dance': ['dance_c', 'dance_f', 'dance_p'],
'sleep': ['sleep_c', 'sleep_f', 'sleep_p']}
self.assertDictEqual(expected_out_props, formula.outcome_props)
# =============================================================================
# Entry point
# =============================================================================
if __name__ == '__main__':
# Run all tests
unittest.main()
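The expected values in test_base_class imply a naming convention: each system proposition x gets an activation proposition x_a, plus one outcome proposition per outcome, suffixed with the outcome's first letter. A standalone sketch of that convention (the real ActivationOutcomesFormula implementation is not shown here):
def outcome_props(sys_props, outcomes):
    # e.g. 'dance' + ['completed', 'failed', 'preempted'] -> ['dance_c', 'dance_f', 'dance_p']
    return {p: ['{}_{}'.format(p, o[0]) for o in outcomes] for p in sys_props}

assert outcome_props(['dance'], ['completed', 'failed'])['dance'] == ['dance_c', 'dance_f']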
|
|
31f2b8407a4a369dd79bf73f8a838d2bb22d2c19
|
python/example_code/iam/list_users_with_resource.py
|
python/example_code/iam/list_users_with_resource.py
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
Add example to list IAM users with service resource
|
Add example to list IAM users with service resource
|
Python
|
apache-2.0
|
awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples,awsdocs/aws-doc-sdk-examples
|
Add example to list IAM users with service resource
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
<commit_before><commit_msg>Add example to list IAM users with service resource<commit_after>
|
# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
Add example to list IAM users with service resource# Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
|
<commit_before><commit_msg>Add example to list IAM users with service resource<commit_after># Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
# Create an IAM service resource
resource = boto3.resource('iam')
# Get an iterable of all users
users = resource.users.all()
# Print details for each user
for user in users:
print("User {} created on {}".format(
user.user_name,
user.create_date
))
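The resource's users.all() transparently pages through results. An equivalent sketch with the low-level client, useful when you want the raw API responses, looks like this:
import boto3

client = boto3.client('iam')
paginator = client.get_paginator('list_users')  # handles the IsTruncated/Marker loop
for page in paginator.paginate():
    for user in page['Users']:
        print("User {} created on {}".format(user['UserName'], user['CreateDate']))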
|
|
7d46787474dbc1902cf35fad0ad57c2336ef22f8
|
CodeFights/kthPermutation.py
|
CodeFights/kthPermutation.py
|
#!/usr/local/bin/python
# Code Fights Kth Permutation Problem
from itertools import permutations
def kthPermutation(numbers, k):
return list(list(permutations(numbers, len(numbers)))[k - 1])
def main():
tests = [
[[1, 2, 3, 4, 5], 4, [1, 2, 4, 5, 3]],
[[1, 2], 1, [1, 2]],
[[11, 22, 31, 43, 56], 120, [56, 43, 31, 22, 11]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 238,
[14, 25, 27, 29, 40, 239, 100, 55, 89, 30]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 3628800,
[239, 100, 89, 55, 40, 30, 29, 27, 25, 14]],
[[50, 100, 123, 789], 15, [123, 100, 50, 789]]
]
for t in tests:
res = kthPermutation(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: kthPermutation({}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: kthPermutation({}, {}) returned {}, "
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights kth permutation problem
|
Solve Code Fights kth permutation problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights kth permutation problem
|
#!/usr/local/bin/python
# Code Fights Kth Permutation Problem
from itertools import permutations
def kthPermutation(numbers, k):
return list(list(permutations(numbers, len(numbers)))[k - 1])
def main():
tests = [
[[1, 2, 3, 4, 5], 4, [1, 2, 4, 5, 3]],
[[1, 2], 1, [1, 2]],
[[11, 22, 31, 43, 56], 120, [56, 43, 31, 22, 11]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 238,
[14, 25, 27, 29, 40, 239, 100, 55, 89, 30]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 3628800,
[239, 100, 89, 55, 40, 30, 29, 27, 25, 14]],
[[50, 100, 123, 789], 15, [123, 100, 50, 789]]
]
for t in tests:
res = kthPermutation(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: kthPermutation({}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: kthPermutation({}, {}) returned {}, "
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights kth permutation problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Kth Permutation Problem
from itertools import permutations
def kthPermutation(numbers, k):
return list(list(permutations(numbers, len(numbers)))[k - 1])
def main():
tests = [
[[1, 2, 3, 4, 5], 4, [1, 2, 4, 5, 3]],
[[1, 2], 1, [1, 2]],
[[11, 22, 31, 43, 56], 120, [56, 43, 31, 22, 11]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 238,
[14, 25, 27, 29, 40, 239, 100, 55, 89, 30]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 3628800,
[239, 100, 89, 55, 40, 30, 29, 27, 25, 14]],
[[50, 100, 123, 789], 15, [123, 100, 50, 789]]
]
for t in tests:
res = kthPermutation(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: kthPermutation({}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: kthPermutation({}, {}) returned {}, "
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights kth permutation problem#!/usr/local/bin/python
# Code Fights Kth Permutation Problem
from itertools import permutations
def kthPermutation(numbers, k):
return list(list(permutations(numbers, len(numbers)))[k - 1])
def main():
tests = [
[[1, 2, 3, 4, 5], 4, [1, 2, 4, 5, 3]],
[[1, 2], 1, [1, 2]],
[[11, 22, 31, 43, 56], 120, [56, 43, 31, 22, 11]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 238,
[14, 25, 27, 29, 40, 239, 100, 55, 89, 30]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 3628800,
[239, 100, 89, 55, 40, 30, 29, 27, 25, 14]],
[[50, 100, 123, 789], 15, [123, 100, 50, 789]]
]
for t in tests:
res = kthPermutation(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: kthPermutation({}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: kthPermutation({}, {}) returned {}, "
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights kth permutation problem<commit_after>#!/usr/local/bin/python
# Code Fights Kth Permutation Problem
from itertools import permutations
def kthPermutation(numbers, k):
return list(list(permutations(numbers, len(numbers)))[k - 1])
def main():
tests = [
[[1, 2, 3, 4, 5], 4, [1, 2, 4, 5, 3]],
[[1, 2], 1, [1, 2]],
[[11, 22, 31, 43, 56], 120, [56, 43, 31, 22, 11]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 238,
[14, 25, 27, 29, 40, 239, 100, 55, 89, 30]],
[[14, 25, 27, 29, 30, 40, 55, 89, 100, 239], 3628800,
[239, 100, 89, 55, 40, 30, 29, 27, 25, 14]],
[[50, 100, 123, 789], 15, [123, 100, 50, 789]]
]
for t in tests:
res = kthPermutation(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: kthPermutation({}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: kthPermutation({}, {}) returned {}, "
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
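Materializing all n! permutations works at these test sizes but grows very fast. A hedged alternative computes the k-th permutation directly via the factorial number system; like the itertools version, it emits permutations in the order of the input sequence, which is ascending in every test above:
from math import factorial

def kth_permutation_direct(numbers, k):
    pool, result, k = list(numbers), [], k - 1
    for i in range(len(pool), 0, -1):
        idx, k = divmod(k, factorial(i - 1))  # next digit in the factorial number system
        result.append(pool.pop(idx))
    return result

# kth_permutation_direct([1, 2, 3, 4, 5], 4) -> [1, 2, 4, 5, 3], matching the first test.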
|
|
aab8426c7f917315c6d08dd4389b6c5bbcd53441
|
change_line_breaks.py
|
change_line_breaks.py
|
"""
Reformat a single entry fasta file.
E.g. useful if a fasta file contains a sequence in a single long
line. The Biopython SeqIO writer will generate a sequence with
proper line lenght of 60 character.s
"""
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser()
parser.add_argument("input_fasta")
parser.add_argument("output_fasta")
args = parser.parse_args()
SeqIO.write(SeqIO.read(args.input_fasta, "fasta"), args.output_fasta, "fasta")
|
Add fasta line break script
|
Add fasta line break script
|
Python
|
isc
|
konrad/kuf_bio_scripts
|
Add fasta line break script
|
"""
Reformat a single entry fasta file.
E.g. useful if a fasta file contains a sequence in a single long
line. The Biopython SeqIO writer will generate a sequence with
proper line length of 60 characters.
"""
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser()
parser.add_argument("input_fasta")
parser.add_argument("output_fasta")
args = parser.parse_args()
SeqIO.write(SeqIO.read(args.input_fasta, "fasta"), args.output_fasta, "fasta")
|
<commit_before><commit_msg>Add fasta line break script<commit_after>
|
"""
Reformat a single entry fasta file.
E.g. useful if a fasta file contains a sequence in a single long
line. The Biopython SeqIO writer will generate a sequence with
proper line length of 60 characters.
"""
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser()
parser.add_argument("input_fasta")
parser.add_argument("output_fasta")
args = parser.parse_args()
SeqIO.write(SeqIO.read(args.input_fasta, "fasta"), args.output_fasta, "fasta")
|
Add fasta line break script"""
Reformat a single entry fasta file.
E.g. useful if a fasta file contains a sequence in a single long
line. The Biopython SeqIO writer will generate a sequence with
proper line length of 60 characters.
"""
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser()
parser.add_argument("input_fasta")
parser.add_argument("output_fasta")
args = parser.parse_args()
SeqIO.write(SeqIO.read(args.input_fasta, "fasta"), args.output_fasta, "fasta")
|
<commit_before><commit_msg>Add fasta line break script<commit_after>"""
Reformat a single entry fasta file.
E.g. useful if a fasta file contains a sequence in a single long
line. The Biopython SeqIO writer will generate a sequence with
proper line lenght of 60 character.s
"""
import argparse
from Bio import SeqIO
parser = argparse.ArgumentParser()
parser.add_argument("input_fasta")
parser.add_argument("output_fasta")
args = parser.parse_args()
SeqIO.write(SeqIO.read(args.input_fasta, "fasta"), args.output_fasta, "fasta")
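Note that SeqIO.read raises an error if the file holds more than one record. A hedged variant that rewraps every record in a multi-entry fasta — SeqIO.write accepts the iterator from SeqIO.parse directly:
from Bio import SeqIO

def rewrap_all(input_fasta, output_fasta):
    # Same 60-character wrapping, applied to every record in the file.
    SeqIO.write(SeqIO.parse(input_fasta, "fasta"), output_fasta, "fasta")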
|
|
3ea556af950be81db5b1eec7a78429e286715688
|
unit_tests/test_template.py
|
unit_tests/test_template.py
|
#!/usr/bin/python
""" Template for writing new test classes.
"""
import unittest
import sys, os, re
# from multiqc import ...
# This line allows the tests to run if you just naively run this script.
# But the preferred way is to use run_tests.sh
sys.path.insert(0,'../MultiQC')
class T(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
Add a template for making new tests.
|
Add a template for making new tests.
|
Python
|
mit
|
ewels/MultiQC_TestData,ewels/MultiQC_TestData,ewels/MultiQC_TestData,ewels/MultiQC_TestData,ewels/MultiQC_TestData
|
Add a template for making new tests.
|
#!/usr/bin/python
""" Template for writing new test classes.
"""
import unittest
import sys, os, re
# from multiqc import ...
# This line allows the tests to run if you just naively run this script.
# But the preferred way is to use run_tests.sh
sys.path.insert(0,'../MultiQC')
class T(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a template for making new tests.<commit_after>
|
#!/usr/bin/python
""" Template for writing new test classes.
"""
import unittest
import sys, os, re
# from multiqc import ...
# This line allows the tests to run if you just naively run this script.
# But the preferred way is to use run_tests.sh
sys.path.insert(0,'../MultiQC')
class T(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
Add a template for making new tests.#!/usr/bin/python
""" Template for writing new test classes.
"""
import unittest
import sys, os, re
# from multiqc import ...
# This line allows the tests to run if you just naively run this script.
# But the preferred way is to use run_tests.sh
sys.path.insert(0,'../MultiQC')
class T(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a template for making new tests.<commit_after>#!/usr/bin/python
""" Template for writing new test classes.
"""
import unittest
import sys, os, re
# from multiqc import ...
# This line allows the tests to run if you just naively run this script.
# But the preferred way is to use run_tests.sh
sys.path.insert(0,'../MultiQC')
class T(unittest.TestCase):
def setUp(self):
pass
def test_something(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
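As a purely illustrative filled-in case (no MultiQC APIs are touched; the regex assertion is invented only to show the shape of a concrete test body built from this template):
class ExampleT(unittest.TestCase):
    def test_version_like_string(self):
        # Invented assertion for illustration; uses the template's `re` import.
        self.assertTrue(re.match(r'^v\d+\.\d+', 'v1.0'))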
|