| column | type | length / classes |
|---|---|---|
| commit | string | 40 |
| old_file | string | 4–118 |
| new_file | string | 4–118 |
| old_contents | string | 0–2.94k |
| new_contents | string | 1–4.43k |
| subject | string | 15–444 |
| message | string | 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | 5–43.2k |
| prompt | string | 17–4.58k |
| response | string | 1–4.43k |
| prompt_tagged | string | 58–4.62k |
| response_tagged | string | 1–4.43k |
| text | string | 132–7.29k |
| text_tagged | string | 173–7.33k |
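Each record below appears flattened in the column order above, one field per block with `|` lines as separators. Programmatically, the same rows are easier to inspect column by column; a minimal sketch, assuming the data is published on the Hugging Face Hub (the name `user/commit-dataset` is a placeholder, not the real identifier):

```python
# Sketch: load the dataset and inspect one record, column by column.
# Assumes the `datasets` library is installed and the dataset lives on the
# Hugging Face Hub under a placeholder name; substitute the real one.
from datasets import load_dataset

ds = load_dataset("user/commit-dataset", split="train")  # placeholder name

row = ds[0]
for column in ("commit", "old_file", "new_file", "subject", "lang", "license"):
    print(f"{column}: {row[column]}")

# The long text columns hold full file bodies, so print only their sizes.
for column in ("old_contents", "new_contents", "prompt", "response",
               "text", "text_tagged"):
    print(f"{column}: {len(row[column])} chars")
```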
23c15c3bdf2db8f23b23bef7eaaa9b6bbbe600d7
|
__init__.py
|
__init__.py
|
"""Tink package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from google3.third_party.tink.python import aead
from google3.third_party.tink.python import key_manager
from google3.third_party.tink.python import tink_config
Aead = aead.Aead
KeyManager = key_manager.KeyManager
PrivateKeyManager = key_manager.PrivateKeyManager
del key_manager
|
Make tink a package and refactor aead into a package.
|
Make tink a package and refactor aead into a package.
PiperOrigin-RevId: 249473289
|
Python
|
apache-2.0
|
google/tink,google/tink,google/tink,google/tink,google/tink,google/tink,google/tink,google/tink
|
Make tink a package and refactor aead into a package.
PiperOrigin-RevId: 249473289
|
"""Tink package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from google3.third_party.tink.python import aead
from google3.third_party.tink.python import key_manager
from google3.third_party.tink.python import tink_config
Aead = aead.Aead
KeyManager = key_manager.KeyManager
PrivateKeyManager = key_manager.PrivateKeyManager
del key_manager
|
<commit_before><commit_msg>Make tink a package and refactor aead into a package.
PiperOrigin-RevId: 249473289<commit_after>
|
"""Tink package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from google3.third_party.tink.python import aead
from google3.third_party.tink.python import key_manager
from google3.third_party.tink.python import tink_config
Aead = aead.Aead
KeyManager = key_manager.KeyManager
PrivateKeyManager = key_manager.PrivateKeyManager
del key_manager
|
Make tink a package and refactor aead into a package.
PiperOrigin-RevId: 249473289"""Tink package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from google3.third_party.tink.python import aead
from google3.third_party.tink.python import key_manager
from google3.third_party.tink.python import tink_config
Aead = aead.Aead
KeyManager = key_manager.KeyManager
PrivateKeyManager = key_manager.PrivateKeyManager
del key_manager
|
<commit_before><commit_msg>Make tink a package and refactor aead into a package.
PiperOrigin-RevId: 249473289<commit_after>"""Tink package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import google_type_annotations
from __future__ import print_function
from google3.third_party.tink.python import aead
from google3.third_party.tink.python import key_manager
from google3.third_party.tink.python import tink_config
Aead = aead.Aead
KeyManager = key_manager.KeyManager
PrivateKeyManager = key_manager.PrivateKeyManager
del key_manager
|
|
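The first record makes the derived columns' template visible: `prompt` is the old contents (empty here) followed by the commit message, `response` is the new contents, `text` concatenates message and new contents with no separator (hence the fused `...RevId: 249473289"""Tink package."""`), and the `*_tagged` variants wrap the same pieces in `<commit_before>`, `<commit_msg>`, and `<commit_after>` sentinels. A rough sketch of that templating, inferred from the preview rows rather than taken from the dataset's actual build script:

```python
# Sketch: rebuild the derived columns from the raw ones, following the
# pattern visible in the preview rows (inferred, not the official recipe).
def derive_columns(old_contents: str, new_contents: str, message: str) -> dict:
    prompt_tagged = (
        f"<commit_before>{old_contents}<commit_msg>{message}<commit_after>"
    )
    return {
        "prompt": old_contents + message,  # old contents are empty in rows shown
        "response": new_contents,
        "text": message + new_contents,    # concatenated with no separator
        "prompt_tagged": prompt_tagged,
        "response_tagged": new_contents,
        "text_tagged": prompt_tagged + new_contents,
    }

# Applied to the first record (fields abbreviated):
cols = derive_columns(
    "", '"""Tink package."""\n...',
    "Make tink a package and refactor aead into a package.\n"
    "PiperOrigin-RevId: 249473289",
)
assert cols["text_tagged"].startswith("<commit_before><commit_msg>Make tink")
```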
1ef37363a55e8e2e98d70c8ee58d76fe34bc92b0
|
tests/test_util.py
|
tests/test_util.py
|
import os
from aiodownload.util import clean_filename, make_dirs, default_url_transform
def test_clean_filename():
sanitized_filename = clean_filename('français.txt')
assert sanitized_filename == 'francais.txt'
def test_make_dirs(tmpdir):
test_path = os.path.sep.join([tmpdir.strpath, 'test', 'make', 'dir'])
make_dirs(os.path.sep.join([test_path, 'mock.txt']))
assert os.path.isdir(test_path)
def test_default_url_transformation():
transformed_url = default_url_transform('https://httpbin.org/drip?duration=5&numbytes=5&code=200')
assert transformed_url == os.path.sep.join(['httpbin.org', 'drip_duration_5-numbytes_5-code_200'])
|
Add tests for utl module
|
Add tests for utl module
|
Python
|
mit
|
jelloslinger/aiodownload
|
Add tests for utl module
|
import os
from aiodownload.util import clean_filename, make_dirs, default_url_transform
def test_clean_filename():
sanitized_filename = clean_filename('français.txt')
assert sanitized_filename == 'francais.txt'
def test_make_dirs(tmpdir):
test_path = os.path.sep.join([tmpdir.strpath, 'test', 'make', 'dir'])
make_dirs(os.path.sep.join([test_path, 'mock.txt']))
assert os.path.isdir(test_path)
def test_default_url_transformation():
transformed_url = default_url_transform('https://httpbin.org/drip?duration=5&numbytes=5&code=200')
assert transformed_url == os.path.sep.join(['httpbin.org', 'drip_duration_5-numbytes_5-code_200'])
|
<commit_before><commit_msg>Add tests for utl module<commit_after>
|
import os
from aiodownload.util import clean_filename, make_dirs, default_url_transform
def test_clean_filename():
sanitized_filename = clean_filename('français.txt')
assert sanitized_filename == 'francais.txt'
def test_make_dirs(tmpdir):
test_path = os.path.sep.join([tmpdir.strpath, 'test', 'make', 'dir'])
make_dirs(os.path.sep.join([test_path, 'mock.txt']))
assert os.path.isdir(test_path)
def test_default_url_transformation():
transformed_url = default_url_transform('https://httpbin.org/drip?duration=5&numbytes=5&code=200')
assert transformed_url == os.path.sep.join(['httpbin.org', 'drip_duration_5-numbytes_5-code_200'])
|
Add tests for utl moduleimport os
from aiodownload.util import clean_filename, make_dirs, default_url_transform
def test_clean_filename():
sanitized_filename = clean_filename('français.txt')
assert sanitized_filename == 'francais.txt'
def test_make_dirs(tmpdir):
test_path = os.path.sep.join([tmpdir.strpath, 'test', 'make', 'dir'])
make_dirs(os.path.sep.join([test_path, 'mock.txt']))
assert os.path.isdir(test_path)
def test_default_url_transformation():
transformed_url = default_url_transform('https://httpbin.org/drip?duration=5&numbytes=5&code=200')
assert transformed_url == os.path.sep.join(['httpbin.org', 'drip_duration_5-numbytes_5-code_200'])
|
<commit_before><commit_msg>Add tests for utl module<commit_after>import os
from aiodownload.util import clean_filename, make_dirs, default_url_transform
def test_clean_filename():
sanitized_filename = clean_filename('français.txt')
assert sanitized_filename == 'francais.txt'
def test_make_dirs(tmpdir):
test_path = os.path.sep.join([tmpdir.strpath, 'test', 'make', 'dir'])
make_dirs(os.path.sep.join([test_path, 'mock.txt']))
assert os.path.isdir(test_path)
def test_default_url_transformation():
transformed_url = default_url_transform('https://httpbin.org/drip?duration=5&numbytes=5&code=200')
assert transformed_url == os.path.sep.join(['httpbin.org', 'drip_duration_5-numbytes_5-code_200'])
|
|
0333216c2054effbe68a4af6e38de80f3e52dc7b
|
sorting.py
|
sorting.py
|
def bubble_sort(arr_list):
# For bigO benchmarking
num_of_comparison = 0
num_of_exchanges = 0
for pass_num in range(len(arr_list) - 1, 0, -1):
for j in range(pass_num):
num_of_comparison += 1 # For bigO benchmarking
if arr_list[j] > arr_list[j + 1]:
arr_list[j], arr_list[j + 1] = arr_list[j + 1], arr_list[j]
num_of_exchanges += 1 # For bigO benchmarking
return '%s comparisons and %s exchanges.' % (num_of_comparison, num_of_exchanges) # For bigO benchmarking
if __name__ == '__main__':
import timeit
unsorted_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
sorted_list = [17, 20, 26, 31, 44, 54, 55, 77, 93]
sorting_algos = [bubble_sort,]
for sorting_algo in sorting_algos:
print('{}: {} sec, {}'.format(
sorting_algo.__name__,
timeit.timeit(
'%s(%s)' % (sorting_algo.__name__, unsorted_list),
setup='from __main__ import %s' % sorting_algo.__name__,
number=1000
),
sorting_algo([54, 26, 93, 17, 77, 31, 44, 55, 20])
)
)
|
Implement bubble sort algorithm with benchmarking
|
Implement bubble sort algorithm with benchmarking
|
Python
|
mit
|
andela-kerinoso/data_structures_algo
|
Implement bubble sort algorithm with benchmarking
|
def bubble_sort(arr_list):
# For bigO benchmarking
num_of_comparison = 0
num_of_exchanges = 0
for pass_num in range(len(arr_list) - 1, 0, -1):
for j in range(pass_num):
num_of_comparison += 1 # For bigO benchmarking
if arr_list[j] > arr_list[j + 1]:
arr_list[j], arr_list[j + 1] = arr_list[j + 1], arr_list[j]
num_of_exchanges += 1 # For bigO benchmarking
return '%s comparisons and %s exchanges.' % (num_of_comparison, num_of_exchanges) # For bigO benchmarking
if __name__ == '__main__':
import timeit
unsorted_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
sorted_list = [17, 20, 26, 31, 44, 54, 55, 77, 93]
sorting_algos = [bubble_sort,]
for sorting_algo in sorting_algos:
print('{}: {} sec, {}'.format(
sorting_algo.__name__,
timeit.timeit(
'%s(%s)' % (sorting_algo.__name__, unsorted_list),
setup='from __main__ import %s' % sorting_algo.__name__,
number=1000
),
sorting_algo([54, 26, 93, 17, 77, 31, 44, 55, 20])
)
)
|
<commit_before><commit_msg>Implement bubble sort algorithm with benchmarking<commit_after>
|
def bubble_sort(arr_list):
# For bigO benchmarking
num_of_comparison = 0
num_of_exchanges = 0
for pass_num in range(len(arr_list) - 1, 0, -1):
for j in range(pass_num):
num_of_comparison += 1 # For bigO benchmarking
if arr_list[j] > arr_list[j + 1]:
arr_list[j], arr_list[j + 1] = arr_list[j + 1], arr_list[j]
num_of_exchanges += 1 # For bigO benchmarking
return '%s comparisons and %s exchanges.' % (num_of_comparison, num_of_exchanges) # For bigO benchmarking
if __name__ == '__main__':
import timeit
unsorted_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
sorted_list = [17, 20, 26, 31, 44, 54, 55, 77, 93]
sorting_algos = [bubble_sort,]
for sorting_algo in sorting_algos:
print('{}: {} sec, {}'.format(
sorting_algo.__name__,
timeit.timeit(
'%s(%s)' % (sorting_algo.__name__, unsorted_list),
setup='from __main__ import %s' % sorting_algo.__name__,
number=1000
),
sorting_algo([54, 26, 93, 17, 77, 31, 44, 55, 20])
)
)
|
Implement bubble sort algorithm with benchmarkingdef bubble_sort(arr_list):
# For bigO benchmarking
num_of_comparison = 0
num_of_exchanges = 0
for pass_num in range(len(arr_list) - 1, 0, -1):
for j in range(pass_num):
num_of_comparison += 1 # For bigO benchmarking
if arr_list[j] > arr_list[j + 1]:
arr_list[j], arr_list[j + 1] = arr_list[j + 1], arr_list[j]
num_of_exchanges += 1 # For bigO benchmarking
return '%s comparisons and %s exchanges.' % (num_of_comparison, num_of_exchanges) # For bigO benchmarking
if __name__ == '__main__':
import timeit
unsorted_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
sorted_list = [17, 20, 26, 31, 44, 54, 55, 77, 93]
sorting_algos = [bubble_sort,]
for sorting_algo in sorting_algos:
print('{}: {} sec, {}'.format(
sorting_algo.__name__,
timeit.timeit(
'%s(%s)' % (sorting_algo.__name__, unsorted_list),
setup='from __main__ import %s' % sorting_algo.__name__,
number=1000
),
sorting_algo([54, 26, 93, 17, 77, 31, 44, 55, 20])
)
)
|
<commit_before><commit_msg>Implement bubble sort algorithm with benchmarking<commit_after>def bubble_sort(arr_list):
# For bigO benchmarking
num_of_comparison = 0
num_of_exchanges = 0
for pass_num in range(len(arr_list) - 1, 0, -1):
for j in range(pass_num):
num_of_comparison += 1 # For bigO benchmarking
if arr_list[j] > arr_list[j + 1]:
arr_list[j], arr_list[j + 1] = arr_list[j + 1], arr_list[j]
num_of_exchanges += 1 # For bigO benchmarking
return '%s comparisons and %s exchanges.' % (num_of_comparison, num_of_exchanges) # For bigO benchmarking
if __name__ == '__main__':
import timeit
unsorted_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
sorted_list = [17, 20, 26, 31, 44, 54, 55, 77, 93]
sorting_algos = [bubble_sort,]
for sorting_algo in sorting_algos:
print('{}: {} sec, {}'.format(
sorting_algo.__name__,
timeit.timeit(
'%s(%s)' % (sorting_algo.__name__, unsorted_list),
setup='from __main__ import %s' % sorting_algo.__name__,
number=1000
),
sorting_algo([54, 26, 93, 17, 77, 31, 44, 55, 20])
)
)
|
|
381ac12d14a248748e36d79cf5ec32770391f2e6
|
tests/test_plugins.py
|
tests/test_plugins.py
|
from django.utils import unittest
from hydra_agent import plugins
class TestPlugins(unittest.TestCase):
def test_scan_plugins(self):
"""Test that we get a list of plugin names."""
self.assertNotEqual(plugins.scan_plugins(), [])
@unittest.skip("test not implemented")
def test_load_plugins(self):
"""Test that plugins get imported."""
pass
def test_find_plugins(self):
"""Test that we get a list of loaded plugin instances."""
self.assertNotEqual(plugins.find_plugins(), [])
|
Add a little test coverage for plugins, needs more.
|
Add a little test coverage for plugins, needs more.
|
Python
|
mit
|
intel-hpdd/intel-manager-for-lustre,intel-hpdd/intel-manager-for-lustre,intel-hpdd/intel-manager-for-lustre
|
Add a little test coverage for plugins, needs more.
|
from django.utils import unittest
from hydra_agent import plugins
class TestPlugins(unittest.TestCase):
def test_scan_plugins(self):
"""Test that we get a list of plugin names."""
self.assertNotEqual(plugins.scan_plugins(), [])
@unittest.skip("test not implemented")
def test_load_plugins(self):
"""Test that plugins get imported."""
pass
def test_find_plugins(self):
"""Test that we get a list of loaded plugin instances."""
self.assertNotEqual(plugins.find_plugins(), [])
|
<commit_before><commit_msg>Add a little test coverage for plugins, needs more.<commit_after>
|
from django.utils import unittest
from hydra_agent import plugins
class TestPlugins(unittest.TestCase):
def test_scan_plugins(self):
"""Test that we get a list of plugin names."""
self.assertNotEqual(plugins.scan_plugins(), [])
@unittest.skip("test not implemented")
def test_load_plugins(self):
"""Test that plugins get imported."""
pass
def test_find_plugins(self):
"""Test that we get a list of loaded plugin instances."""
self.assertNotEqual(plugins.find_plugins(), [])
|
Add a little test coverage for plugins, needs more.from django.utils import unittest
from hydra_agent import plugins
class TestPlugins(unittest.TestCase):
def test_scan_plugins(self):
"""Test that we get a list of plugin names."""
self.assertNotEqual(plugins.scan_plugins(), [])
@unittest.skip("test not implemented")
def test_load_plugins(self):
"""Test that plugins get imported."""
pass
def test_find_plugins(self):
"""Test that we get a list of loaded plugin instances."""
self.assertNotEqual(plugins.find_plugins(), [])
|
<commit_before><commit_msg>Add a little test coverage for plugins, needs more.<commit_after>from django.utils import unittest
from hydra_agent import plugins
class TestPlugins(unittest.TestCase):
def test_scan_plugins(self):
"""Test that we get a list of plugin names."""
self.assertNotEqual(plugins.scan_plugins(), [])
@unittest.skip("test not implemented")
def test_load_plugins(self):
"""Test that plugins get imported."""
pass
def test_find_plugins(self):
"""Test that we get a list of loaded plugin instances."""
self.assertNotEqual(plugins.find_plugins(), [])
|
|
0d7401d0b651a9c2889a7b0a3ead41ef0c28cedb
|
tools/sensorama.py
|
tools/sensorama.py
|
#!/usr/bin/env python
import sys
import json
def usage():
print "validate.py <jsonfilename>\n"
sys.exit(1)
def main():
if len(sys.argv) != 2:
usage()
fn = sys.argv[1]
print "# fn=" + fn
with open(fn, "r") as f:
js = json.load(f)
f.close()
js = json.dumps(js, sort_keys=True, indent=4, separators=(',', ': '))
print js
if __name__ == "__main__":
main();
|
Add a simple validator for the Sensorama JSON format.
|
Add a simple validator for the Sensorama JSON format.
|
Python
|
bsd-2-clause
|
wkoszek/sensorama,wkoszek/sensorama
|
Add a simple validator for the Sensorama JSON format.
|
#!/usr/bin/env python
import sys
import json
def usage():
print "validate.py <jsonfilename>\n"
sys.exit(1)
def main():
if len(sys.argv) != 2:
usage()
fn = sys.argv[1]
print "# fn=" + fn
with open(fn, "r") as f:
js = json.load(f)
f.close()
js = json.dumps(js, sort_keys=True, indent=4, separators=(',', ': '))
print js
if __name__ == "__main__":
main();
|
<commit_before><commit_msg>Add a simple validator for the Sensorama JSON format.<commit_after>
|
#!/usr/bin/env python
import sys
import json
def usage():
print "validate.py <jsonfilename>\n"
sys.exit(1)
def main():
if len(sys.argv) != 2:
usage()
fn = sys.argv[1]
print "# fn=" + fn
with open(fn, "r") as f:
js = json.load(f)
f.close()
js = json.dumps(js, sort_keys=True, indent=4, separators=(',', ': '))
print js
if __name__ == "__main__":
main();
|
Add a simple validator for the Sensorama JSON format.#!/usr/bin/env python
import sys
import json
def usage():
print "validate.py <jsonfilename>\n"
sys.exit(1)
def main():
if len(sys.argv) != 2:
usage()
fn = sys.argv[1]
print "# fn=" + fn
with open(fn, "r") as f:
js = json.load(f)
f.close()
js = json.dumps(js, sort_keys=True, indent=4, separators=(',', ': '))
print js
if __name__ == "__main__":
main();
|
<commit_before><commit_msg>Add a simple validator for the Sensorama JSON format.<commit_after>#!/usr/bin/env python
import sys
import json
def usage():
print "validate.py <jsonfilename>\n"
sys.exit(1)
def main():
if len(sys.argv) != 2:
usage()
fn = sys.argv[1]
print "# fn=" + fn
with open(fn, "r") as f:
js = json.load(f)
f.close()
js = json.dumps(js, sort_keys=True, indent=4, separators=(',', ': '))
print js
if __name__ == "__main__":
main();
|
|
bd99d5fe158d911e08b329c05fc8ba46c909d7b4
|
scripts/ua/mine_mtarchive.py
|
scripts/ua/mine_mtarchive.py
|
import subprocess
import datetime
import pytz
import urllib2
from ingest_from_rucsoundings import RAOB
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb')
def conv( raw):
if float(raw) < -9998:
return None
return float(raw)
sts = datetime.datetime(1946,1,1)
ets = datetime.datetime(1990,1,1)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
print now
uri = now.strftime("http://mtarchive.geol.iastate.edu/%Y/%m/%d/gempak/upperair/%Y%m%d_upa.gem")
try:
data = urllib2.urlopen(uri).read()
o = open('data.gem', 'wb')
o.write(data)
o.close()
except Exception, exp:
print exp
now += interval
continue
o = open('fn', 'w')
o.write("""
SNFILE=data.gem
AREA=DSET
DATTIM=ALL
SNPARM=TMPC;DWPC;HGHT;DRCT;SKNT
LEVELS=ALL
VCOORD = PRES
OUTPUT = T
MRGDAT = YES
run
exit
""")
o.close()
p = subprocess.Popen("snlist < fn", stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
data = p.stdout.read()
myraob = None
for line in data.split("\n"):
if line.strip()[:4] == 'STID':
if myraob is not None:
print str(myraob)
txn = POSTGIS.cursor()
myraob.database_save(txn)
txn.close()
POSTGIS.commit()
myraob = None
tokens = line.strip().split()
myraob = RAOB()
myraob.station = tokens[2]
valid = datetime.datetime.strptime("19"+tokens[-1], '%Y%m%d/%H%M')
myraob.valid = valid.replace(tzinfo=pytz.timezone("UTC"))
if line.find(".") > 0 and line.find("=") == -1 and line.find("PRES") == -1:
tokens = line.strip().split()
if len(tokens) < 6:
continue
myraob.profile.append({'levelcode': None,
'pressure': float(tokens[0]),
'height': conv(float(tokens[3])),
'tmpc': conv(float(tokens[1])),
'dwpc': conv(float(tokens[2])),
'drct': conv(float(tokens[4])),
'smps': myraob.conv_speed(tokens[5]),
'ts': None,
'bearing': None,
'range': None
})
now += interval
|
Add script to process mtarchive's sounding archive
|
Add script to process mtarchive's sounding archive
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add script to process mtarchive's sounding archive
|
import subprocess
import datetime
import pytz
import urllib2
from ingest_from_rucsoundings import RAOB
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb')
def conv( raw):
if float(raw) < -9998:
return None
return float(raw)
sts = datetime.datetime(1946,1,1)
ets = datetime.datetime(1990,1,1)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
print now
uri = now.strftime("http://mtarchive.geol.iastate.edu/%Y/%m/%d/gempak/upperair/%Y%m%d_upa.gem")
try:
data = urllib2.urlopen(uri).read()
o = open('data.gem', 'wb')
o.write(data)
o.close()
except Exception, exp:
print exp
now += interval
continue
o = open('fn', 'w')
o.write("""
SNFILE=data.gem
AREA=DSET
DATTIM=ALL
SNPARM=TMPC;DWPC;HGHT;DRCT;SKNT
LEVELS=ALL
VCOORD = PRES
OUTPUT = T
MRGDAT = YES
run
exit
""")
o.close()
p = subprocess.Popen("snlist < fn", stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
data = p.stdout.read()
myraob = None
for line in data.split("\n"):
if line.strip()[:4] == 'STID':
if myraob is not None:
print str(myraob)
txn = POSTGIS.cursor()
myraob.database_save(txn)
txn.close()
POSTGIS.commit()
myraob = None
tokens = line.strip().split()
myraob = RAOB()
myraob.station = tokens[2]
valid = datetime.datetime.strptime("19"+tokens[-1], '%Y%m%d/%H%M')
myraob.valid = valid.replace(tzinfo=pytz.timezone("UTC"))
if line.find(".") > 0 and line.find("=") == -1 and line.find("PRES") == -1:
tokens = line.strip().split()
if len(tokens) < 6:
continue
myraob.profile.append({'levelcode': None,
'pressure': float(tokens[0]),
'height': conv(float(tokens[3])),
'tmpc': conv(float(tokens[1])),
'dwpc': conv(float(tokens[2])),
'drct': conv(float(tokens[4])),
'smps': myraob.conv_speed(tokens[5]),
'ts': None,
'bearing': None,
'range': None
})
now += interval
|
<commit_before><commit_msg>Add script to process mtarchive's sounding archive<commit_after>
|
import subprocess
import datetime
import pytz
import urllib2
from ingest_from_rucsoundings import RAOB
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb')
def conv( raw):
if float(raw) < -9998:
return None
return float(raw)
sts = datetime.datetime(1946,1,1)
ets = datetime.datetime(1990,1,1)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
print now
uri = now.strftime("http://mtarchive.geol.iastate.edu/%Y/%m/%d/gempak/upperair/%Y%m%d_upa.gem")
try:
data = urllib2.urlopen(uri).read()
o = open('data.gem', 'wb')
o.write(data)
o.close()
except Exception, exp:
print exp
now += interval
continue
o = open('fn', 'w')
o.write("""
SNFILE=data.gem
AREA=DSET
DATTIM=ALL
SNPARM=TMPC;DWPC;HGHT;DRCT;SKNT
LEVELS=ALL
VCOORD = PRES
OUTPUT = T
MRGDAT = YES
run
exit
""")
o.close()
p = subprocess.Popen("snlist < fn", stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
data = p.stdout.read()
myraob = None
for line in data.split("\n"):
if line.strip()[:4] == 'STID':
if myraob is not None:
print str(myraob)
txn = POSTGIS.cursor()
myraob.database_save(txn)
txn.close()
POSTGIS.commit()
myraob = None
tokens = line.strip().split()
myraob = RAOB()
myraob.station = tokens[2]
valid = datetime.datetime.strptime("19"+tokens[-1], '%Y%m%d/%H%M')
myraob.valid = valid.replace(tzinfo=pytz.timezone("UTC"))
if line.find(".") > 0 and line.find("=") == -1 and line.find("PRES") == -1:
tokens = line.strip().split()
if len(tokens) < 6:
continue
myraob.profile.append({'levelcode': None,
'pressure': float(tokens[0]),
'height': conv(float(tokens[3])),
'tmpc': conv(float(tokens[1])),
'dwpc': conv(float(tokens[2])),
'drct': conv(float(tokens[4])),
'smps': myraob.conv_speed(tokens[5]),
'ts': None,
'bearing': None,
'range': None
})
now += interval
|
Add script to process mtarchive's sounding archiveimport subprocess
import datetime
import pytz
import urllib2
from ingest_from_rucsoundings import RAOB
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb')
def conv( raw):
if float(raw) < -9998:
return None
return float(raw)
sts = datetime.datetime(1946,1,1)
ets = datetime.datetime(1990,1,1)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
print now
uri = now.strftime("http://mtarchive.geol.iastate.edu/%Y/%m/%d/gempak/upperair/%Y%m%d_upa.gem")
try:
data = urllib2.urlopen(uri).read()
o = open('data.gem', 'wb')
o.write(data)
o.close()
except Exception, exp:
print exp
now += interval
continue
o = open('fn', 'w')
o.write("""
SNFILE=data.gem
AREA=DSET
DATTIM=ALL
SNPARM=TMPC;DWPC;HGHT;DRCT;SKNT
LEVELS=ALL
VCOORD = PRES
OUTPUT = T
MRGDAT = YES
run
exit
""")
o.close()
p = subprocess.Popen("snlist < fn", stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
data = p.stdout.read()
myraob = None
for line in data.split("\n"):
if line.strip()[:4] == 'STID':
if myraob is not None:
print str(myraob)
txn = POSTGIS.cursor()
myraob.database_save(txn)
txn.close()
POSTGIS.commit()
myraob = None
tokens = line.strip().split()
myraob = RAOB()
myraob.station = tokens[2]
valid = datetime.datetime.strptime("19"+tokens[-1], '%Y%m%d/%H%M')
myraob.valid = valid.replace(tzinfo=pytz.timezone("UTC"))
if line.find(".") > 0 and line.find("=") == -1 and line.find("PRES") == -1:
tokens = line.strip().split()
if len(tokens) < 6:
continue
myraob.profile.append({'levelcode': None,
'pressure': float(tokens[0]),
'height': conv(float(tokens[3])),
'tmpc': conv(float(tokens[1])),
'dwpc': conv(float(tokens[2])),
'drct': conv(float(tokens[4])),
'smps': myraob.conv_speed(tokens[5]),
'ts': None,
'bearing': None,
'range': None
})
now += interval
|
<commit_before><commit_msg>Add script to process mtarchive's sounding archive<commit_after>import subprocess
import datetime
import pytz
import urllib2
from ingest_from_rucsoundings import RAOB
import psycopg2
POSTGIS = psycopg2.connect(database='postgis', host='iemdb')
def conv( raw):
if float(raw) < -9998:
return None
return float(raw)
sts = datetime.datetime(1946,1,1)
ets = datetime.datetime(1990,1,1)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
print now
uri = now.strftime("http://mtarchive.geol.iastate.edu/%Y/%m/%d/gempak/upperair/%Y%m%d_upa.gem")
try:
data = urllib2.urlopen(uri).read()
o = open('data.gem', 'wb')
o.write(data)
o.close()
except Exception, exp:
print exp
now += interval
continue
o = open('fn', 'w')
o.write("""
SNFILE=data.gem
AREA=DSET
DATTIM=ALL
SNPARM=TMPC;DWPC;HGHT;DRCT;SKNT
LEVELS=ALL
VCOORD = PRES
OUTPUT = T
MRGDAT = YES
run
exit
""")
o.close()
p = subprocess.Popen("snlist < fn", stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
data = p.stdout.read()
myraob = None
for line in data.split("\n"):
if line.strip()[:4] == 'STID':
if myraob is not None:
print str(myraob)
txn = POSTGIS.cursor()
myraob.database_save(txn)
txn.close()
POSTGIS.commit()
myraob = None
tokens = line.strip().split()
myraob = RAOB()
myraob.station = tokens[2]
valid = datetime.datetime.strptime("19"+tokens[-1], '%Y%m%d/%H%M')
myraob.valid = valid.replace(tzinfo=pytz.timezone("UTC"))
if line.find(".") > 0 and line.find("=") == -1 and line.find("PRES") == -1:
tokens = line.strip().split()
if len(tokens) < 6:
continue
myraob.profile.append({'levelcode': None,
'pressure': float(tokens[0]),
'height': conv(float(tokens[3])),
'tmpc': conv(float(tokens[1])),
'dwpc': conv(float(tokens[2])),
'drct': conv(float(tokens[4])),
'smps': myraob.conv_speed(tokens[5]),
'ts': None,
'bearing': None,
'range': None
})
now += interval
|
|
f4a27896f9c60c64631e27633154474e6ddc3181
|
py2/EulerRunner.py
|
py2/EulerRunner.py
|
#EulerRunner.py
import datetime
def solve_problem(solve_func):
start = datetime.datetime.now()
result = str(solve_func())
end = datetime.datetime.now()
print 'The answer is "' + result + '". Solved in ' + str((end - start).total_seconds()) + 's'
|
Add problem solver to display results and give accurate measure of times to solve
|
Add problem solver to display results and give accurate measure of times to solve
|
Python
|
mit
|
DanielGarrett/ProjectEuler
|
Add problem solver to display results and give accurate measure of times to solve
|
#EulerRunner.py
import datetime
def solve_problem(solve_func):
start = datetime.datetime.now()
result = str(solve_func())
end = datetime.datetime.now()
print 'The answer is "' + result + '". Solved in ' + str((end - start).total_seconds()) + 's'
|
<commit_before><commit_msg>Add problem solver to display results and give accurate measure of times to solve<commit_after>
|
#EulerRunner.py
import datetime
def solve_problem(solve_func):
start = datetime.datetime.now()
result = str(solve_func())
end = datetime.datetime.now()
print 'The answer is "' + result + '". Solved in ' + str((end - start).total_seconds()) + 's'
|
Add problem solver to display results and give accurate measure of times to solve#EulerRunner.py
import datetime
def solve_problem(solve_func):
start = datetime.datetime.now()
result = str(solve_func())
end = datetime.datetime.now()
print 'The answer is "' + result + '". Solved in ' + str((end - start).total_seconds()) + 's'
|
<commit_before><commit_msg>Add problem solver to display results and give accurate measure of times to solve<commit_after>#EulerRunner.py
import datetime
def solve_problem(solve_func):
start = datetime.datetime.now()
result = str(solve_func())
end = datetime.datetime.now()
print 'The answer is "' + result + '". Solved in ' + str((end - start).total_seconds()) + 's'
|
|
a9aefba284ca775939e41ce4a280cb27f3cd8bf6
|
app.py
|
app.py
|
"""
Usage:
create_room <name> <type_room>
add_person <firstname> <surname> [wants_accomodation="N"]
dojo (-i | --interactive)
dojo (-h | --help | --version)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
"""
from docopt import docopt, DocoptExit
import cmd
import os
import sys
from models.dojo import Dojo
from termcolor import colored
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
os.system("clear")
print(__doc__)
class DOJO(cmd.Cmd):
instance=Dojo()
prompt = colored('DOJO$$$', 'magenta', attrs=['blink','bold'])
@docopt_cmd
def do_quit(self, arg):
"""Usage: quit"""
os.system('clear')
print ('Dojo Exiting')
exit()
@docopt_cmd
def do_create_room(self, arg):
"""Usage: create_room <name> <type_room>"""
pass
@docopt_cmd
def do_add_person(self, arg):
"""Usage: add_person <firstname> <surname> [wants_accomodation="N"]"""
pass
if __name__ == "__main__":
try:
intro()
DOJO().cmdloop()
except KeyboardInterrupt:
os.system("clear")
print('Dojo Exiting')
|
Make room arg list and Dojo instance
|
Make room arg list and Dojo instance
|
Python
|
mit
|
JoshuaOndieki/dojo
|
Make room arg list and Dojo instance
|
"""
Usage:
create_room <name> <type_room>
add_person <firstname> <surname> [wants_accomodation="N"]
dojo (-i | --interactive)
dojo (-h | --help | --version)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
"""
from docopt import docopt, DocoptExit
import cmd
import os
import sys
from models.dojo import Dojo
from termcolor import colored
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
os.system("clear")
print(__doc__)
class DOJO(cmd.Cmd):
instance=Dojo()
prompt = colored('DOJO$$$', 'magenta', attrs=['blink','bold'])
@docopt_cmd
def do_quit(self, arg):
"""Usage: quit"""
os.system('clear')
print ('Dojo Exiting')
exit()
@docopt_cmd
def do_create_room(self, arg):
"""Usage: create_room <name> <type_room>"""
pass
@docopt_cmd
def do_add_person(self, arg):
"""Usage: add_person <firstname> <surname> [wants_accomodation="N"]"""
pass
if __name__ == "__main__":
try:
intro()
DOJO().cmdloop()
except KeyboardInterrupt:
os.system("clear")
print('Dojo Exiting')
|
<commit_before><commit_msg>Make room arg list and Dojo instance<commit_after>
|
"""
Usage:
create_room <name> <type_room>
add_person <firstname> <surname> [wants_accomodation="N"]
dojo (-i | --interactive)
dojo (-h | --help | --version)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
"""
from docopt import docopt, DocoptExit
import cmd
import os
import sys
from models.dojo import Dojo
from termcolor import colored
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
os.system("clear")
print(__doc__)
class DOJO(cmd.Cmd):
instance=Dojo()
prompt = colored('DOJO$$$', 'magenta', attrs=['blink','bold'])
@docopt_cmd
def do_quit(self, arg):
"""Usage: quit"""
os.system('clear')
print ('Dojo Exiting')
exit()
@docopt_cmd
def do_create_room(self, arg):
"""Usage: create_room <name> <type_room>"""
pass
@docopt_cmd
def do_add_person(self, arg):
"""Usage: add_person <firstname> <surname> [wants_accomodation="N"]"""
pass
if __name__ == "__main__":
try:
intro()
DOJO().cmdloop()
except KeyboardInterrupt:
os.system("clear")
print('Dojo Exiting')
|
Make room arg list and Dojo instance"""
Usage:
create_room <name> <type_room>
add_person <firstname> <surname> [wants_accomodation="N"]
dojo (-i | --interactive)
dojo (-h | --help | --version)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
"""
from docopt import docopt, DocoptExit
import cmd
import os
import sys
from models.dojo import Dojo
from termcolor import colored
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
os.system("clear")
print(__doc__)
class DOJO(cmd.Cmd):
instance=Dojo()
prompt = colored('DOJO$$$', 'magenta', attrs=['blink','bold'])
@docopt_cmd
def do_quit(self, arg):
"""Usage: quit"""
os.system('clear')
print ('Dojo Exiting')
exit()
@docopt_cmd
def do_create_room(self, arg):
"""Usage: create_room <name> <type_room>"""
pass
@docopt_cmd
def do_add_person(self, arg):
"""Usage: add_person <firstname> <surname> [wants_accomodation="N"]"""
pass
if __name__ == "__main__":
try:
intro()
DOJO().cmdloop()
except KeyboardInterrupt:
os.system("clear")
print('Dojo Exiting')
|
<commit_before><commit_msg>Make room arg list and Dojo instance<commit_after>"""
Usage:
create_room <name> <type_room>
add_person <firstname> <surname> [wants_accomodation="N"]
dojo (-i | --interactive)
dojo (-h | --help | --version)
Options:
-i, --interactive Interactive Mode
-h, --help Show this screen and exit.
"""
from docopt import docopt, DocoptExit
import cmd
import os
import sys
from models.dojo import Dojo
from termcolor import colored
def docopt_cmd(func):
"""
This decorator is used to simplify the try/except block and pass the result
of the docopt parsing to the called action
"""
def fn(self, arg):
try:
opt = docopt(fn.__doc__, arg)
except DocoptExit as e:
# The DocoptExit is thrown when the args do not match
# We print a message to the user and the usage block
print('Invalid Command!')
print(e)
return
except SystemExit:
# The SystemExit exception prints the usage for --help
# We do not need to do the print here
return
return func(self, opt)
fn.__name__ = func.__name__
fn.__doc__ = func.__doc__
fn.__dict__.update(func.__dict__)
return fn
def intro():
os.system("clear")
print(__doc__)
class DOJO(cmd.Cmd):
instance=Dojo()
prompt = colored('DOJO$$$', 'magenta', attrs=['blink','bold'])
@docopt_cmd
def do_quit(self, arg):
"""Usage: quit"""
os.system('clear')
print ('Dojo Exiting')
exit()
@docopt_cmd
def do_create_room(self, arg):
"""Usage: create_room <name> <type_room>"""
pass
@docopt_cmd
def do_add_person(self, arg):
"""Usage: add_person <firstname> <surname> [wants_accomodation="N"]"""
pass
if __name__ == "__main__":
try:
intro()
DOJO().cmdloop()
except KeyboardInterrupt:
os.system("clear")
print('Dojo Exiting')
|
|
d9cd5401a782313a2e02c81c3bc69f25f8fb1acc
|
peakachulib/float_range.py
|
peakachulib/float_range.py
|
class FloatRange(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
|
Add small helper class to check if float parameters are in allowed range
|
Add small helper class to check if float parameters are in allowed range
|
Python
|
isc
|
tbischler/PEAKachu
|
Add small helper class to check if float parameters are in allowed range
|
class FloatRange(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
|
<commit_before><commit_msg>Add small helper class to check if float parameters are in allowed range<commit_after>
|
class FloatRange(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
|
Add small helper class to check if float parameters are in allowed rangeclass FloatRange(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
|
<commit_before><commit_msg>Add small helper class to check if float parameters are in allowed range<commit_after>class FloatRange(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
|
|
7dbb1bf87a1f423d0241eec4aab7b603801f4f53
|
calculate_pnps.py
|
calculate_pnps.py
|
#!/usr/bin/env python
"""Module to calculate pn ps."""
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
def calculate_pnps(genomes_a, genomes_b, sico_files):
""""""
refseq_ids_a = [genome['RefSeq project ID'] for genome in genomes_a]
refseq_ids_b = [genome['RefSeq project ID'] for genome in genomes_b]
print refseq_ids_a
print refseq_ids_b
#Separate genomes_a from genomes_b in sico_files
alignments = (AlignIO.read(sico_file, 'fasta') for sico_file in sico_files)
for ali in alignments:
id_seqr_tuples = [(seqr.id.split('|')[0], seqr) for seqr in ali]
alignment_a = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_a)
alignment_b = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_b)
print 'Ali A:\n', alignment_a
print 'Ali B:\n', alignment_b
if __name__ == '__main__':
pass
|
Add initial modules to calculate pn & ps
|
Add initial modules to calculate pn & ps
|
Python
|
mit
|
ODoSE/odose.nl
|
Add initial modules to calculate pn & ps
|
#!/usr/bin/env python
"""Module to calculate pn ps."""
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
def calculate_pnps(genomes_a, genomes_b, sico_files):
""""""
refseq_ids_a = [genome['RefSeq project ID'] for genome in genomes_a]
refseq_ids_b = [genome['RefSeq project ID'] for genome in genomes_b]
print refseq_ids_a
print refseq_ids_b
#Separate genomes_a from genomes_b in sico_files
alignments = (AlignIO.read(sico_file, 'fasta') for sico_file in sico_files)
for ali in alignments:
id_seqr_tuples = [(seqr.id.split('|')[0], seqr) for seqr in ali]
alignment_a = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_a)
alignment_b = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_b)
print 'Ali A:\n', alignment_a
print 'Ali B:\n', alignment_b
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add initial modules to calculate pn & ps<commit_after>
|
#!/usr/bin/env python
"""Module to calculate pn ps."""
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
def calculate_pnps(genomes_a, genomes_b, sico_files):
""""""
refseq_ids_a = [genome['RefSeq project ID'] for genome in genomes_a]
refseq_ids_b = [genome['RefSeq project ID'] for genome in genomes_b]
print refseq_ids_a
print refseq_ids_b
#Separate genomes_a from genomes_b in sico_files
alignments = (AlignIO.read(sico_file, 'fasta') for sico_file in sico_files)
for ali in alignments:
id_seqr_tuples = [(seqr.id.split('|')[0], seqr) for seqr in ali]
alignment_a = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_a)
alignment_b = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_b)
print 'Ali A:\n', alignment_a
print 'Ali B:\n', alignment_b
if __name__ == '__main__':
pass
|
Add initial modules to calculate pn & ps#!/usr/bin/env python
"""Module to calculate pn ps."""
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
def calculate_pnps(genomes_a, genomes_b, sico_files):
""""""
refseq_ids_a = [genome['RefSeq project ID'] for genome in genomes_a]
refseq_ids_b = [genome['RefSeq project ID'] for genome in genomes_b]
print refseq_ids_a
print refseq_ids_b
#Separate genomes_a from genomes_b in sico_files
alignments = (AlignIO.read(sico_file, 'fasta') for sico_file in sico_files)
for ali in alignments:
id_seqr_tuples = [(seqr.id.split('|')[0], seqr) for seqr in ali]
alignment_a = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_a)
alignment_b = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_b)
print 'Ali A:\n', alignment_a
print 'Ali B:\n', alignment_b
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add initial modules to calculate pn & ps<commit_after>#!/usr/bin/env python
"""Module to calculate pn ps."""
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
def calculate_pnps(genomes_a, genomes_b, sico_files):
""""""
refseq_ids_a = [genome['RefSeq project ID'] for genome in genomes_a]
refseq_ids_b = [genome['RefSeq project ID'] for genome in genomes_b]
print refseq_ids_a
print refseq_ids_b
#Separate genomes_a from genomes_b in sico_files
alignments = (AlignIO.read(sico_file, 'fasta') for sico_file in sico_files)
for ali in alignments:
id_seqr_tuples = [(seqr.id.split('|')[0], seqr) for seqr in ali]
alignment_a = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_a)
alignment_b = MultipleSeqAlignment(seqr for id, seqr in id_seqr_tuples if id in refseq_ids_b)
print 'Ali A:\n', alignment_a
print 'Ali B:\n', alignment_b
if __name__ == '__main__':
pass
|
|
6700d784ff69c800dbc50abed341b20f806bddd2
|
configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py
|
configs/new_baselines/mask_rcnn_R_50_FPN_50ep_LSJ.py
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter //= 2 # 100ep -> 50ep
|
Add 50ep R50 Mask RCNN training recipe to D2 new baselines
|
Add 50ep R50 Mask RCNN training recipe to D2 new baselines
Summary: Added a training recipe with a shorter training time, reducing `train.max_iter` using the same pattern used to length it for the recipes >100ep.
Reviewed By: rbgirshick
Differential Revision: D28935200
fbshipit-source-id: 8423d125bc628885990a343cbb09a1a5f0d5a1b2
|
Python
|
apache-2.0
|
facebookresearch/detectron2,facebookresearch/detectron2,facebookresearch/detectron2
|
Add 50ep R50 Mask RCNN training recipe to D2 new baselines
Summary: Added a training recipe with a shorter training time, reducing `train.max_iter` using the same pattern used to length it for the recipes >100ep.
Reviewed By: rbgirshick
Differential Revision: D28935200
fbshipit-source-id: 8423d125bc628885990a343cbb09a1a5f0d5a1b2
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter //= 2 # 100ep -> 50ep
|
<commit_before><commit_msg>Add 50ep R50 Mask RCNN training recipe to D2 new baselines
Summary: Added a training recipe with a shorter training time, reducing `train.max_iter` using the same pattern used to length it for the recipes >100ep.
Reviewed By: rbgirshick
Differential Revision: D28935200
fbshipit-source-id: 8423d125bc628885990a343cbb09a1a5f0d5a1b2<commit_after>
|
from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter //= 2 # 100ep -> 50ep
|
Add 50ep R50 Mask RCNN training recipe to D2 new baselines
Summary: Added a training recipe with a shorter training time, reducing `train.max_iter` using the same pattern used to length it for the recipes >100ep.
Reviewed By: rbgirshick
Differential Revision: D28935200
fbshipit-source-id: 8423d125bc628885990a343cbb09a1a5f0d5a1b2from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter //= 2 # 100ep -> 50ep
|
<commit_before><commit_msg>Add 50ep R50 Mask RCNN training recipe to D2 new baselines
Summary: Added a training recipe with a shorter training time, reducing `train.max_iter` using the same pattern used to length it for the recipes >100ep.
Reviewed By: rbgirshick
Differential Revision: D28935200
fbshipit-source-id: 8423d125bc628885990a343cbb09a1a5f0d5a1b2<commit_after>from .mask_rcnn_R_50_FPN_100ep_LSJ import (
dataloader,
lr_multiplier,
model,
optimizer,
train,
)
train.max_iter //= 2 # 100ep -> 50ep
|
|
426ccdfd15c703ea2c67e85175265acc76c4beff
|
capture_circles.py
|
capture_circles.py
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Capture calibration circles
#
# External dependencies
import cv2
import numpy as np
# Get the camera
camera = cv2.VideoCapture( 0 )
# Acquisition loop
while( True ) :
# Capture image-by-image
_, image = camera.read()
# Convert it to gray
gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
# Smooth it, otherwise a lot of false circles may be detected
gray = cv2.GaussianBlur( gray, ( 9, 9 ), 2, 2 )
# Detect the circles
circles = cv2.HoughCircles( gray, cv2.cv.CV_HOUGH_GRADIENT, 1.5, 20 )
# Display the circles if found
if circles is not None :
# Convert the (x, y) coordinates and radius of the circles to integers
circles = np.round( circles[ 0, : ] ).astype( int )
for ( x, y, r ) in circles :
cv2.circle( image, ( x, y ), 3, ( 0, 255, 0 ), -1, 8, 0 )
cv2.circle( image, ( x, y ), r, ( 0, 0, 255 ), 3, 8, 0 )
# Display the resulting image
cv2.imshow( 'USB Camera', image )
# Keyboard interruption
key = cv2.waitKey( 1 ) & 0xFF
# Escape : quit the application
if key == 27 : break
# Release the camera
camera.release()
# Close OpenCV windows
cv2.destroyAllWindows()
|
Add a circle detection script.
|
Add a circle detection script.
|
Python
|
mit
|
microy/RobotVision,microy/RobotVision
|
Add a circle detection script.
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Capture calibration circles
#
# External dependencies
import cv2
import numpy as np
# Get the camera
camera = cv2.VideoCapture( 0 )
# Acquisition loop
while( True ) :
# Capture image-by-image
_, image = camera.read()
# Convert it to gray
gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
# Smooth it, otherwise a lot of false circles may be detected
gray = cv2.GaussianBlur( gray, ( 9, 9 ), 2, 2 )
# Detect the circles
circles = cv2.HoughCircles( gray, cv2.cv.CV_HOUGH_GRADIENT, 1.5, 20 )
# Display the circles if found
if circles is not None :
# Convert the (x, y) coordinates and radius of the circles to integers
circles = np.round( circles[ 0, : ] ).astype( int )
for ( x, y, r ) in circles :
cv2.circle( image, ( x, y ), 3, ( 0, 255, 0 ), -1, 8, 0 )
cv2.circle( image, ( x, y ), r, ( 0, 0, 255 ), 3, 8, 0 )
# Display the resulting image
cv2.imshow( 'USB Camera', image )
# Keyboard interruption
key = cv2.waitKey( 1 ) & 0xFF
# Escape : quit the application
if key == 27 : break
# Release the camera
camera.release()
# Close OpenCV windows
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a circle detection script.<commit_after>
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Capture calibration circles
#
# External dependencies
import cv2
import numpy as np
# Get the camera
camera = cv2.VideoCapture( 0 )
# Acquisition loop
while( True ) :
# Capture image-by-image
_, image = camera.read()
# Convert it to gray
gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
# Smooth it, otherwise a lot of false circles may be detected
gray = cv2.GaussianBlur( gray, ( 9, 9 ), 2, 2 )
# Detect the circles
circles = cv2.HoughCircles( gray, cv2.cv.CV_HOUGH_GRADIENT, 1.5, 20 )
# Display the circles if found
if circles is not None :
# Convert the (x, y) coordinates and radius of the circles to integers
circles = np.round( circles[ 0, : ] ).astype( int )
for ( x, y, r ) in circles :
cv2.circle( image, ( x, y ), 3, ( 0, 255, 0 ), -1, 8, 0 )
cv2.circle( image, ( x, y ), r, ( 0, 0, 255 ), 3, 8, 0 )
# Display the resulting image
cv2.imshow( 'USB Camera', image )
# Keyboard interruption
key = cv2.waitKey( 1 ) & 0xFF
# Escape : quit the application
if key == 27 : break
# Release the camera
camera.release()
# Close OpenCV windows
cv2.destroyAllWindows()
|
Add a circle detection script.#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Capture calibration circles
#
# External dependencies
import cv2
import numpy as np
# Get the camera
camera = cv2.VideoCapture( 0 )
# Acquisition loop
while( True ) :
# Capture image-by-image
_, image = camera.read()
# Convert it to gray
gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
# Smooth it, otherwise a lot of false circles may be detected
gray = cv2.GaussianBlur( gray, ( 9, 9 ), 2, 2 )
# Detect the circles
circles = cv2.HoughCircles( gray, cv2.cv.CV_HOUGH_GRADIENT, 1.5, 20 )
# Display the circles if found
if circles is not None :
# Convert the (x, y) coordinates and radius of the circles to integers
circles = np.round( circles[ 0, : ] ).astype( int )
for ( x, y, r ) in circles :
cv2.circle( image, ( x, y ), 3, ( 0, 255, 0 ), -1, 8, 0 )
cv2.circle( image, ( x, y ), r, ( 0, 0, 255 ), 3, 8, 0 )
# Display the resulting image
cv2.imshow( 'USB Camera', image )
# Keyboard interruption
key = cv2.waitKey( 1 ) & 0xFF
# Escape : quit the application
if key == 27 : break
# Release the camera
camera.release()
# Close OpenCV windows
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add a circle detection script.<commit_after>#! /usr/bin/env python
# -*- coding:utf-8 -*-
#
# Capture calibration circles
#
# External dependencies
import cv2
import numpy as np
# Get the camera
camera = cv2.VideoCapture( 0 )
# Acquisition loop
while( True ) :
# Capture image-by-image
_, image = camera.read()
# Convert it to gray
gray = cv2.cvtColor( image, cv2.COLOR_BGR2GRAY )
# Smooth it, otherwise a lot of false circles may be detected
gray = cv2.GaussianBlur( gray, ( 9, 9 ), 2, 2 )
# Detect the circles
circles = cv2.HoughCircles( gray, cv2.cv.CV_HOUGH_GRADIENT, 1.5, 20 )
# Display the circles if found
if circles is not None :
# Convert the (x, y) coordinates and radius of the circles to integers
circles = np.round( circles[ 0, : ] ).astype( int )
for ( x, y, r ) in circles :
cv2.circle( image, ( x, y ), 3, ( 0, 255, 0 ), -1, 8, 0 )
cv2.circle( image, ( x, y ), r, ( 0, 0, 255 ), 3, 8, 0 )
# Display the resulting image
cv2.imshow( 'USB Camera', image )
# Keyboard interruption
key = cv2.waitKey( 1 ) & 0xFF
# Escape : quit the application
if key == 27 : break
# Release the camera
camera.release()
# Close OpenCV windows
cv2.destroyAllWindows()
|
|
65c91f14d3131d5e9cbd34cedcbeb508a108203b
|
src/test/test_orientation.py
|
src/test/test_orientation.py
|
#!/usr/bin/env python
import unittest
from math import pi
from orientation import get_angle_between_0_and_2_pi
class OrientationTest(unittest.TestCase):
def setUp(self):
self.delta = 0.000001
def test_when_angle_is_between_0_and_2_pi_then_angle_is_returned(self):
angle = 50 * pi / 180.0
self.assertEqual(angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_negative_then_a_positive_angle_is_returned(self):
angle = -pi / 2
expected_angle = 3 * pi / 2
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_2_pi_then_0_is_returned(self):
angle = 2 * pi
expected_angle = 0
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_greater_than_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = 370.5 * pi / 180
expected_angle = 10.5 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
def test_when_angle_is_less_than_minus_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = -370 * pi / 180
expected_angle = 350 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
|
Return angle between 0 and 2pi
|
refactor: Return angle between 0 and 2pi
Consider cases when angle is greater than 2pi and less than -2pi, and when it is exactly 2pi.
|
Python
|
mit
|
bit0001/trajectory_tracking,bit0001/trajectory_tracking
|
refactor: Return angle between 0 and 2pi
Consider cases when angle is greater than 2pi and less than -2pi, and when it is exactly 2pi.
|
#!/usr/bin/env python
import unittest
from math import pi
from orientation import get_angle_between_0_and_2_pi
class OrientationTest(unittest.TestCase):
def setUp(self):
self.delta = 0.000001
def test_when_angle_is_between_0_and_2_pi_then_angle_is_returned(self):
angle = 50 * pi / 180.0
self.assertEqual(angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_negative_then_a_positive_angle_is_returned(self):
angle = -pi / 2
expected_angle = 3 * pi / 2
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_2_pi_then_0_is_returned(self):
angle = 2 * pi
expected_angle = 0
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_greater_than_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = 370.5 * pi / 180
expected_angle = 10.5 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
def test_when_angle_is_less_than_minus_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = -370 * pi / 180
expected_angle = 350 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
|
<commit_before><commit_msg>refactor: Return angle between 0 and 2pi
Consider cases when angle is greater than 2pi and less than -2pi, and when it is exactly 2pi.<commit_after>
|
#!/usr/bin/env python
import unittest
from math import pi
from orientation import get_angle_between_0_and_2_pi
class OrientationTest(unittest.TestCase):
def setUp(self):
self.delta = 0.000001
def test_when_angle_is_between_0_and_2_pi_then_angle_is_returned(self):
angle = 50 * pi / 180.0
self.assertEqual(angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_negative_then_a_positive_angle_is_returned(self):
angle = -pi / 2
expected_angle = 3 * pi / 2
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_2_pi_then_0_is_returned(self):
angle = 2 * pi
expected_angle = 0
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_greater_than_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = 370.5 * pi / 180
expected_angle = 10.5 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
def test_when_angle_is_less_than_minus_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = -370 * pi / 180
expected_angle = 350 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
|
refactor: Return angle between 0 and 2pi
Consider cases when angle is greater than 2pi and less than -2pi, and when it is exactly 2pi.#!/usr/bin/env python
import unittest
from math import pi
from orientation import get_angle_between_0_and_2_pi
class OrientationTest(unittest.TestCase):
def setUp(self):
self.delta = 0.000001
def test_when_angle_is_between_0_and_2_pi_then_angle_is_returned(self):
angle = 50 * pi / 180.0
self.assertEqual(angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_negative_then_a_positive_angle_is_returned(self):
angle = -pi / 2
expected_angle = 3 * pi / 2
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_2_pi_then_0_is_returned(self):
angle = 2 * pi
expected_angle = 0
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_greater_than_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = 370.5 * pi / 180
expected_angle = 10.5 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
def test_when_angle_is_less_than_minus_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = -370 * pi / 180
expected_angle = 350 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
|
<commit_before><commit_msg>refactor: Return angle between 0 and 2pi
Consider cases when angle is greater than 2pi and less than -2pi, and when it is exactly 2pi.<commit_after>#!/usr/bin/env python
import unittest
from math import pi
from orientation import get_angle_between_0_and_2_pi
class OrientationTest(unittest.TestCase):
def setUp(self):
self.delta = 0.000001
def test_when_angle_is_between_0_and_2_pi_then_angle_is_returned(self):
angle = 50 * pi / 180.0
self.assertEqual(angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_negative_then_a_positive_angle_is_returned(self):
angle = -pi / 2
expected_angle = 3 * pi / 2
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_2_pi_then_0_is_returned(self):
angle = 2 * pi
expected_angle = 0
self.assertEqual(expected_angle, get_angle_between_0_and_2_pi(angle))
def test_when_angle_is_greater_than_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = 370.5 * pi / 180
expected_angle = 10.5 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
def test_when_angle_is_less_than_minus_2_pi_then_an_angle_between_0_and_2_pi_is_returned(self):
angle = -370 * pi / 180
expected_angle = 350 * pi / 180
self.assertAlmostEqual(expected_angle, get_angle_between_0_and_2_pi(angle), delta=self.delta)
|
|
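The suite above drives get_angle_between_0_and_2_pi without ever showing its body. A minimal sketch that satisfies every case in these tests, assuming plain float arithmetic is acceptable (Python's % operator returns a result with the sign of the divisor, so negative angles come out positive and an input of exactly 2*pi maps to 0.0):

from math import pi

def get_angle_between_0_and_2_pi(angle):
    # Wrap into [0, 2*pi); handles angles beyond +/-2*pi as well,
    # e.g. -370 deg -> 350 deg and 370.5 deg -> 10.5 deg.
    return angle % (2 * pi)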
a1229bd5cc1446c11950232ee551acfc99092fcf
|
kaiser_shift.py
|
kaiser_shift.py
|
#!/usr/bin/env python2.7
# ##### BEGIN AGPL LICENSE BLOCK #####
# This file is part of SimpleMMO.
#
# Copyright (C) 2012 Charles Nelson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END AGPL LICENSE BLOCK #####
VOWELS = "aeiouy"
CONSTS = "bcdfghjklmnpqrstvwxz"
def shift(message, shift):
'''Shift a string by a number to produce a string resembling language.
It pays attention to whether a letter is a vowel or a consonant, and
shifts them separately.
This means a vowel will never become a consonant, and neither vice versa.
This also means that it will stay looking like a language because there
will always be about enough vowels to be pronouncable/readable.
Example:
"This is a thing!"
When shifted by 1 becomes:
"Vjot ot e vjoph!"
And when shifted by 2:
"Wkuv uv i wkuqj!"
'''
# TODO: Randomize the VOWELS and CONSTS strings.
new_message = ""
for char in message:
lowerchar = char.lower()
for charlist in (VOWELS, CONSTS):
if lowerchar in charlist:
index = list(charlist).index(lowerchar)
newindex = (index+shift)
newchar = charlist[newindex % len(charlist)]
if char.istitle():
newchar = newchar.title()
new_message += newchar
break
else:
new_message += char
return new_message
if __name__ == "__main__":
for i in xrange(26):
print i, shift("This is a thing!", i)
|
Add a dumb language "translator" function.
|
Add a dumb language "translator" function.
|
Python
|
agpl-3.0
|
cnelsonsic/SimpleMMO,cnelsonsic/SimpleMMO,cnelsonsic/SimpleMMO
|
Add a dumb language "translator" function.
|
#!/usr/bin/env python2.7
# ##### BEGIN AGPL LICENSE BLOCK #####
# This file is part of SimpleMMO.
#
# Copyright (C) 2012 Charles Nelson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END AGPL LICENSE BLOCK #####
VOWELS = "aeiouy"
CONSTS = "bcdfghjklmnpqrstvwxz"
def shift(message, shift):
'''Shift a string by a number to produce a string resembling language.
It pays attention to whether a letter is a vowel or a consonant, and
shifts them separately.
This means a vowel will never become a consonant, and neither vice versa.
This also means that it will stay looking like a language because there
will always be about enough vowels to be pronouncable/readable.
Example:
"This is a thing!"
When shifted by 1 becomes:
"Vjot ot e vjoph!"
And when shifted by 2:
"Wkuv uv i wkuqj!"
'''
# TODO: Randomize the VOWELS and CONSTS strings.
new_message = ""
for char in message:
lowerchar = char.lower()
for charlist in (VOWELS, CONSTS):
if lowerchar in charlist:
index = list(charlist).index(lowerchar)
newindex = (index+shift)
newchar = charlist[newindex % len(charlist)]
if char.istitle():
newchar = newchar.title()
new_message += newchar
break
else:
new_message += char
return new_message
if __name__ == "__main__":
for i in xrange(26):
print i, shift("This is a thing!", i)
|
<commit_before><commit_msg>Add a dumb language "translator" function.<commit_after>
|
#!/usr/bin/env python2.7
# ##### BEGIN AGPL LICENSE BLOCK #####
# This file is part of SimpleMMO.
#
# Copyright (C) 2012 Charles Nelson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END AGPL LICENSE BLOCK #####
VOWELS = "aeiouy"
CONSTS = "bcdfghjklmnpqrstvwxz"
def shift(message, shift):
'''Shift a string by a number to produce a string resembling language.
It pays attention to whether a letter is a vowel or a consonant, and
shifts them separately.
This means a vowel will never become a consonant, and neither vice versa.
This also means that it will stay looking like a language because there
will always be about enough vowels to be pronouncable/readable.
Example:
"This is a thing!"
When shifted by 1 becomes:
"Vjot ot e vjoph!"
And when shifted by 2:
"Wkuv uv i wkuqj!"
'''
# TODO: Randomize the VOWELS and CONSTS strings.
new_message = ""
for char in message:
lowerchar = char.lower()
for charlist in (VOWELS, CONSTS):
if lowerchar in charlist:
index = list(charlist).index(lowerchar)
newindex = (index+shift)
newchar = charlist[newindex % len(charlist)]
if char.istitle():
newchar = newchar.title()
new_message += newchar
break
else:
new_message += char
return new_message
if __name__ == "__main__":
for i in xrange(26):
print i, shift("This is a thing!", i)
|
Add a dumb language "translator" function.#!/usr/bin/env python2.7
# ##### BEGIN AGPL LICENSE BLOCK #####
# This file is part of SimpleMMO.
#
# Copyright (C) 2012 Charles Nelson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END AGPL LICENSE BLOCK #####
VOWELS = "aeiouy"
CONSTS = "bcdfghjklmnpqrstvwxz"
def shift(message, shift):
'''Shift a string by a number to produce a string resembling language.
It pays attention to whether a letter is a vowel or a consonant, and
shifts them separately.
This means a vowel will never become a consonant, and neither vice versa.
This also means that it will stay looking like a language because there
will always be about enough vowels to be pronouncable/readable.
Example:
"This is a thing!"
When shifted by 1 becomes:
"Vjot ot e vjoph!"
And when shifted by 2:
"Wkuv uv i wkuqj!"
'''
# TODO: Randomize the VOWELS and CONSTS strings.
new_message = ""
for char in message:
lowerchar = char.lower()
for charlist in (VOWELS, CONSTS):
if lowerchar in charlist:
index = list(charlist).index(lowerchar)
newindex = (index+shift)
newchar = charlist[newindex % len(charlist)]
if char.istitle():
newchar = newchar.title()
new_message += newchar
break
else:
new_message += char
return new_message
if __name__ == "__main__":
for i in xrange(26):
print i, shift("This is a thing!", i)
|
<commit_before><commit_msg>Add a dumb language "translator" function.<commit_after>#!/usr/bin/env python2.7
# ##### BEGIN AGPL LICENSE BLOCK #####
# This file is part of SimpleMMO.
#
# Copyright (C) 2012 Charles Nelson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ##### END AGPL LICENSE BLOCK #####
VOWELS = "aeiouy"
CONSTS = "bcdfghjklmnpqrstvwxz"
def shift(message, shift):
'''Shift a string by a number to produce a string resembling language.
It pays attention to whether a letter is a vowel or a consonant, and
shifts them separately.
This means a vowel will never become a consonant, and neither vice versa.
This also means that it will stay looking like a language because there
will always be about enough vowels to be pronouncable/readable.
Example:
"This is a thing!"
When shifted by 1 becomes:
"Vjot ot e vjoph!"
And when shifted by 2:
"Wkuv uv i wkuqj!"
'''
# TODO: Randomize the VOWELS and CONSTS strings.
new_message = ""
for char in message:
lowerchar = char.lower()
for charlist in (VOWELS, CONSTS):
if lowerchar in charlist:
index = list(charlist).index(lowerchar)
newindex = (index+shift)
newchar = charlist[newindex % len(charlist)]
if char.istitle():
newchar = newchar.title()
new_message += newchar
break
else:
new_message += char
return new_message
if __name__ == "__main__":
for i in xrange(26):
print i, shift("This is a thing!", i)
|
|
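Because each alphabet is shifted modulo its own length, running shift again with the negated offset restores the original text. A quick usage sketch, assuming the file above is importable as the module kaiser_shift:

from kaiser_shift import shift

encoded = shift("This is a thing!", 1)   # -> "Vjot ot e vjoph!"
decoded = shift(encoded, -1)             # negated shift inverts the encoding
assert decoded == "This is a thing!"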
bd56f87db537a0381252b68ed06dc4f13eca3018
|
sconsole/cmdbar.py
|
sconsole/cmdbar.py
|
'''
Define the command bar
'''
# Import third party libs
import urwid
class CommandBar(object):
'''
The object to manage the command bar
'''
def __init__(self, opts):
self.opts = opts
self.tgt_txt = urwid.Text('Target')
self.tgt_edit = urwid.Edit()
self.fun_txt = urwid.Text('Function')
self.fun_edit = urwid.Edit()
self.arg_txt = urwid.Text('Arguments')
self.arg_edit = urwid.Edit()
self.go_button = urwid.Button('GO!')
self.grid = urwid.GridFlow(
[self.tgt_txt,
self.tgt_edit,
self.fun_txt,
self.fun_edit,
self.arg_txt,
self.arg_edit,
self.go_button],
cell_width=10,
h_sep=1,
v_sep=1,
align='left')
|
Add initial command bar module
|
Add initial command bar module
|
Python
|
apache-2.0
|
saltstack/salt-console
|
Add initial command bar module
|
'''
Define the command bar
'''
# Import third party libs
import urwid
class CommandBar(object):
'''
The object to manage the command bar
'''
def __init__(self, opts):
self.opts = opts
self.tgt_txt = urwid.Text('Target')
self.tgt_edit = urwid.Edit()
self.fun_txt = urwid.Text('Function')
self.fun_edit = urwid.Edit()
self.arg_txt = urwid.Text('Arguments')
self.arg_edit = urwid.Edit()
self.go_button = urwid.Button('GO!')
self.grid = urwid.GridFlow(
[self.tgt_txt,
self.tgt_edit,
self.fun_txt,
self.fun_edit,
self.arg_txt,
self.arg_edit,
self.go_button],
cell_width=10,
h_sep=1,
v_sep=1,
align='left')
|
<commit_before><commit_msg>Add initial command bar module<commit_after>
|
'''
Define the command bar
'''
# Import third party libs
import urwid
class CommandBar(object):
'''
The object to manage the command bar
'''
def __init__(self, opts):
self.opts = opts
self.tgt_txt = urwid.Text('Target')
self.tgt_edit = urwid.Edit()
self.fun_txt = urwid.Text('Function')
self.fun_edit = urwid.Edit()
self.arg_txt = urwid.Text('Arguments')
self.arg_edit = urwid.Edit()
self.go_button = urwid.Button('GO!')
self.grid = urwid.GridFlow(
[self.tgt_txt,
self.tgt_edit,
self.fun_txt,
self.fun_edit,
self.arg_txt,
self.arg_edit,
self.go_button],
cell_width=10,
h_sep=1,
v_sep=1,
align='left')
|
Add initial command bar module'''
Define the command bar
'''
# Import third party libs
import urwid
class CommandBar(object):
'''
The object to manage the command bar
'''
def __init__(self, opts):
self.opts = opts
self.tgt_txt = urwid.Text('Target')
self.tgt_edit = urwid.Edit()
self.fun_txt = urwid.Text('Function')
self.fun_edit = urwid.Edit()
self.arg_txt = urwid.Text('Arguments')
self.arg_edit = urwid.Edit()
self.go_button = urwid.Button('GO!')
self.grid = urwid.GridFlow(
[self.tgt_txt,
self.tgt_edit,
self.fun_txt,
self.fun_edit,
self.arg_txt,
self.arg_edit,
self.go_button],
cell_width=10,
h_sep=1,
v_sep=1,
align='left')
|
<commit_before><commit_msg>Add initial command bar module<commit_after>'''
Define the command bar
'''
# Import third party libs
import urwid
class CommandBar(object):
'''
The object to manage the command bar
'''
def __init__(self, opts):
self.opts = opts
self.tgt_txt = urwid.Text('Target')
self.tgt_edit = urwid.Edit()
self.fun_txt = urwid.Text('Function')
self.fun_edit = urwid.Edit()
self.arg_txt = urwid.Text('Arguments')
self.arg_edit = urwid.Edit()
self.go_button = urwid.Button('GO!')
self.grid = urwid.GridFlow(
[self.tgt_txt,
self.tgt_edit,
self.fun_txt,
self.fun_edit,
self.arg_txt,
self.arg_edit,
self.go_button],
cell_width=10,
h_sep=1,
v_sep=1,
align='left')
|
|
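The GridFlow above only lays the widgets out; nothing reacts to the GO! button yet. One plausible continuation uses urwid's standard signal API (the on_go callback here is purely illustrative, not part of the original module):

import urwid

def on_go(button):
    # Read the tgt/fun/arg edit boxes and dispatch the command here.
    pass

go_button = urwid.Button('GO!')
urwid.connect_signal(go_button, 'click', on_go)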
89839288d015e5d90c45279aeca3b6e67d2af00b
|
examples/test_gen_each_case.py
|
examples/test_gen_each_case.py
|
# -*- coding: utf-8 -*-
"""
autodoc.tests.test_unittest
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Autodoc for UnitTest.
:copyright: (c) 2014 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from unittest import TestCase
from webtest import TestApp
from autodoc import autodoc
from tests.app import create_app
class TestUnittest(TestCase):
def setUp(self):
app = create_app
self.client = TestApp(app)
@classmethod
def tearDownClass(cls):
pass
@autodoc.generate('var/test_get.rst')
@autodoc.describe('GET /')
def test_get(self):
""" GET / """
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_post.rst')
@autodoc.describe('POST /')
def test_post(self):
""" POST / """
res = self.client.post_json('/', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_foo_bar.rst')
@autodoc.describe('POST /foo/bar')
def test_foo_bar(self):
""" POST /foo/bar """
res = self.client.post_json('/foo/bar', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
|
Add example of generate document by each test case.
|
Add example of generate document by each test case.
|
Python
|
bsd-3-clause
|
heavenshell/py-autodoc
|
Add example of generate document by each test case.
|
# -*- coding: utf-8 -*-
"""
autodoc.tests.test_unittest
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Autodoc for UnitTest.
:copyright: (c) 2014 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from unittest import TestCase
from webtest import TestApp
from autodoc import autodoc
from tests.app import create_app
class TestUnittest(TestCase):
def setUp(self):
app = create_app
self.client = TestApp(app)
@classmethod
def tearDownClass(cls):
pass
@autodoc.generate('var/test_get.rst')
@autodoc.describe('GET /')
def test_get(self):
""" GET / """
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_post.rst')
@autodoc.describe('POST /')
def test_post(self):
""" POST / """
res = self.client.post_json('/', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_foo_bar.rst')
@autodoc.describe('POST /foo/bar')
def test_foo_bar(self):
""" POST /foo/bar """
res = self.client.post_json('/foo/bar', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
|
<commit_before><commit_msg>Add example of generate document by each test case.<commit_after>
|
# -*- coding: utf-8 -*-
"""
autodoc.tests.test_unittest
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Autodoc for UnitTest.
:copyright: (c) 2014 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from unittest import TestCase
from webtest import TestApp
from autodoc import autodoc
from tests.app import create_app
class TestUnittest(TestCase):
def setUp(self):
app = create_app
self.client = TestApp(app)
@classmethod
def tearDownClass(cls):
pass
@autodoc.generate('var/test_get.rst')
@autodoc.describe('GET /')
def test_get(self):
""" GET / """
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_post.rst')
@autodoc.describe('POST /')
def test_post(self):
""" POST / """
res = self.client.post_json('/', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_foo_bar.rst')
@autodoc.describe('POST /foo/bar')
def test_foo_bar(self):
""" POST /foo/bar """
res = self.client.post_json('/foo/bar', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
|
Add example of generate document by each test case.# -*- coding: utf-8 -*-
"""
autodoc.tests.test_unittest
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Autodoc for UnitTest.
:copyright: (c) 2014 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from unittest import TestCase
from webtest import TestApp
from autodoc import autodoc
from tests.app import create_app
class TestUnittest(TestCase):
def setUp(self):
app = create_app
self.client = TestApp(app)
@classmethod
def tearDownClass(cls):
pass
@autodoc.generate('var/test_get.rst')
@autodoc.describe('GET /')
def test_get(self):
""" GET / """
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_post.rst')
@autodoc.describe('POST /')
def test_post(self):
""" POST / """
res = self.client.post_json('/', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_foo_bar.rst')
@autodoc.describe('POST /foo/bar')
def test_foo_bar(self):
""" POST /foo/bar """
res = self.client.post_json('/foo/bar', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
|
<commit_before><commit_msg>Add example of generate document by each test case.<commit_after># -*- coding: utf-8 -*-
"""
autodoc.tests.test_unittest
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Autodoc for UnitTest.
:copyright: (c) 2014 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
from unittest import TestCase
from webtest import TestApp
from autodoc import autodoc
from tests.app import create_app
class TestUnittest(TestCase):
def setUp(self):
app = create_app
self.client = TestApp(app)
@classmethod
def tearDownClass(cls):
pass
@autodoc.generate('var/test_get.rst')
@autodoc.describe('GET /')
def test_get(self):
""" GET / """
res = self.client.get('/')
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_post.rst')
@autodoc.describe('POST /')
def test_post(self):
""" POST / """
res = self.client.post_json('/', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
@autodoc.generate('var/test_foo_bar.rst')
@autodoc.describe('POST /foo/bar')
def test_foo_bar(self):
""" POST /foo/bar """
res = self.client.post_json('/foo/bar', params={'id': 1, 'message': 'foo'})
self.assertEqual(res.status_code, 200)
return res
|
|
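One detail worth noting in the record above: setUp binds the factory itself (app = create_app) instead of its return value, so TestApp ends up wrapping a function object. Assuming create_app is a zero-argument application factory, a corrected setUp would read:

def setUp(self):
    app = create_app()          # call the factory rather than binding it
    self.client = TestApp(app)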
81a90abae2545b29d04b400923725a18816bacba
|
test/test_github_trending.py
|
test/test_github_trending.py
|
import unittest
from githubtrending import trending as githubtrending
from . import data
class TestGithubTrending(unittest.TestCase):
def test_read_page(self):
for each in data.READ_PAGE_DATA:
url = each.get('url')
expected_status_code = each.get('status_code')
response, status_code = githubtrending.read_page(url)
self.assertEqual(status_code, expected_status_code)
if __name__ == '__main__':
unittest.main()
|
Add test case for read_page
|
Test: Add test case for read_page
|
Python
|
mit
|
staranjeet/github-trending-cli
|
Test: Add test case for read_page
|
import unittest
from githubtrending import trending as githubtrending
from . import data
class TestGithubTrending(unittest.TestCase):
def test_read_page(self):
for each in data.READ_PAGE_DATA:
url = each.get('url')
expected_status_code = each.get('status_code')
response, status_code = githubtrending.read_page(url)
self.assertEqual(status_code, expected_status_code)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test: Add test case for read_page<commit_after>
|
import unittest
from githubtrending import trending as githubtrending
from . import data
class TestGithubTrending(unittest.TestCase):
def test_read_page(self):
for each in data.READ_PAGE_DATA:
url = each.get('url')
expected_status_code = each.get('status_code')
response, status_code = githubtrending.read_page(url)
self.assertEqual(status_code, expected_status_code)
if __name__ == '__main__':
unittest.main()
|
Test: Add test case for read_pageimport unittest
from githubtrending import trending as githubtrending
from . import data
class TestGithubTrending(unittest.TestCase):
def test_read_page(self):
for each in data.READ_PAGE_DATA:
url = each.get('url')
expected_status_code = each.get('status_code')
response, status_code = githubtrending.read_page(url)
self.assertEqual(status_code, expected_status_code)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Test: Add test case for read_page<commit_after>import unittest
from githubtrending import trending as githubtrending
from . import data
class TestGithubTrending(unittest.TestCase):
def test_read_page(self):
for each in data.READ_PAGE_DATA:
url = each.get('url')
expected_status_code = each.get('status_code')
response, status_code = githubtrending.read_page(url)
self.assertEqual(status_code, expected_status_code)
if __name__ == '__main__':
unittest.main()
|
|
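The test treats githubtrending.read_page(url) as a function returning a (response, status_code) pair. A minimal stand-in honouring that contract, sketched here with the requests library (the real module may be implemented differently):

import requests

def read_page(url):
    # Fetch the page and mirror the (body, status_code) pair the test expects.
    resp = requests.get(url)
    return resp.text, resp.status_code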
76090f2972386140c71abdf5e008e383cd57a79c
|
tests/test_infrastructure.py
|
tests/test_infrastructure.py
|
from nose.tools import eq_, assert_raises
from utils import with_app, pretty_print_xml
from sphinxcontrib.traceables.infrastructure import (Traceable,
TraceablesStorage)
# =============================================================================
# Tests
@with_app(buildername="xml", srcdir="basics")
def test_infrastructure(app, status, warning):
app.build()
storage = TraceablesStorage(app.env)
# Verify exception on invalid relationship name.
assert_raises(ValueError, storage.get_relationship_direction, "invalid")
assert_raises(ValueError, storage.get_relationship_opposite, "invalid")
# Verify Traceable.__str__() doesn't fail.
for traceable in storage.traceables_set:
ignored_output = str(traceable)
|
Add tests of certain basic infrastructure behavior
|
Add tests of certain basic infrastructure behavior
|
Python
|
apache-2.0
|
t4ngo/sphinxcontrib-traceables
|
Add tests of certain basic infrastructure behavior
|
from nose.tools import eq_, assert_raises
from utils import with_app, pretty_print_xml
from sphinxcontrib.traceables.infrastructure import (Traceable,
TraceablesStorage)
# =============================================================================
# Tests
@with_app(buildername="xml", srcdir="basics")
def test_infrastructure(app, status, warning):
app.build()
storage = TraceablesStorage(app.env)
# Verify exception on invalid relationship name.
assert_raises(ValueError, storage.get_relationship_direction, "invalid")
assert_raises(ValueError, storage.get_relationship_opposite, "invalid")
# Verify Traceable.__str__() doesn't fail.
for traceable in storage.traceables_set:
ignored_output = str(traceable)
|
<commit_before><commit_msg>Add tests of certain basic infrastructure behavior<commit_after>
|
from nose.tools import eq_, assert_raises
from utils import with_app, pretty_print_xml
from sphinxcontrib.traceables.infrastructure import (Traceable,
TraceablesStorage)
# =============================================================================
# Tests
@with_app(buildername="xml", srcdir="basics")
def test_infrastructure(app, status, warning):
app.build()
storage = TraceablesStorage(app.env)
# Verify exception on invalid relationship name.
assert_raises(ValueError, storage.get_relationship_direction, "invalid")
assert_raises(ValueError, storage.get_relationship_opposite, "invalid")
# Verify Traceable.__str__() doesn't fail.
for traceable in storage.traceables_set:
ignored_output = str(traceable)
|
Add tests of certain basic infrastructure behavior
from nose.tools import eq_, assert_raises
from utils import with_app, pretty_print_xml
from sphinxcontrib.traceables.infrastructure import (Traceable,
TraceablesStorage)
# =============================================================================
# Tests
@with_app(buildername="xml", srcdir="basics")
def test_infrastructure(app, status, warning):
app.build()
storage = TraceablesStorage(app.env)
# Verify exception on invalid relationship name.
assert_raises(ValueError, storage.get_relationship_direction, "invalid")
assert_raises(ValueError, storage.get_relationship_opposite, "invalid")
# Verify Traceable.__str__() doesn't fail.
for traceable in storage.traceables_set:
ignored_output = str(traceable)
|
<commit_before><commit_msg>Add tests of certain basic infrastructure behavior<commit_after>
from nose.tools import eq_, assert_raises
from utils import with_app, pretty_print_xml
from sphinxcontrib.traceables.infrastructure import (Traceable,
TraceablesStorage)
# =============================================================================
# Tests
@with_app(buildername="xml", srcdir="basics")
def test_infrastructure(app, status, warning):
app.build()
storage = TraceablesStorage(app.env)
# Verify exception on invalid relationship name.
assert_raises(ValueError, storage.get_relationship_direction, "invalid")
assert_raises(ValueError, storage.get_relationship_opposite, "invalid")
# Verify Traceable.__str__() doesn't fail.
for traceable in storage.traceables_set:
ignored_output = str(traceable)
|
|
38a0e3c7681bdc6bd74d8b80a0aea68c264f418f
|
tools/heapcheck/PRESUBMIT.py
|
tools/heapcheck/PRESUBMIT.py
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
Add presubmit checks for suppressions.
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@57132 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
dednal/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,timopulkkinen/BubbleFish,mohamed--abdel-maksoud/chromium.src,keishi/chromium,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,littlstar/chromium.src,jaruba/chromium.src,zcbenz/cefode-chromium,hujiajie/pa-chromium,rogerwang/chromium,Chilledheart/chromium,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,ltilve/chromium,TheTypoMaster/chromium-crosswalk,chuan9/chromium-crosswalk,rogerwang/chromium,patrickm/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,patrickm/chromium.src,dushu1203/chromium.src,dednal/chromium.src,timopulkkinen/BubbleFish,pozdnyakov/chromium-crosswalk,junmin-zhu/chromium-rivertrail,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,keishi/chromium,bright-sparks/chromium-spacewalk,ChromiumWebApps/chromium,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hujiajie/pa-chromium,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,pozdnyakov/chromium-crosswalk,nacl-webkit/chrome_deps,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,rogerwang/chromium,anirudhSK/chromium,dushu1203/chromium.src,Jonekee/chromium.src,rogerwang/chromium,keishi/chromium,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,patrickm/chromium.src,anirudhSK/chromium,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,anirudhSK/chromium,Jonekee/chromium.src,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,M4sse/chromium.src,keishi/chromium,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,littlstar/chromium.src,mohamed--abdel-maksoud/chromium.src,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,mogoweb/chromium-crosswalk,bright-sparks/chromium-spacewalk,robclark/chromium,dushu1203/chromium.src,M4sse/chromium.src,robclark/chromium,pozdnyakov/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,zcbenz/cefode-chromium,patrickm/chromium.src,Fireblend/chromium-crosswalk,fujunwei/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,zcbenz/cefode-chromium,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Just-D/chromium-1,Chilledheart/chromium,dednal/chromium.src,hgl888/chromium-crosswalk,hgl888/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,zcbenz/cefode-chromium,axinging/chromium-crosswalk,anirudhSK/chromium,anirudhSK/chromium,timopulkkinen/BubbleFish,ondra-novak/chromium.src,crosswalk-project/chromium-crosswalk-efl,fujunwei/chromium-crosswalk,rogerwang/chromium,littlstar/chromium.src,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,junmin-zhu/chromium-rivertrail,robclark/chromium,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,dushu1203/chromium.src,ltilve/chromium,zcbenz/cefode-chromium,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,markYoungH/chromium.src,hujiajie/pa-chromium,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,Pluto-tv/chromium-crosswalk,mogoweb/chromium-crosswalk,timopulkkinen/BubbleFish,hujiajie/pa-chromium,timopulkkinen/BubbleFish,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk,markYoungH/chromium.src,ltilve/chromium,patrickm/chromium.src,ChromiumWebApps/chromium,PeterWangIntel/chromium-crosswalk,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,jaruba/chromium.src,mogoweb/chromium-crosswalk,Fireblend/chromium-crosswalk,ChromiumWebApps/chromium,dednal/chromium.src,Just-D/chromium-1,junmin-zhu/chromium-rivertrail,keishi/chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,nacl-webkit/chrome_deps,Jonekee/chromium.src,timopulkkinen/BubbleFish,junmin-zhu/chromium-rivertrail,jaruba/chromium.src,mogoweb/chromium-crosswalk,timopulkkinen/BubbleFish,Pluto-tv/chromium-crosswalk,markYoungH/chromium.src,junmin-zhu/chromium-rivertrail,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,PeterWangIntel/chromium-crosswalk,fujunwei/chromium-crosswalk,keishi/chromium,timopulkkinen/BubbleFish,axinging/chromium-crosswalk,junmin-zhu/chromium-rivertrail,axinging/chromium-crosswalk,chuan9/chromium-crosswalk,robclark/chromium,dushu1203/chromium.src,M4sse/chromium.src,nacl-webkit/chrome_deps,crosswalk-project/chromium-crosswalk-efl,chuan9/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,dednal/chromium.src,hgl888/chromium-crosswalk,rogerwang/chromium,ChromiumWebApps/chromium,hgl888/chromium-crosswalk,hujiajie/pa-chromium,M4sse/chromium.src,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,anirudhSK/chromium,ChromiumWebApps/chromium,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,hujiajie/pa-chromium,ondra-novak/chromium.src,markYoungH/chromium.src,zcbenz/cefode-chromium,anirudhSK/chromium,Just-D/chromium-1,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk-efl,junmin-zhu/chromium-rivertrail,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,keishi/chromium,ondra-novak/chromium.src,Just-D/chromium-1,rogerwang/chromium,Fireblend/chromium-crosswalk,Chilledheart/chromium,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,rogerwang/chromium,robclark/chromium,ChromiumWebApps/chromium,nacl-webkit/chrome_deps,littlstar/chromium.src,krieger-od/nwjs_chromium.src,Jonekee/chromium.src,nacl-webkit/chrome_deps,Just-D/chromium-1,pozdnyakov/chromium-crosswalk,keishi/chromium,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,robclark/chromium,M4sse/chromium.src,zcbenz/cefode-chromium,markYoungH/chromium.src,junmin-zhu/chromium-rivertrail,Just-D/chromium-1,PeterWangIntel/chromium-crosswalk,junmin-zhu/chromium-rivertrail,PeterWangIntel/chromium-crosswalk,patrickm/chromium.src,ondra-novak/chromium.src,ChromiumWebApps/chromium,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,Chilledheart/chromium,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,hgl888/chromium-crosswalk,junmin-zhu/chromium-rivertrail,anirudhSK/chromium,TheTypoMaster/chromium-crosswalk,ltilve/chromium,patrickm/chromium.src,ondra-novak/chromium.src,jaruba/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,ondra-novak/chromium.src,robclark/chromium,ltilve/chromium,hgl888/chromium-crosswalk,littlstar/chromium.src,nacl-webkit/chrome_deps,M4sse/chromium.src,fujunwei/chromium-crosswalk,pozdnyakov/chromium-crosswalk,Chilledheart/chromium,littlstar/chromium.src,dushu1203/chromium.src,robclark/chromium,hgl888/chromium-crosswalk-efl,dednal/chromium.src,zcbenz/cefode-chromium,dednal/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,anirudhSK/chromium,ltilve/chromium,TheTypoMaster/chromium-crosswalk,rogerwang/chromium,crosswalk-project/chromium-crosswalk-efl,timopulkkinen/BubbleFish,dednal/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,hujiajie/pa-chromium,hujiajie/pa-chromium,Just-D/chromium-1,junmin-zhu/chromium-rivertrail,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,zcbenz/cefode-chromium,keishi/chromium,anirudhSK/chromium,M4sse/chromium.src,anirudhSK/chromium,dednal/chromium.src,jaruba/chromium.src,jaruba/chromium.src,krieger-od/nwjs_chromium.src,keishi/chromium,hujiajie/pa-chromium,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,bright-sparks/chromium-spacewalk,ltilve/chromium,Jonekee/chromium.src,littlstar/chromium.src,pozdnyakov/chromium-crosswalk,ltilve/chromium,ChromiumWebApps/chromium,hgl888/chromium-crosswalk-efl,ChromiumWebApps/chromium,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,M4sse/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,keishi/chromium,zcbenz/cefode-chromium,krieger-od/nwjs_chromium.src,Fireblend/chromium-crosswalk,mogoweb/chromium-crosswalk,fujunwei/chromium-crosswalk,timopulkkinen/BubbleFish,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,ChromiumWebApps/chromium,ChromiumWebApps/chromium,rogerwang/chromium,ondra-novak/chromium.src,zcbenz/cefode-chromium,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,timopulkkinen/BubbleFish,patrickm/chromium.src,robclark/chromium,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,robclark/chromium,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,nacl-webkit/chrome_deps,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,bright-sparks/chromium-spacewalk,chuan9/chromium-crosswalk,markYoungH/chromium.src,Jonekee/chromium.src,mogoweb/chromium-crosswalk
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@57132 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
<commit_before><commit_msg>Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@57132 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@57132 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
<commit_before><commit_msg>Heapchecker: Add presubmit checks for suppressions.
BUG=none
TEST=none
Review URL: http://codereview.chromium.org/3197014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@57132 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details on the presubmit API built into gcl.
"""
def CheckChange(input_api, output_api):
"""Checks the memcheck suppressions files for bad data."""
errors = []
skip_next_line = False
func_re = input_api.re.compile('[a-z_.]+\(.+\)$')
for f, line_num, line in input_api.RightHandSideLines(lambda x:
x.LocalPath().endswith('.txt')):
line = line.lstrip()
if line.startswith('#') or not line:
continue
if skip_next_line:
skip_next_line = False
continue
if line == '{':
skip_next_line = True
continue
if (line.startswith('fun:') or line.startswith('obj:') or
line == 'Heapcheck:Leak' or line == '}' or
line == '...'):
continue
if func_re.match(line):
continue
errors.append('"%s" is probably wrong: %s line %s' % (line, f.LocalPath(),
line_num))
if errors:
return [output_api.PresubmitError('\n'.join(errors))]
return []
def CheckChangeOnUpload(input_api, output_api):
return CheckChange(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return CheckChange(input_api, output_api)
|
|
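To see what CheckChange accepts, here is a suppression stanza that exercises every allowed branch: the '{' line makes the checker skip the name on the following line, and the remaining lines all match the whitelisted prefixes. The stanza is illustrative, not taken from Chromium's real suppression files:

{
  leak_in_third_party_foo
  Heapcheck:Leak
  fun:malloc
  obj:/usr/lib/libfoo.so
  ...
  fun:main
}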
d7c3bf6f7176f595198c078003a1fc8e8f50ea0f
|
molo/core/migrations/0071_remove_old_image_hashes.py
|
molo/core/migrations/0071_remove_old_image_hashes.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def delete_imageinfo(apps, schema_editor):
ImageInfo = apps.get_model('core.ImageInfo')
ImageInfo.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0070_add_service_directory_api'),
]
operations = [
migrations.RunPython(delete_imageinfo),
]
|
Remove old type of image hashes
|
Remove old type of image hashes
|
Python
|
bsd-2-clause
|
praekelt/molo,praekelt/molo,praekelt/molo,praekelt/molo
|
Remove old type of image hashes
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def delete_imageinfo(apps, schema_editor):
ImageInfo = apps.get_model('core.ImageInfo')
ImageInfo.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0070_add_service_directory_api'),
]
operations = [
migrations.RunPython(delete_imageinfo),
]
|
<commit_before><commit_msg>Remove old type of image hashes<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def delete_imageinfo(apps, schema_editor):
ImageInfo = apps.get_model('core.ImageInfo')
ImageInfo.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0070_add_service_directory_api'),
]
operations = [
migrations.RunPython(delete_imageinfo),
]
|
Remove old type of image hashes# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def delete_imageinfo(apps, schema_editor):
ImageInfo = apps.get_model('core.ImageInfo')
ImageInfo.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0070_add_service_directory_api'),
]
operations = [
migrations.RunPython(delete_imageinfo),
]
|
<commit_before><commit_msg>Remove old type of image hashes<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def delete_imageinfo(apps, schema_editor):
ImageInfo = apps.get_model('core.ImageInfo')
ImageInfo.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('core', '0070_add_service_directory_api'),
]
operations = [
migrations.RunPython(delete_imageinfo),
]
|
|
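As written the migration is irreversible, since RunPython receives only a forward function. If a no-op rollback is acceptable, Django ships a stock reverse callable (available from Django 1.8 onwards; whether that matches this codebase's pinned Django version is an assumption):

operations = [
    migrations.RunPython(delete_imageinfo, migrations.RunPython.noop),
]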
acc1e7d483b206665e3d749947359382d2ba53a5
|
tests/test_fixed.py
|
tests/test_fixed.py
|
from copperhead import *
import unittest
@cu
def dismantle((x, (y, z))):
return x
@cu
def tuple_inline_test(x):
return dismantle((x,(x,x)))
class TupleInlineTest(unittest.TestCase):
def testTupleInline(self):
self.assertEqual(tuple_inline_test(2), 2)
if __name__ == '__main__':
unittest.main()
|
Add tests for bugs which have been fixed.
|
Add tests for bugs which have been fixed.
|
Python
|
apache-2.0
|
shyamalschandra/copperhead,shyamalschandra/copperhead,beni55/copperhead,copperhead/copperhead,beni55/copperhead,copperhead/copperhead
|
Add tests for bugs which have been fixed.
|
from copperhead import *
import unittest
@cu
def dismantle((x, (y, z))):
return x
@cu
def tuple_inline_test(x):
return dismantle((x,(x,x)))
class TupleInlineTest(unittest.TestCase):
def testTupleInline(self):
self.assertEqual(tuple_inline_test(2), 2)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for bugs which have been fixed.<commit_after>
|
from copperhead import *
import unittest
@cu
def dismantle((x, (y, z))):
return x
@cu
def tuple_inline_test(x):
return dismantle((x,(x,x)))
class TupleInlineTest(unittest.TestCase):
def testTupleInline(self):
self.assertEqual(tuple_inline_test(2), 2)
if __name__ == '__main__':
unittest.main()
|
Add tests for bugs which have been fixed.from copperhead import *
import unittest
@cu
def dismantle((x, (y, z))):
return x
@cu
def tuple_inline_test(x):
return dismantle((x,(x,x)))
class TupleInlineTest(unittest.TestCase):
def testTupleInline(self):
self.assertEqual(tuple_inline_test(2), 2)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for bugs which have been fixed.<commit_after>from copperhead import *
import unittest
@cu
def dismantle((x, (y, z))):
return x
@cu
def tuple_inline_test(x):
return dismantle((x,(x,x)))
class TupleInlineTest(unittest.TestCase):
def testTupleInline(self):
self.assertEqual(tuple_inline_test(2), 2)
if __name__ == '__main__':
unittest.main()
|
|
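The dismantle((x, (y, z))) signature relies on Python 2 tuple-parameter unpacking, which PEP 3113 removed in Python 3. An equivalent sketch for modern Python unpacks manually inside the body:

def dismantle(arg):
    # Manual unpacking replaces the removed 2.x parameter syntax.
    x, (y, z) = arg
    return x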
32cd9776facaa8271b16b28c5dec4c31241f3064
|
annot/MODtree_bp_tbl-to-prot_targets.py
|
annot/MODtree_bp_tbl-to-prot_targets.py
|
#!/usr/bin/env python3
import sys
import gzip
import re
filename_tbl = sys.argv[1]
filename_base = re.sub(r'.bp\+\_tbl.gz', '', filename_tbl)
print(filename_tbl, filename_base)
sys.exit(1)
blastp_Evalue_cutoff = 0.0001
min_best_targets = 3
def open_file(tmp_filename):
f = open(tmp_filename, 'r')
if tmp_filename.endswith('.gz'):
f = gzip.open(tmp_filename, 'rt')
return f
blastp_list = dict()
sys.stderr.write('Read %s\n' % filename_tbl)
f_bp = open_file(filename_tbl)
for line in f_bp:
if line.startswith('#'):
continue
tokens = line.strip().split("\t")
q_id = tokens[0]
t_id = tokens[1]
evalue = float(tokens[-2])
bits = float(tokens[-1])
if evalue > blastp_Evalue_cutoff:
continue
if q_id not in blastp_list:
blastp_list[q_id] = dict()
if t_id not in blastp_list[q_id]:
blastp_list[q_id][t_id] = bits
elif bits > blastp_list[q_id][t_id]:
blastp_list[q_id][t_id] = bits
f_bp.close()
sys.stderr.write('Total Sequences: %d\n' % len(blastp_list))
f_prot_targets = open('%s.prot_targets' % filename_base, 'w')
for tmp_h in blastp_list.keys():
tmp_best_bits = max(blastp_list[tmp_h].values())
tmp_target_list = \
sorted(blastp_list[tmp_h].keys(), key=blastp_list[tmp_h].get, reverse=True)
tmp_target_str = ';'.join(["%s=%.1f" % (x, blastp_list[tmp_h][x]) for x in tmp_target_list])
f_prot_targets.write('%s\t%.1f\t%s\n' % (tmp_h, tmp_best_bits, tmp_target_str))
f_prot_targets.close()
|
Make a prot_target table from bp+_tbl
|
Make a prot_target table from bp+_tbl
|
Python
|
apache-2.0
|
taejoonlab/NuevoTx,marcottelab/NuevoTx,marcottelab/NuevoTx,taejoonlab/NuevoTx
|
Make a prot_target table from bp+_tbl
|
#!/usr/bin/env python3
import sys
import gzip
import re
filename_tbl = sys.argv[1]
filename_base = re.sub(r'.bp\+\_tbl.gz', '', filename_tbl)
print(filename_tbl, filename_base)
sys.exit(1)
blastp_Evalue_cutoff = 0.0001
min_best_targets = 3
def open_file(tmp_filename):
f = open(tmp_filename, 'r')
if tmp_filename.endswith('.gz'):
f = gzip.open(tmp_filename, 'rt')
return f
blastp_list = dict()
sys.stderr.write('Read %s\n' % filename_tbl)
f_bp = open_file(filename_tbl)
for line in f_bp:
if line.startswith('#'):
continue
tokens = line.strip().split("\t")
q_id = tokens[0]
t_id = tokens[1]
evalue = float(tokens[-2])
bits = float(tokens[-1])
if evalue > blastp_Evalue_cutoff:
continue
if q_id not in blastp_list:
blastp_list[q_id] = dict()
if t_id not in blastp_list[q_id]:
blastp_list[q_id][t_id] = bits
elif bits > blastp_list[q_id][t_id]:
blastp_list[q_id][t_id] = bits
f_bp.close()
sys.stderr.write('Total Sequences: %d\n' % len(blastp_list))
f_prot_targets = open('%s.prot_targets' % filename_base, 'w')
for tmp_h in blastp_list.keys():
tmp_best_bits = max(blastp_list[tmp_h].values())
tmp_target_list = \
sorted(blastp_list[tmp_h].keys(), key=blastp_list[tmp_h].get, reverse=True)
tmp_target_str = ';'.join(["%s=%.1f" % (x, blastp_list[tmp_h][x]) for x in tmp_target_list])
f_prot_targets.write('%s\t%.1f\t%s\n' % (tmp_h, tmp_best_bits, tmp_target_str))
f_prot_targets.close()
|
<commit_before><commit_msg>Make a prot_target table from bp+_tbl<commit_after>
|
#!/usr/bin/env python3
import sys
import gzip
import re
filename_tbl = sys.argv[1]
filename_base = re.sub(r'.bp\+\_tbl.gz', '', filename_tbl)
print(filename_tbl, filename_base)
sys.exit(1)
blastp_Evalue_cutoff = 0.0001
min_best_targets = 3
def open_file(tmp_filename):
f = open(tmp_filename, 'r')
if tmp_filename.endswith('.gz'):
f = gzip.open(tmp_filename, 'rt')
return f
blastp_list = dict()
sys.stderr.write('Read %s\n' % filename_tbl)
f_bp = open_file(filename_tbl)
for line in f_bp:
if line.startswith('#'):
continue
tokens = line.strip().split("\t")
q_id = tokens[0]
t_id = tokens[1]
evalue = float(tokens[-2])
bits = float(tokens[-1])
if evalue > blastp_Evalue_cutoff:
continue
if q_id not in blastp_list:
blastp_list[q_id] = dict()
if t_id not in blastp_list[q_id]:
blastp_list[q_id][t_id] = bits
elif bits > blastp_list[q_id][t_id]:
blastp_list[q_id][t_id] = bits
f_bp.close()
sys.stderr.write('Total Sequences: %d\n' % len(blastp_list))
f_prot_targets = open('%s.prot_targets' % filename_base, 'w')
for tmp_h in blastp_list.keys():
tmp_best_bits = max(blastp_list[tmp_h].values())
tmp_target_list = \
sorted(blastp_list[tmp_h].keys(), key=blastp_list[tmp_h].get, reverse=True)
tmp_target_str = ';'.join(["%s=%.1f" % (x, blastp_list[tmp_h][x]) for x in tmp_target_list])
f_prot_targets.write('%s\t%.1f\t%s\n' % (tmp_h, tmp_best_bits, tmp_target_str))
f_prot_targets.close()
|
Make a prot_target table from bp+_tbl#!/usr/bin/env python3
import sys
import gzip
import re
filename_tbl = sys.argv[1]
filename_base = re.sub(r'.bp\+\_tbl.gz', '', filename_tbl)
print(filename_tbl, filename_base)
sys.exit(1)
blastp_Evalue_cutoff = 0.0001
min_best_targets = 3
def open_file(tmp_filename):
f = open(tmp_filename, 'r')
if tmp_filename.endswith('.gz'):
f = gzip.open(tmp_filename, 'rt')
return f
blastp_list = dict()
sys.stderr.write('Read %s\n' % filename_tbl)
f_bp = open_file(filename_tbl)
for line in f_bp:
if line.startswith('#'):
continue
tokens = line.strip().split("\t")
q_id = tokens[0]
t_id = tokens[1]
evalue = float(tokens[-2])
bits = float(tokens[-1])
if evalue > blastp_Evalue_cutoff:
continue
if q_id not in blastp_list:
blastp_list[q_id] = dict()
if t_id not in blastp_list[q_id]:
blastp_list[q_id][t_id] = bits
elif bits > blastp_list[q_id][t_id]:
blastp_list[q_id][t_id] = bits
f_bp.close()
sys.stderr.write('Total Sequences: %d\n' % len(blastp_list))
f_prot_targets = open('%s.prot_targets' % filename_base, 'w')
for tmp_h in blastp_list.keys():
tmp_best_bits = max(blastp_list[tmp_h].values())
tmp_target_list = \
sorted(blastp_list[tmp_h].keys(), key=blastp_list[tmp_h].get, reverse=True)
tmp_target_str = ';'.join(["%s=%.1f" % (x, blastp_list[tmp_h][x]) for x in tmp_target_list])
f_prot_targets.write('%s\t%.1f\t%s\n' % (tmp_h, tmp_best_bits, tmp_target_str))
f_prot_targets.close()
|
<commit_before><commit_msg>Make a prot_target table from bp+_tbl<commit_after>#!/usr/bin/env python3
import sys
import gzip
import re
filename_tbl = sys.argv[1]
filename_base = re.sub(r'.bp\+\_tbl.gz', '', filename_tbl)
print(filename_tbl, filename_base)
sys.exit(1)
blastp_Evalue_cutoff = 0.0001
min_best_targets = 3
def open_file(tmp_filename):
f = open(tmp_filename, 'r')
if tmp_filename.endswith('.gz'):
f = gzip.open(tmp_filename, 'rt')
return f
blastp_list = dict()
sys.stderr.write('Read %s\n' % filename_tbl)
f_bp = open_file(filename_tbl)
for line in f_bp:
if line.startswith('#'):
continue
tokens = line.strip().split("\t")
q_id = tokens[0]
t_id = tokens[1]
evalue = float(tokens[-2])
bits = float(tokens[-1])
if evalue > blastp_Evalue_cutoff:
continue
if q_id not in blastp_list:
blastp_list[q_id] = dict()
if t_id not in blastp_list[q_id]:
blastp_list[q_id][t_id] = bits
elif bits > blastp_list[q_id][t_id]:
blastp_list[q_id][t_id] = bits
f_bp.close()
sys.stderr.write('Total Sequences: %d\n' % len(blastp_list))
f_prot_targets = open('%s.prot_targets' % filename_base, 'w')
for tmp_h in blastp_list.keys():
tmp_best_bits = max(blastp_list[tmp_h].values())
tmp_target_list = \
sorted(blastp_list[tmp_h].keys(), key=blastp_list[tmp_h].get, reverse=True)
tmp_target_str = ';'.join(["%s=%.1f" % (x, blastp_list[tmp_h][x]) for x in tmp_target_list])
f_prot_targets.write('%s\t%.1f\t%s\n' % (tmp_h, tmp_best_bits, tmp_target_str))
f_prot_targets.close()
|
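A minimal reader for the .prot_targets files this script emits (a sketch; the format follows the write() calls above, and the filename in the usage comment is hypothetical):
def read_prot_targets(path):
    # Each line: query<TAB>best_bits<TAB>target1=bits;target2=bits;...
    table = {}
    with open(path) as f:
        for line in f:
            q_id, best_bits, target_str = line.rstrip('\n').split('\t')
            targets = [(pair.split('=')[0], float(pair.split('=')[1]))
                       for pair in target_str.split(';') if pair]
            table[q_id] = (float(best_bits), targets)
    return table
# e.g. read_prot_targets('sample.prot_targets')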
|
145df11bb07120d29fd50a6a37a3a2441e63904b
|
integration-tests/stress_test.py
|
integration-tests/stress_test.py
|
#!/usr/bin/env python3
from json import dumps
from random import randint
from sys import argv
from urllib.error import HTTPError
from urllib.request import Request, urlopen
def put_payload(addr: str, payload):
auth = 'mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO'
url = 'http://localhost:3000/{}?auth={}'.format(addr, auth)
headers = {'Content-Type': 'application/json'}
payload_bytes = dumps(payload).encode('utf8')
request = Request(url, payload_bytes, headers, method='PUT')
try:
response = urlopen(request)
except HTTPError as error:
response = error
print('{}: {}'.format(response.code, response.read().decode('utf8')))
def put_random_payload():
x = str(randint(0, 1000))
y = str(randint(0, 100))
z = str(randint(0, 10))
addr = '{}/{}/{}'.format(x, y, z)
payloads = ('fmap', 'bimap', '<$>', '>>=', 'pure', 'join')
payload = payloads[randint(0, len(payloads) - 1)]
put_payload(addr, payload)
def main():
"""
usage: stress_test.py <count>
"""
if len(argv) < 2:
print(main.__doc__)
return
count = int(argv[1])
for i in range(0, count):
put_random_payload()
main()
|
Add Python script to stress test the system
|
Add Python script to stress test the system
|
Python
|
bsd-3-clause
|
channable/icepeak,channable/icepeak,channable/icepeak
|
Add Python script to stress test the system
|
#!/usr/bin/env python3
from json import dumps
from random import randint
from sys import argv
from urllib.error import HTTPError
from urllib.request import Request, urlopen
def put_payload(addr: str, payload):
auth = 'mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO'
url = 'http://localhost:3000/{}?auth={}'.format(addr, auth)
headers = {'Content-Type': 'application/json'}
payload_bytes = dumps(payload).encode('utf8')
request = Request(url, payload_bytes, headers, method='PUT')
try:
response = urlopen(request)
except HTTPError as error:
response = error
print('{}: {}'.format(response.code, response.read().decode('utf8')))
def put_random_payload():
x = str(randint(0, 1000))
y = str(randint(0, 100))
z = str(randint(0, 10))
addr = '{}/{}/{}'.format(x, y, z)
payloads = ('fmap', 'bimap', '<$>', '>>=', 'pure', 'join')
payload = payloads[randint(0, len(payloads) - 1)]
put_payload(addr, payload)
def main():
"""
usage: stress_test.py <count>
"""
if len(argv) < 2:
print(main.__doc__)
return
count = int(argv[1])
for i in range(0, count):
put_random_payload()
main()
|
<commit_before><commit_msg>Add Python script to stress test the system<commit_after>
|
#!/usr/bin/env python3
from json import dumps
from random import randint
from sys import argv
from urllib.error import HTTPError
from urllib.request import Request, urlopen
def put_payload(addr: str, payload):
auth = 'mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO'
url = 'http://localhost:3000/{}?auth={}'.format(addr, auth)
headers = {'Content-Type': 'application/json'}
payload_bytes = dumps(payload).encode('utf8')
request = Request(url, payload_bytes, headers, method='PUT')
try:
response = urlopen(request)
except HTTPError as error:
response = error
print('{}: {}'.format(response.code, response.read().decode('utf8')))
def put_random_payload():
x = str(randint(0, 1000))
y = str(randint(0, 100))
z = str(randint(0, 10))
addr = '{}/{}/{}'.format(x, y, z)
payloads = ('fmap', 'bimap', '<$>', '>>=', 'pure', 'join')
payload = payloads[randint(0, len(payloads) - 1)]
put_payload(addr, payload)
def main():
"""
usage: stress_test.py <count>
"""
if len(argv) < 2:
print(main.__doc__)
return
count = int(argv[1])
for i in range(0, count):
put_random_payload()
main()
|
Add Python script to stress test the system#!/usr/bin/env python3
from json import dumps
from random import randint
from sys import argv
from urllib.error import HTTPError
from urllib.request import Request, urlopen
def put_payload(addr: str, payload):
auth = 'mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO'
url = 'http://localhost:3000/{}?auth={}'.format(addr, auth)
headers = {'Content-Type': 'application/json'}
payload_bytes = dumps(payload).encode('utf8')
request = Request(url, payload_bytes, headers, method='PUT')
try:
response = urlopen(request)
except HTTPError as error:
response = error
print('{}: {}'.format(response.code, response.read().decode('utf8')))
def put_random_payload():
x = str(randint(0, 1000))
y = str(randint(0, 100))
z = str(randint(0, 10))
addr = '{}/{}/{}'.format(x, y, z)
payloads = ('fmap', 'bimap', '<$>', '>>=', 'pure', 'join')
payload = payloads[randint(0, len(payloads) - 1)]
put_payload(addr, payload)
def main():
"""
usage: stress_test.py <count>
"""
if len(argv) < 2:
print(main.__doc__)
return
count = int(argv[1])
for i in range(0, count):
put_random_payload()
main()
|
<commit_before><commit_msg>Add Python script to stress test the system<commit_after>#!/usr/bin/env python3
from json import dumps
from random import randint
from sys import argv
from urllib.error import HTTPError
from urllib.request import Request, urlopen
def put_payload(addr: str, payload):
auth = 'mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO'
url = 'http://localhost:3000/{}?auth={}'.format(addr, auth)
headers = {'Content-Type': 'application/json'}
payload_bytes = dumps(payload).encode('utf8')
request = Request(url, payload_bytes, headers, method='PUT')
try:
response = urlopen(request)
except HTTPError as error:
response = error
print('{}: {}'.format(response.code, response.read().decode('utf8')))
def put_random_payload():
x = str(randint(0, 1000))
y = str(randint(0, 100))
z = str(randint(0, 10))
addr = '{}/{}/{}'.format(x, y, z)
payloads = ('fmap', 'bimap', '<$>', '>>=', 'pure', 'join')
payload = payloads[randint(0, len(payloads) - 1)]
put_payload(addr, payload)
def main():
"""
usage: stress_test.py <count>
"""
if len(argv) < 2:
print(main.__doc__)
return
count = int(argv[1])
for i in range(0, count):
put_random_payload()
main()
|
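The serial loop sends one PUT at a time; to stress the server harder, the same helper can be driven concurrently. A sketch, assuming the functions above are importable from stress_test:
from concurrent.futures import ThreadPoolExecutor
from stress_test import put_random_payload

def stress_concurrently(count, workers=8):
    # Fan the PUT requests out over a small thread pool.
    with ThreadPoolExecutor(max_workers=workers) as pool:
        for _ in range(count):
            pool.submit(put_random_payload)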
|
a38763f7fb02f574bae8f17c987cff7e7f802b2d
|
py/two-sum-iv-input-is-a-bst.py
|
py/two-sum-iv-input-is-a-bst.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderAsc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.left and cur.left.val not in visited:
stack.append(cur.left)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.right and cur.right.val not in visited:
stack.append(cur.right)
else:
stack.pop()
def inOrderDesc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.right and cur.right.val not in visited:
stack.append(cur.right)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.left and cur.left.val not in visited:
stack.append(cur.left)
else:
stack.pop()
def findTarget(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: bool
"""
if not root:
return False
iterAsc = self.inOrderAsc(root)
iterDesc = self.inOrderDesc(root)
curAsc = iterAsc.next()
curDesc = iterDesc.next()
while curAsc != curDesc:
s = curAsc + curDesc
if s < k:
curAsc = iterAsc.next()
elif s > k:
curDesc = iterDesc.next()
else:
return True
else:
return False
|
Add py solution for 653. Two Sum IV - Input is a BST
|
Add py solution for 653. Two Sum IV - Input is a BST
653. Two Sum IV - Input is a BST: https://leetcode.com/problems/two-sum-iv-input-is-a-bst/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 653. Two Sum IV - Input is a BST
653. Two Sum IV - Input is a BST: https://leetcode.com/problems/two-sum-iv-input-is-a-bst/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderAsc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.left and cur.left.val not in visited:
stack.append(cur.left)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.right and cur.right.val not in visited:
stack.append(cur.right)
else:
stack.pop()
def inOrderDesc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.right and cur.right.val not in visited:
stack.append(cur.right)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.left and cur.left.val not in visited:
stack.append(cur.left)
else:
stack.pop()
def findTarget(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: bool
"""
if not root:
return False
iterAsc = self.inOrderAsc(root)
iterDesc = self.inOrderDesc(root)
curAsc = iterAsc.next()
curDesc = iterDesc.next()
while curAsc != curDesc:
s = curAsc + curDesc
if s < k:
curAsc = iterAsc.next()
elif s > k:
curDesc = iterDesc.next()
else:
return True
else:
return False
|
<commit_before><commit_msg>Add py solution for 653. Two Sum IV - Input is a BST
653. Two Sum IV - Input is a BST: https://leetcode.com/problems/two-sum-iv-input-is-a-bst/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderAsc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.left and cur.left.val not in visited:
stack.append(cur.left)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.right and cur.right.val not in visited:
stack.append(cur.right)
else:
stack.pop()
def inOrderDesc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.right and cur.right.val not in visited:
stack.append(cur.right)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.left and cur.left.val not in visited:
stack.append(cur.left)
else:
stack.pop()
def findTarget(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: bool
"""
if not root:
return False
iterAsc = self.inOrderAsc(root)
iterDesc = self.inOrderDesc(root)
curAsc = iterAsc.next()
curDesc = iterDesc.next()
while curAsc != curDesc:
s = curAsc + curDesc
if s < k:
curAsc = iterAsc.next()
elif s > k:
curDesc = iterDesc.next()
else:
return True
else:
return False
|
Add py solution for 653. Two Sum IV - Input is a BST
653. Two Sum IV - Input is a BST: https://leetcode.com/problems/two-sum-iv-input-is-a-bst/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderAsc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.left and cur.left.val not in visited:
stack.append(cur.left)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.right and cur.right.val not in visited:
stack.append(cur.right)
else:
stack.pop()
def inOrderDesc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.right and cur.right.val not in visited:
stack.append(cur.right)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.left and cur.left.val not in visited:
stack.append(cur.left)
else:
stack.pop()
def findTarget(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: bool
"""
if not root:
return False
iterAsc = self.inOrderAsc(root)
iterDesc = self.inOrderDesc(root)
curAsc = iterAsc.next()
curDesc = iterDesc.next()
while curAsc != curDesc:
s = curAsc + curDesc
if s < k:
curAsc = iterAsc.next()
elif s > k:
curDesc = iterDesc.next()
else:
return True
else:
return False
|
<commit_before><commit_msg>Add py solution for 653. Two Sum IV - Input is a BST
653. Two Sum IV - Input is a BST: https://leetcode.com/problems/two-sum-iv-input-is-a-bst/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderAsc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.left and cur.left.val not in visited:
stack.append(cur.left)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.right and cur.right.val not in visited:
stack.append(cur.right)
else:
stack.pop()
def inOrderDesc(self, root):
stack = []
visited = set()
stack.append(root)
while stack:
cur = stack[-1]
if cur.right and cur.right.val not in visited:
stack.append(cur.right)
elif cur.val not in visited:
yield cur.val
visited.add(cur.val)
elif cur.left and cur.left.val not in visited:
stack.append(cur.left)
else:
stack.pop()
def findTarget(self, root, k):
"""
:type root: TreeNode
:type k: int
:rtype: bool
"""
if not root:
return False
iterAsc = self.inOrderAsc(root)
iterDesc = self.inOrderDesc(root)
curAsc = iterAsc.next()
curDesc = iterDesc.next()
while curAsc != curDesc:
s = curAsc + curDesc
if s < k:
curAsc = iterAsc.next()
elif s > k:
curDesc = iterDesc.next()
else:
return True
else:
return False
|
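Note that the two-pointer walk above relies on Python 2 generator .next(); under Python 3 those calls become next(iterAsc) / next(iterDesc). A shorter single-pass alternative (a sketch, not the committed approach) trades the O(h) iterator state for an O(n) seen-set:
def find_target(root, k):
    seen = set()
    stack = [root] if root else []
    while stack:
        node = stack.pop()
        if k - node.val in seen:
            return True
        seen.add(node.val)
        stack.extend(child for child in (node.left, node.right) if child)
    return False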
|
5bcadebe3f5a1ae3e6e15580ac741660e929d642
|
tests/test_rmap.py
|
tests/test_rmap.py
|
from skrt.utils import rmap
def test_list():
list_ = [1, 2, 3, 4, 5]
assert rmap(list_, lambda x: x**2, int) == [1, 4, 9, 16, 25]
def test_tuple():
tuple_ = (1, 2, 3, 4, 5)
assert rmap(tuple_, lambda x: x**2, int) == (1, 4, 9, 16, 25)
def test_set():
set_ = {1, 2, 3, 4, 5}
assert rmap(set_, lambda x: x**2, int) == {1, 4, 9, 16, 25}
def test_dict():
dict_ = {'a': 1, 'b': 2, 'c': 3}
assert rmap(dict_, lambda x: x**2, int) == {'a': 1, 'b': 4, 'c': 9}
|
Add simple tests for rmap
|
Add simple tests for rmap
|
Python
|
mit
|
nvander1/skrt
|
Add simple tests for rmap
|
from skrt.utils import rmap
def test_list():
list_ = [1, 2, 3, 4, 5]
assert rmap(list_, lambda x: x**2, int) == [1, 4, 9, 16, 25]
def test_tuple():
tuple_ = (1, 2, 3, 4, 5)
assert rmap(tuple_, lambda x: x**2, int) == (1, 4, 9, 16, 25)
def test_set():
set_ = {1, 2, 3, 4, 5}
assert rmap(set_, lambda x: x**2, int) == {1, 4, 9, 16, 25}
def test_dict():
dict_ = {'a': 1, 'b': 2, 'c': 3}
assert rmap(dict_, lambda x: x**2, int) == {'a': 1, 'b': 4, 'c': 9}
|
<commit_before><commit_msg>Add simple tests for rmap<commit_after>
|
from skrt.utils import rmap
def test_list():
list_ = [1, 2, 3, 4, 5]
assert rmap(list_, lambda x: x**2, int) == [1, 4, 9, 16, 25]
def test_tuple():
tuple_ = (1, 2, 3, 4, 5)
assert rmap(tuple_, lambda x: x**2, int) == (1, 4, 9, 16, 25)
def test_set():
set_ = {1, 2, 3, 4, 5}
assert rmap(set_, lambda x: x**2, int) == {1, 4, 9, 16, 25}
def test_dict():
dict_ = {'a': 1, 'b': 2, 'c': 3}
assert rmap(dict_, lambda x: x**2, int) == {'a': 1, 'b': 4, 'c': 9}
|
Add simple tests for rmapfrom skrt.utils import rmap
def test_list():
list_ = [1, 2, 3, 4, 5]
assert rmap(list_, lambda x: x**2, int) == [1, 4, 9, 16, 25]
def test_tuple():
tuple_ = (1, 2, 3, 4, 5)
assert rmap(tuple_, lambda x: x**2, int) == (1, 4, 9, 16, 25)
def test_set():
set_ = {1, 2, 3, 4, 5}
assert rmap(set_, lambda x: x**2, int) == {1, 4, 9, 16, 25}
def test_dict():
dict_ = {'a': 1, 'b': 2, 'c': 3}
assert rmap(dict_, lambda x: x**2, int) == {'a': 1, 'b': 4, 'c': 9}
|
<commit_before><commit_msg>Add simple tests for rmap<commit_after>from skrt.utils import rmap
def test_list():
list_ = [1, 2, 3, 4, 5]
assert rmap(list_, lambda x: x**2, int) == [1, 4, 9, 16, 25]
def test_tuple():
tuple_ = (1, 2, 3, 4, 5)
assert rmap(tuple_, lambda x: x**2, int) == (1, 4, 9, 16, 25)
def test_set():
set_ = {1, 2, 3, 4, 5}
assert rmap(set_, lambda x: x**2, int) == {1, 4, 9, 16, 25}
def test_dict():
dict_ = {'a': 1, 'b': 2, 'c': 3}
assert rmap(dict_, lambda x: x**2, int) == {'a': 1, 'b': 4, 'c': 9}
|
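The four tests pin down rmap's contract: apply the function to every value of the leaf type while preserving container shape. One implementation consistent with them (a sketch, not necessarily skrt's actual code):
def rmap(obj, func, leaf_type):
    if isinstance(obj, leaf_type):
        return func(obj)
    if isinstance(obj, dict):
        return {k: rmap(v, func, leaf_type) for k, v in obj.items()}
    if isinstance(obj, (list, tuple, set)):
        return type(obj)(rmap(v, func, leaf_type) for v in obj)
    return obj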
|
2f83733b2f9526b9d81a5b088927218f78ee442b
|
tests/jobs/test_update_sample.py
|
tests/jobs/test_update_sample.py
|
import pytest
import virtool.jobs.create_sample
@pytest.fixture
def test_update_sample_job(mocker, tmpdir, loop, request, dbi, dbs, test_db_connection_string, test_db_name):
tmpdir.mkdir("samples")
tmpdir.mkdir("logs").mkdir("jobs")
settings = {
"data_path": str(tmpdir),
"db_name": test_db_name,
"create_sample_proc": 6
}
q = mocker.Mock()
job = virtool.jobs.create_sample.Job(
test_db_connection_string,
test_db_name,
settings,
"foobar",
q
)
dbs.jobs.insert_one({
"_id": "foobar",
"task": "update_sample",
"args": {
"sample_id": "baz",
"files": [
{
"id": "foo.fq.gz",
"replacement": {
"id": "foo_replacement.fq.gz"
}
}
]
},
"proc": 2,
"mem": 4
})
dbs.samples.insert_one({
"_id": "baz",
"paired": False
})
job.init_db()
return job
def test_check_db(mocker, test_update_sample_job):
expected = {
"id": "foo",
"name": "Bar"
}
m_get_sample_params = mocker.patch("virtool.jobs.utils.get_sample_params", return_value=expected)
test_update_sample_job.check_db()
# Make sure get_sample_params() called with correct parameters.
m_get_sample_params.assert_called_with(
test_update_sample_job.db,
test_update_sample_job.settings,
test_update_sample_job.task_args
)
# Result is set as Job.params attribute.
assert test_update_sample_job.params == expected
def test_copy_files(mocker, test_update_sample_job):
    # Patch the helpers first so the mocks are in effect when copy_files()
    # runs; patching after the call would leave nothing to observe.
    m_copy_or_compress = mocker.patch("virtool.jobs.utils.copy_or_compress")
    m_file_stats = mocker.patch("virtool.utils.file_stats", return_value={
        "size": 12345
    })
    test_update_sample_job.copy_files()
|
Add test module for update_sample job
|
Add test module for update_sample job
|
Python
|
mit
|
igboyes/virtool,virtool/virtool,virtool/virtool,igboyes/virtool
|
Add test module for update_sample job
|
import pytest
import virtool.jobs.create_sample
@pytest.fixture
def test_update_sample_job(mocker, tmpdir, loop, request, dbi, dbs, test_db_connection_string, test_db_name):
tmpdir.mkdir("samples")
tmpdir.mkdir("logs").mkdir("jobs")
settings = {
"data_path": str(tmpdir),
"db_name": test_db_name,
"create_sample_proc": 6
}
q = mocker.Mock()
job = virtool.jobs.create_sample.Job(
test_db_connection_string,
test_db_name,
settings,
"foobar",
q
)
dbs.jobs.insert_one({
"_id": "foobar",
"task": "update_sample",
"args": {
"sample_id": "baz",
"files": [
{
"id": "foo.fq.gz",
"replacement": {
"id": "foo_replacement.fq.gz"
}
}
]
},
"proc": 2,
"mem": 4
})
dbs.samples.insert_one({
"_id": "baz",
"paired": False
})
job.init_db()
return job
def test_check_db(mocker, test_update_sample_job):
expected = {
"id": "foo",
"name": "Bar"
}
m_get_sample_params = mocker.patch("virtool.jobs.utils.get_sample_params", return_value=expected)
test_update_sample_job.check_db()
# Make sure get_sample_params() called with correct parameters.
m_get_sample_params.assert_called_with(
test_update_sample_job.db,
test_update_sample_job.settings,
test_update_sample_job.task_args
)
# Result is set as Job.params attribute.
assert test_update_sample_job.params == expected
def test_copy_files(mocker, test_update_sample_job):
    # Patch the helpers first so the mocks are in effect when copy_files()
    # runs; patching after the call would leave nothing to observe.
    m_copy_or_compress = mocker.patch("virtool.jobs.utils.copy_or_compress")
    m_file_stats = mocker.patch("virtool.utils.file_stats", return_value={
        "size": 12345
    })
    test_update_sample_job.copy_files()
|
<commit_before><commit_msg>Add test module for update_sample job<commit_after>
|
import pytest
import virtool.jobs.create_sample
@pytest.fixture
def test_update_sample_job(mocker, tmpdir, loop, request, dbi, dbs, test_db_connection_string, test_db_name):
tmpdir.mkdir("samples")
tmpdir.mkdir("logs").mkdir("jobs")
settings = {
"data_path": str(tmpdir),
"db_name": test_db_name,
"create_sample_proc": 6
}
q = mocker.Mock()
job = virtool.jobs.create_sample.Job(
test_db_connection_string,
test_db_name,
settings,
"foobar",
q
)
dbs.jobs.insert_one({
"_id": "foobar",
"task": "update_sample",
"args": {
"sample_id": "baz",
"files": [
{
"id": "foo.fq.gz",
"replacement": {
"id": "foo_replacement.fq.gz"
}
}
]
},
"proc": 2,
"mem": 4
})
dbs.samples.insert_one({
"_id": "baz",
"paired": False
})
job.init_db()
return job
def test_check_db(mocker, test_update_sample_job):
expected = {
"id": "foo",
"name": "Bar"
}
m_get_sample_params = mocker.patch("virtool.jobs.utils.get_sample_params", return_value=expected)
test_update_sample_job.check_db()
# Make sure get_sample_params() called with correct parameters.
m_get_sample_params.assert_called_with(
test_update_sample_job.db,
test_update_sample_job.settings,
test_update_sample_job.task_args
)
# Result is set as Job.params attribute.
assert test_update_sample_job.params == expected
def test_copy_files(mocker, test_update_sample_job):
    # Patch the helpers first so the mocks are in effect when copy_files()
    # runs; patching after the call would leave nothing to observe.
    m_copy_or_compress = mocker.patch("virtool.jobs.utils.copy_or_compress")
    m_file_stats = mocker.patch("virtool.utils.file_stats", return_value={
        "size": 12345
    })
    test_update_sample_job.copy_files()
|
Add test module for update_sample jobimport pytest
import virtool.jobs.create_sample
@pytest.fixture
def test_update_sample_job(mocker, tmpdir, loop, request, dbi, dbs, test_db_connection_string, test_db_name):
tmpdir.mkdir("samples")
tmpdir.mkdir("logs").mkdir("jobs")
settings = {
"data_path": str(tmpdir),
"db_name": test_db_name,
"create_sample_proc": 6
}
q = mocker.Mock()
job = virtool.jobs.create_sample.Job(
test_db_connection_string,
test_db_name,
settings,
"foobar",
q
)
dbs.jobs.insert_one({
"_id": "foobar",
"task": "update_sample",
"args": {
"sample_id": "baz",
"files": [
{
"id": "foo.fq.gz",
"replacement": {
"id": "foo_replacement.fq.gz"
}
}
]
},
"proc": 2,
"mem": 4
})
dbs.samples.insert_one({
"_id": "baz",
"paired": False
})
job.init_db()
return job
def test_check_db(mocker, test_update_sample_job):
expected = {
"id": "foo",
"name": "Bar"
}
m_get_sample_params = mocker.patch("virtool.jobs.utils.get_sample_params", return_value=expected)
test_update_sample_job.check_db()
# Make sure get_sample_params() called with correct parameters.
m_get_sample_params.assert_called_with(
test_update_sample_job.db,
test_update_sample_job.settings,
test_update_sample_job.task_args
)
# Result is set as Job.params attribute.
assert test_update_sample_job.params == expected
def test_copy_files(mocker, test_update_sample_job):
    # Patch the helpers first so the mocks are in effect when copy_files()
    # runs; patching after the call would leave nothing to observe.
    m_copy_or_compress = mocker.patch("virtool.jobs.utils.copy_or_compress")
    m_file_stats = mocker.patch("virtool.utils.file_stats", return_value={
        "size": 12345
    })
    test_update_sample_job.copy_files()
|
<commit_before><commit_msg>Add test module for update_sample job<commit_after>import pytest
import virtool.jobs.create_sample
@pytest.fixture
def test_update_sample_job(mocker, tmpdir, loop, request, dbi, dbs, test_db_connection_string, test_db_name):
tmpdir.mkdir("samples")
tmpdir.mkdir("logs").mkdir("jobs")
settings = {
"data_path": str(tmpdir),
"db_name": test_db_name,
"create_sample_proc": 6
}
q = mocker.Mock()
job = virtool.jobs.create_sample.Job(
test_db_connection_string,
test_db_name,
settings,
"foobar",
q
)
dbs.jobs.insert_one({
"_id": "foobar",
"task": "update_sample",
"args": {
"sample_id": "baz",
"files": [
{
"id": "foo.fq.gz",
"replacement": {
"id": "foo_replacement.fq.gz"
}
}
]
},
"proc": 2,
"mem": 4
})
dbs.samples.insert_one({
"_id": "baz",
"paired": False
})
job.init_db()
return job
def test_check_db(mocker, test_update_sample_job):
expected = {
"id": "foo",
"name": "Bar"
}
m_get_sample_params = mocker.patch("virtool.jobs.utils.get_sample_params", return_value=expected)
test_update_sample_job.check_db()
# Make sure get_sample_params() called with correct parameters.
m_get_sample_params.assert_called_with(
test_update_sample_job.db,
test_update_sample_job.settings,
test_update_sample_job.task_args
)
# Result is set as Job.params attribute.
assert test_update_sample_job.params == expected
def test_copy_files(mocker, test_update_sample_job):
    # Patch the helpers first so the mocks are in effect when copy_files()
    # runs; patching after the call would leave nothing to observe.
    m_copy_or_compress = mocker.patch("virtool.jobs.utils.copy_or_compress")
    m_file_stats = mocker.patch("virtool.utils.file_stats", return_value={
        "size": 12345
    })
    test_update_sample_job.copy_files()
|
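A stricter follow-up for test_copy_files, asserting on the patched helpers (a sketch; the exact call arguments depend on the job's internals, so only presence is checked):
def test_copy_files_calls_helpers(mocker, test_update_sample_job):
    m_copy = mocker.patch("virtool.jobs.utils.copy_or_compress")
    mocker.patch("virtool.utils.file_stats", return_value={"size": 12345})
    test_update_sample_job.copy_files()
    assert m_copy.called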
|
80f53a346be979e96788e5d45cfe14cf5d12b22a
|
micall/utils/remove_dupe_dirs.py
|
micall/utils/remove_dupe_dirs.py
|
import shutil
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
def parse_args():
# noinspection PyTypeChecker
parser = ArgumentParser(
description='Remove folders that have already been zipped.',
formatter_class=ArgumentDefaultsHelpFormatter)
# noinspection PyTypeChecker
parser.add_argument(
'raw_data',
type=Path,
nargs='?',
default=str(Path.home() / 'data' / 'RAW_DATA'),
help='Raw data folder to scan for duplicate results folders.')
parser.add_argument('--force', '-f',
action='store_true',
help="Don't ask for confirmation before deleting.")
return parser.parse_args()
def main():
args = parse_args()
duplicate_folders = []
runs_path: Path = args.raw_data / 'MiSeq' / 'runs'
for zip_path in runs_path.glob('*/Results/*.zip'):
results_folder = zip_path.parent
original_folder = results_folder / zip_path.stem
rel_path = original_folder.relative_to(runs_path)
if original_folder.is_dir():
print(rel_path)
duplicate_folders.append(original_folder)
if not duplicate_folders:
print('No duplicates found.')
return
if not args.force:
confirmation = input(f'Are you sure you want to delete '
f'{len(duplicate_folders)} folders? Y/[N] ')
if confirmation.upper() != 'Y':
exit('Aborted.')
duplicate_folders.sort()
for original_folder in duplicate_folders:
rel_path = original_folder.relative_to(runs_path)
print(f'deleting {rel_path}...')
shutil.rmtree(original_folder)
main()
|
Add script to remove duplicate results folders.
|
Add script to remove duplicate results folders.
|
Python
|
agpl-3.0
|
cfe-lab/MiCall,cfe-lab/MiCall,cfe-lab/MiCall
|
Add script to remove duplicate results folders.
|
import shutil
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
def parse_args():
# noinspection PyTypeChecker
parser = ArgumentParser(
description='Remove folders that have already been zipped.',
formatter_class=ArgumentDefaultsHelpFormatter)
# noinspection PyTypeChecker
parser.add_argument(
'raw_data',
type=Path,
nargs='?',
default=str(Path.home() / 'data' / 'RAW_DATA'),
help='Raw data folder to scan for duplicate results folders.')
parser.add_argument('--force', '-f',
action='store_true',
help="Don't ask for confirmation before deleting.")
return parser.parse_args()
def main():
args = parse_args()
duplicate_folders = []
runs_path: Path = args.raw_data / 'MiSeq' / 'runs'
for zip_path in runs_path.glob('*/Results/*.zip'):
results_folder = zip_path.parent
original_folder = results_folder / zip_path.stem
rel_path = original_folder.relative_to(runs_path)
if original_folder.is_dir():
print(rel_path)
duplicate_folders.append(original_folder)
if not duplicate_folders:
print('No duplicates found.')
return
if not args.force:
confirmation = input(f'Are you sure you want to delete '
f'{len(duplicate_folders)} folders? Y/[N] ')
if confirmation.upper() != 'Y':
exit('Aborted.')
duplicate_folders.sort()
for original_folder in duplicate_folders:
rel_path = original_folder.relative_to(runs_path)
print(f'deleting {rel_path}...')
shutil.rmtree(original_folder)
main()
|
<commit_before><commit_msg>Add script to remove duplicate results folders.<commit_after>
|
import shutil
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
def parse_args():
# noinspection PyTypeChecker
parser = ArgumentParser(
description='Remove folders that have already been zipped.',
formatter_class=ArgumentDefaultsHelpFormatter)
# noinspection PyTypeChecker
parser.add_argument(
'raw_data',
type=Path,
nargs='?',
default=str(Path.home() / 'data' / 'RAW_DATA'),
help='Raw data folder to scan for duplicate results folders.')
parser.add_argument('--force', '-f',
action='store_true',
help="Don't ask for confirmation before deleting.")
return parser.parse_args()
def main():
args = parse_args()
duplicate_folders = []
runs_path: Path = args.raw_data / 'MiSeq' / 'runs'
for zip_path in runs_path.glob('*/Results/*.zip'):
results_folder = zip_path.parent
original_folder = results_folder / zip_path.stem
rel_path = original_folder.relative_to(runs_path)
if original_folder.is_dir():
print(rel_path)
duplicate_folders.append(original_folder)
if not duplicate_folders:
print('No duplicates found.')
return
if not args.force:
confirmation = input(f'Are you sure you want to delete '
f'{len(duplicate_folders)} folders? Y/[N] ')
if confirmation.upper() != 'Y':
exit('Aborted.')
duplicate_folders.sort()
for original_folder in duplicate_folders:
rel_path = original_folder.relative_to(runs_path)
print(f'deleting {rel_path}...')
shutil.rmtree(original_folder)
main()
|
Add script to remove duplicate results folders.import shutil
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
def parse_args():
# noinspection PyTypeChecker
parser = ArgumentParser(
description='Remove folders that have already been zipped.',
formatter_class=ArgumentDefaultsHelpFormatter)
# noinspection PyTypeChecker
parser.add_argument(
'raw_data',
type=Path,
nargs='?',
default=str(Path.home() / 'data' / 'RAW_DATA'),
help='Raw data folder to scan for duplicate results folders.')
parser.add_argument('--force', '-f',
action='store_true',
help="Don't ask for confirmation before deleting.")
return parser.parse_args()
def main():
args = parse_args()
duplicate_folders = []
runs_path: Path = args.raw_data / 'MiSeq' / 'runs'
for zip_path in runs_path.glob('*/Results/*.zip'):
results_folder = zip_path.parent
original_folder = results_folder / zip_path.stem
rel_path = original_folder.relative_to(runs_path)
if original_folder.is_dir():
print(rel_path)
duplicate_folders.append(original_folder)
if not duplicate_folders:
print('No duplicates found.')
return
if not args.force:
confirmation = input(f'Are you sure you want to delete '
f'{len(duplicate_folders)} folders? Y/[N] ')
if confirmation.upper() != 'Y':
exit('Aborted.')
duplicate_folders.sort()
for original_folder in duplicate_folders:
rel_path = original_folder.relative_to(runs_path)
print(f'deleting {rel_path}...')
shutil.rmtree(original_folder)
main()
|
<commit_before><commit_msg>Add script to remove duplicate results folders.<commit_after>import shutil
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pathlib import Path
def parse_args():
# noinspection PyTypeChecker
parser = ArgumentParser(
description='Remove folders that have already been zipped.',
formatter_class=ArgumentDefaultsHelpFormatter)
# noinspection PyTypeChecker
parser.add_argument(
'raw_data',
type=Path,
nargs='?',
default=str(Path.home() / 'data' / 'RAW_DATA'),
help='Raw data folder to scan for duplicate results folders.')
parser.add_argument('--force', '-f',
action='store_true',
help="Don't ask for confirmation before deleting.")
return parser.parse_args()
def main():
args = parse_args()
duplicate_folders = []
runs_path: Path = args.raw_data / 'MiSeq' / 'runs'
for zip_path in runs_path.glob('*/Results/*.zip'):
results_folder = zip_path.parent
original_folder = results_folder / zip_path.stem
rel_path = original_folder.relative_to(runs_path)
if original_folder.is_dir():
print(rel_path)
duplicate_folders.append(original_folder)
if not duplicate_folders:
print('No duplicates found.')
return
if not args.force:
confirmation = input(f'Are you sure you want to delete '
f'{len(duplicate_folders)} folders? Y/[N] ')
if confirmation.upper() != 'Y':
exit('Aborted.')
duplicate_folders.sort()
for original_folder in duplicate_folders:
rel_path = original_folder.relative_to(runs_path)
print(f'deleting {rel_path}...')
shutil.rmtree(original_folder)
main()
|
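The glob-plus-stem rule is easy to sanity-check against a throwaway tree (stdlib only, no real data touched):
import tempfile
from pathlib import Path

with tempfile.TemporaryDirectory() as tmp:
    runs = Path(tmp) / 'MiSeq' / 'runs'
    results = runs / 'run1' / 'Results'
    (results / 'out').mkdir(parents=True)   # unzipped results folder
    (results / 'out.zip').write_bytes(b'')  # its zip twin
    dupes = [z.parent / z.stem for z in runs.glob('*/Results/*.zip')
             if (z.parent / z.stem).is_dir()]
    assert dupes == [results / 'out']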
|
f1caaf8ed8c11e00342fc45472f2f723a923fa04
|
test/test_notification.py
|
test/test_notification.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pytest
from pytest_girder.assertions import assertStatus, assertStatusOk
from girder.models.notification import Notification
@pytest.fixture
def notifications(user):
model = Notification()
doc1 = model.createNotification('type', {}, user)
doc1['updated'] = 1
doc1['time'] = 1
model.save(doc1)
doc2 = model.createNotification('type', {}, user)
yield [doc1, doc2]
model.remove(doc1)
model.remove(doc2)
def testListAllNotifications(server, user, notifications):
resp = server.request(path='/notification', user=user)
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(m['_id']) for m in notifications}
def testListNotificationsSinceTime(server, user, notifications):
resp = server.request(path='/notification', user=user, params={'since': 10})
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(notifications[-1]['_id'])}
def testListNotificationsAuthError(server, notifications):
resp = server.request(path='/notification')
assertStatus(resp, 401)
|
Add a test to the notification list endpoint
|
Add a test to the notification list endpoint
|
Python
|
apache-2.0
|
RafaelPalomar/girder,manthey/girder,RafaelPalomar/girder,data-exp-lab/girder,data-exp-lab/girder,girder/girder,data-exp-lab/girder,RafaelPalomar/girder,RafaelPalomar/girder,girder/girder,data-exp-lab/girder,data-exp-lab/girder,jbeezley/girder,Kitware/girder,manthey/girder,girder/girder,Kitware/girder,kotfic/girder,RafaelPalomar/girder,manthey/girder,kotfic/girder,manthey/girder,Kitware/girder,jbeezley/girder,girder/girder,jbeezley/girder,Kitware/girder,kotfic/girder,kotfic/girder,kotfic/girder,jbeezley/girder
|
Add a test to the notification list endpoint
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pytest
from pytest_girder.assertions import assertStatus, assertStatusOk
from girder.models.notification import Notification
@pytest.fixture
def notifications(user):
model = Notification()
doc1 = model.createNotification('type', {}, user)
doc1['updated'] = 1
doc1['time'] = 1
model.save(doc1)
doc2 = model.createNotification('type', {}, user)
yield [doc1, doc2]
model.remove(doc1)
model.remove(doc2)
def testListAllNotifications(server, user, notifications):
resp = server.request(path='/notification', user=user)
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(m['_id']) for m in notifications}
def testListNotificationsSinceTime(server, user, notifications):
resp = server.request(path='/notification', user=user, params={'since': 10})
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(notifications[-1]['_id'])}
def testListNotificationsAuthError(server, notifications):
resp = server.request(path='/notification')
assertStatus(resp, 401)
|
<commit_before><commit_msg>Add a test to the notification list endpoint<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pytest
from pytest_girder.assertions import assertStatus, assertStatusOk
from girder.models.notification import Notification
@pytest.fixture
def notifications(user):
model = Notification()
doc1 = model.createNotification('type', {}, user)
doc1['updated'] = 1
doc1['time'] = 1
model.save(doc1)
doc2 = model.createNotification('type', {}, user)
yield [doc1, doc2]
model.remove(doc1)
model.remove(doc2)
def testListAllNotifications(server, user, notifications):
resp = server.request(path='/notification', user=user)
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(m['_id']) for m in notifications}
def testListNotificationsSinceTime(server, user, notifications):
resp = server.request(path='/notification', user=user, params={'since': 10})
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(notifications[-1]['_id'])}
def testListNotificationsAuthError(server, notifications):
resp = server.request(path='/notification')
assertStatus(resp, 401)
|
Add a test to the notification list endpoint#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pytest
from pytest_girder.assertions import assertStatus, assertStatusOk
from girder.models.notification import Notification
@pytest.fixture
def notifications(user):
model = Notification()
doc1 = model.createNotification('type', {}, user)
doc1['updated'] = 1
doc1['time'] = 1
model.save(doc1)
doc2 = model.createNotification('type', {}, user)
yield [doc1, doc2]
model.remove(doc1)
model.remove(doc2)
def testListAllNotifications(server, user, notifications):
resp = server.request(path='/notification', user=user)
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(m['_id']) for m in notifications}
def testListNotificationsSinceTime(server, user, notifications):
resp = server.request(path='/notification', user=user, params={'since': 10})
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(notifications[-1]['_id'])}
def testListNotificationsAuthError(server, notifications):
resp = server.request(path='/notification')
assertStatus(resp, 401)
|
<commit_before><commit_msg>Add a test to the notification list endpoint<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import pytest
from pytest_girder.assertions import assertStatus, assertStatusOk
from girder.models.notification import Notification
@pytest.fixture
def notifications(user):
model = Notification()
doc1 = model.createNotification('type', {}, user)
doc1['updated'] = 1
doc1['time'] = 1
model.save(doc1)
doc2 = model.createNotification('type', {}, user)
yield [doc1, doc2]
model.remove(doc1)
model.remove(doc2)
def testListAllNotifications(server, user, notifications):
resp = server.request(path='/notification', user=user)
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(m['_id']) for m in notifications}
def testListNotificationsSinceTime(server, user, notifications):
resp = server.request(path='/notification', user=user, params={'since': 10})
assertStatusOk(resp)
assert {m['_id'] for m in resp.json} == {str(notifications[-1]['_id'])}
def testListNotificationsAuthError(server, notifications):
resp = server.request(path='/notification')
assertStatus(resp, 401)
|
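One more case follows naturally from the fixture (a sketch; it assumes 'since' is an exclusive lower bound on the notification's updated time, as the since=10 test implies):
def testListNotificationsSinceFuture(server, user, notifications):
    resp = server.request(path='/notification', user=user,
                          params={'since': 10**12})
    assertStatusOk(resp)
    assert resp.json == []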
|
484636805602348c883d8dc775082169f97cce76
|
crawler/management/commands/similar_apps_category_counter.py
|
crawler/management/commands/similar_apps_category_counter.py
|
import logging.config
from functools import reduce
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours'
def handle(self, *args, **options):
result_dict = dict()
similar_apps = self.get_my_similar()
for similar_app in similar_apps:
            # filter() returns a queryset; fetch the single matching App and
            # skip packages that are not in our catalogue.
            app = App.objects.filter(package_name=similar_app).first()
            if app is None:
                continue
            category = app.category_name()
if category not in result_dict:
count = 0
else:
count = result_dict[category]
result_dict[category] = count + 1
admin_file = open('similar_apps_category.csv', 'w')
        admin_file.write('category;count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(
self.style.SUCCESS('Finished category counter')
)
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
Create similar category counter command
|
Create similar category counter command
|
Python
|
apache-2.0
|
bkosawa/admin-recommendation
|
Create similar category counter command
|
import logging.config
from functools import reduce
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours'
def handle(self, *args, **options):
result_dict = dict()
similar_apps = self.get_my_similar()
for similar_app in similar_apps:
            # filter() returns a queryset; fetch the single matching App and
            # skip packages that are not in our catalogue.
            app = App.objects.filter(package_name=similar_app).first()
            if app is None:
                continue
            category = app.category_name()
if category not in result_dict:
count = 0
else:
count = result_dict[category]
result_dict[category] = count + 1
admin_file = open('similar_apps_category.csv', 'w')
        admin_file.write('category;count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(
self.style.SUCCESS('Finished category counter')
)
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
<commit_before><commit_msg>Create similar category counter command<commit_after>
|
import logging.config
from functools import reduce
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours'
def handle(self, *args, **options):
result_dict = dict()
similar_apps = self.get_my_similar()
for similar_app in similar_apps:
            # filter() returns a queryset; fetch the single matching App and
            # skip packages that are not in our catalogue.
            app = App.objects.filter(package_name=similar_app).first()
            if app is None:
                continue
            category = app.category_name()
if category not in result_dict:
count = 0
else:
count = result_dict[category]
result_dict[category] = count + 1
admin_file = open('similar_apps_category.csv', 'w')
        admin_file.write('category;count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(
self.style.SUCCESS('Finished category counter')
)
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
Create similar category counter commandimport logging.config
from functools import reduce
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours'
def handle(self, *args, **options):
result_dict = dict()
similar_apps = self.get_my_similar()
for similar_app in similar_apps:
            # filter() returns a queryset; fetch the single matching App and
            # skip packages that are not in our catalogue.
            app = App.objects.filter(package_name=similar_app).first()
            if app is None:
                continue
            category = app.category_name()
if category not in result_dict:
count = 0
else:
count = result_dict[category]
result_dict[category] = count + 1
admin_file = open('similar_apps_category.csv', 'w')
        admin_file.write('category;count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(
self.style.SUCCESS('Finished category counter')
)
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
<commit_before><commit_msg>Create similar category counter command<commit_after>import logging.config
from functools import reduce
from operator import or_
from django.core.management.base import BaseCommand
from crawler.models import *
logger = logging.getLogger('crawler.command')
class Command(BaseCommand):
help = 'Generate comparison between google similar app and ours'
def handle(self, *args, **options):
result_dict = dict()
similar_apps = self.get_my_similar()
for similar_app in similar_apps:
            # filter() returns a queryset; fetch the single matching App and
            # skip packages that are not in our catalogue.
            app = App.objects.filter(package_name=similar_app).first()
            if app is None:
                continue
            category = app.category_name()
if category not in result_dict:
count = 0
else:
count = result_dict[category]
result_dict[category] = count + 1
admin_file = open('similar_apps_category.csv', 'w')
        admin_file.write('category;count\n')
for key in result_dict:
admin_file.write('{};{}\n'.format(key, result_dict[key]))
admin_file.close()
self.stdout.write(
self.style.SUCCESS('Finished category counter')
)
@staticmethod
def get_my_similar():
apps = SimilarApp.objects.order_by().values_list('source_package', flat=True).distinct()
similar_apps = SimilarApp.objects.order_by().values_list('similar_package', flat=True).distinct()
app_set = set(apps)
similar_set = set(similar_apps)
merged_set = reduce(or_, [app_set, similar_set])
return merged_set
|
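The manual semicolon writes can also be delegated to the csv module, which handles delimiters and line endings uniformly (an equivalent sketch, not part of the commit):
import csv

def write_counts(result_dict, path='similar_apps_category.csv'):
    with open(path, 'w', newline='') as f:
        writer = csv.writer(f, delimiter=';')
        writer.writerow(['category', 'count'])
        writer.writerows(sorted(result_dict.items()))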
|
f15b886db255e2fd91e98c3a03e9f6200ebb1bf4
|
twisted/plugins/txircd_plugin.py
|
twisted/plugins/txircd_plugin.py
|
from twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
class Options(usage.Options):
# If we ever start having options, don't forget to put them here
pass
class IRCdServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "txircd"
description = "Twisted IRC Server"
options = Options
def makeService(self, options):
# Return a service from here once I make one
pass
txircd = IRCdServiceMaker()
|
Add framework for new Twisted plugin to be filled in as functionality is actually implemented
|
Add framework for new Twisted plugin to be filled in as functionality is actually implemented
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Add framework for new Twisted plugin to be filled in as functionality is actually implemented
|
from twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
class Options(usage.Options):
# If we ever start having options, don't forget to put them here
pass
class IRCdServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "txircd"
description = "Twisted IRC Server"
options = Options
def makeService(self, options):
# Return a service from here once I make one
pass
txircd = IRCdServiceMaker()
|
<commit_before><commit_msg>Add framework for new Twisted plugin to be filled in as functionality is actually implemented<commit_after>
|
from twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
class Options(usage.Options):
# If we ever start having options, don't forget to put them here
pass
class IRCdServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "txircd"
description = "Twisted IRC Server"
options = Options
def makeService(self, options):
# Return a service from here once I make one
pass
txircd = IRCdServiceMaker()
|
Add framework for new Twisted plugin to be filled in as functionality is actually implementedfrom twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
class Options(usage.Options):
# If we ever start having options, don't forget to put them here
pass
class IRCdServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "txircd"
description = "Twisted IRC Server"
options = Options
def makeService(self, options):
# Return a service from here once I make one
pass
txircd = IRCdServiceMaker()
|
<commit_before><commit_msg>Add framework for new Twisted plugin to be filled in as functionality is actually implemented<commit_after>from twisted.application.service import IServiceMaker
from twisted.plugin import IPlugin
from twisted.python import usage
from zope.interface import implements
class Options(usage.Options):
# If we ever start having options, don't forget to put them here
pass
class IRCdServiceMaker(object):
implements(IServiceMaker, IPlugin)
tapname = "txircd"
description = "Twisted IRC Server"
options = Options
def makeService(self, options):
# Return a service from here once I make one
pass
txircd = IRCdServiceMaker()
|
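Until the real service exists, makeService can hand back an empty container so `twistd txircd` at least starts. A placeholder sketch (MultiService is standard Twisted; the usage here is an assumption about where the project is headed, not txircd's eventual implementation):
from twisted.application.service import MultiService

def make_placeholder_service(options):
    # An empty parent service; real IRC listeners would be attached later.
    return MultiService()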
|
29f0f543f93f0f3c9df3bf823f43c616199f8d4f
|
tests/helpers/test_net.py
|
tests/helpers/test_net.py
|
import unittest
from pycroft.helpers import net
class IpRegexTestCase(unittest.TestCase):
def test_ip_regex(self):
regex = net.ip_regex
self.assertTrue(regex.match("141.30.228.39"))
self.assertFalse(regex.match("141.3330.228.39"))
self.assertFalse(regex.match("141.3330.228.39."))
self.assertFalse(regex.match("ddddddd"))
|
Add simple test for IP regex
|
Add simple test for IP regex
Fixes #360
|
Python
|
apache-2.0
|
agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft,agdsn/pycroft
|
Add simple test for IP regex
Fixes #360
|
import unittest
from pycroft.helpers import net
class IpRegexTestCase(unittest.TestCase):
def test_ip_regex(self):
regex = net.ip_regex
self.assertTrue(regex.match("141.30.228.39"))
self.assertFalse(regex.match("141.3330.228.39"))
self.assertFalse(regex.match("141.3330.228.39."))
self.assertFalse(regex.match("ddddddd"))
|
<commit_before><commit_msg>Add simple test for IP regex
Fixes #360<commit_after>
|
import unittest
from pycroft.helpers import net
class IpRegexTestCase(unittest.TestCase):
def test_ip_regex(self):
regex = net.ip_regex
self.assertTrue(regex.match("141.30.228.39"))
self.assertFalse(regex.match("141.3330.228.39"))
self.assertFalse(regex.match("141.3330.228.39."))
self.assertFalse(regex.match("ddddddd"))
|
Add simple test for IP regex
Fixes #360import unittest
from pycroft.helpers import net
class IpRegexTestCase(unittest.TestCase):
def test_ip_regex(self):
regex = net.ip_regex
self.assertTrue(regex.match("141.30.228.39"))
self.assertFalse(regex.match("141.3330.228.39"))
self.assertFalse(regex.match("141.3330.228.39."))
self.assertFalse(regex.match("ddddddd"))
|
<commit_before><commit_msg>Add simple test for IP regex
Fixes #360<commit_after>import unittest
from pycroft.helpers import net
class IpRegexTestCase(unittest.TestCase):
def test_ip_regex(self):
regex = net.ip_regex
self.assertTrue(regex.match("141.30.228.39"))
self.assertFalse(regex.match("141.3330.228.39"))
self.assertFalse(regex.match("141.3330.228.39."))
self.assertFalse(regex.match("ddddddd"))
|
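A pattern consistent with all four assertions (a sketch, not necessarily pycroft's actual net.ip_regex; it checks dotted-quad shape, not octet range):
import re

ip_regex = re.compile(r'^(\d{1,3}\.){3}\d{1,3}$')
assert ip_regex.match("141.30.228.39")
assert not ip_regex.match("141.3330.228.39")
assert not ip_regex.match("141.3330.228.39.")
assert not ip_regex.match("ddddddd")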
|
828e79b9b788b232ebffe99cc69257494b8e1dda
|
ci/utils.py
|
ci/utils.py
|
#!/usr/bin/python
# Copyright (C) 2017 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
module_json = 'module.json'
# project_file = os.path.join(this_dir, 'projects.json')
class KubosUtils(object):
def __init__(self):
self.ignore_list = ['.git']
self.search_depth = 3
self.this_dir = os.path.dirname(__file__)
self.kubos_root = os.path.abspath(os.path.join(self.this_dir, '..'))
self.module_index = {}
self.discover_kubos_modules()
def discover_modules_rec(self, path, depth=1):
for thing in os.listdir(path):
if depth > self.search_depth:
return
            thing_path = os.path.join(path, thing)
            if thing in self.ignore_list:
                # Skip ignored entries such as .git instead of descending into them.
                continue
            if thing == module_json:
module_name = self.get_module_name(thing_path)
if module_name is not None:
self.module_index[module_name] = path
return
elif os.path.isdir(thing_path):
self.discover_modules_rec(thing_path, depth=depth+1)
def get_module_name(self, path):
if not os.path.isfile(path):
return None
with open(path, 'r') as _file:
data = json.loads(_file.read())
if 'name' in data:
return data['name']
else:
return None
def discover_kubos_modules(self):
self.discover_modules_rec(self.kubos_root)
if __name__ == '__main__':
util = KubosUtils()
|
Add new utility base file for module discovery
|
Add new utility base file for module discovery
|
Python
|
apache-2.0
|
kubostech/KubOS,Psykar/kubos,Psykar/kubos,Psykar/kubos,Psykar/kubos,Psykar/kubos,Psykar/kubos,kubostech/KubOS,Psykar/kubos
|
Add new utility base file for module discovery
|
#!/usr/bin/python
# Copyright (C) 2017 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
module_json = 'module.json'
# project_file = os.path.join(this_dir, 'projects.json')
class KubosUtils(object):
def __init__(self):
self.ignore_list = ['.git']
self.search_depth = 3
self.this_dir = os.path.dirname(__file__)
self.kubos_root = os.path.abspath(os.path.join(self.this_dir, '..'))
self.module_index = {}
self.discover_kubos_modules()
def discover_modules_rec(self, path, depth=1):
for thing in os.listdir(path):
if depth > self.search_depth:
return
thing_path = os.path.join(path, thing)
if thing == module_json:
module_name = self.get_module_name(thing_path)
if module_name is not None:
self.module_index[module_name] = path
return
elif os.path.isdir(thing_path):
self.discover_modules_rec(thing_path, depth=depth+1)
def get_module_name(self, path):
if not os.path.isfile(path):
return None
with open(path, 'r') as _file:
data = json.loads(_file.read())
if 'name' in data:
return data['name']
else:
return None
def discover_kubos_modules(self):
self.discover_modules_rec(self.kubos_root)
if __name__ == '__main__':
util = KubosUtils()
|
<commit_before><commit_msg>Add new utility base file for module discovery<commit_after>
|
#!/usr/bin/python
# Copyright (C) 2017 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
module_json = 'module.json'
# project_file = os.path.join(this_dir, 'projects.json')
class KubosUtils(object):
def __init__(self):
self.ignore_list = ['.git']
self.search_depth = 3
self.this_dir = os.path.dirname(__file__)
self.kubos_root = os.path.abspath(os.path.join(self.this_dir, '..'))
self.module_index = {}
self.discover_kubos_modules()
def discover_modules_rec(self, path, depth=1):
for thing in os.listdir(path):
if depth > self.search_depth:
return
thing_path = os.path.join(path, thing)
if thing == module_json:
module_name = self.get_module_name(thing_path)
if module_name is not None:
self.module_index[module_name] = path
return
elif os.path.isdir(thing_path):
self.discover_modules_rec(thing_path, depth=depth+1)
def get_module_name(self, path):
if not os.path.isfile(path):
return None
with open(path, 'r') as _file:
data = json.loads(_file.read())
if 'name' in data:
return data['name']
else:
return None
def discover_kubos_modules(self):
self.discover_modules_rec(self.kubos_root)
if __name__ == '__main__':
util = KubosUtils()
|
Add new utility base file for module discovery#!/usr/bin/python
# Copyright (C) 2017 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
module_json = 'module.json'
# project_file = os.path.join(this_dir, 'projects.json')
class KubosUtils(object):
def __init__(self):
self.ignore_list = ['.git']
self.search_depth = 3
self.this_dir = os.path.dirname(__file__)
self.kubos_root = os.path.abspath(os.path.join(self.this_dir, '..'))
self.module_index = {}
self.discover_kubos_modules()
def discover_modules_rec(self, path, depth=1):
for thing in os.listdir(path):
if depth > self.search_depth:
return
thing_path = os.path.join(path, thing)
if thing == module_json:
module_name = self.get_module_name(thing_path)
if module_name is not None:
self.module_index[module_name] = path
return
elif os.path.isdir(thing_path):
self.discover_modules_rec(thing_path, depth=depth+1)
def get_module_name(self, path):
if not os.path.isfile(path):
return None
with open(path, 'r') as _file:
data = json.loads(_file.read())
if 'name' in data:
return data['name']
else:
return None
def discover_kubos_modules(self):
self.discover_modules_rec(self.kubos_root)
if __name__ == '__main__':
util = KubosUtils()
|
<commit_before><commit_msg>Add new utility base file for module discovery<commit_after>#!/usr/bin/python
# Copyright (C) 2017 Kubos Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
module_json = 'module.json'
# project_file = os.path.join(this_dir, 'projects.json')
class KubosUtils(object):
def __init__(self):
self.ignore_list = ['.git']
self.search_depth = 3
self.this_dir = os.path.dirname(__file__)
self.kubos_root = os.path.abspath(os.path.join(self.this_dir, '..'))
self.module_index = {}
self.discover_kubos_modules()
def discover_modules_rec(self, path, depth=1):
for thing in os.listdir(path):
if depth > self.search_depth:
return
thing_path = os.path.join(path, thing)
if thing == module_json:
module_name = self.get_module_name(thing_path)
if module_name is not None:
self.module_index[module_name] = path
return
elif os.path.isdir(thing_path):
self.discover_modules_rec(thing_path, depth=depth+1)
def get_module_name(self, path):
if not os.path.isfile(path):
return None
with open(path, 'r') as _file:
data = json.loads(_file.read())
if 'name' in data:
return data['name']
else:
return None
def discover_kubos_modules(self):
self.discover_modules_rec(self.kubos_root)
if __name__ == '__main__':
util = KubosUtils()
|
|
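A minimal usage sketch for the discovery utility above, assuming the snippet lives next to ci/utils.py so the plain 'utils' import resolves:

from pprint import pprint

from utils import KubosUtils

# KubosUtils walks the repository tree (up to search_depth levels) during
# __init__ and fills module_index with {name from module.json: containing dir}
util = KubosUtils()
pprint(util.module_index)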
8b85b1a46eade948a8c962b928a9cdd52da7a643
|
test/test_datahandler.py
|
test/test_datahandler.py
|
import sys
sys.path.append('..')
import unittest
from datahandler import data_handler_factory
from sweep import Sweep
class DataHandlerTestCase(unittest.TestCase):
def setUp(self):
path = 'C:/Dropbox/PhD/sandbox_phd/FolderBrowser/data/2016-09-19#015'
self.sweep = Sweep(path)
data = self.sweep.data
x = data['mL']
y = data['mR']
self.data_h = data_handler_factory(x, y)
def test_data_validity(self):
self.assertTrue(self.data_h.data_is_valid)
def test_imshow_eligible(self):
self.assertTrue(self.data_h.imshow_eligible)
def test_data_is_linear(self):
self.assertTrue(self.data_h.data_is_linear[0])
self.assertTrue(self.data_h.data_is_linear[1])
def test_data_is_linear_on_axis(self):
self.assertEqual(self.data_h.lin_axis_for_data[1], 0)
self.assertEqual(self.data_h.lin_axis_for_data[0], 1)
def test_not_reversed(self):
for i in (0,1):
elems_are_equal = self.data_h.data[i] == self.data_h.tdata[i]
self.assertTrue(elems_are_equal.all())
if __name__=='__main__':
unittest.main()
|
Add some tests for DataHandler
|
Add some tests for DataHandler
|
Python
|
mit
|
mchels/FolderBrowser
|
Add some tests for DataHandler
|
import sys
sys.path.append('..')
import unittest
from datahandler import data_handler_factory
from sweep import Sweep
class DataHandlerTestCase(unittest.TestCase):
def setUp(self):
path = 'C:/Dropbox/PhD/sandbox_phd/FolderBrowser/data/2016-09-19#015'
self.sweep = Sweep(path)
data = self.sweep.data
x = data['mL']
y = data['mR']
self.data_h = data_handler_factory(x, y)
def test_data_validity(self):
self.assertTrue(self.data_h.data_is_valid)
def test_imshow_eligible(self):
self.assertTrue(self.data_h.imshow_eligible)
def test_data_is_linear(self):
self.assertTrue(self.data_h.data_is_linear[0])
self.assertTrue(self.data_h.data_is_linear[1])
def test_data_is_linear_on_axis(self):
self.assertEqual(self.data_h.lin_axis_for_data[1], 0)
self.assertEqual(self.data_h.lin_axis_for_data[0], 1)
def test_not_reversed(self):
for i in (0,1):
elems_are_equal = self.data_h.data[i] == self.data_h.tdata[i]
self.assertTrue(elems_are_equal.all())
if __name__=='__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for DataHandler<commit_after>
|
import sys
sys.path.append('..')
import unittest
from datahandler import data_handler_factory
from sweep import Sweep
class DataHandlerTestCase(unittest.TestCase):
def setUp(self):
path = 'C:/Dropbox/PhD/sandbox_phd/FolderBrowser/data/2016-09-19#015'
self.sweep = Sweep(path)
data = self.sweep.data
x = data['mL']
y = data['mR']
self.data_h = data_handler_factory(x, y)
def test_data_validity(self):
self.assertTrue(self.data_h.data_is_valid)
def test_imshow_eligible(self):
self.assertTrue(self.data_h.imshow_eligible)
def test_data_is_linear(self):
self.assertTrue(self.data_h.data_is_linear[0])
self.assertTrue(self.data_h.data_is_linear[1])
def test_data_is_linear_on_axis(self):
self.assertEqual(self.data_h.lin_axis_for_data[1], 0)
self.assertEqual(self.data_h.lin_axis_for_data[0], 1)
def test_not_reversed(self):
for i in (0,1):
elems_are_equal = self.data_h.data[i] == self.data_h.tdata[i]
self.assertTrue(elems_are_equal.all())
if __name__=='__main__':
unittest.main()
|
Add some tests for DataHandlerimport sys
sys.path.append('..')
import unittest
from datahandler import data_handler_factory
from sweep import Sweep
class DataHandlerTestCase(unittest.TestCase):
def setUp(self):
path = 'C:/Dropbox/PhD/sandbox_phd/FolderBrowser/data/2016-09-19#015'
self.sweep = Sweep(path)
data = self.sweep.data
x = data['mL']
y = data['mR']
self.data_h = data_handler_factory(x, y)
def test_data_validity(self):
self.assertTrue(self.data_h.data_is_valid)
def test_imshow_eligible(self):
self.assertTrue(self.data_h.imshow_eligible)
def test_data_is_linear(self):
self.assertTrue(self.data_h.data_is_linear[0])
self.assertTrue(self.data_h.data_is_linear[1])
def test_data_is_linear_on_axis(self):
self.assertEqual(self.data_h.lin_axis_for_data[1], 0)
self.assertEqual(self.data_h.lin_axis_for_data[0], 1)
def test_not_reversed(self):
for i in (0,1):
elems_are_equal = self.data_h.data[i] == self.data_h.tdata[i]
self.assertTrue(elems_are_equal.all())
if __name__=='__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for DataHandler<commit_after>import sys
sys.path.append('..')
import unittest
from datahandler import data_handler_factory
from sweep import Sweep
class DataHandlerTestCase(unittest.TestCase):
def setUp(self):
path = 'C:/Dropbox/PhD/sandbox_phd/FolderBrowser/data/2016-09-19#015'
self.sweep = Sweep(path)
data = self.sweep.data
x = data['mL']
y = data['mR']
self.data_h = data_handler_factory(x, y)
def test_data_validity(self):
self.assertTrue(self.data_h.data_is_valid)
def test_imshow_eligible(self):
self.assertTrue(self.data_h.imshow_eligible)
def test_data_is_linear(self):
self.assertTrue(self.data_h.data_is_linear[0])
self.assertTrue(self.data_h.data_is_linear[1])
def test_data_is_linear_on_axis(self):
self.assertEqual(self.data_h.lin_axis_for_data[1], 0)
self.assertEqual(self.data_h.lin_axis_for_data[0], 1)
def test_not_reversed(self):
for i in (0,1):
elems_are_equal = self.data_h.data[i] == self.data_h.tdata[i]
self.assertTrue(elems_are_equal.all())
if __name__=='__main__':
unittest.main()
|
|
285f22ffb2fef2bdfdb5fc49fb0c3d72f54f6c88
|
fib_list.py
|
fib_list.py
|
from peer import begin_tran, end_tran, shared
begin_tran()
lst = shared.setdefault('lst', [0, 1])
end_tran()
while True:
begin_tran()
next_num = lst[-2] + lst[-1]
lst.append(next_num)
end_tran()
print(lst[-10:])
|
Add a test Python program.
|
Add a test Python program.
|
Python
|
apache-2.0
|
snyderek/floating_temple,snyderek/floating_temple,snyderek/floating_temple
|
Add a test Python program.
|
from peer import begin_tran, end_tran, shared
begin_tran()
lst = shared.setdefault('lst', [0, 1])
end_tran()
while True:
begin_tran()
next_num = lst[-2] + lst[-1]
lst.append(next_num)
end_tran()
print(lst[-10:])
|
<commit_before><commit_msg>Add a test Python program.<commit_after>
|
from peer import begin_tran, end_tran, shared
begin_tran()
lst = shared.setdefault('lst', [0, 1])
end_tran()
while True:
begin_tran()
next_num = lst[-2] + lst[-1]
lst.append(next_num)
end_tran()
print(lst[-10:])
|
Add a test Python program.from peer import begin_tran, end_tran, shared
begin_tran()
lst = shared.setdefault('lst', [0, 1])
end_tran()
while True:
begin_tran()
next_num = lst[-2] + lst[-1]
lst.append(next_num)
end_tran()
print(lst[-10:])
|
<commit_before><commit_msg>Add a test Python program.<commit_after>from peer import begin_tran, end_tran, shared
begin_tran()
lst = shared.setdefault('lst', [0, 1])
end_tran()
while True:
begin_tran()
next_num = lst[-2] + lst[-1]
lst.append(next_num)
end_tran()
print(lst[-10:])
|
|
0c298a88290ed0d57359de90c9d61619c1d579cc
|
pykeg/src/pykeg/core/management/commands/common.py
|
pykeg/src/pykeg/core/management/commands/common.py
|
# Copyright 2010 Mike Wakerly <opensource@hoho.com>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
import sys
def progbar(title, pos, total, width=40):
"""Prints a progress bar to stdout.
Args
title: title to show next to progress bar
pos: current position (integer)
total: total positions (integer)
width: width of the progress bar, in characters
"""
if not total:
chars = width
else:
chars = int((float(pos)/total)*width)
rem = width - chars
inner = '+'*chars + ' '*rem
sys.stdout.write('%-30s [%s] %i/%i\r' % (title, inner, pos, total))
sys.stdout.flush()
|
Add small utility module for kb_ commands.
|
Add small utility module for kb_ commands.
|
Python
|
mit
|
Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server,Kegbot/kegbot-server
|
Add small utility module for kb_ commands.
|
# Copyright 2010 Mike Wakerly <opensource@hoho.com>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
import sys
def progbar(title, pos, total, width=40):
"""Prints a progress bar to stdout.
Args
title: title to show next to progress bar
pos: current position (integer)
total: total positions (integer)
width: width of the progress bar, in characters
"""
if not total:
chars = width
else:
chars = int((float(pos)/total)*width)
rem = width - chars
inner = '+'*chars + ' '*rem
sys.stdout.write('%-30s [%s] %i/%i\r' % (title, inner, pos, total))
sys.stdout.flush()
|
<commit_before><commit_msg>Add small utility module for kb_ commands.<commit_after>
|
# Copyright 2010 Mike Wakerly <opensource@hoho.com>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
import sys
def progbar(title, pos, total, width=40):
"""Prints a progress bar to stdout.
Args
title: title to show next to progress bar
pos: current position (integer)
total: total positions (integer)
width: width of the progress bar, in characters
"""
if not total:
chars = width
else:
chars = int((float(pos)/total)*width)
rem = width - chars
inner = '+'*chars + ' '*rem
sys.stdout.write('%-30s [%s] %i/%i\r' % (title, inner, pos, total))
sys.stdout.flush()
|
Add small utility module for kb_ commands.# Copyright 2010 Mike Wakerly <opensource@hoho.com>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
import sys
def progbar(title, pos, total, width=40):
"""Prints a progress bar to stdout.
Args
title: title to show next to progress bar
pos: current position (integer)
total: total positions (integer)
width: width of the progress bar, in characters
"""
if not total:
chars = width
else:
chars = int((float(pos)/total)*width)
rem = width - chars
inner = '+'*chars + ' '*rem
sys.stdout.write('%-30s [%s] %i/%i\r' % (title, inner, pos, total))
sys.stdout.flush()
|
<commit_before><commit_msg>Add small utility module for kb_ commands.<commit_after># Copyright 2010 Mike Wakerly <opensource@hoho.com>
#
# This file is part of the Pykeg package of the Kegbot project.
# For more information on Pykeg or Kegbot, see http://kegbot.org/
#
# Pykeg is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Pykeg is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Pykeg. If not, see <http://www.gnu.org/licenses/>.
import sys
def progbar(title, pos, total, width=40):
"""Prints a progress bar to stdout.
Args
title: title to show next to progress bar
pos: current position (integer)
total: total positions (integer)
width: width of the progress bar, in characters
"""
if not total:
chars = width
else:
chars = int((float(pos)/total)*width)
rem = width - chars
inner = '+'*chars + ' '*rem
sys.stdout.write('%-30s [%s] %i/%i\r' % (title, inner, pos, total))
sys.stdout.flush()
|
|
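A short driver loop for progbar, assuming the module above is importable as common; the sleep is only there to make the in-place redraw visible:

import time

import common

total = 50
for pos in range(total + 1):
    # the trailing '\r' in progbar redraws the same terminal line
    common.progbar('syncing kegs', pos, total)
    time.sleep(0.05)
print('')  # emit a newline to move past the progress line when done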
b13ddcc7d001606faa27637b2cb09e789dff4271
|
thinc/tests/layers/test_layers_api.py
|
thinc/tests/layers/test_layers_api.py
|
from thinc.api import registry
import pytest
@pytest.mark.parametrize(
"name,kwargs",
[
("CauchySimilarity.v0", {}),
("Dropout.v0", {}),
("Embed.v0", {}),
("ExtractWindow.v0", {}),
("FeatureExtractor.v0", {"columns": [1, 2]}),
("HashEmbed.v0", {"nO": 1, "nV": 2}),
("LayerNorm.v0", {}),
("Linear.v0", {}),
("BiLSTM.v0", {}),
("LSTM.v0", {}),
("Maxout.v0", {}),
("MaxPool.v0", {}),
("MeanPool.v0", {}),
("Mish.v0", {}),
("MultiSoftmax.v0", {"nOs": (1, 2, 3)}),
("ParametricAttention.v0", {}),
("ReLu.v0", {}),
("Softmax.v0", {}),
("SparseLinear.v0", {}),
("StaticVectors.v0", {"lang": "en", "nO": 5}),
("SumPool.v0", {}),
],
)
def test_layers_from_config(name, kwargs):
cfg = {"@layers": name, **kwargs}
filled = registry.fill_config({"config": cfg})
registry.make_from_config(filled)
|
Add layer creation sanity checks
|
Add layer creation sanity checks
|
Python
|
mit
|
explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc
|
Add layer creation sanity checks
|
from thinc.api import registry
import pytest
@pytest.mark.parametrize(
"name,kwargs",
[
("CauchySimilarity.v0", {}),
("Dropout.v0", {}),
("Embed.v0", {}),
("ExtractWindow.v0", {}),
("FeatureExtractor.v0", {"columns": [1, 2]}),
("HashEmbed.v0", {"nO": 1, "nV": 2}),
("LayerNorm.v0", {}),
("Linear.v0", {}),
("BiLSTM.v0", {}),
("LSTM.v0", {}),
("Maxout.v0", {}),
("MaxPool.v0", {}),
("MeanPool.v0", {}),
("Mish.v0", {}),
("MultiSoftmax.v0", {"nOs": (1, 2, 3)}),
("ParametricAttention.v0", {}),
("ReLu.v0", {}),
("Softmax.v0", {}),
("SparseLinear.v0", {}),
("StaticVectors.v0", {"lang": "en", "nO": 5}),
("SumPool.v0", {}),
],
)
def test_layers_from_config(name, kwargs):
cfg = {"@layers": name, **kwargs}
filled = registry.fill_config({"config": cfg})
registry.make_from_config(filled)
|
<commit_before><commit_msg>Add layer creation sanity checks<commit_after>
|
from thinc.api import registry
import pytest
@pytest.mark.parametrize(
"name,kwargs",
[
("CauchySimilarity.v0", {}),
("Dropout.v0", {}),
("Embed.v0", {}),
("ExtractWindow.v0", {}),
("FeatureExtractor.v0", {"columns": [1, 2]}),
("HashEmbed.v0", {"nO": 1, "nV": 2}),
("LayerNorm.v0", {}),
("Linear.v0", {}),
("BiLSTM.v0", {}),
("LSTM.v0", {}),
("Maxout.v0", {}),
("MaxPool.v0", {}),
("MeanPool.v0", {}),
("Mish.v0", {}),
("MultiSoftmax.v0", {"nOs": (1, 2, 3)}),
("ParametricAttention.v0", {}),
("ReLu.v0", {}),
("Softmax.v0", {}),
("SparseLinear.v0", {}),
("StaticVectors.v0", {"lang": "en", "nO": 5}),
("SumPool.v0", {}),
],
)
def test_layers_from_config(name, kwargs):
cfg = {"@layers": name, **kwargs}
filled = registry.fill_config({"config": cfg})
registry.make_from_config(filled)
|
Add layer creation sanity checksfrom thinc.api import registry
import pytest
@pytest.mark.parametrize(
"name,kwargs",
[
("CauchySimilarity.v0", {}),
("Dropout.v0", {}),
("Embed.v0", {}),
("ExtractWindow.v0", {}),
("FeatureExtractor.v0", {"columns": [1, 2]}),
("HashEmbed.v0", {"nO": 1, "nV": 2}),
("LayerNorm.v0", {}),
("Linear.v0", {}),
("BiLSTM.v0", {}),
("LSTM.v0", {}),
("Maxout.v0", {}),
("MaxPool.v0", {}),
("MeanPool.v0", {}),
("Mish.v0", {}),
("MultiSoftmax.v0", {"nOs": (1, 2, 3)}),
("ParametricAttention.v0", {}),
("ReLu.v0", {}),
("Softmax.v0", {}),
("SparseLinear.v0", {}),
("StaticVectors.v0", {"lang": "en", "nO": 5}),
("SumPool.v0", {}),
],
)
def test_layers_from_config(name, kwargs):
cfg = {"@layers": name, **kwargs}
filled = registry.fill_config({"config": cfg})
registry.make_from_config(filled)
|
<commit_before><commit_msg>Add layer creation sanity checks<commit_after>from thinc.api import registry
import pytest
@pytest.mark.parametrize(
"name,kwargs",
[
("CauchySimilarity.v0", {}),
("Dropout.v0", {}),
("Embed.v0", {}),
("ExtractWindow.v0", {}),
("FeatureExtractor.v0", {"columns": [1, 2]}),
("HashEmbed.v0", {"nO": 1, "nV": 2}),
("LayerNorm.v0", {}),
("Linear.v0", {}),
("BiLSTM.v0", {}),
("LSTM.v0", {}),
("Maxout.v0", {}),
("MaxPool.v0", {}),
("MeanPool.v0", {}),
("Mish.v0", {}),
("MultiSoftmax.v0", {"nOs": (1, 2, 3)}),
("ParametricAttention.v0", {}),
("ReLu.v0", {}),
("Softmax.v0", {}),
("SparseLinear.v0", {}),
("StaticVectors.v0", {"lang": "en", "nO": 5}),
("SumPool.v0", {}),
],
)
def test_layers_from_config(name, kwargs):
cfg = {"@layers": name, **kwargs}
filled = registry.fill_config({"config": cfg})
registry.make_from_config(filled)
|
|
20550e0890e31d98d0fd7e5abd058dcf33aa2ba7
|
p002_even_fibonacci_numbers.py
|
p002_even_fibonacci_numbers.py
|
#
'''
Project Euler - Problem 2 - Even Fibonacci numbers
https://projecteuler.net/problem=2
Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million, find the sum of the even-valued terms.
Task: Sum Even Fibonacci terms under four million
'''
import sys
def gen_even_fibo(prev_num=1, number=1, limit=100):
"""Generate a continuous series of Even Fibonacci numbers upto limit value
Since the results are not to exceed limit, the generated is seeded with the
first output number (1) and yeild immediately. On next gen, the new result
is calculated and only yeilded if the while holds true
"""
while number < limit:
if number % 2 == 0:
# only yield even numbers
yield number
prev_num, number = number, prev_num + number
def main():
'''Sum Even Fibonacci terms under four million
'''
result = sum(gen_even_fibo(limit=4000000))
print("Sum Even Fibonacci terms under four million: "
"{0}".format(result))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
Add problem 2 even fibonacci numbers python solution
|
Add problem 2 even fibonacci numbers python solution
|
Python
|
mit
|
ChrisFreeman/project-euler
|
Add problem 2 even fibonacci numbers python solution
|
#
'''
Project Euler - Problem 2 - Even Fibonacci numbers
https://projecteuler.net/problem=2
Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million, find the sum of the even-valued terms.
Task: Sum Even Fibonacci terms under four million
'''
import sys
def gen_even_fibo(prev_num=1, number=1, limit=100):
"""Generate a continuous series of Even Fibonacci numbers upto limit value
Since the results are not to exceed limit, the generated is seeded with the
first output number (1) and yeild immediately. On next gen, the new result
is calculated and only yeilded if the while holds true
"""
while number < limit:
if number % 2 == 0:
# only yield even numbers
yield number
prev_num, number = number, prev_num + number
def main():
'''Sum Even Fibonacci terms under four million
'''
result = sum(gen_even_fibo(limit=4000000))
print("Sum Even Fibonacci terms under four million: "
"{0}".format(result))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add problem 2 even fibonacci numbers python solution<commit_after>
|
#
'''
Project Euler - Problem 2 - Even Fibonacci numbers
https://projecteuler.net/problem=2
Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million, find the sum of the even-valued terms.
Task: Sum Even Fibonacci terms under four million
'''
import sys
def gen_even_fibo(prev_num=1, number=1, limit=100):
"""Generate a continuous series of Even Fibonacci numbers upto limit value
Since the results are not to exceed limit, the generated is seeded with the
first output number (1) and yeild immediately. On next gen, the new result
is calculated and only yeilded if the while holds true
"""
while number < limit:
if number % 2 == 0:
# only yield even numbers
yield number
prev_num, number = number, prev_num + number
def main():
'''Sum Even Fibonacci terms under four million
'''
result = sum(gen_even_fibo(limit=4000000))
print("Sum Even Fibonacci terms under four million: "
"{0}".format(result))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
Add problem 2 even fibonacci numbers python solution#
'''
Project Euler - Problem 2 - Even Fibonacci numbers
https://projecteuler.net/problem=2
Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million, find the sum of the even-valued terms.
Task: Sum Even Fibonacci terms under four million
'''
import sys
def gen_even_fibo(prev_num=1, number=1, limit=100):
"""Generate a continuous series of Even Fibonacci numbers upto limit value
Since the results are not to exceed limit, the generated is seeded with the
first output number (1) and yeild immediately. On next gen, the new result
is calculated and only yeilded if the while holds true
"""
while number < limit:
if number % 2 == 0:
# only yield even numbers
yield number
prev_num, number = number, prev_num + number
def main():
'''Sum Even Fibonacci terms under four million
'''
result = sum(gen_even_fibo(limit=4000000))
print("Sum Even Fibonacci terms under four million: "
"{0}".format(result))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
<commit_before><commit_msg>Add problem 2 even fibonacci numbers python solution<commit_after>#
'''
Project Euler - Problem 2 - Even Fibonacci numbers
https://projecteuler.net/problem=2
Each new term in the Fibonacci sequence is generated by adding the previous two
terms. By starting with 1 and 2, the first 10 terms will be:
1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
By considering the terms in the Fibonacci sequence whose values do not exceed
four million, find the sum of the even-valued terms.
Task: Sum Even Fibonacci terms under four million
'''
import sys
def gen_even_fibo(prev_num=1, number=1, limit=100):
"""Generate a continuous series of Even Fibonacci numbers upto limit value
Since the results are not to exceed limit, the generated is seeded with the
first output number (1) and yeild immediately. On next gen, the new result
is calculated and only yeilded if the while holds true
"""
while number < limit:
if number % 2 == 0:
# only yield even numbers
yield number
prev_num, number = number, prev_num + number
def main():
'''Sum Even Fibonacci terms under four million
'''
result = sum(gen_even_fibo(limit=4000000))
print("Sum Even Fibonacci terms under four million: "
"{0}".format(result))
if __name__ == '__main__':
# interactive run main, capture keyboard interrupts
try:
sys.exit(main())
except KeyboardInterrupt:
pass
|
|
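A quick sanity check for the generator above; the import assumes the file name doubles as the module name, and the second expected value is the well-known answer to the Euler problem:

from p002_even_fibonacci_numbers import gen_even_fibo

# even terms below 100 are 2, 8, 34; odd terms (1, 3, 5, 13, ...) are skipped
assert list(gen_even_fibo(limit=100)) == [2, 8, 34]
assert sum(gen_even_fibo(limit=4000000)) == 4613732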
ff2c8d3f74a69dd8873019ae2e966833ef4d79fd
|
pombola/south_africa/management/commands/south_africa_export_committee_members.py
|
pombola/south_africa/management/commands/south_africa_export_committee_members.py
|
"""Export a CSV listing committee members with term dates."""
import unicodecsv as csv
import os
import collections
from pombola.core.models import Person, Organisation, OrganisationKind
from django.core.management.base import BaseCommand, CommandError
from django.utils import dateformat
def formatApproxDate(date):
if date:
if date.future:
return 'future'
if date.past:
return 'past'
elif date.year and date.month and date.day:
return dateformat.format(date, 'Y-m-d')
elif date.year and date.month:
return dateformat.format(date, 'Y-m')
elif date.year:
return dateformat.format(date, 'Y')
else:
return None
class Command(BaseCommand):
args = 'destination'
help = 'Export a CSV listing committee members with term dates.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("You must provide a destination.")
destination = args[0]
organisationKind = OrganisationKind.objects.filter(slug='committee').get()
organisations = Organisation.objects.filter(kind=organisationKind)
fields = [
'name',
'title',
'given_name',
'family_name',
'committee',
'position',
'url',
'start_date',
'end_date',
'parties',
]
with open(os.path.join(destination), 'wb') as output_file:
writer = csv.DictWriter(output_file, fieldnames=fields)
writer.writeheader()
for organisation in organisations:
# Get the list of positions
positions = organisation.position_set.filter(person__hidden=False)
# Write all the outputs
for position in positions:
print position
person = position.person
parties = []
for party in person.parties():
parties.append(party.name)
position_output = {
'name': person.name,
'title': person.title,
'given_name': person.given_name,
'family_name': person.family_name,
'committee': organisation.name,
'position': position.title,
'url': 'https://www.pa.org.za/person/{}/'.format(person.slug),
'start_date': formatApproxDate(position.start_date),
'end_date': formatApproxDate(position.end_date),
'parties': ', '.join(parties)
}
writer.writerow(position_output)
print "Done! Exported CSV of " + str(len(positions)) + " positions."
|
Add export script for committee members
|
Add export script for committee members
|
Python
|
agpl-3.0
|
mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola,mysociety/pombola
|
Add export script for committee members
|
"""Export a CSV listing committee members with term dates."""
import unicodecsv as csv
import os
import collections
from pombola.core.models import Person, Organisation, OrganisationKind
from django.core.management.base import BaseCommand, CommandError
from django.utils import dateformat
def formatApproxDate(date):
if date:
if date.future:
return 'future'
if date.past:
return 'past'
elif date.year and date.month and date.day:
return dateformat.format(date, 'Y-m-d')
elif date.year and date.month:
return dateformat.format(date, 'Y-m')
elif date.year:
return dateformat.format(date, 'Y')
else:
return None
class Command(BaseCommand):
args = 'destination'
help = 'Export a CSV listing committee members with term dates.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("You must provide a destination.")
destination = args[0]
organisationKind = OrganisationKind.objects.filter(slug='committee').get()
organisations = Organisation.objects.filter(kind=organisationKind)
fields = [
'name',
'title',
'given_name',
'family_name',
'committee',
'position',
'url',
'start_date',
'end_date',
'parties',
]
with open(os.path.join(destination), 'wb') as output_file:
writer = csv.DictWriter(output_file, fieldnames=fields)
writer.writeheader()
for organisation in organisations:
# Get the list of positions
positions = organisation.position_set.filter(person__hidden=False)
# Write all the outputs
for position in positions:
print position
person = position.person
parties = []
for party in person.parties():
parties.append(party.name)
position_output = {
'name': person.name,
'title': person.title,
'given_name': person.given_name,
'family_name': person.family_name,
'committee': organisation.name,
'position': position.title,
'url': 'https://www.pa.org.za/person/{}/'.format(person.slug),
'start_date': formatApproxDate(position.start_date),
'end_date': formatApproxDate(position.end_date),
'parties': ', '.join(parties)
}
writer.writerow(position_output)
print "Done! Exported CSV of " + str(len(positions)) + " positions."
|
<commit_before><commit_msg>Add export script for committee members<commit_after>
|
"""Export a CSV listing committee members with term dates."""
import unicodecsv as csv
import os
import collections
from pombola.core.models import Person, Organisation, OrganisationKind
from django.core.management.base import BaseCommand, CommandError
from django.utils import dateformat
def formatApproxDate(date):
if date:
if date.future:
return 'future'
if date.past:
return 'past'
elif date.year and date.month and date.day:
return dateformat.format(date, 'Y-m-d')
elif date.year and date.month:
return dateformat.format(date, 'Y-m')
elif date.year:
return dateformat.format(date, 'Y')
else:
return None
class Command(BaseCommand):
args = 'destination'
help = 'Export a CSV listing committee members with term dates.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("You must provide a destination.")
destination = args[0]
organisationKind = OrganisationKind.objects.filter(slug='committee').get()
organisations = Organisation.objects.filter(kind=organisationKind)
fields = [
'name',
'title',
'given_name',
'family_name',
'committee',
'position',
'url',
'start_date',
'end_date',
'parties',
]
with open(os.path.join(destination), 'wb') as output_file:
writer = csv.DictWriter(output_file, fieldnames=fields)
writer.writeheader()
for organisation in organisations:
# Get the list of positions
positions = organisation.position_set.filter(person__hidden=False)
# Write all the outputs
for position in positions:
print position
person = position.person
parties = []
for party in person.parties():
parties.append(party.name)
position_output = {
'name': person.name,
'title': person.title,
'given_name': person.given_name,
'family_name': person.family_name,
'committee': organisation.name,
'position': position.title,
'url': 'https://www.pa.org.za/person/{}/'.format(person.slug),
'start_date': formatApproxDate(position.start_date),
'end_date': formatApproxDate(position.end_date),
'parties': ', '.join(parties)
}
writer.writerow(position_output)
print "Done! Exported CSV of " + str(len(positions)) + " positions."
|
Add export script for committee members"""Export a CSV listing committee members with term dates."""
import unicodecsv as csv
import os
import collections
from pombola.core.models import Person, Organisation, OrganisationKind
from django.core.management.base import BaseCommand, CommandError
from django.utils import dateformat
def formatApproxDate(date):
if date:
if date.future:
return 'future'
if date.past:
return 'past'
elif date.year and date.month and date.day:
return dateformat.format(date, 'Y-m-d')
elif date.year and date.month:
return dateformat.format(date, 'Y-m')
elif date.year:
return dateformat.format(date, 'Y')
else:
return None
class Command(BaseCommand):
args = 'destination'
help = 'Export a CSV listing committee members with term dates.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("You must provide a destination.")
destination = args[0]
organisationKind = OrganisationKind.objects.filter(slug='committee').get()
organisations = Organisation.objects.filter(kind=organisationKind)
fields = [
'name',
'title',
'given_name',
'family_name',
'committee',
'position',
'url',
'start_date',
'end_date',
'parties',
]
with open(os.path.join(destination), 'wb') as output_file:
writer = csv.DictWriter(output_file, fieldnames=fields)
writer.writeheader()
for organisation in organisations:
# Get the list of positions
positions = organisation.position_set.filter(person__hidden=False)
# Write all the outputs
for position in positions:
print position
person = position.person
parties = []
for party in person.parties():
parties.append(party.name)
position_output = {
'name': person.name,
'title': person.title,
'given_name': person.given_name,
'family_name': person.family_name,
'committee': organisation.name,
'position': position.title,
'url': 'https://www.pa.org.za/person/{}/'.format(person.slug),
'start_date': formatApproxDate(position.start_date),
'end_date': formatApproxDate(position.end_date),
'parties': ', '.join(parties)
}
writer.writerow(position_output)
print "Done! Exported CSV of " + str(len(positions)) + " positions."
|
<commit_before><commit_msg>Add export script for committee members<commit_after>"""Export a CSV listing committee members with term dates."""
import unicodecsv as csv
import os
import collections
from pombola.core.models import Person, Organisation, OrganisationKind
from django.core.management.base import BaseCommand, CommandError
from django.utils import dateformat
def formatApproxDate(date):
if date:
if date.future:
return 'future'
if date.past:
return 'past'
elif date.year and date.month and date.day:
return dateformat.format(date, 'Y-m-d')
elif date.year and date.month:
return dateformat.format(date, 'Y-m')
elif date.year:
return dateformat.format(date, 'Y')
else:
return None
class Command(BaseCommand):
args = 'destination'
help = 'Export a CSV listing committee members with term dates.'
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("You must provide a destination.")
destination = args[0]
organisationKind = OrganisationKind.objects.filter(slug='committee').get()
organisations = Organisation.objects.filter(kind=organisationKind)
fields = [
'name',
'title',
'given_name',
'family_name',
'committee',
'position',
'url',
'start_date',
'end_date',
'parties',
]
with open(os.path.join(destination), 'wb') as output_file:
writer = csv.DictWriter(output_file, fieldnames=fields)
writer.writeheader()
for organisation in organisations:
# Get the list of positions
positions = organisation.position_set.filter(person__hidden=False)
# Write all the outputs
for position in positions:
print position
person = position.person
parties = []
for party in person.parties():
parties.append(party.name)
position_output = {
'name': person.name,
'title': person.title,
'given_name': person.given_name,
'family_name': person.family_name,
'committee': organisation.name,
'position': position.title,
'url': 'https://www.pa.org.za/person/{}/'.format(person.slug),
'start_date': formatApproxDate(position.start_date),
'end_date': formatApproxDate(position.end_date),
'parties': ', '.join(parties)
}
writer.writerow(position_output)
print "Done! Exported CSV of " + str(len(positions)) + " positions."
|
|
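A minimal check of formatApproxDate's short-circuit branches, using a hypothetical namedtuple stand-in for the ApproximateDate values it expects; the module's Django imports make importing it directly impractical, so assume the function is already in scope:

from collections import namedtuple

FakeDate = namedtuple('FakeDate', 'future past year month day')

# the future/past flags are checked before any date formatting happens
assert formatApproxDate(FakeDate(True, False, None, None, None)) == 'future'
assert formatApproxDate(FakeDate(False, True, None, None, None)) == 'past'
assert formatApproxDate(None) is None  # falsy input falls through to None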
e552a70777ee5733434707f1d1fa334a8909cd87
|
tests/test_crossmatch.py
|
tests/test_crossmatch.py
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestCrossmatch(FlexGetBase):
__yaml__ = """
tasks:
test_title:
mock:
- title: entry 1
- title: entry 2
crossmatch:
from:
- mock:
- title: entry 2
action: reject
fields: [title]
"""
def test_reject_title(self):
self.execute_task('test_title')
assert self.task.find_entry('rejected', title='entry 2')
assert len(self.task.rejected) == 1
|
Add a test for crossmatch
|
Add a test for crossmatch
|
Python
|
mit
|
jacobmetrick/Flexget,thalamus/Flexget,ianstalk/Flexget,ibrahimkarahan/Flexget,qk4l/Flexget,qvazzler/Flexget,ibrahimkarahan/Flexget,lildadou/Flexget,Flexget/Flexget,drwyrm/Flexget,vfrc2/Flexget,ZefQ/Flexget,patsissons/Flexget,ratoaq2/Flexget,JorisDeRieck/Flexget,Pretagonist/Flexget,grrr2/Flexget,offbyone/Flexget,thalamus/Flexget,v17al/Flexget,malkavi/Flexget,malkavi/Flexget,thalamus/Flexget,crawln45/Flexget,tobinjt/Flexget,poulpito/Flexget,Flexget/Flexget,spencerjanssen/Flexget,gazpachoking/Flexget,cvium/Flexget,dsemi/Flexget,malkavi/Flexget,oxc/Flexget,tarzasai/Flexget,tarzasai/Flexget,camon/Flexget,LynxyssCZ/Flexget,drwyrm/Flexget,qvazzler/Flexget,lildadou/Flexget,cvium/Flexget,camon/Flexget,ratoaq2/Flexget,grrr2/Flexget,vfrc2/Flexget,jawilson/Flexget,Danfocus/Flexget,v17al/Flexget,antivirtel/Flexget,Pretagonist/Flexget,jawilson/Flexget,sean797/Flexget,LynxyssCZ/Flexget,antivirtel/Flexget,gazpachoking/Flexget,OmgOhnoes/Flexget,JorisDeRieck/Flexget,poulpito/Flexget,jawilson/Flexget,X-dark/Flexget,voriux/Flexget,spencerjanssen/Flexget,tsnoam/Flexget,X-dark/Flexget,vfrc2/Flexget,tarzasai/Flexget,qk4l/Flexget,jawilson/Flexget,ibrahimkarahan/Flexget,ZefQ/Flexget,ianstalk/Flexget,jacobmetrick/Flexget,drwyrm/Flexget,qvazzler/Flexget,OmgOhnoes/Flexget,spencerjanssen/Flexget,tsnoam/Flexget,crawln45/Flexget,X-dark/Flexget,jacobmetrick/Flexget,ZefQ/Flexget,patsissons/Flexget,xfouloux/Flexget,Pretagonist/Flexget,xfouloux/Flexget,voriux/Flexget,lildadou/Flexget,offbyone/Flexget,Flexget/Flexget,Danfocus/Flexget,patsissons/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,tvcsantos/Flexget,dsemi/Flexget,antivirtel/Flexget,crawln45/Flexget,crawln45/Flexget,offbyone/Flexget,tobinjt/Flexget,sean797/Flexget,grrr2/Flexget,tsnoam/Flexget,tvcsantos/Flexget,poulpito/Flexget,dsemi/Flexget,OmgOhnoes/Flexget,v17al/Flexget,ratoaq2/Flexget,ianstalk/Flexget,oxc/Flexget,malkavi/Flexget,oxc/Flexget,Danfocus/Flexget,LynxyssCZ/Flexget,xfouloux/Flexget,Danfocus/Flexget,cvium/Flexget,tobinjt/Flexget,qk4l/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,sean797/Flexget
|
Add a test for crossmatch
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestCrossmatch(FlexGetBase):
__yaml__ = """
tasks:
test_title:
mock:
- title: entry 1
- title: entry 2
crossmatch:
from:
- mock:
- title: entry 2
action: reject
fields: [title]
"""
def test_reject_title(self):
self.execute_task('test_title')
assert self.task.find_entry('rejected', title='entry 2')
assert len(self.task.rejected) == 1
|
<commit_before><commit_msg>Add a test for crossmatch<commit_after>
|
from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestCrossmatch(FlexGetBase):
__yaml__ = """
tasks:
test_title:
mock:
- title: entry 1
- title: entry 2
crossmatch:
from:
- mock:
- title: entry 2
action: reject
fields: [title]
"""
def test_reject_title(self):
self.execute_task('test_title')
assert self.task.find_entry('rejected', title='entry 2')
assert len(self.task.rejected) == 1
|
Add a test for crossmatchfrom __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestCrossmatch(FlexGetBase):
__yaml__ = """
tasks:
test_title:
mock:
- title: entry 1
- title: entry 2
crossmatch:
from:
- mock:
- title: entry 2
action: reject
fields: [title]
"""
def test_reject_title(self):
self.execute_task('test_title')
assert self.task.find_entry('rejected', title='entry 2')
assert len(self.task.rejected) == 1
|
<commit_before><commit_msg>Add a test for crossmatch<commit_after>from __future__ import unicode_literals, division, absolute_import
from tests import FlexGetBase
class TestCrossmatch(FlexGetBase):
__yaml__ = """
tasks:
test_title:
mock:
- title: entry 1
- title: entry 2
crossmatch:
from:
- mock:
- title: entry 2
action: reject
fields: [title]
"""
def test_reject_title(self):
self.execute_task('test_title')
assert self.task.find_entry('rejected', title='entry 2')
assert len(self.task.rejected) == 1
|
|
66d84f16e01ac92d3f2973bcb7b9297e3e04ad85
|
tests/models.py
|
tests/models.py
|
import logging
class MockHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
|
Add a mock log handler for aggregating log messages
|
Add a mock log handler for aggregating log messages
|
Python
|
bsd-2-clause
|
chop-dbhi/varify,chop-dbhi/varify,chop-dbhi/varify,chop-dbhi/varify
|
Add a mock log handler for aggregating log messages
|
import logging
class MockHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
|
<commit_before><commit_msg>Add a mock log handler for aggregating log messages<commit_after>
|
import logging
class MockHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
|
Add a mock log handler for aggregating log messagesimport logging
class MockHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
|
<commit_before><commit_msg>Add a mock log handler for aggregating log messages<commit_after>import logging
class MockHandler(logging.Handler):
def __init__(self, *args, **kwargs):
self.reset()
logging.Handler.__init__(self, *args, **kwargs)
def emit(self, record):
self.messages[record.levelname.lower()].append(record.getMessage())
def reset(self):
self.messages = {
'debug': [],
'info': [],
'warning': [],
'error': [],
'critical': [],
}
|
|
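A brief illustration of how the mock handler above might be used in a test, assuming it is importable from tests.models:

import logging

from tests.models import MockHandler

handler = MockHandler()
log = logging.getLogger('varify.test')
log.addHandler(handler)
log.setLevel(logging.DEBUG)

log.warning('sample %s', 'record')
assert handler.messages['warning'] == ['sample record']

handler.reset()  # reset() re-creates the empty per-level message buckets
assert handler.messages['warning'] == []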
1c2f6cccdda57b52ab779539acbe42a427e4b21e
|
conftest.py
|
conftest.py
|
import pandana.network as pdna
# Pandana uses global state for networks,
# so we have to pre-declare here how many networks will be
# created during testing.
#
# If you see an error that looks like:
# AssertionError: Adding more networks than have been reserved
# then you probably need to update this number.
num_networks_tested = 2
pdna.reserve_num_graphs(num_networks_tested)
|
Declare number of Networks used during testing
|
Declare number of Networks used during testing
Done via conftest.py file (a special feature of pytest)
that calls pandana.network.reserve_num_graphs.
|
Python
|
agpl-3.0
|
waddell/pandana,rafapereirabr/pandana,rafapereirabr/pandana,synthicity/pandana,rafapereirabr/pandana,synthicity/pandana,SANDAG/pandana,UDST/pandana,synthicity/pandana,UDST/pandana,SANDAG/pandana,UDST/pandana,waddell/pandana,SANDAG/pandana,UDST/pandana,rafapereirabr/pandana,waddell/pandana,synthicity/pandana,waddell/pandana,SANDAG/pandana
|
Declare number of Networks used during testing
Done via conftest.py file (a special feature of pytest)
that calls pandana.network.reserve_num_graphs.
|
import pandana.network as pdna
# Pandana uses global state for networks,
# so we have to pre-declare here how many networks will be
# created during testing.
#
# If you see an error that looks like:
# AssertionError: Adding more networks than have been reserved
# then you probably need to update this number.
num_networks_tested = 2
pdna.reserve_num_graphs(num_networks_tested)
|
<commit_before><commit_msg>Declare number of Networks used during testing
Done via conftest.py file (a special feature of pytest)
that calls pandana.network.reserve_num_graphs.<commit_after>
|
import pandana.network as pdna
# Pandana uses global state for networks,
# so we have to pre-declare here how many networks will be
# created during testing.
#
# If you see an error that looks like:
# AssertionError: Adding more networks than have been reserved
# then you probably need to update this number.
num_networks_tested = 2
pdna.reserve_num_graphs(num_networks_tested)
|
Declare number of Networks used during testing
Done via conftest.py file (a special feature of pytest)
that calls pandana.network.reserve_num_graphs.import pandana.network as pdna
# Pandana uses global state for networks,
# so we have to pre-declare here how many networks will be
# created during testing.
#
# If you see an error that looks like:
# AssertionError: Adding more networks than have been reserved
# then you probably need to update this number.
num_networks_tested = 2
pdna.reserve_num_graphs(num_networks_tested)
|
<commit_before><commit_msg>Declare number of Networks used during testing
Done via conftest.py file (a special feature of pytest)
that calls pandana.network.reserve_num_graphs.<commit_after>import pandana.network as pdna
# Pandana uses global state for networks,
# so we have to pre-declare here how many networks will be
# created during testing.
#
# If you see an error that looks like:
# AssertionError: Adding more networks than have been reserved
# then you probably need to update this number.
num_networks_tested = 2
pdna.reserve_num_graphs(num_networks_tested)
|
|
a3907ed778c827a2807cb66d67f639abe706a808
|
examples/btest.py
|
examples/btest.py
|
import numpy as np
import loopy as lp
target = lp.CudaTarget()
kernel = lp.make_kernel(
"{ [i_node,j_node]: 0<=i_node,j_node<n_node}",
"""
<float32> coupling_value = params(1)
<float32> speed_value = params(0)
<float32> dt=0.1
<float32> M_PI_F = 2.0
<float32> rec_n = 1.0f / n_node
<float32> rec_speed_dt = 1.0f / speed_value / dt
<float32> omega = 10.0 * 2.0 * M_PI_F / 1e3
<float32> sig = sqrt(dt) * sqrt(2.0 * 1e-5)
<float32> rand = 1.0
for i_node
tavg[i_node]=0.0f {id = clear}
end
for i_node
<float32> theta_i = state[i_node] {id = coupling1, dep=*}
<float32> sum = 0.0 {id = coupling2}
for j_node
<float32> wij = weights[j_node] {id = coupling3, dep=coupling1:coupling2}
if wij != 0.0
<int> dij = lengths[j_node] * rec_speed_dt {id = coupling4, dep=coupling3}
<float32> theta_j = state[j_node]
sum = sum + wij * sin(theta_j - theta_i)
end
end
theta_i = theta_i + dt * (omega + coupling_value * rec_n * sum) {id = out1, dep=coupling4}
theta_i = theta_i + (sig * rand) {id = out2, dep=out1}
theta_i = wrap_2_pi(theta_i) {id = out3, dep=out2}
tavg[i_node] = tavg[i_node] + sin(theta_i) {id = out4, dep=out3}
state[i_node] = theta_i {dep=*coupling1}
end
""", assumptions="n_node>=0")
kernel = lp.add_dtypes(kernel, dict(tavg=np.float32, state=np.float32, weights=np.float32, lengths=np.float32))
kernel = kernel.copy(target=lp.CudaTarget())
code = lp.generate_code_v2(kernel)
print (kernel)
print (code.host_code())
print (code.device_code())
|
Test example to recreate kernel from hackathon and explore loopy tutorial
|
Test example to recreate kernel from hackathon and explore loopy tutorial
|
Python
|
apache-2.0
|
the-virtual-brain/tvb-hpc,the-virtual-brain/tvb-hpc,the-virtual-brain/tvb-hpc
|
Test example to recreate kernel from hackathon and explore loopy tutorial
|
import numpy as np
import loopy as lp
target = lp.CudaTarget()
kernel = lp.make_kernel(
"{ [i_node,j_node]: 0<=i_node,j_node<n_node}",
"""
<float32> coupling_value = params(1)
<float32> speed_value = params(0)
<float32> dt=0.1
<float32> M_PI_F = 2.0
<float32> rec_n = 1.0f / n_node
<float32> rec_speed_dt = 1.0f / speed_value / dt
<float32> omega = 10.0 * 2.0 * M_PI_F / 1e3
<float32> sig = sqrt(dt) * sqrt(2.0 * 1e-5)
<float32> rand = 1.0
for i_node
tavg[i_node]=0.0f {id = clear}
end
for i_node
<float32> theta_i = state[i_node] {id = coupling1, dep=*}
<float32> sum = 0.0 {id = coupling2}
for j_node
<float32> wij = weights[j_node] {id = coupling3, dep=coupling1:coupling2}
if wij != 0.0
<int> dij = lengths[j_node] * rec_speed_dt {id = coupling4, dep=coupling3}
<float32> theta_j = state[j_node]
sum = sum + wij * sin(theta_j - theta_i)
end
end
theta_i = theta_i + dt * (omega + coupling_value * rec_n * sum) {id = out1, dep=coupling4}
theta_i = theta_i + (sig * rand) {id = out2, dep=out1}
theta_i = wrap_2_pi(theta_i) {id = out3, dep=out2}
tavg[i_node] = tavg[i_node] + sin(theta_i) {id = out4, dep=out3}
state[i_node] = theta_i {dep=*coupling1}
end
""", assumptions="n_node>=0")
kernel = lp.add_dtypes(kernel, dict(tavg=np.float32, state=np.float32, weights=np.float32, lengths=np.float32))
kernel = kernel.copy(target=lp.CudaTarget())
code = lp.generate_code_v2(kernel)
print (kernel)
print (code.host_code())
print (code.device_code())
|
<commit_before><commit_msg>Test example to recreate kernel from hackathon and explore loopy tutorial<commit_after>
|
import numpy as np
import loopy as lp
target = lp.CudaTarget()
kernel = lp.make_kernel(
"{ [i_node,j_node]: 0<=i_node,j_node<n_node}",
"""
<float32> coupling_value = params(1)
<float32> speed_value = params(0)
<float32> dt=0.1
<float32> M_PI_F = 2.0
<float32> rec_n = 1.0f / n_node
<float32> rec_speed_dt = 1.0f / speed_value / dt
<float32> omega = 10.0 * 2.0 * M_PI_F / 1e3
<float32> sig = sqrt(dt) * sqrt(2.0 * 1e-5)
<float32> rand = 1.0
for i_node
tavg[i_node]=0.0f {id = clear}
end
for i_node
<float32> theta_i = state[i_node] {id = coupling1, dep=*}
<float32> sum = 0.0 {id = coupling2}
for j_node
<float32> wij = weights[j_node] {id = coupling3, dep=coupling1:coupling2}
if wij != 0.0
<int> dij = lengths[j_node] * rec_speed_dt {id = coupling4, dep=coupling3}
<float32> theta_j = state[j_node]
sum = sum + wij * sin(theta_j - theta_i)
end
end
theta_i = theta_i + dt * (omega + coupling_value * rec_n * sum) {id = out1, dep=coupling4}
theta_i = theta_i + (sig * rand) {id = out2, dep=out1}
theta_i = wrap_2_pi(theta_i) {id = out3, dep=out2}
tavg[i_node] = tavg[i_node] + sin(theta_i) {id = out4, dep=out3}
state[i_node] = theta_i {dep=*coupling1}
end
""", assumptions="n_node>=0")
kernel = lp.add_dtypes(kernel, dict(tavg=np.float32, state=np.float32, weights=np.float32, lengths=np.float32))
kernel = kernel.copy(target=lp.CudaTarget())
code = lp.generate_code_v2(kernel)
print (kernel)
print (code.host_code())
print (code.device_code())
|
Test example to recreate kernel from hackathon and explore loopy tutorialimport numpy as np
import loopy as lp
target = lp.CudaTarget()
kernel = lp.make_kernel(
"{ [i_node,j_node]: 0<=i_node,j_node<n_node}",
"""
<float32> coupling_value = params(1)
<float32> speed_value = params(0)
<float32> dt=0.1
<float32> M_PI_F = 2.0
<float32> rec_n = 1.0f / n_node
<float32> rec_speed_dt = 1.0f / speed_value / dt
<float32> omega = 10.0 * 2.0 * M_PI_F / 1e3
<float32> sig = sqrt(dt) * sqrt(2.0 * 1e-5)
<float32> rand = 1.0
for i_node
tavg[i_node]=0.0f {id = clear}
end
for i_node
<float32> theta_i = state[i_node] {id = coupling1, dep=*}
<float32> sum = 0.0 {id = coupling2}
for j_node
<float32> wij = weights[j_node] {id = coupling3, dep=coupling1:coupling2}
if wij != 0.0
<int> dij = lengths[j_node] * rec_speed_dt {id = coupling4, dep=coupling3}
<float32> theta_j = state[j_node]
sum = sum + wij * sin(theta_j - theta_i)
end
end
theta_i = theta_i + dt * (omega + coupling_value * rec_n * sum) {id = out1, dep=coupling4}
theta_i = theta_i + (sig * rand) {id = out2, dep=out1}
theta_i = wrap_2_pi(theta_i) {id = out3, dep=out2}
tavg[i_node] = tavg[i_node] + sin(theta_i) {id = out4, dep=out3}
state[i_node] = theta_i {dep=*coupling1}
end
""", assumptions="n_node>=0")
kernel = lp.add_dtypes(kernel, dict(tavg=np.float32, state=np.float32, weights=np.float32, lengths=np.float32))
kernel = kernel.copy(target=lp.CudaTarget())
code = lp.generate_code_v2(kernel)
print(kernel)
print(code.host_code())
print(code.device_code())
|
<commit_before><commit_msg>Test example to recreate kernel from hackathon and explore loopy tutorial<commit_after>import numpy as np
import loopy as lp
target = lp.CudaTarget()
kernel = lp.make_kernel(
"{ [i_node,j_node]: 0<=i_node,j_node<n_node}",
"""
<float32> coupling_value = params(1)
<float32> speed_value = params(0)
<float32> dt=0.1
<float32> M_PI_F = 2.0
<float32> rec_n = 1.0f / n_node
<float32> rec_speed_dt = 1.0f / speed_value / dt
<float32> omega = 10.0 * 2.0 * M_PI_F / 1e3
<float32> sig = sqrt(dt) * sqrt(2.0 * 1e-5)
<float32> rand = 1.0
for i_node
tavg[i_node]=0.0f {id = clear}
end
for i_node
<float32> theta_i = state[i_node] {id = coupling1, dep=*}
<float32> sum = 0.0 {id = coupling2}
for j_node
<float32> wij = weights[j_node] {id = coupling3, dep=coupling1:coupling2}
if wij != 0.0
<int> dij = lengths[j_node] * rec_speed_dt {id = coupling4, dep=coupling3}
<float32> theta_j = state[j_node]
sum = sum + wij * sin(theta_j - theta_i)
end
end
theta_i = theta_i + dt * (omega + coupling_value * rec_n * sum) {id = out1, dep=coupling4}
theta_i = theta_i + (sig * rand) {id = out2, dep=out1}
theta_i = wrap_2_pi(theta_i) {id = out3, dep=out2}
tavg[i_node] = tavg[i_node] + sin(theta_i) {id = out4, dep=out3}
state[i_node] = theta_i {dep=*coupling1}
end
""", assumptions="n_node>=0")
kernel = lp.add_dtypes(kernel, dict(tavg=np.float32, state=np.float32, weights=np.float32, lengths=np.float32))
kernel = kernel.copy(target=lp.CudaTarget())
code = lp.generate_code_v2(kernel)
print(kernel)
print(code.host_code())
print(code.device_code())
|
|
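The record above follows loopy's canonical pipeline: declare an ISL-style domain, write the instructions as a string, attach dtypes, set a target, and generate code. A minimal sketch of that same pipeline on a toy kernel (assumes loopy and numpy are installed; the kernel itself is invented for illustration):
import numpy as np
import loopy as lp
# Toy kernel: double each element of a over an n-sized domain.
knl = lp.make_kernel(
    "{ [i]: 0<=i<n }",
    "out[i] = 2*a[i]",
    assumptions="n>=0")
knl = lp.add_dtypes(knl, dict(a=np.float32))  # out's dtype is inferred from a
knl = knl.copy(target=lp.CudaTarget())
print(lp.generate_code_v2(knl).device_code())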
db92e3818e9dac883288bf24e04d64a5d94054ac
|
xos/helloworld/models.py
|
xos/helloworld/models.py
|
from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
sliver_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
|
from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
instance_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
|
Change old 'sliver' ref to instance
|
Change old 'sliver' ref to instance
|
Python
|
apache-2.0
|
xmaruto/mcord,cboling/xos,xmaruto/mcord,xmaruto/mcord,jermowery/xos,cboling/xos,cboling/xos,cboling/xos,jermowery/xos,jermowery/xos,xmaruto/mcord,jermowery/xos,cboling/xos
|
from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
sliver_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
Change old 'sliver' ref to instance
|
from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
instance_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
|
<commit_before>from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
sliver_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
<commit_msg>Change old 'sliver' ref to instance<commit_after>
|
from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
instance_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
|
from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
sliver_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
Change old 'sliver' ref to instance
from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
instance_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
|
<commit_before>from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
sliver_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
<commit_msg>Change old 'sliver' ref to instance<commit_after>from django.db import models
from core.models import User, Service, SingletonModel, PlCoreBase, Instance
from core.models.plcorebase import StrippedCharField
import os
from django.db import models
from django.forms.models import model_to_dict
from django.db.models import Q
# Create your models here.
class Hello(PlCoreBase):
name = models.CharField(max_length=254,help_text="Salutation e.g. Hello or Bonjour")
instance_backref = models.ForeignKey(Instance)
class World(PlCoreBase):
name = models.CharField(max_length=254,help_text="Name of planet")
hello = models.ForeignKey(Hello)
|
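A forward-compatibility note on the model above: since Django 2.0, ForeignKey requires an explicit on_delete argument, so the same fields would have to be written roughly as follows (CASCADE is one plausible choice, not taken from the original code):
instance_backref = models.ForeignKey(Instance, on_delete=models.CASCADE)
hello = models.ForeignKey(Hello, on_delete=models.CASCADE)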
eb984f5b3267f090cbfe0c0b5974126ebef7190c
|
tests/unit/test__compat.py
|
tests/unit/test__compat.py
|
# -*- coding: utf-8 -*-
'''
Unit tests for salt._compat
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
# Import Salt Testing libs
from tests.support.helpers import with_tempdir, with_tempfile, destructiveTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, MagicMock, patch
# Import Salt libs
import salt._compat as compat
# Import 3rd Party libs
from salt.ext.six import binary_type, string_types, text_type, integer_types
from salt.ext.six.moves import cStringIO, StringIO
log = logging.getLogger(__name__)
PY3 = sys.version_info.major == 3
class CompatTestCase(TestCase):
def test_text(self):
ret = compat.text_('test string')
self.assertTrue(isinstance(ret, text_type))
def test_text_binary(self):
ret = compat.text_(b'test string')
self.assertTrue(isinstance(ret, text_type))
def test_bytes(self):
ret = compat.bytes_('test string')
self.assertTrue(isinstance(ret, binary_type))
def test_bytes_binary(self):
ret = compat.bytes_(b'test string')
self.assertTrue(isinstance(ret, binary_type))
def test_ascii_native(self):
ret = compat.ascii_native_('test string')
self.assertTrue(isinstance(ret, str))
def test_ascii_native_binary(self):
ret = compat.ascii_native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_native(self):
ret = compat.native_('test string')
self.assertTrue(isinstance(ret, str))
def test_native_binary(self):
ret = compat.native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_string_io(self):
ret = compat.string_io('test string')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'cStringIO.StringI object'
self.assertTrue(expected in repr(ret))
def test_string_io_unicode(self):
ret = compat.string_io(u'test string \xf8')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'StringIO.StringIO instance'
self.assertTrue(expected in repr(ret))
|
Add unit tests for _compat.py
|
Add unit tests for _compat.py
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add unit tests for _compat.py
|
# -*- coding: utf-8 -*-
'''
Unit tests for salt._compat
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
# Import Salt Testing libs
from tests.support.helpers import with_tempdir, with_tempfile, destructiveTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, MagicMock, patch
# Import Salt libs
import salt._compat as compat
# Import 3rd Party libs
from salt.ext.six import binary_type, string_types, text_type, integer_types
from salt.ext.six.moves import cStringIO, StringIO
log = logging.getLogger(__name__)
PY3 = sys.version_info.major == 3
class CompatTestCase(TestCase):
def test_text(self):
ret = compat.text_('test string')
self.assertTrue(isinstance(ret, text_type))
def test_text_binary(self):
ret = compat.text_(b'test string')
self.assertTrue(isinstance(ret, text_type))
def test_bytes(self):
ret = compat.bytes_('test string')
self.assertTrue(isinstance(ret, binary_type))
def test_bytes_binary(self):
ret = compat.bytes_(b'test string')
self.assertTrue(isinstance(ret, binary_type))
def test_ascii_native(self):
ret = compat.ascii_native_('test string')
self.assertTrue(isinstance(ret, str))
def test_ascii_native_binary(self):
ret = compat.ascii_native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_native(self):
ret = compat.native_('test string')
self.assertTrue(isinstance(ret, str))
def test_native_binary(self):
ret = compat.native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_string_io(self):
ret = compat.string_io('test string')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'cStringIO.StringI object'
self.assertTrue(expected in repr(ret))
def test_string_io_unicode(self):
ret = compat.string_io(u'test string \xf8')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'StringIO.StringIO instance'
self.assertTrue(expected in repr(ret))
|
<commit_before><commit_msg>Add unit tests for _compat.py<commit_after>
|
# -*- coding: utf-8 -*-
'''
Unit tests for salt._compat
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
# Import Salt Testing libs
from tests.support.helpers import with_tempdir, with_tempfile, destructiveTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, MagicMock, patch
# Import Salt libs
import salt._compat as compat
# Import 3rd Party libs
from salt.ext.six import binary_type, string_types, text_type, integer_types
from salt.ext.six.moves import cStringIO, StringIO
log = logging.getLogger(__name__)
PY3 = sys.version_info.major == 3
class CompatTestCase(TestCase):
def test_text(self):
ret = compat.text_('test string')
self.assertTrue(isinstance(ret, text_type))
def test_text_binary(self):
ret = compat.text_(b'test string')
self.assertTrue(isinstance(ret, text_type))
def test_bytes(self):
ret = compat.bytes_('test string')
self.assertTrue(isinstance(ret, binary_type))
def test_bytes_binary(self):
ret = compat.bytes_(b'test string')
self.assertTrue(isinstance(ret, binary_type))
def test_ascii_native(self):
ret = compat.ascii_native_('test string')
self.assertTrue(isinstance(ret, str))
def test_ascii_native_binary(self):
ret = compat.ascii_native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_native(self):
ret = compat.native_('test string')
self.assertTrue(isinstance(ret, str))
def test_native_binary(self):
ret = compat.native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_string_io(self):
ret = compat.string_io('test string')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'cStringIO.StringI object'
self.assertTrue(expected in repr(ret))
def test_string_io_unicode(self):
ret = compat.string_io(u'test string \xf8')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'StringIO.StringIO instance'
self.assertTrue(expected in repr(ret))
|
Add unit tests for _compat.py
# -*- coding: utf-8 -*-
'''
Unit tests for salt._compat
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
# Import Salt Testing libs
from tests.support.helpers import with_tempdir, with_tempfile, destructiveTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, MagicMock, patch
# Import Salt libs
import salt._compat as compat
# Import 3rd Party libs
from salt.ext.six import binary_type, string_types, text_type, integer_types
from salt.ext.six.moves import cStringIO, StringIO
log = logging.getLogger(__name__)
PY3 = sys.version_info.major == 3
class CompatTestCase(TestCase):
def test_text(self):
ret = compat.text_('test string')
self.assertTrue(isinstance(ret, text_type))
def test_text_binary(self):
ret = compat.text_(b'test string')
self.assertTrue(isinstance(ret, text_type))
def test_bytes(self):
ret = compat.bytes_('test string')
self.assertTrue(isinstance(ret, binary_type))
def test_bytes_binary(self):
ret = compat.bytes_(b'test string')
self.assertTrue(isinstance(ret, binary_type))
def test_ascii_native(self):
ret = compat.ascii_native_('test string')
self.assertTrue(isinstance(ret, str))
def test_ascii_native_binary(self):
ret = compat.ascii_native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_native(self):
ret = compat.native_('test string')
self.assertTrue(isinstance(ret, str))
def test_native_binary(self):
ret = compat.native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_string_io(self):
ret = compat.string_io('test string')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'cStringIO.StringI object'
self.assertTrue(expected in repr(ret))
def test_string_io_unicode(self):
ret = compat.string_io(u'test string \xf8')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'StringIO.StringIO instance'
self.assertTrue(expected in repr(ret))
|
<commit_before><commit_msg>Add unit tests for _compat.py<commit_after># -*- coding: utf-8 -*-
'''
Unit tests for salt._compat
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
# Import Salt Testing libs
from tests.support.helpers import with_tempdir, with_tempfile, destructiveTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.paths import TMP
from tests.support.unit import skipIf, TestCase
from tests.support.mock import NO_MOCK, NO_MOCK_REASON, Mock, MagicMock, patch
# Import Salt libs
import salt._compat as compat
# Import 3rd Party libs
from salt.ext.six import binary_type, string_types, text_type, integer_types
from salt.ext.six.moves import cStringIO, StringIO
log = logging.getLogger(__name__)
PY3 = sys.version_info.major == 3
class CompatTestCase(TestCase):
def test_text(self):
ret = compat.text_('test string')
self.assertTrue(isinstance(ret, text_type))
def test_text_binary(self):
ret = compat.text_(b'test string')
self.assertTrue(isinstance(ret, text_type))
def test_bytes(self):
ret = compat.bytes_('test string')
self.assertTrue(isinstance(ret, binary_type))
def test_bytes_binary(self):
ret = compat.bytes_(b'test string')
self.assertTrue(isinstance(ret, binary_type))
def test_ascii_native(self):
ret = compat.ascii_native_('test string')
self.assertTrue(isinstance(ret, str))
def test_ascii_native_binary(self):
ret = compat.ascii_native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_native(self):
ret = compat.native_('test string')
self.assertTrue(isinstance(ret, str))
def test_native_binary(self):
ret = compat.native_(b'test string')
self.assertTrue(isinstance(ret, str))
def test_string_io(self):
ret = compat.string_io('test string')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'cStringIO.StringI object'
self.assertTrue(expected in repr(ret))
def test_string_io_unicode(self):
ret = compat.string_io(u'test string \xf8')
if PY3:
expected = 'io.StringIO object'
else:
expected = 'StringIO.StringIO instance'
self.assertTrue(expected in repr(ret))
|
|
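The helpers exercised above follow the familiar six-style coercion pattern. A self-contained Python 3 sketch of their general shape (the real salt._compat implementations differ in detail, e.g. around ascii handling):
def text_(s, encoding='utf-8', errors='strict'):
    # bytes -> str; str passes through unchanged
    return s.decode(encoding, errors) if isinstance(s, bytes) else s
def bytes_(s, encoding='utf-8', errors='strict'):
    # str -> bytes; bytes passes through unchanged
    return s.encode(encoding, errors) if isinstance(s, str) else s
assert isinstance(text_(b'test string'), str)
assert isinstance(bytes_('test string'), bytes)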
fcc2013d1d3a21a1c23cafbaf24cb49a74d054ea
|
tools/committee_counter.py
|
tools/committee_counter.py
|
#!/usr/bin/env python
from pupa.core import db
for orga in db.organizations.find({"classification": "committee"}):
memberships = db.memberships.find({
"organization_id": orga['_id'],
"end_date": None
}).count()
print memberships, orga['jurisdiction_id'], orga['name']
|
Add a counter to help with debugging.
|
Add a counter to help with debugging.
|
Python
|
bsd-3-clause
|
rshorey/pupa,influence-usa/pupa,opencivicdata/pupa,datamade/pupa,mileswwatkins/pupa,mileswwatkins/pupa,opencivicdata/pupa,datamade/pupa,rshorey/pupa,influence-usa/pupa
|
Add a counter to help with debugging.
|
#!/usr/bin/env python
from pupa.core import db
for orga in db.organizations.find({"classification": "committee"}):
memberships = db.memberships.find({
"organization_id": orga['_id'],
"end_date": None
}).count()
print memberships, orga['jurisdiction_id'], orga['name']
|
<commit_before><commit_msg>Add a counter to help with debugging.<commit_after>
|
#!/usr/bin/env python
from pupa.core import db
for orga in db.organizations.find({"classification": "committee"}):
memberships = db.memberships.find({
"organization_id": orga['_id'],
"end_date": None
}).count()
print memberships, orga['jurisdiction_id'], orga['name']
|
Add a counter to help with debugging.
#!/usr/bin/env python
from pupa.core import db
for orga in db.organizations.find({"classification": "committee"}):
memberships = db.memberships.find({
"organization_id": orga['_id'],
"end_date": None
}).count()
print memberships, orga['jurisdiction_id'], orga['name']
|
<commit_before><commit_msg>Add a counter to help with debugging.<commit_after>#!/usr/bin/env python
from pupa.core import db
for orga in db.organizations.find({"classification": "committee"}):
memberships = db.memberships.find({
"organization_id": orga['_id'],
"end_date": None
}).count()
print memberships, orga['jurisdiction_id'], orga['name']
|
|
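The script above relies on the Python 2 print statement and the long-deprecated cursor.count(). On Python 3 with pymongo 3.7+, the same count reads roughly as follows (a sketch, not the project's code):
for orga in db.organizations.find({"classification": "committee"}):
    memberships = db.memberships.count_documents({
        "organization_id": orga["_id"],
        "end_date": None,
    })
    print(memberships, orga["jurisdiction_id"], orga["name"])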
15012a11fc0e2f28982ea7956f822dff3af800d7
|
application/list_locations.py
|
application/list_locations.py
|
#!/usr/bin/python3
'''
Created on Dec 15, 2014
@author: lwoydziak
'''
from jsonconfigfile import Env
from providers import digitalOceanHosting
'''usage:
list_locations.py # list available locations
'''
def ListLocations():
initialJson = '{ \
"DigitalOcean" : { \
"Client ID" : "None", \
"API Key" : "None", \
"location" : "None", \
"image" : "None", \
"size" : "None" \
},\
"BaseHostName": "None"\
}'
Env(initialJson, ".dynamicMachine", "DYNAMIC_MACHINE_CONFIG")
onDigitalOcean = digitalOceanHosting()
print("Available Targets:")
for target in onDigitalOcean.list_locations():
print (str(target))
if __name__ == '__main__':
try:
ListLocations()
exit(0)
except Exception as e:
print (str(e))
exit(1)
|
Add a facility to list regions/locations.
|
Add a facility to list regions/locations.
|
Python
|
mit
|
Pipe-s/dynamic_machine,Pipe-s/dynamic_machine
|
Add a facility to list regions/locations.
|
#!/usr/bin/python3
'''
Created on Dec 15, 2014
@author: lwoydziak
'''
from jsonconfigfile import Env
from providers import digitalOceanHosting
'''usage:
list_locations.py # list available locations
'''
def ListLocations():
initialJson = '{ \
"DigitalOcean" : { \
"Client ID" : "None", \
"API Key" : "None", \
"location" : "None", \
"image" : "None", \
"size" : "None" \
},\
"BaseHostName": "None"\
}'
Env(initialJson, ".dynamicMachine", "DYNAMIC_MACHINE_CONFIG")
onDigitalOcean = digitalOceanHosting()
print("Available Targets:")
for target in onDigitalOcean.list_locations():
print (str(target))
if __name__ == '__main__':
try:
ListLocations()
exit(0)
except Exception as e:
print (str(e))
exit(1)
|
<commit_before><commit_msg>Add a facility to list regions/locations.<commit_after>
|
#!/usr/bin/python3
'''
Created on Dec 15, 2014
@author: lwoydziak
'''
from jsonconfigfile import Env
from providers import digitalOceanHosting
'''usage:
list_locations.py # list available locations
'''
def ListLocations():
initialJson = '{ \
"DigitalOcean" : { \
"Client ID" : "None", \
"API Key" : "None", \
"location" : "None", \
"image" : "None", \
"size" : "None" \
},\
"BaseHostName": "None"\
}'
Env(initialJson, ".dynamicMachine", "DYNAMIC_MACHINE_CONFIG")
onDigitalOcean = digitalOceanHosting()
print("Available Targets:")
for target in onDigitalOcean.list_locations():
print (str(target))
if __name__ == '__main__':
try:
ListLocations()
exit(0)
except Exception as e:
print (str(e))
exit(1)
|
Add a facility to list regions/locations.
#!/usr/bin/python3
'''
Created on Dec 15, 2014
@author: lwoydziak
'''
from jsonconfigfile import Env
from providers import digitalOceanHosting
'''usage:
list_locations.py # list available locations
'''
def ListLocations():
initialJson = '{ \
"DigitalOcean" : { \
"Client ID" : "None", \
"API Key" : "None", \
"location" : "None", \
"image" : "None", \
"size" : "None" \
},\
"BaseHostName": "None"\
}'
Env(initialJson, ".dynamicMachine", "DYNAMIC_MACHINE_CONFIG")
onDigitalOcean = digitalOceanHosting()
print("Available Targets:")
for target in onDigitalOcean.list_locations():
print (str(target))
if __name__ == '__main__':
try:
ListLocations()
exit(0)
except Exception as e:
print (str(e))
exit(1)
|
<commit_before><commit_msg>Add a facility to list regions/locations.<commit_after>#!/usr/bin/python3
'''
Created on Dec 15, 2014
@author: lwoydziak
'''
from jsonconfigfile import Env
from providers import digitalOceanHosting
'''usage:
list_locations.py # list available locations
'''
def ListLocations():
initialJson = '{ \
"DigitalOcean" : { \
"Client ID" : "None", \
"API Key" : "None", \
"location" : "None", \
"image" : "None", \
"size" : "None" \
},\
"BaseHostName": "None"\
}'
Env(initialJson, ".dynamicMachine", "DYNAMIC_MACHINE_CONFIG")
onDigitalOcean = digitalOceanHosting()
print("Available Targets:")
for target in onDigitalOcean.list_locations():
print (str(target))
if __name__ == '__main__':
try:
ListLocations()
exit(0)
except Exception as e:
print (str(e))
exit(1)
|
|
5410e89233fc4b01e8f74003ecc6dbc126b50728
|
bin/fake_fastq.py
|
bin/fake_fastq.py
|
import os
import sys
import argparse
from rob import read_fasta
__author__ = 'Rob Edwards'
parser = argparse.ArgumentParser(description='Convert a fasta file to fastq, faking the qual scores')
parser.add_argument('-f', help='fasta file', required=True)
parser.add_argument('-q', help='fastq output file', required=True)
parser.add_argument('-s', help='quality score. Default = 40', default=40, type=int)
args = parser.parse_args()
c = chr(args.s)
fa = read_fasta(args.f)
with open(args.q, 'w') as out:
for i in fa:
l = len(fa[i])
out.write("@{}\n{}\n+\n{}\n".format(i, fa[i], l * c))
|
Convert a fasta file to fastq
|
Convert a fasta file to fastq
|
Python
|
mit
|
linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab
|
Convert a fasta file to fastq
|
import os
import sys
import argparse
from rob import read_fasta
__author__ = 'Rob Edwards'
parser = argparse.ArgumentParser(description='Convert a fasta file to fastq, faking the qual scores')
parser.add_argument('-f', help='fasta file', required=True)
parser.add_argument('-q', help='fastq output file', required=True)
parser.add_argument('-s', help='quality score. Default = 40', default=40, type=int)
args = parser.parse_args()
c = chr(args.s)
fa = read_fasta(args.f)
with open(args.q, 'w') as out:
for i in fa:
l = len(fa[i])
out.write("@{}\n{}\n+\n{}\n".format(i, fa[i], l * c))
|
<commit_before><commit_msg>Convert a fasta file to fastq<commit_after>
|
import os
import sys
import argparse
from rob import read_fasta
__author__ = 'Rob Edwards'
parser = argparse.ArgumentParser(description='Convert a fasta file to fastq, faking the qual scores')
parser.add_argument('-f', help='fasta file', required=True)
parser.add_argument('-q', help='fastq output file', required=True)
parser.add_argument('-s', help='quality score. Default = 40', default=40, type=int)
args = parser.parse_args()
c = chr(args.s)
fa = read_fasta(args.f)
with open(args.q, 'w') as out:
for i in fa:
l = len(fa[i])
out.write("@{}\n{}\n+\n{}\n".format(i, fa[i], l * c))
|
Convert a fasta file to fastq
import os
import sys
import argparse
from rob import read_fasta
__author__ = 'Rob Edwards'
parser = argparse.ArgumentParser(description='Convert a fasta file to fastq, faking the qual scores')
parser.add_argument('-f', help='fasta file', required=True)
parser.add_argument('-q', help='fastq output file', required=True)
parser.add_argument('-s', help='quality score. Default = 40', default=40, type=int)
args = parser.parse_args()
c = chr(args.s)
fa = read_fasta(args.f)
with open(args.q, 'w') as out:
for i in fa:
l = len(fa[i])
out.write("@{}\n{}\n+\n{}\n".format(i, fa[i], l * c))
|
<commit_before><commit_msg>Convert a fasta file to fastq<commit_after>import os
import sys
import argparse
from rob import read_fasta
__author__ = 'Rob Edwards'
parser = argparse.ArgumentParser(description='Convert a fasta file to fastq, faking the qual scores')
parser.add_argument('-f', help='fasta file', required=True)
parser.add_argument('-q', help='fastq output file', required=True)
parser.add_argument('-s', help='quality score. Default = 40', default=40, type=int)
args = parser.parse_args()
c = chr(args.s)
fa = read_fasta(args.f)
with open(args.q, 'w') as out:
for i in fa:
l = len(fa[i])
out.write("@{}\n{}\n+\n{}\n".format(i, fa[i], l * c))
|
|
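A caveat when reusing the script above: standard Sanger/Phred+33 FASTQ encodes a quality score q as chr(q + 33), while the script writes chr(q) directly, so the default score of 40 yields '(' rather than the conventional 'I'. If Phred+33 output is intended, the fix is one line:
c = chr(args.s + 33)  # Phred+33: score 40 -> 'I'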
8df96bee4fd11d552ffeb01ec8b7025c7cacd8ca
|
convertPositions.py
|
convertPositions.py
|
"""
Will read a gzipped file containing build conversion info for snps (based on
snpdb), and save these extra values to the fasttrack database.
The most recent build is 38; we use 37 because HUNT is currently on build 37 (hg19).
First parameter is build number (now we use 37). Second is gzip filename (now
we use b147_SNPChrPosOnRef_105.bcp.gz).
"""
import gzip
from argparse import ArgumentParser
from pymongo import MongoClient
mongo_client = MongoClient()
db = mongo_client.fasttrack
parser = ArgumentParser()
parser.add_argument('build_number', help='build_number to add columns for')
parser.add_argument('file',
help='filename for gzipped file containing position info for this build')
args = parser.parse_args()
no_position = 0
db.gwas.ensure_index('snp_id_current')
# Create a set containing all gwas data we have a known snp_id_current for.
known = set()
for gwas in db.gwas.find().sort('snp_id_current'):
try:
rsid = gwas['snp_id_current']
except KeyError:
print("fail")
continue
else:
if not rsid:
continue
known.add(rsid)
# Loop through gzip file and set new positions where we have a known
# snp_id_current.
updated = 0
_known = 0
_unknown = 0
with gzip.open(args.file, 'rt', encoding='utf-8') as zipfile:
for i, line in enumerate(zipfile):
if i % 100000 == 0:
print(i/1000000)
splitted = line.split('\t')
try:
snp, chr, pos, *extra = splitted
except:
continue
if snp not in known:
_unknown += 1
continue
_known += 1
if chr == "X":
num_chr = 100
elif chr == "Y":
num_chr = 101
elif chr == "MT":
num_chr = 200
elif chr == "PAR":
pass
else:
num_chr = int(chr)
result = db.gwas.update_many({'snp_id_current': snp}, {'$set': {
'build' + args.build_number + '_chr_id': chr,
'build' + args.build_number + '_chr_num': num_chr,
'build' + args.build_number + '_pos': pos,
}})
updated += result.modified_count
print("Updated {}, known {}, unknown (only in file) {}".format(updated, _known, _unknown))
|
Add script to convert positions by adding extra columns for build
|
Add script to convert positions by adding extra columns for build
|
Python
|
agpl-3.0
|
hunt-genes/gwasc,hunt-genes/fasttrack,hunt-genes/gwasc,hunt-genes/fasttrack
|
Add script to convert positions by adding extra columns for build
|
"""
Will read a gzipped file containing build conversion info for snps (based on
snpdb), and save these extra values to the fasttrack database.
The most recent build is 38; we use 37 because HUNT is currently on build 37 (hg19).
First parameter is build number (now we use 37). Second is gzip filename (now
we use b147_SNPChrPosOnRef_105.bcp.gz).
"""
import gzip
from argparse import ArgumentParser
from pymongo import MongoClient
mongo_client = MongoClient()
db = mongo_client.fasttrack
parser = ArgumentParser()
parser.add_argument('build_number', help='build_number to add columns for')
parser.add_argument('file',
help='filename for gzipped file containing position info for this build')
args = parser.parse_args()
no_position = 0
db.gwas.ensure_index('snp_id_current')
# Create a set containing all gwas data we have a known snp_id_current for.
known = set()
for gwas in db.gwas.find().sort('snp_id_current'):
try:
rsid = gwas['snp_id_current']
except KeyError:
print("fail")
continue
else:
if not rsid:
continue
known.add(rsid)
# Loop through gzip file and set new positions where we have a known
# snp_id_current.
updated = 0
_known = 0
_unknown = 0
with gzip.open(args.file, 'rt', encoding='utf-8') as zipfile:
for i, line in enumerate(zipfile):
if i % 100000 == 0:
print(i/1000000)
splitted = line.split('\t')
try:
snp, chr, pos, *extra = splitted
except:
continue
if snp not in known:
_unknown += 1
continue
_known += 1
if chr == "X":
num_chr = 100
elif chr == "Y":
num_chr = 101
elif chr == "MT":
num_chr = 200
elif chr == "PAR":
pass
else:
num_chr = int(chr)
result = db.gwas.update_many({'snp_id_current': snp}, {'$set': {
'build' + args.build_number + '_chr_id': chr,
'build' + args.build_number + '_chr_num': num_chr,
'build' + args.build_number + '_pos': pos,
}})
updated += result.modified_count
print("Updated {}, known {}, unknown (only in file) {}".format(updated, _known, _unknown))
|
<commit_before><commit_msg>Add script to convert positions by adding extra columns for build<commit_after>
|
"""
Will read a gzipped file containing build conversion info for snps (based on
snpdb), and save these extra values to the fasttrack database.
The most recent build is 38; we use 37 because HUNT is currently on build 37 (hg19).
First parameter is build number (now we use 37). Second is gzip filename (now
we use b147_SNPChrPosOnRef_105.bcp.gz).
"""
import gzip
from argparse import ArgumentParser
from pymongo import MongoClient
mongo_client = MongoClient()
db = mongo_client.fasttrack
parser = ArgumentParser()
parser.add_argument('build_number', help='build_number to add columns for')
parser.add_argument('file',
help='filename for gzipped file containing position info for this build')
args = parser.parse_args()
no_position = 0
db.gwas.ensure_index('snp_id_current')
# Create a set containing all gwas data we have a known snp_id_current for.
known = set()
for gwas in db.gwas.find().sort('snp_id_current'):
try:
rsid = gwas['snp_id_current']
except KeyError:
print("fail")
continue
else:
if not rsid:
continue
known.add(rsid)
# Loop through gzip file and set new positions where we have a known
# snp_id_current.
updated = 0
_known = 0
_unknown = 0
with gzip.open(args.file, 'rt', encoding='utf-8') as zipfile:
for i, line in enumerate(zipfile):
if i % 100000 == 0:
print(i/1000000)
splitted = line.split('\t')
try:
snp, chr, pos, *extra = splitted
except:
continue
if snp not in known:
_unknown += 1
continue
_known += 1
if chr == "X":
num_chr = 100
elif chr == "Y":
num_chr = 101
elif chr == "MT":
num_chr = 200
elif chr == "PAR":
pass
else:
num_chr = int(chr)
result = db.gwas.update_many({'snp_id_current': snp}, {'$set': {
'build' + args.build_number + '_chr_id': chr,
'build' + args.build_number + '_chr_num': num_chr,
'build' + args.build_number + '_pos': pos,
}})
updated += result.modified_count
print("Updated {}, known {}, unknown (only in file) {}".format(updated, _known, _unknown))
|
Add script to convert positions by adding extra columns for build
"""
Will read a gzipped file containing build conversion info for snps (based on
snpdb), and save these extra values to the fasttrack database.
The most recent build is 38; we use 37 because HUNT is currently on build 37 (hg19).
First parameter is build number (now we use 37). Second is gzip filename (now
we use b147_SNPChrPosOnRef_105.bcp.gz).
"""
import gzip
from argparse import ArgumentParser
from pymongo import MongoClient
mongo_client = MongoClient()
db = mongo_client.fasttrack
parser = ArgumentParser()
parser.add_argument('build_number', help='build_number to add columns for')
parser.add_argument('file',
help='filename for gzipped file containing position info for this build')
args = parser.parse_args()
no_position = 0
db.gwas.ensure_index('snp_id_current')
# Create a set containing all gwas data we have a known snp_id_current for.
known = set()
for gwas in db.gwas.find().sort('snp_id_current'):
try:
rsid = gwas['snp_id_current']
except KeyError:
print("fail")
continue
else:
if not rsid:
continue
known.add(rsid)
# Loop through gzip file and set new positions where we have a known
# snp_id_current.
updated = 0
_known = 0
_unknown = 0
with gzip.open(args.file, 'rt', encoding='utf-8') as zipfile:
for i, line in enumerate(zipfile):
if i % 100000 == 0:
print(i/1000000)
splitted = line.split('\t')
try:
snp, chr, pos, *extra = splitted
except:
continue
if snp not in known:
_unknown += 1
continue
_known += 1
if chr == "X":
num_chr = 100
elif chr == "Y":
num_chr = 101
elif chr == "MT":
num_chr = 200
elif chr == "PAR":
pass
else:
num_chr = int(chr)
result = db.gwas.update_many({'snp_id_current': snp}, {'$set': {
'build' + args.build_number + '_chr_id': chr,
'build' + args.build_number + '_chr_num': num_chr,
'build' + args.build_number + '_pos': pos,
}})
updated += result.modified_count
print("Updated {}, known {}, unknown (only in file) {}".format(updated, _known, _unknown))
|
<commit_before><commit_msg>Add script to convert positions by adding extra columns for build<commit_after>"""
Will read a gzipped file containing build conversion info for snps (based on
snpdb), and save these extra values to the fasttrack database.
The most recent build is 38; we use 37 because HUNT is currently on build 37 (hg19).
First parameter is build number (now we use 37). Second is gzip filename (now
we use b147_SNPChrPosOnRef_105.bcp.gz).
"""
import gzip
from argparse import ArgumentParser
from pymongo import MongoClient
mongo_client = MongoClient()
db = mongo_client.fasttrack
parser = ArgumentParser()
parser.add_argument('build_number', help='build_number to add columns for')
parser.add_argument('file',
help='filename for gzipped file containing position info for this build')
args = parser.parse_args()
no_position = 0
db.gwas.ensure_index('snp_id_current')
# Create a set containing all gwas data we have a known snp_id_current for.
known = set()
for gwas in db.gwas.find().sort('snp_id_current'):
try:
rsid = gwas['snp_id_current']
except KeyError:
print("fail")
continue
else:
if not rsid:
continue
known.add(rsid)
# Loop through gzip file and set new positions where we have a known
# snp_id_current.
updated = 0
_known = 0
_unknown = 0
with gzip.open(args.file, 'rt', encoding='utf-8') as zipfile:
for i, line in enumerate(zipfile):
if i % 100000 == 0:
print(i/1000000)
splitted = line.split('\t')
try:
snp, chr, pos, *extra = splitted
except:
continue
if snp not in known:
_unknown += 1
continue
_known += 1
if chr == "X":
num_chr = 100
elif chr == "Y":
num_chr = 101
elif chr == "MT":
num_chr = 200
elif chr == "PAR":
pass
else:
num_chr = int(chr)
result = db.gwas.update_many({'snp_id_current': snp}, {'$set': {
'build' + args.build_number + '_chr_id': chr,
'build' + args.build_number + '_chr_num': num_chr,
'build' + args.build_number + '_pos': pos,
}})
updated += result.modified_count
print("Updated {}, known {}, unknown (only in file) {}".format(updated, _known, _unknown))
|
|
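The chromosome if/elif chain above is a natural fit for a lookup table, which also makes the unhandled 'PAR' case explicit (the original leaves num_chr bound to its previous value there). A small sketch using the script's own numeric codes:
CHR_CODES = {"X": 100, "Y": 101, "MT": 200}
def chr_to_num(chrom):
    # Autosomes map to their integer; named chromosomes use the table;
    # anything else (e.g. 'PAR') comes back as None so callers can skip it.
    if chrom.isdigit():
        return int(chrom)
    return CHR_CODES.get(chrom)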
7cd74b37f0f2be43a6b56f0898f68ef166de3f87
|
server/src/weblab/db/upgrade/scheduling/versions/2ecc7c4ec0c5_add_experiment_infor.py
|
server/src/weblab/db/upgrade/scheduling/versions/2ecc7c4ec0c5_add_experiment_infor.py
|
"""Add experiment information
Revision ID: 2ecc7c4ec0c5
Revises: None
Create Date: 2013-04-21 19:16:09.441855
"""
# revision identifiers, used by Alembic.
revision = '2ecc7c4ec0c5'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
Add initial upgrade for supporting exp_info
|
Add initial upgrade for supporting exp_info
|
Python
|
bsd-2-clause
|
weblabdeusto/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,weblabdeusto/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,zstars/weblabdeusto,morelab/weblabdeusto,porduna/weblabdeusto,morelab/weblabdeusto,weblabdeusto/weblabdeusto
|
Add initial upgrade for supporting exp_info
|
"""Add experiment information
Revision ID: 2ecc7c4ec0c5
Revises: None
Create Date: 2013-04-21 19:16:09.441855
"""
# revision identifiers, used by Alembic.
revision = '2ecc7c4ec0c5'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
<commit_before><commit_msg>Add initial upgrade for supporting exp_info<commit_after>
|
"""Add experiment information
Revision ID: 2ecc7c4ec0c5
Revises: None
Create Date: 2013-04-21 19:16:09.441855
"""
# revision identifiers, used by Alembic.
revision = '2ecc7c4ec0c5'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
Add initial upgrade for supporting exp_info
"""Add experiment information
Revision ID: 2ecc7c4ec0c5
Revises: None
Create Date: 2013-04-21 19:16:09.441855
"""
# revision identifiers, used by Alembic.
revision = '2ecc7c4ec0c5'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
<commit_before><commit_msg>Add initial upgrade for supporting exp_info<commit_after>"""Add experiment information
Revision ID: 2ecc7c4ec0c5
Revises: None
Create Date: 2013-04-21 19:16:09.441855
"""
# revision identifiers, used by Alembic.
revision = '2ecc7c4ec0c5'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
|
|
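The migration body is deliberately empty in this first revision. For orientation, a filled-in Alembic upgrade/downgrade pair typically looks like the following (table and column names here are invented, not WebLab-Deusto's actual schema):
def upgrade():
    op.add_column('experiments', sa.Column('client_info', sa.Text(), nullable=True))
def downgrade():
    op.drop_column('experiments', 'client_info')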
88bfcb37227b1e02abfc44fa931a940cce354e03
|
photutils/utils/_optional_deps.py
|
photutils/utils/_optional_deps.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
# This list is a duplicate of the dependencies in setup.cfg "all".
optional_deps = ['scipy', 'matplotlib', 'scikit-image', 'scikit-learn',
'gwcs']
deps = {key.upper(): key for key in optional_deps}
__all__ = [f'HAS_{pkg}' for pkg in deps]
def __getattr__(name):
if name in __all__:
try:
importlib.import_module(deps[name[4:]])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f'Module {__name__!r} has no attribute {name!r}.')
|
Add module for optional dependency checking
|
Add module for optional dependency checking
|
Python
|
bsd-3-clause
|
larrybradley/photutils,astropy/photutils
|
Add module for optional dependency checking
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
# This list is a duplicate of the dependencies in setup.cfg "all".
optional_deps = ['scipy', 'matplotlib', 'scikit-image', 'scikit-learn',
'gwcs']
deps = {key.upper(): key for key in optional_deps}
__all__ = [f'HAS_{pkg}' for pkg in deps]
def __getattr__(name):
if name in __all__:
try:
importlib.import_module(deps[name[4:]])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f'Module {__name__!r} has no attribute {name!r}.')
|
<commit_before><commit_msg>Add module for optional dependency checking<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
# This list is a duplicate of the dependencies in setup.cfg "all".
optional_deps = ['scipy', 'matplotlib', 'scikit-image', 'scikit-learn',
'gwcs']
deps = {key.upper(): key for key in optional_deps}
__all__ = [f'HAS_{pkg}' for pkg in deps]
def __getattr__(name):
if name in __all__:
try:
importlib.import_module(deps[name[4:]])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f'Module {__name__!r} has no attribute {name!r}.')
|
Add module for optional dependency checking
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
# This list is a duplicate of the dependencies in setup.cfg "all".
optional_deps = ['scipy', 'matplotlib', 'scikit-image', 'scikit-learn',
'gwcs']
deps = {key.upper(): key for key in optional_deps}
__all__ = [f'HAS_{pkg}' for pkg in deps]
def __getattr__(name):
if name in __all__:
try:
importlib.import_module(deps[name[4:]])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f'Module {__name__!r} has no attribute {name!r}.')
|
<commit_before><commit_msg>Add module for optional dependency checking<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Checks for optional dependencies using lazy import from
`PEP 562 <https://www.python.org/dev/peps/pep-0562/>`_.
"""
import importlib
# This list is a duplicate of the dependencies in setup.cfg "all".
optional_deps = ['scipy', 'matplotlib', 'scikit-image', 'scikit-learn',
'gwcs']
deps = {key.upper(): key for key in optional_deps}
__all__ = [f'HAS_{pkg}' for pkg in deps]
def __getattr__(name):
if name in __all__:
try:
importlib.import_module(deps[name[4:]])
except (ImportError, ModuleNotFoundError):
return False
return True
raise AttributeError(f'Module {__name__!r} has no attribute {name!r}.')
|
|
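Because of the module-level __getattr__ (PEP 562), each HAS_* flag performs its import probe lazily, on first attribute access. A usage sketch on the consumer side (assumes a photutils version that ships this module; HAS_SCIPY exists because 'scipy' is in optional_deps):
from photutils.utils._optional_deps import HAS_SCIPY
if HAS_SCIPY:
    from scipy import ndimage  # safe: the probe import succeeded
else:
    ndimage = None  # degrade gracefully when scipy is absent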
7a971038a1a2c045387523aef858c841ae3d4bdc
|
tests/integration/suite/test_notifier.py
|
tests/integration/suite/test_notifier.py
|
from kubernetes.client import CustomObjectsApi
from .common import random_str
def test_notifier_smtp_password(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
password = random_str()
notifier = client.create_notifier(clusterId="local",
name=name,
smtpConfig={
"defaultRecipient": "test",
"host": "test",
"port": "587",
"sender": "test",
"tls": "true",
"username": "test",
"password": password
})
remove_resource(notifier)
assert notifier is not None
# Test password not present in api
assert notifier['smtpConfig'].get('password') is None
crd_client = get_crd_client(admin_mc)
ns, name = notifier["id"].split(":")
# Test password is in k8s after creation
verify_smtp_password(crd_client, ns, name, password)
# Test noop, password field should be as it is
notifier = client.update(notifier, smtpConfig=notifier['smtpConfig'])
verify_smtp_password(crd_client, ns, name, password)
# Test updating password
new_password = random_str()
notifier = client.update(notifier, smtpConfig={
"password": new_password})
verify_smtp_password(crd_client, ns, name, new_password)
# Test updating a non-password field
notifier = client.update(notifier, smtpConfig={"username": "test2"})
notifier = client.reload(notifier)
assert notifier["smtpConfig"]["username"] == "test2"
# Test the password in crd remains the same value after updating username
verify_smtp_password(crd_client, ns, name, new_password)
def verify_smtp_password(crd_client, ns, name, password):
crd_dict = {
'group': 'management.cattle.io',
'version': 'v3',
'namespace': 'local',
'plural': 'notifiers',
'name': name,
}
k8s_notifier = crd_client.get_namespaced_custom_object(**crd_dict)
smtp_password = k8s_notifier['spec']['smtpConfig']['password']
assert smtp_password == password
def get_crd_client(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client)
|
Test Email Notifier SMTP Password
|
Test Email Notifier SMTP Password
|
Python
|
apache-2.0
|
rancher/rancher,rancher/rancher,rancherio/rancher,rancher/rancher,cjellick/rancher,cjellick/rancher,rancher/rancher,rancherio/rancher,cjellick/rancher
|
Test Email Notifier SMTP Password
|
from kubernetes.client import CustomObjectsApi
from .common import random_str
def test_notifier_smtp_password(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
password = random_str()
notifier = client.create_notifier(clusterId="local",
name=name,
smtpConfig={
"defaultRecipient": "test",
"host": "test",
"port": "587",
"sender": "test",
"tls": "true",
"username": "test",
"password": password
})
remove_resource(notifier)
assert notifier is not None
# Test password not present in api
assert notifier['smtpConfig'].get('password') is None
crd_client = get_crd_client(admin_mc)
ns, name = notifier["id"].split(":")
# Test password is in k8s after creation
verify_smtp_password(crd_client, ns, name, password)
# Test noop, password field should be as it is
notifier = client.update(notifier, smtpConfig=notifier['smtpConfig'])
verify_smtp_password(crd_client, ns, name, password)
# Test updating password
new_password = random_str()
notifier = client.update(notifier, smtpConfig={
"password": new_password})
verify_smtp_password(crd_client, ns, name, new_password)
# Test updating a non-password field
notifier = client.update(notifier, smtpConfig={"username": "test2"})
notifier = client.reload(notifier)
assert notifier["smtpConfig"]["username"] == "test2"
# Test the password in crd remains the same value after updating username
verify_smtp_password(crd_client, ns, name, new_password)
def verify_smtp_password(crd_client, ns, name, password):
crd_dict = {
'group': 'management.cattle.io',
'version': 'v3',
'namespace': 'local',
'plural': 'notifiers',
'name': name,
}
k8s_notifier = crd_client.get_namespaced_custom_object(**crd_dict)
smtp_password = k8s_notifier['spec']['smtpConfig']['password']
assert smtp_password == password
def get_crd_client(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client)
|
<commit_before><commit_msg>Test Email Notifier SMTP Password<commit_after>
|
from kubernetes.client import CustomObjectsApi
from .common import random_str
def test_notifier_smtp_password(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
password = random_str()
notifier = client.create_notifier(clusterId="local",
name=name,
smtpConfig={
"defaultRecipient": "test",
"host": "test",
"port": "587",
"sender": "test",
"tls": "true",
"username": "test",
"password": password
})
remove_resource(notifier)
assert notifier is not None
# Test password not present in api
assert notifier['smtpConfig'].get('password') is None
crd_client = get_crd_client(admin_mc)
ns, name = notifier["id"].split(":")
# Test password is in k8s after creation
verify_smtp_password(crd_client, ns, name, password)
# Test noop, password field should be as it is
notifier = client.update(notifier, smtpConfig=notifier['smtpConfig'])
verify_smtp_password(crd_client, ns, name, password)
# Test updating password
new_password = random_str()
notifier = client.update(notifier, smtpConfig={
"password": new_password})
verify_smtp_password(crd_client, ns, name, new_password)
# Test updating a non-password field
notifier = client.update(notifier, smtpConfig={"username": "test2"})
notifier = client.reload(notifier)
assert notifier["smtpConfig"]["username"] == "test2"
# Test the password in crd remains the same value after updating username
verify_smtp_password(crd_client, ns, name, new_password)
def verify_smtp_password(crd_client, ns, name, password):
crd_dict = {
'group': 'management.cattle.io',
'version': 'v3',
'namespace': 'local',
'plural': 'notifiers',
'name': name,
}
k8s_notifier = crd_client.get_namespaced_custom_object(**crd_dict)
smtp_password = k8s_notifier['spec']['smtpConfig']['password']
assert smtp_password == password
def get_crd_client(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client)
|
Test Email Notifier SMTP Password
from kubernetes.client import CustomObjectsApi
from .common import random_str
def test_notifier_smtp_password(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
password = random_str()
notifier = client.create_notifier(clusterId="local",
name=name,
smtpConfig={
"defaultRecipient": "test",
"host": "test",
"port": "587",
"sender": "test",
"tls": "true",
"username": "test",
"password": password
})
remove_resource(notifier)
assert notifier is not None
# Test password not present in api
assert notifier['smtpConfig'].get('password') is None
crd_client = get_crd_client(admin_mc)
ns, name = notifier["id"].split(":")
# Test password is in k8s after creation
verify_smtp_password(crd_client, ns, name, password)
# Test noop, password field should be as it is
notifier = client.update(notifier, smtpConfig=notifier['smtpConfig'])
verify_smtp_password(crd_client, ns, name, password)
# Test updating password
new_password = random_str()
notifier = client.update(notifier, smtpConfig={
"password": new_password})
verify_smtp_password(crd_client, ns, name, new_password)
# Test updating a non-password field
notifier = client.update(notifier, smtpConfig={"username": "test2"})
notifier = client.reload(notifier)
assert notifier["smtpConfig"]["username"] == "test2"
# Test the password in crd remains the same value after updating username
verify_smtp_password(crd_client, ns, name, new_password)
def verify_smtp_password(crd_client, ns, name, password):
crd_dict = {
'group': 'management.cattle.io',
'version': 'v3',
'namespace': 'local',
'plural': 'notifiers',
'name': name,
}
k8s_notifier = crd_client.get_namespaced_custom_object(**crd_dict)
smtp_password = k8s_notifier['spec']['smtpConfig']['password']
assert smtp_password == password
def get_crd_client(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client)
|
<commit_before><commit_msg>Test Email Notifier SMTP Password<commit_after>from kubernetes.client import CustomObjectsApi
from .common import random_str
def test_notifier_smtp_password(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
password = random_str()
notifier = client.create_notifier(clusterId="local",
name=name,
smtpConfig={
"defaultRecipient": "test",
"host": "test",
"port": "587",
"sender": "test",
"tls": "true",
"username": "test",
"password": password
})
remove_resource(notifier)
assert notifier is not None
# Test password not present in api
assert notifier['smtpConfig'].get('password') is None
crd_client = get_crd_client(admin_mc)
ns, name = notifier["id"].split(":")
# Test password is in k8s after creation
verify_smtp_password(crd_client, ns, name, password)
# Test noop, password field should be as it is
notifier = client.update(notifier, smtpConfig=notifier['smtpConfig'])
verify_smtp_password(crd_client, ns, name, password)
# Test updating password
new_password = random_str()
notifier = client.update(notifier, smtpConfig={
"password": new_password})
verify_smtp_password(crd_client, ns, name, new_password)
# Test updating a non-password field
notifier = client.update(notifier, smtpConfig={"username": "test2"})
notifier = client.reload(notifier)
assert notifier["smtpConfig"]["username"] == "test2"
# Test the password in crd remains the same value after updating username
verify_smtp_password(crd_client, ns, name, new_password)
def verify_smtp_password(crd_client, ns, name, password):
crd_dict = {
'group': 'management.cattle.io',
'version': 'v3',
'namespace': 'local',
'plural': 'notifiers',
'name': name,
}
k8s_notifier = crd_client.get_namespaced_custom_object(**crd_dict)
smtp_password = k8s_notifier['spec']['smtpConfig']['password']
assert smtp_password == password
def get_crd_client(admin_mc):
return CustomObjectsApi(admin_mc.k8s_client)
|
|
c12357b7dfb673639418078994d48c2696848b06
|
html5lib/tests/tokenizertotree.py
|
html5lib/tests/tokenizertotree.py
|
import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\s*)<html (\S+)>", re.M).sub
def main(out_path):
if not os.path.exists(out_path):
sys.stderr.write("Path %s does not exist"%out_path)
sys.exit(1)
for filename in support.html5lib_test_files('tokenizer', '*.test'):
run_file(filename, out_path)
def run_file(filename, out_path):
try:
tests_data = json.load(file(filename))
except ValueError:
sys.stderr.write("Failed to load %s\n"%filename)
return
name = os.path.splitext(os.path.split(filename)[1])[0]
output_file = open(os.path.join(out_path, "tokenizer_%s.dat"%name), "w")
if 'tests' in tests_data:
for test_data in tests_data['tests']:
if 'initialStates' not in test_data:
test_data["initialStates"] = ["Data state"]
for initial_state in test_data["initialStates"]:
if initial_state != "Data state":
#don't support this yet
continue
test = make_test(test_data)
output_file.write(test)
output_file.close()
def make_test(test_data):
if 'doubleEscaped' in test_data:
test_data = test_tokenizer.unescape_test(test_data)
rv = []
rv.append("#data")
rv.append(test_data["input"].encode("utf8"))
rv.append("#errors")
rv.append("#document")
tree = p.parse(test_data["input"])
output = test_parser.convertTreeDump(p.tree.testSerializer(tree))
output = test_parser.attrlist.sub(test_parser.sortattrs, output)
output = unnamespaceExpected(r"\1<\2>", output)
rv.append(output.encode("utf8"))
rv.append("")
return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
|
Add file for converting the tokenizer tests to tree tests (assuming html5lib works with them correctly)
|
Add file for converting the tokenizer tests to tree tests (assuming html5lib works with them correctly)
|
Python
|
mit
|
alex/html5lib-python,dstufft/html5lib-python,alex/html5lib-python,gsnedders/html5lib-python,mgilson/html5lib-python,dstufft/html5lib-python,mindw/html5lib-python,ordbogen/html5lib-python,html5lib/html5lib-python,html5lib/html5lib-python,mindw/html5lib-python,ordbogen/html5lib-python,alex/html5lib-python,dstufft/html5lib-python,ordbogen/html5lib-python,mindw/html5lib-python,html5lib/html5lib-python,mgilson/html5lib-python,mgilson/html5lib-python,gsnedders/html5lib-python
|
Add file for converting the tokenizer tests to tree tests (assuming html5lib works with them correctly)
|
import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\s*)<html (\S+)>", re.M).sub
def main(out_path):
if not os.path.exists(out_path):
sys.stderr.write("Path %s does not exist"%out_path)
sys.exit(1)
for filename in support.html5lib_test_files('tokenizer', '*.test'):
run_file(filename, out_path)
def run_file(filename, out_path):
try:
tests_data = json.load(file(filename))
except ValueError:
sys.stderr.write("Failed to load %s\n"%filename)
return
name = os.path.splitext(os.path.split(filename)[1])[0]
output_file = open(os.path.join(out_path, "tokenizer_%s.dat"%name), "w")
if 'tests' in tests_data:
for test_data in tests_data['tests']:
if 'initialStates' not in test_data:
test_data["initialStates"] = ["Data state"]
for initial_state in test_data["initialStates"]:
if initial_state != "Data state":
#don't support this yet
continue
test = make_test(test_data)
output_file.write(test)
output_file.close()
def make_test(test_data):
if 'doubleEscaped' in test_data:
test_data = test_tokenizer.unescape_test(test_data)
rv = []
rv.append("#data")
rv.append(test_data["input"].encode("utf8"))
rv.append("#errors")
rv.append("#document")
tree = p.parse(test_data["input"])
output = test_parser.convertTreeDump(p.tree.testSerializer(tree))
output = test_parser.attrlist.sub(test_parser.sortattrs, output)
output = unnamespaceExpected(r"\1<\2>", output)
rv.append(output.encode("utf8"))
rv.append("")
return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
|
<commit_before><commit_msg>Add file for converting the tokenizer tests to tree tests (assuming html5lib works with them correctly)<commit_after>
|
import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\s*)<html (\S+)>", re.M).sub
def main(out_path):
if not os.path.exists(out_path):
sys.stderr.write("Path %s does not exist"%out_path)
sys.exit(1)
for filename in support.html5lib_test_files('tokenizer', '*.test'):
run_file(filename, out_path)
def run_file(filename, out_path):
try:
tests_data = json.load(file(filename))
except ValueError:
sys.stderr.write("Failed to load %s\n"%filename)
return
name = os.path.splitext(os.path.split(filename)[1])[0]
output_file = open(os.path.join(out_path, "tokenizer_%s.dat"%name), "w")
if 'tests' in tests_data:
for test_data in tests_data['tests']:
if 'initialStates' not in test_data:
test_data["initialStates"] = ["Data state"]
for initial_state in test_data["initialStates"]:
if initial_state != "Data state":
#don't support this yet
continue
test = make_test(test_data)
output_file.write(test)
output_file.close()
def make_test(test_data):
if 'doubleEscaped' in test_data:
test_data = test_tokenizer.unescape_test(test_data)
rv = []
rv.append("#data")
rv.append(test_data["input"].encode("utf8"))
rv.append("#errors")
rv.append("#document")
tree = p.parse(test_data["input"])
output = test_parser.convertTreeDump(p.tree.testSerializer(tree))
output = test_parser.attrlist.sub(test_parser.sortattrs, output)
output = unnamespaceExpected(r"\1<\2>", output)
rv.append(output.encode("utf8"))
rv.append("")
return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
|
Add file for converting the tokenizer tests to tree tests (assuming html5lib works with them correctly)import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\s*)<html (\S+)>", re.M).sub
def main(out_path):
if not os.path.exists(out_path):
sys.stderr.write("Path %s does not exist"%out_path)
sys.exit(1)
for filename in support.html5lib_test_files('tokenizer', '*.test'):
run_file(filename, out_path)
def run_file(filename, out_path):
try:
tests_data = json.load(file(filename))
except ValueError:
sys.stderr.write("Failed to load %s\n"%filename)
return
name = os.path.splitext(os.path.split(filename)[1])[0]
output_file = open(os.path.join(out_path, "tokenizer_%s.dat"%name), "w")
if 'tests' in tests_data:
for test_data in tests_data['tests']:
if 'initialStates' not in test_data:
test_data["initialStates"] = ["Data state"]
for initial_state in test_data["initialStates"]:
if initial_state != "Data state":
#don't support this yet
continue
test = make_test(test_data)
output_file.write(test)
output_file.close()
def make_test(test_data):
if 'doubleEscaped' in test_data:
test_data = test_tokenizer.unescape_test(test_data)
rv = []
rv.append("#data")
rv.append(test_data["input"].encode("utf8"))
rv.append("#errors")
rv.append("#document")
tree = p.parse(test_data["input"])
output = test_parser.convertTreeDump(p.tree.testSerializer(tree))
output = test_parser.attrlist.sub(test_parser.sortattrs, output)
output = unnamespaceExpected(r"\1<\2>", output)
rv.append(output.encode("utf8"))
rv.append("")
return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
|
<commit_before><commit_msg>Add file for converting the tokenizer tests to tree tests (assuming html5lib works with them correctly)<commit_after>import sys
import os
import json
import re
import html5lib
import support
import test_parser
import test_tokenizer
p = html5lib.HTMLParser()
unnamespaceExpected = re.compile(r"^(\s*)<html (\S+)>", re.M).sub
def main(out_path):
if not os.path.exists(out_path):
sys.stderr.write("Path %s does not exist"%out_path)
sys.exit(1)
for filename in support.html5lib_test_files('tokenizer', '*.test'):
run_file(filename, out_path)
def run_file(filename, out_path):
try:
tests_data = json.load(file(filename))
except ValueError:
sys.stderr.write("Failed to load %s\n"%filename)
return
name = os.path.splitext(os.path.split(filename)[1])[0]
output_file = open(os.path.join(out_path, "tokenizer_%s.dat"%name), "w")
if 'tests' in tests_data:
for test_data in tests_data['tests']:
if 'initialStates' not in test_data:
test_data["initialStates"] = ["Data state"]
for initial_state in test_data["initialStates"]:
if initial_state != "Data state":
#don't support this yet
continue
test = make_test(test_data)
output_file.write(test)
output_file.close()
def make_test(test_data):
if 'doubleEscaped' in test_data:
test_data = test_tokenizer.unescape_test(test_data)
rv = []
rv.append("#data")
rv.append(test_data["input"].encode("utf8"))
rv.append("#errors")
rv.append("#document")
tree = p.parse(test_data["input"])
output = test_parser.convertTreeDump(p.tree.testSerializer(tree))
output = test_parser.attrlist.sub(test_parser.sortattrs, output)
output = unnamespaceExpected(r"\1<\2>", output)
rv.append(output.encode("utf8"))
rv.append("")
return "\n".join(rv)
if __name__ == "__main__":
main(sys.argv[1])
|
|
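For context, each converted test is written in html5lib's tree-construction .dat format. A hedged illustration of what make_test emits for a trivial input — the exact tree dump depends on html5lib's serializer, so treat this as a sketch rather than captured output:

#data
<p>hi
#errors
#document
| <html>
|   <head>
|   <body>
|     <p>
|       "hi"
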
8901e618318c901287d2af5f701ce9ae44c79f18
|
avena/tests/test-xcor2.py
|
avena/tests/test-xcor2.py
|
#!/usr/bin/env python
from numpy import all, array
from .. import np, xcor2
def test_zeropad():
x = array([[1]])
y = array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
z = xcor2._zeropad(x, y.shape)
assert all(z == y)
def test_xcor2_shape():
x = (3, 3)
y = (1, 1)
z = (4, 4)
assert xcor2._xcor2_shape((x, y)) == z
def test_center():
x = array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
y = array([[2, 3], [3, 4], [4, 5]])
assert all(xcor2._center(x, y.shape) == y)
def test_xcor2():
x = array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
y = array([[1]])
z = xcor2._xcor2(x, y)
assert np.peak(z) == (1, 1)
if __name__ == '__main__':
pass
|
Add some unit tests for the xcor2 module.
|
Add some unit tests for the xcor2 module.
|
Python
|
isc
|
eliteraspberries/avena
|
Add some unit tests for the xcor2 module.
|
#!/usr/bin/env python
from numpy import all, array
from .. import np, xcor2
def test_zeropad():
x = array([[1]])
y = array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
z = xcor2._zeropad(x, y.shape)
assert all(z == y)
def test_xcor2_shape():
x = (3, 3)
y = (1, 1)
z = (4, 4)
assert xcor2._xcor2_shape((x, y)) == z
def test_center():
x = array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
y = array([[2, 3], [3, 4], [4, 5]])
assert all(xcor2._center(x, y.shape) == y)
def test_xcor2():
x = array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
y = array([[1]])
z = xcor2._xcor2(x, y)
assert np.peak(z) == (1, 1)
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add some unit tests for the xcor2 module.<commit_after>
|
#!/usr/bin/env python
from numpy import all, array
from .. import np, xcor2
def test_zeropad():
x = array([[1]])
y = array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
z = xcor2._zeropad(x, y.shape)
assert all(z == y)
def test_xcor2_shape():
x = (3, 3)
y = (1, 1)
z = (4, 4)
assert xcor2._xcor2_shape((x, y)) == z
def test_center():
x = array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
y = array([[2, 3], [3, 4], [4, 5]])
assert all(xcor2._center(x, y.shape) == y)
def test_xcor2():
x = array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
y = array([[1]])
z = xcor2._xcor2(x, y)
assert np.peak(z) == (1, 1)
if __name__ == '__main__':
pass
|
Add some unit tests for the xcor2 module.#!/usr/bin/env python
from numpy import all, array
from .. import np, xcor2
def test_zeropad():
x = array([[1]])
y = array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
z = xcor2._zeropad(x, y.shape)
assert all(z == y)
def test_xcor2_shape():
x = (3, 3)
y = (1, 1)
z = (4, 4)
assert xcor2._xcor2_shape((x, y)) == z
def test_center():
x = array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
y = array([[2, 3], [3, 4], [4, 5]])
assert all(xcor2._center(x, y.shape) == y)
def test_xcor2():
x = array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
y = array([[1]])
z = xcor2._xcor2(x, y)
assert np.peak(z) == (1, 1)
if __name__ == '__main__':
pass
|
<commit_before><commit_msg>Add some unit tests for the xcor2 module.<commit_after>#!/usr/bin/env python
from numpy import all, array
from .. import np, xcor2
def test_zeropad():
x = array([[1]])
y = array([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
z = xcor2._zeropad(x, y.shape)
assert all(z == y)
def test_xcor2_shape():
x = (3, 3)
y = (1, 1)
z = (4, 4)
assert xcor2._xcor2_shape((x, y)) == z
def test_center():
x = array([[1, 2, 3, 4], [2, 3, 4, 5], [3, 4, 5, 6]])
y = array([[2, 3], [3, 4], [4, 5]])
assert all(xcor2._center(x, y.shape) == y)
def test_xcor2():
x = array([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
y = array([[1]])
z = xcor2._xcor2(x, y)
assert np.peak(z) == (1, 1)
if __name__ == '__main__':
pass
|
|
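The shape test above is consistent with a simple element-wise sum of the two input shapes. A tiny standalone check of that reading (a guess at the rule, not the module's actual implementation):

def xcor2_shape_guess(a, b):
    # matches the test's ((3, 3), (1, 1)) -> (4, 4) expectation
    return tuple(m + n for m, n in zip(a, b))

assert xcor2_shape_guess((3, 3), (1, 1)) == (4, 4)
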
25fc278e90857c4e26a87e7795a784d159c33d89
|
py/convert-bst-to-greater-tree.py
|
py/convert-bst-to-greater-tree.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderDesc(self, root):
if root:
self.inOrderDesc(root.right)
root.val = self.increment = root.val + self.increment
self.inOrderDesc(root.left)
def convertBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
self.increment = 0
self.inOrderDesc(root)
return root
|
Add py solution for 538. Convert BST to Greater Tree
|
Add py solution for 538. Convert BST to Greater Tree
538. Convert BST to Greater Tree: https://leetcode.com/problems/convert-bst-to-greater-tree/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 538. Convert BST to Greater Tree
538. Convert BST to Greater Tree: https://leetcode.com/problems/convert-bst-to-greater-tree/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderDesc(self, root):
if root:
self.inOrderDesc(root.right)
root.val = self.increment = root.val + self.increment
self.inOrderDesc(root.left)
def convertBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
self.increment = 0
self.inOrderDesc(root)
return root
|
<commit_before><commit_msg>Add py solution for 538. Convert BST to Greater Tree
538. Convert BST to Greater Tree: https://leetcode.com/problems/convert-bst-to-greater-tree/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderDesc(self, root):
if root:
self.inOrderDesc(root.right)
root.val = self.increment = root.val + self.increment
self.inOrderDesc(root.left)
def convertBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
self.increment = 0
self.inOrderDesc(root)
return root
|
Add py solution for 538. Convert BST to Greater Tree
538. Convert BST to Greater Tree: https://leetcode.com/problems/convert-bst-to-greater-tree/# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderDesc(self, root):
if root:
self.inOrderDesc(root.right)
root.val = self.increment = root.val + self.increment
self.inOrderDesc(root.left)
def convertBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
self.increment = 0
self.inOrderDesc(root)
return root
|
<commit_before><commit_msg>Add py solution for 538. Convert BST to Greater Tree
538. Convert BST to Greater Tree: https://leetcode.com/problems/convert-bst-to-greater-tree/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrderDesc(self, root):
if root:
self.inOrderDesc(root.right)
root.val = self.increment = root.val + self.increment
self.inOrderDesc(root.left)
def convertBST(self, root):
"""
:type root: TreeNode
:rtype: TreeNode
"""
self.increment = 0
self.inOrderDesc(root)
return root
|
|
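To make the transformation concrete, here is a self-contained rewrite of the same reverse in-order idea (this helper is an illustration, not the record's code):

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

def convert(node, acc=0):
    # visit the right subtree first so the running sum holds all greater values
    if node is None:
        return acc
    acc = convert(node.right, acc)
    node.val = acc = acc + node.val
    return convert(node.left, acc)

#   5              18
#  / \     ->     /  \
# 2   13        20    13
root = TreeNode(5)
root.left, root.right = TreeNode(2), TreeNode(13)
convert(root)
assert (root.val, root.left.val, root.right.val) == (18, 20, 13)
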
edb2622e6da18bd2cff523fe96ec56cec0c1b94f
|
lvsr/configs/timit_bothgru_cumsum_250.py
|
lvsr/configs/timit_bothgru_cumsum_250.py
|
Config(
net=Config(
dim_dec=250,
dim_bidir=250,
dims_bottom=[250],
dec_transition='GatedRecurrent',
enc_transition='GatedRecurrent',
attention_type='content_and_cumsum'),
initialization=[
("/recognizer", "rec_weights_init", "IsotropicGaussian(0.1)")],
data=Config(
normalization="norm.pkl"))
|
Add config without dropout for 250 hidden units
|
Add config without dropout for 250 hidden units
|
Python
|
mit
|
rizar/attention-lvcsr,rizar/attention-lvcsr,nke001/attention-lvcsr,rizar/attention-lvcsr,nke001/attention-lvcsr,rizar/attention-lvcsr,nke001/attention-lvcsr,nke001/attention-lvcsr,rizar/attention-lvcsr,nke001/attention-lvcsr
|
Add config without dropout for 250 hidden units
|
Config(
net=Config(
dim_dec=250,
dim_bidir=250,
dims_bottom=[250],
dec_transition='GatedRecurrent',
enc_transition='GatedRecurrent',
attention_type='content_and_cumsum'),
initialization=[
("/recognizer", "rec_weights_init", "IsotropicGaussian(0.1)")],
data=Config(
normalization="norm.pkl"))
|
<commit_before><commit_msg>Add config without dropout for 250 hidden units<commit_after>
|
Config(
net=Config(
dim_dec=250,
dim_bidir=250,
dims_bottom=[250],
dec_transition='GatedRecurrent',
enc_transition='GatedRecurrent',
attention_type='content_and_cumsum'),
initialization=[
("/recognizer", "rec_weights_init", "IsotropicGaussian(0.1)")],
data=Config(
normalization="norm.pkl"))
|
Add config without dropout for 250 hidden unitsConfig(
net=Config(
dim_dec=250,
dim_bidir=250,
dims_bottom=[250],
dec_transition='GatedRecurrent',
enc_transition='GatedRecurrent',
attention_type='content_and_cumsum'),
initialization=[
("/recognizer", "rec_weights_init", "IsotropicGaussian(0.1)")],
data=Config(
normalization="norm.pkl"))
|
<commit_before><commit_msg>Add config without dropout for 250 hidden units<commit_after>Config(
net=Config(
dim_dec=250,
dim_bidir=250,
dims_bottom=[250],
dec_transition='GatedRecurrent',
enc_transition='GatedRecurrent',
attention_type='content_and_cumsum'),
initialization=[
("/recognizer", "rec_weights_init", "IsotropicGaussian(0.1)")],
data=Config(
normalization="norm.pkl"))
|
|
acdc2c8c6d47e6c6ffc15c9fea1aff20a2645363
|
helenae/gui/widgets/InputLinkCtrl.py
|
helenae/gui/widgets/InputLinkCtrl.py
|
# -*- coding: utf-8 -*-
import wx
import platform
class InputLink(wx.Dialog):
def __init__(self, parent, id, title, ico_folder):
if platform.system() == 'Darwin':
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE &
~ (wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
labelPos = (10, 15)
fieldSize = (180, 20)
fieldPos = (125, 15)
size = (320, 105)
else:
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
labelPos = (10, 20)
fieldSize = (195, 20)
fieldPos = (110, 15)
size = (320, 80)
self.label = wx.StaticText(self, label="Ссылка на файл:", pos=labelPos)
self.field = wx.TextCtrl(self, value="", size=fieldSize, pos=fieldPos)
self.button_ok = wx.Button(self, label="Ок", id=wx.ID_OK, pos=(125, 45))
self.button_cancel = wx.Button(self, label="Отмена", id=wx.ID_CANCEL, pos=(217, 45))
self.Bind(wx.EVT_BUTTON, self.onOK, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
# self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
# self.SetIcon(self.icon)
self.SetSize(size)
self.result = None
self.Center()
def onOK(self, event):
self.result = self.field.GetValue()
self.EndModal(wx.ID_OK)
self.Destroy()
def onCancel(self, event):
self.result = None
self.EndModal(wx.ID_CANCEL)
self.Destroy()
if __name__ == '__main__':
app = wx.App(0)
ico_folder = '..'
frame = InputLink(None, -1, 'Введите ссылку', ico_folder)
res = frame.ShowModal()
app.MainLoop()
|
Add input dialog for links
|
Add input dialog for links
|
Python
|
mit
|
Relrin/Helenae,Relrin/Helenae,Relrin/Helenae
|
Add input dialog for links
|
# -*- coding: utf-8 -*-
import wx
import platform
class InputLink(wx.Dialog):
def __init__(self, parent, id, title, ico_folder):
if platform.system() == 'Darwin':
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE &
~ (wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
labelPos = (10, 15)
fieldSize = (180, 20)
fieldPos = (125, 15)
size = (320, 105)
else:
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
labelPos = (10, 20)
fieldSize = (195, 20)
fieldPos = (110, 15)
size = (320, 80)
self.label = wx.StaticText(self, label="Ссылка на файл:", pos=labelPos)
self.field = wx.TextCtrl(self, value="", size=fieldSize, pos=fieldPos)
self.button_ok = wx.Button(self, label="Ок", id=wx.ID_OK, pos=(125, 45))
self.button_cancel = wx.Button(self, label="Отмена", id=wx.ID_CANCEL, pos=(217, 45))
self.Bind(wx.EVT_BUTTON, self.onOK, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
# self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
# self.SetIcon(self.icon)
self.SetSize(size)
self.result = None
self.Center()
def onOK(self, event):
self.result = self.field.GetValue()
self.EndModal(wx.ID_OK)
self.Destroy()
def onCancel(self, event):
self.result = None
self.EndModal(wx.ID_CANCEL)
self.Destroy()
if __name__ == '__main__':
app = wx.App(0)
ico_folder = '..'
frame = InputLink(None, -1, 'Введите ссылку', ico_folder)
res = frame.ShowModal()
app.MainLoop()
|
<commit_before><commit_msg>Add input dialog for links<commit_after>
|
# -*- coding: utf-8 -*-
import wx
import platform
class InputLink(wx.Dialog):
def __init__(self, parent, id, title, ico_folder):
if platform.system() == 'Darwin':
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE &
~ (wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
labelPos = (10, 15)
fieldSize = (180, 20)
fieldPos = (125, 15)
size = (320, 105)
else:
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
labelPos = (10, 20)
fieldSize = (195, 20)
fieldPos = (110, 15)
size = (320, 80)
self.label = wx.StaticText(self, label="Ссылка на файл:", pos=labelPos)
self.field = wx.TextCtrl(self, value="", size=fieldSize, pos=fieldPos)
self.button_ok = wx.Button(self, label="Ок", id=wx.ID_OK, pos=(125, 45))
self.button_cancel = wx.Button(self, label="Отмена", id=wx.ID_CANCEL, pos=(217, 45))
self.Bind(wx.EVT_BUTTON, self.onOK, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
# self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
# self.SetIcon(self.icon)
self.SetSize(size)
self.result = None
self.Center()
def onOK(self, event):
self.result = self.field.GetValue()
self.EndModal(wx.ID_OK)
self.Destroy()
def onCancel(self, event):
self.result = None
self.EndModal(wx.ID_CANCEL)
self.Destroy()
if __name__ == '__main__':
app = wx.App(0)
ico_folder = '..'
frame = InputLink(None, -1, 'Введите ссылку', ico_folder)
res = frame.ShowModal()
app.MainLoop()
|
Add input dialog for links# -*- coding: utf-8 -*-
import wx
import platform
class InputLink(wx.Dialog):
def __init__(self, parent, id, title, ico_folder):
if platform.system() == 'Darwin':
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE &
~ (wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
labelPos = (10, 15)
fieldSize = (180, 20)
fieldPos = (125, 15)
size = (320, 105)
else:
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
labelPos = (10, 20)
fieldSize = (195, 20)
fieldPos = (110, 15)
size = (320, 80)
self.label = wx.StaticText(self, label="Ссылка на файл:", pos=labelPos)
self.field = wx.TextCtrl(self, value="", size=fieldSize, pos=fieldPos)
self.button_ok = wx.Button(self, label="Ок", id=wx.ID_OK, pos=(125, 45))
self.button_cancel = wx.Button(self, label="Отмена", id=wx.ID_CANCEL, pos=(217, 45))
self.Bind(wx.EVT_BUTTON, self.onOK, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
# self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
# self.SetIcon(self.icon)
self.SetSize(size)
self.result = None
self.Center()
def onOK(self, event):
self.result = self.field.GetValue()
self.EndModal(wx.ID_OK)
self.Destroy()
def onCancel(self, event):
self.result = None
self.EndModal(wx.ID_CANCEL)
self.Destroy()
if __name__ == '__main__':
app = wx.App(0)
ico_folder = '..'
frame = InputLink(None, -1, 'Введите ссылку', ico_folder)
res = frame.ShowModal()
app.MainLoop()
|
<commit_before><commit_msg>Add input dialog for links<commit_after># -*- coding: utf-8 -*-
import wx
import platform
class InputLink(wx.Dialog):
def __init__(self, parent, id, title, ico_folder):
if platform.system() == 'Darwin':
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE &
~ (wx.RESIZE_BORDER | wx.RESIZE_BOX | wx.MAXIMIZE_BOX))
labelPos = (10, 15)
fieldSize = (180, 20)
fieldPos = (125, 15)
size = (320, 105)
else:
wx.Dialog.__init__(self, parent, id, title, style=wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER)
labelPos = (10, 20)
fieldSize = (195, 20)
fieldPos = (110, 15)
size = (320, 80)
self.label = wx.StaticText(self, label="Ссылка на файл:", pos=labelPos)
self.field = wx.TextCtrl(self, value="", size=fieldSize, pos=fieldPos)
self.button_ok = wx.Button(self, label="Ок", id=wx.ID_OK, pos=(125, 45))
self.button_cancel = wx.Button(self, label="Отмена", id=wx.ID_CANCEL, pos=(217, 45))
self.Bind(wx.EVT_BUTTON, self.onOK, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.onCancel, id=wx.ID_CANCEL)
# self.icon = wx.Icon(ico_folder + '/icons/app.ico', wx.BITMAP_TYPE_ICO)
# self.SetIcon(self.icon)
self.SetSize(size)
self.result = None
self.Center()
def onOK(self, event):
self.result = self.field.GetValue()
self.EndModal(wx.ID_OK)
self.Destroy()
def onCancel(self, event):
self.result = None
self.EndModal(wx.ID_CANCEL)
self.Destroy()
if __name__ == '__main__':
app = wx.App(0)
ico_folder = '..'
frame = InputLink(None, -1, 'Введите ссылку', ico_folder)
res = frame.ShowModal()
app.MainLoop()
|
|
cef078ebfa96bfea7d36b5cb1bf90f8cbd070df5
|
domino/utils/gender.py
|
domino/utils/gender.py
|
# Author: Álvaro Parafita (parafita.alvaro@gmail.com)
class Gender:
"""
Class to represent gender as ints.
Definition of each gender is according to ISO/IEC 5218.
An instance of Gender must be created passing it a converter function
that takes any value and returns a string in
UNKNOWN, MALE, FEMALE, NOT_APPLICABLE
    Any Exception inside this method should be caught
and reraised as ValueError or TypeError accordingly.
To use the class, call the instance directly with any value
to convert it to its int representation.
To pass from an int to its ISO string representation,
use the 'name' function.
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
NOT_APPLICABLE = 9
def __init__(self, converter=None):
if converter is None:
def default_converter(x):
if x in ['UNKNOWN', 'MALE', 'FEMALE', 'NOT_APPLICABLE']:
return x
else:
raise ValueError(x)
converter = default_converter
self.converter = converter
def __call__(self, gender):
converted_gender = self.converter(gender)
try:
return getattr(self, converted_gender)
except AttributeError:
raise ValueError(
'Gender "%s" became "%s", '
'which is not recognised by Gender' % (
gender, converted_gender
)
)
def name(self, gender):
if type(gender) == int:
if gender == Gender.UNKNOWN:
return 'UNKNOWN'
elif gender == Gender.MALE:
return 'MALE'
elif gender == Gender.FEMALE:
return 'FEMALE'
elif gender == Gender.NOT_APPLICABLE:
return 'NOT_APPLICABLE'
else:
raise ValueError(gender)
else:
raise TypeError(gender)
Gender.default = Gender()
def SimpleGender(convert):
def lower_dict_comparison(x):
try:
x = x.lower()
except AttributeError:
return 'UNKNOWN'
return convert.get(x, 'UNKNOWN')
return Gender(lower_dict_comparison)
|
Add utils package with offtopic functions
|
Add utils package with offtopic functions
Add Gender class for proper Gender representation
|
Python
|
mit
|
aparafita/domino
|
Add utils package with offtopic functions
Add Gender class for proper Gender representation
|
# Author: Álvaro Parafita (parafita.alvaro@gmail.com)
class Gender:
"""
Class to represent gender as ints.
Definition of each gender is according to ISO/IEC 5218.
An instance of Gender must be created passing it a converter function
that takes any value and returns a string in
UNKNOWN, MALE, FEMALE, NOT_APPLICABLE
    Any Exception inside this method should be caught
and reraised as ValueError or TypeError accordingly.
To use the class, call the instance directly with any value
to convert it to its int representation.
To pass from an int to its ISO string representation,
use the 'name' function.
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
NOT_APPLICABLE = 9
def __init__(self, converter=None):
if converter is None:
def default_converter(x):
if x in ['UNKNOWN', 'MALE', 'FEMALE', 'NOT_APPLICABLE']:
return x
else:
raise ValueError(x)
converter = default_converter
self.converter = converter
def __call__(self, gender):
converted_gender = self.converter(gender)
try:
return getattr(self, converted_gender)
except AttributeError:
raise ValueError(
'Gender "%s" became "%s", '
'which is not recognised by Gender' % (
gender, converted_gender
)
)
def name(self, gender):
if type(gender) == int:
if gender == Gender.UNKNOWN:
return 'UNKNOWN'
elif gender == Gender.MALE:
return 'MALE'
elif gender == Gender.FEMALE:
return 'FEMALE'
elif gender == Gender.NOT_APPLICABLE:
return 'NOT_APPLICABLE'
else:
raise ValueError(gender)
else:
raise TypeError(gender)
Gender.default = Gender()
def SimpleGender(convert):
def lower_dict_comparison(x):
try:
x = x.lower()
except AttributeError:
return 'UNKNOWN'
return convert.get(x, 'UNKNOWN')
return Gender(lower_dict_comparison)
|
<commit_before><commit_msg>Add utils package with offtopic functions
Add Gender class for proper Gender representation<commit_after>
|
# Author: Álvaro Parafita (parafita.alvaro@gmail.com)
class Gender:
"""
Class to represent gender as ints.
Definition of each gender is according to ISO/IEC 5218.
An instance of Gender must be created passing it a converter function
that takes any value and returns a string in
UNKNOWN, MALE, FEMALE, NOT_APPLICABLE
    Any Exception inside this method should be caught
and reraised as ValueError or TypeError accordingly.
To use the class, call the instance directly with any value
to convert it to its int representation.
To pass from an int to its ISO string representation,
use the 'name' function.
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
NOT_APPLICABLE = 9
def __init__(self, converter=None):
if converter is None:
def default_converter(x):
if x in ['UNKNOWN', 'MALE', 'FEMALE', 'NOT_APPLICABLE']:
return x
else:
raise ValueError(x)
converter = default_converter
self.converter = converter
def __call__(self, gender):
converted_gender = self.converter(gender)
try:
return getattr(self, converted_gender)
except AttributeError:
raise ValueError(
'Gender "%s" became "%s", '
'which is not recognised by Gender' % (
gender, converted_gender
)
)
def name(self, gender):
if type(gender) == int:
if gender == Gender.UNKNOWN:
return 'UNKNOWN'
elif gender == Gender.MALE:
return 'MALE'
elif gender == Gender.FEMALE:
return 'FEMALE'
elif gender == Gender.NOT_APPLICABLE:
return 'NOT_APPLICABLE'
else:
raise ValueError(gender)
else:
raise TypeError(gender)
Gender.default = Gender()
def SimpleGender(convert):
def lower_dict_comparison(x):
try:
x = x.lower()
except AttributeError:
return 'UNKNOWN'
return convert.get(x, 'UNKNOWN')
return Gender(lower_dict_comparison)
|
Add utils package with offtopic functions
Add Gender class for proper Gender representation# Author: Álvaro Parafita (parafita.alvaro@gmail.com)
class Gender:
"""
Class to represent gender as ints.
Definition of each gender is according to ISO/IEC 5218.
An instance of Gender must be created passing it a converter function
that takes any value and returns a string in
UNKNOWN, MALE, FEMALE, NOT_APPLICABLE
    Any Exception inside this method should be caught
and reraised as ValueError or TypeError accordingly.
To use the class, call the instance directly with any value
to convert it to its int representation.
To pass from an int to its ISO string representation,
use the 'name' function.
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
NOT_APPLICABLE = 9
def __init__(self, converter=None):
if converter is None:
def default_converter(x):
if x in ['UNKNOWN', 'MALE', 'FEMALE', 'NOT_APPLICABLE']:
return x
else:
raise ValueError(x)
converter = default_converter
self.converter = converter
def __call__(self, gender):
converted_gender = self.converter(gender)
try:
return getattr(self, converted_gender)
except AttributeError:
raise ValueError(
'Gender "%s" became "%s", '
'which is not recognised by Gender' % (
gender, converted_gender
)
)
def name(self, gender):
if type(gender) == int:
if gender == Gender.UNKNOWN:
return 'UNKNOWN'
elif gender == Gender.MALE:
return 'MALE'
elif gender == Gender.FEMALE:
return 'FEMALE'
elif gender == Gender.NOT_APPLICABLE:
return 'NOT_APPLICABLE'
else:
raise ValueError(gender)
else:
raise TypeError(gender)
Gender.default = Gender()
def SimpleGender(convert):
def lower_dict_comparison(x):
try:
x = x.lower()
except AttributeError:
return 'UNKNOWN'
return convert.get(x, 'UNKNOWN')
return Gender(lower_dict_comparison)
|
<commit_before><commit_msg>Add utils package with offtopic functions
Add Gender class for proper Gender representation<commit_after># Author: Álvaro Parafita (parafita.alvaro@gmail.com)
class Gender:
"""
Class to represent gender as ints.
Definition of each gender is according to ISO/IEC 5218.
An instance of Gender must be created passing it a converter function
that takes any value and returns a string in
UNKNOWN, MALE, FEMALE, NOT_APPLICABLE
    Any Exception inside this method should be caught
and reraised as ValueError or TypeError accordingly.
To use the class, call the instance directly with any value
to convert it to its int representation.
To pass from an int to its ISO string representation,
use the 'name' function.
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
NOT_APPLICABLE = 9
def __init__(self, converter=None):
if converter is None:
def default_converter(x):
if x in ['UNKNOWN', 'MALE', 'FEMALE', 'NOT_APPLICABLE']:
return x
else:
raise ValueError(x)
converter = default_converter
self.converter = converter
def __call__(self, gender):
converted_gender = self.converter(gender)
try:
return getattr(self, converted_gender)
except AttributeError:
raise ValueError(
'Gender "%s" became "%s", '
'which is not recognised by Gender' % (
gender, converted_gender
)
)
def name(self, gender):
if type(gender) == int:
if gender == Gender.UNKNOWN:
return 'UNKNOWN'
elif gender == Gender.MALE:
return 'MALE'
elif gender == Gender.FEMALE:
return 'FEMALE'
elif gender == Gender.NOT_APPLICABLE:
return 'NOT_APPLICABLE'
else:
raise ValueError(gender)
else:
raise TypeError(gender)
Gender.default = Gender()
def SimpleGender(convert):
def lower_dict_comparison(x):
try:
x = x.lower()
except AttributeError:
return 'UNKNOWN'
return convert.get(x, 'UNKNOWN')
return Gender(lower_dict_comparison)
|
|
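A short usage sketch of the API described in the docstring — the 'm'/'f' mapping is hypothetical, chosen only to exercise each branch:

g = SimpleGender({'m': 'MALE', 'f': 'FEMALE'})
assert g('M') == Gender.MALE             # input is lower-cased before lookup
assert g('unmapped') == Gender.UNKNOWN   # unknown strings fall back to UNKNOWN
assert g(None) == Gender.UNKNOWN         # non-strings hit the AttributeError branch
assert Gender.default.name(Gender.FEMALE) == 'FEMALE'
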
c413b91e7316055dc6ff5c82ee35e94e2a32fa62
|
lpthw/ex21.py
|
lpthw/ex21.py
|
def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
    return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# a puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: ", what, "Can you do it by hand?"
# yep
|
Add work from exercise 21 in lpthw.
|
Add work from exercise 21 in lpthw.
|
Python
|
mit
|
jaredmanning/learning,jaredmanning/learning
|
Add work from exercise 21 in lpthw.
|
def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
    return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# a puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: ", what, "Can you do it by hand?"
# yep
|
<commit_before><commit_msg>Add work from exercise 21 in lpthw.<commit_after>
|
def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
    return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# a puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: ", what, "Can you do it by hand?"
# yep
|
Add work from exercise 21 in lpthw.def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
    return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# a puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: ", what, "Can you do it by hand?"
# yep
|
<commit_before><commit_msg>Add work from exercise 21 in lpthw.<commit_after>def add(a, b):
print "ADDING %d + %d" % (a, b)
return a + b
def subtract(a, b):
print "SUBTRACTING %d - %d" % (a, b)
    return a - b
def multiply(a, b):
print "MULTIPLYING %d * %d" % (a, b)
return a * b
def divide(a, b):
print "DIVIDING %d / %d" % (a, b)
return a / b
print "Let's do some math with just functions!"
age = add(30, 5)
height = subtract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print "Age: %d, Height: %d, Weight: %d, IQ: %d" % (age, height, weight, iq)
# a puzzle for the extra credit, type it in anyway.
print "Here is a puzzle."
what = add(age, subtract(height, multiply(weight, divide(iq, 2))))
print "That becomes: ", what, "Can you do it by hand?"
# yep
|
|
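Working the puzzle by hand: divide(iq, 2) = 50 / 2 = 25; multiply(weight, 25) = 180 * 25 = 4500; subtract(height, 4500) = 74 - 4500 = -4426; add(age, -4426) = 35 - 4426 = -4391. Every division here is exact, so Python 2's integer division does not change the result. A one-line check:

assert 35 + (74 - 180 * (50 / 2)) == -4391
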
6c1ecad37e4735ed77403f82ad2464115447b4c9
|
tests/graphics/toolbarpalettes.py
|
tests/graphics/toolbarpalettes.py
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test palette positioning for toolbar and tray.
"""
import gtk
from sugar.graphics.tray import HTray, TrayButton
from sugar.graphics.toolbutton import ToolButton
import common
test = common.Test()
vbox = gtk.VBox()
theme_icons = gtk.icon_theme_get_default().list_icons()
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, False)
toolbar.show()
for i in range(0, 5):
button = ToolButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
toolbar.insert(button, -1)
button.show()
content = gtk.Label()
vbox.pack_start(content)
content.show()
tray = HTray()
vbox.pack_start(tray, False)
tray.show()
for i in range(0, 30):
button = TrayButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
tray.add_item(button)
button.show()
test.pack_start(vbox)
vbox.show()
test.show()
if __name__ == "__main__":
common.main(test)
|
Add testcase for toolbar and tray palettes.
|
Add testcase for toolbar and tray palettes.
|
Python
|
lgpl-2.1
|
i5o/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,tchx84/debian-pkg-sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,i5o/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,quozl/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,sugarlabs/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,tchx84/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,tchx84/debian-pkg-sugar-toolkit,tchx84/debian-pkg-sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,puneetgkaur/backup_sugar_sugartoolkit,sugarlabs/sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,Daksh/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,godiard/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3
|
Add testcase for toolbar and tray palettes.
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test palette positioning for toolbar and tray.
"""
import gtk
from sugar.graphics.tray import HTray, TrayButton
from sugar.graphics.toolbutton import ToolButton
import common
test = common.Test()
vbox = gtk.VBox()
theme_icons = gtk.icon_theme_get_default().list_icons()
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, False)
toolbar.show()
for i in range(0, 5):
button = ToolButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
toolbar.insert(button, -1)
button.show()
content = gtk.Label()
vbox.pack_start(content)
content.show()
tray = HTray()
vbox.pack_start(tray, False)
tray.show()
for i in range(0, 30):
button = TrayButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
tray.add_item(button)
button.show()
test.pack_start(vbox)
vbox.show()
test.show()
if __name__ == "__main__":
common.main(test)
|
<commit_before><commit_msg>Add testcase for toolbar and tray palettes.<commit_after>
|
# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test palette positioning for toolbar and tray.
"""
import gtk
from sugar.graphics.tray import HTray, TrayButton
from sugar.graphics.toolbutton import ToolButton
import common
test = common.Test()
vbox = gtk.VBox()
theme_icons = gtk.icon_theme_get_default().list_icons()
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, False)
toolbar.show()
for i in range(0, 5):
button = ToolButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
toolbar.insert(button, -1)
button.show()
content = gtk.Label()
vbox.pack_start(content)
content.show()
tray = HTray()
vbox.pack_start(tray, False)
tray.show()
for i in range(0, 30):
button = TrayButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
tray.add_item(button)
button.show()
test.pack_start(vbox)
vbox.show()
test.show()
if __name__ == "__main__":
common.main(test)
|
Add testcase for toolbar and tray palettes.# Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test palette positioning for toolbar and tray.
"""
import gtk
from sugar.graphics.tray import HTray, TrayButton
from sugar.graphics.toolbutton import ToolButton
import common
test = common.Test()
vbox = gtk.VBox()
theme_icons = gtk.icon_theme_get_default().list_icons()
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, False)
toolbar.show()
for i in range(0, 5):
button = ToolButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
toolbar.insert(button, -1)
button.show()
content = gtk.Label()
vbox.pack_start(content)
content.show()
tray = HTray()
vbox.pack_start(tray, False)
tray.show()
for i in range(0, 30):
button = TrayButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
tray.add_item(button)
button.show()
test.pack_start(vbox)
vbox.show()
test.show()
if __name__ == "__main__":
common.main(test)
|
<commit_before><commit_msg>Add testcase for toolbar and tray palettes.<commit_after># Copyright (C) 2007, Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
"""
Test palette positioning for toolbar and tray.
"""
import gtk
from sugar.graphics.tray import HTray, TrayButton
from sugar.graphics.toolbutton import ToolButton
import common
test = common.Test()
vbox = gtk.VBox()
theme_icons = gtk.icon_theme_get_default().list_icons()
toolbar = gtk.Toolbar()
vbox.pack_start(toolbar, False)
toolbar.show()
for i in range(0, 5):
button = ToolButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
toolbar.insert(button, -1)
button.show()
content = gtk.Label()
vbox.pack_start(content)
content.show()
tray = HTray()
vbox.pack_start(tray, False)
tray.show()
for i in range(0, 30):
button = TrayButton(icon_name=theme_icons[i])
button.set_tooltip('Icon %d' % i)
tray.add_item(button)
button.show()
test.pack_start(vbox)
vbox.show()
test.show()
if __name__ == "__main__":
common.main(test)
|
|
ac6a4aff86c312b507353e7f0ae3f3447328d8f2
|
decorate_all_functions.py
|
decorate_all_functions.py
|
from functools import wraps
def decorate_all_functions(func_decorator):
def decorator(cls):
for name, obj in vars(cls).items():
if callable(obj):
try:
obj = obj.__func__ # unwrap Python 2 unbound method
except AttributeError:
pass # not needed in Python 3
setattr(cls, name, func_decorator(obj))
return cls
return decorator
def print_on_call(func):
@wraps(func)
def wrapper(*args, **kw):
print('{} called!!!'.format(func.__name__))
try:
res = func(*args, **kw)
finally:
print('{} finished!!!'.format(func.__name__))
return res
return wrapper
@decorate_all_functions(print_on_call)
class Foo:
def func1(self):
print('1')
def func2(self):
print('2')
c = Foo()
c.func1()
c.func2()
|
Add an example to decorate each of the functions in the class.
|
Add an example to decorate each of the functions in the class.
|
Python
|
mit
|
iandmyhand/python-utils
|
Add an example to decorate each of the functions in the class.
|
from functools import wraps
def decorate_all_functions(func_decorator):
def decorator(cls):
for name, obj in vars(cls).items():
if callable(obj):
try:
obj = obj.__func__ # unwrap Python 2 unbound method
except AttributeError:
pass # not needed in Python 3
setattr(cls, name, func_decorator(obj))
return cls
return decorator
def print_on_call(func):
@wraps(func)
def wrapper(*args, **kw):
print('{} called!!!'.format(func.__name__))
try:
res = func(*args, **kw)
finally:
print('{} finished!!!'.format(func.__name__))
return res
return wrapper
@decorate_all_functions(print_on_call)
class Foo:
def func1(self):
print('1')
def func2(self):
print('2')
c = Foo()
c.func1()
c.func2()
|
<commit_before><commit_msg>Add an example to decorate each of the functions in the class.<commit_after>
|
from functools import wraps
def decorate_all_functions(func_decorator):
def decorator(cls):
for name, obj in vars(cls).items():
if callable(obj):
try:
obj = obj.__func__ # unwrap Python 2 unbound method
except AttributeError:
pass # not needed in Python 3
setattr(cls, name, func_decorator(obj))
return cls
return decorator
def print_on_call(func):
@wraps(func)
def wrapper(*args, **kw):
print('{} called!!!'.format(func.__name__))
try:
res = func(*args, **kw)
finally:
print('{} finished!!!'.format(func.__name__))
return res
return wrapper
@decorate_all_functions(print_on_call)
class Foo:
def func1(self):
print('1')
def func2(self):
print('2')
c = Foo()
c.func1()
c.func2()
|
Add an example to decorate each of the functions in the class.from functools import wraps
def decorate_all_functions(func_decorator):
def decorator(cls):
for name, obj in vars(cls).items():
if callable(obj):
try:
obj = obj.__func__ # unwrap Python 2 unbound method
except AttributeError:
pass # not needed in Python 3
setattr(cls, name, func_decorator(obj))
return cls
return decorator
def print_on_call(func):
@wraps(func)
def wrapper(*args, **kw):
print('{} called!!!'.format(func.__name__))
try:
res = func(*args, **kw)
finally:
print('{} finished!!!'.format(func.__name__))
return res
return wrapper
@decorate_all_functions(print_on_call)
class Foo:
def func1(self):
print('1')
def func2(self):
print('2')
c = Foo()
c.func1()
c.func2()
|
<commit_before><commit_msg>Add an example to decorate each of the functions in the class.<commit_after>from functools import wraps
def decorate_all_functions(func_decorator):
def decorator(cls):
for name, obj in vars(cls).items():
if callable(obj):
try:
obj = obj.__func__ # unwrap Python 2 unbound method
except AttributeError:
pass # not needed in Python 3
setattr(cls, name, func_decorator(obj))
return cls
return decorator
def print_on_call(func):
@wraps(func)
def wrapper(*args, **kw):
print('{} called!!!'.format(func.__name__))
try:
res = func(*args, **kw)
finally:
print('{} finished!!!'.format(func.__name__))
return res
return wrapper
@decorate_all_functions(print_on_call)
class Foo:
def func1(self):
print('1')
def func2(self):
print('2')
c = Foo()
c.func1()
c.func2()
|
|
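One caveat worth noting: vars(cls) also yields dunder methods when the class defines them, so an __init__ would be wrapped as well. A minimal variant that leaves dunders alone — whether that is desirable is an assumption, not something the record states:

def decorate_public_functions(func_decorator):
    def decorator(cls):
        for name, obj in vars(cls).items():
            # skip __init__, __repr__, etc.; only wrap ordinary methods
            if callable(obj) and not name.startswith('__'):
                setattr(cls, name, func_decorator(obj))
        return cls
    return decorator
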
36bc44a04e9a2bbed30ce19522bdf608afa3bfea
|
tests/test_gross_total_volume.py
|
tests/test_gross_total_volume.py
|
from gypsy import gross_total_volume
def test_pl():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_pl(0, 0)
assert False
def test_aw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_aw(0, 0)
assert False
def test_sb():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sb(0, 0)
assert False
def test_sw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sw(0, 0)
assert False
def test_gross_total_volume():
# TODO: test this delegates properly
assert False
|
Add reminder for testing gross total volume
|
Add reminder for testing gross total volume
|
Python
|
mit
|
tesera/pygypsy,tesera/pygypsy
|
Add reminder for testing gross total volume
|
from gypsy import gross_total_volume
def test_pl():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_pl(0, 0)
assert False
def test_aw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_aw(0, 0)
assert False
def test_sb():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sb(0, 0)
assert False
def test_sw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sw(0, 0)
assert False
def test_gross_total_volume():
# TODO: test this delegates properly
assert False
|
<commit_before><commit_msg>Add reminder for testing gross total volume<commit_after>
|
from gypsy import gross_total_volume
def test_pl():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_pl(0, 0)
assert False
def test_aw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_aw(0, 0)
assert False
def test_sb():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sb(0, 0)
assert False
def test_sw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sw(0, 0)
assert False
def test_gross_total_volume():
# TODO: test this delegates properly
assert False
|
Add reminder for testing gross total volumefrom gypsy import gross_total_volume
def test_pl():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_pl(0, 0)
assert False
def test_aw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_aw(0, 0)
assert False
def test_sb():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sb(0, 0)
assert False
def test_sw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sw(0, 0)
assert False
def test_gross_total_volume():
# TODO: test this delegates properly
assert False
|
<commit_before><commit_msg>Add reminder for testing gross total volume<commit_after>from gypsy import gross_total_volume
def test_pl():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_pl(0, 0)
assert False
def test_aw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_aw(0, 0)
assert False
def test_sb():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sb(0, 0)
assert False
def test_sw():
# TODO: known value
# TODO: 0 as an input
# TODO: negative as an input
# TODO: scalar and array
gross_total_volume.gtv_sw(0, 0)
assert False
def test_gross_total_volume():
# TODO: test this delegates properly
assert False
|
|
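Once reference values exist, the TODO lists above map naturally onto pytest parametrization. A sketch — the zero-in/zero-out expectation and the shared two-argument signature are assumptions taken from the calls above, not verified behavior:

import pytest
from gypsy import gross_total_volume

@pytest.mark.parametrize('gtv', [
    gross_total_volume.gtv_pl,
    gross_total_volume.gtv_aw,
    gross_total_volume.gtv_sb,
    gross_total_volume.gtv_sw,
])
def test_zero_inputs_yield_zero_volume(gtv):
    # assumption: zero height/basal-area inputs should yield zero volume
    assert gtv(0, 0) == 0
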
35b275cb3cb714bcc1634d84d6b4272f598e6bf3
|
exercises/chapter_04/exercise_04_01/exercise_04_01.py
|
exercises/chapter_04/exercise_04_01/exercise_04_01.py
|
# 4-1. Pizzas
favorite_pizzas = ["Columpus", "Marco Polo", "Amerikana"]
for pizza in favorite_pizzas:
print(pizza)
|
Add first basic version of exercise 4.1.
|
Add first basic version of exercise 4.1.
|
Python
|
mit
|
HenrikSamuelsson/python-crash-course
|
Add first basic version of exercise 4.1.
|
# 4-1. Pizzas
favorite_pizzas = ["Columpus", "Marco Polo", "Amerikana"]
for pizza in favorite_pizzas:
print(pizza)
|
<commit_before><commit_msg>Add first basic version of exercise 4.1.<commit_after>
|
# 4-1. Pizzas
favorite_pizzas = ["Columpus", "Marco Polo", "Amerikana"]
for pizza in favorite_pizzas:
print(pizza)
|
Add first basic version of exercise 4.1.# 4-1. Pizzas
favorite_pizzas = ["Columpus", "Marco Polo", "Amerikana"]
for pizza in favorite_pizzas:
print(pizza)
|
<commit_before><commit_msg>Add first basic version of exercise 4.1.<commit_after># 4-1. Pizzas
favorite_pizzas = ["Columpus", "Marco Polo", "Amerikana"]
for pizza in favorite_pizzas:
print(pizza)
|
|
69918ffce16158842e61d1cc89ec81d8f791ab43
|
support/upgrade.py
|
support/upgrade.py
|
# encoding: utf-8
# Copyright 2010 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# Upgrade an existing installation of the EDRN public portal.
#
# Execute with a Zope instance's "run" command, ie:
# bin/instance-debug run support/upgrade.py
#
# Assumes that the instance already has a previous edition of the EDRN portal installed.
_adminUser = 'admin' # Name of the Zope administrative user
_policy = 'edrnsite.policy' # Name of the policy that orchestrates everything
_siteID = 'edrn' # Object ID of the PloneSite object in the Zope app server
from AccessControl.SecurityManagement import newSecurityManager, noSecurityManager
from Products.CMFCore.utils import getToolByName
from Testing import makerequest
from zope.app.component.hooks import setSite
import transaction, sys
def main(app, siteID, adminUser, policy):
# Get a test request installed.
app = makerequest.makerequest(app)
# Set up security.
acl_users = app.acl_users
user = acl_users.getUser(adminUser)
if user:
user = user.__of__(acl_users)
newSecurityManager(None, user)
else:
raise Exception('Admin user "%s" does not exist' % adminUser)
# Get the portal.
portal = getattr(app, siteID)
setSite(portal)
# Disable CacheFu. If we don't, the CMF Squid Tool will start a purge thread, and that
# purge thread isn't a daemon thread (it probably should be). Since it's not a daemon,
# we won't ever terminate.
cacheTool = getToolByName(portal, 'portal_cache_settings')
cacheTool.setEnabled(False)
cacheTool.setDomains([])
# Upgrade the portal.
qi = getToolByName(portal, 'portal_quickinstaller')
    qi.upgradeProduct(policy)
# Commit everything and shut down.
transaction.commit()
noSecurityManager()
return True
if __name__ == '__main__':
rc = main(app, _siteID, _adminUser, _policy) # ``app`` comes from ``instance run`` magic.
sys.exit(rc and 0 or -1)
|
Upgrade procedure for EDRN portal
|
Upgrade procedure for EDRN portal
|
Python
|
apache-2.0
|
EDRN/PublicPortal,EDRN/PublicPortal
|
Upgrade procedure for EDRN portal
|
# encoding: utf-8
# Copyright 2010 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# Upgrade an existing installation of the EDRN public portal.
#
# Execute with a Zope instance's "run" command, ie:
# bin/instance-debug run support/upgrade.py
#
# Assumes that the instance already has a previous edition of the EDRN portal installed.
_adminUser = 'admin' # Name of the Zope administrative user
_policy = 'edrnsite.policy' # Name of the policy that orchestrates everything
_siteID = 'edrn' # Object ID of the PloneSite object in the Zope app server
from AccessControl.SecurityManagement import newSecurityManager, noSecurityManager
from Products.CMFCore.utils import getToolByName
from Testing import makerequest
from zope.app.component.hooks import setSite
import transaction, sys
def main(app, siteID, adminUser, policy):
# Get a test request installed.
app = makerequest.makerequest(app)
# Set up security.
acl_users = app.acl_users
user = acl_users.getUser(adminUser)
if user:
user = user.__of__(acl_users)
newSecurityManager(None, user)
else:
raise Exception('Admin user "%s" does not exist' % adminUser)
# Get the portal.
portal = getattr(app, siteID)
setSite(portal)
# Disable CacheFu. If we don't, the CMF Squid Tool will start a purge thread, and that
# purge thread isn't a daemon thread (it probably should be). Since it's not a daemon,
# we won't ever terminate.
cacheTool = getToolByName(portal, 'portal_cache_settings')
cacheTool.setEnabled(False)
cacheTool.setDomains([])
# Upgrade the portal.
qi = getToolByName(portal, 'portal_quickinstaller')
    qi.upgradeProduct(policy)
# Commit everything and shut down.
transaction.commit()
noSecurityManager()
return True
if __name__ == '__main__':
rc = main(app, _siteID, _adminUser, _policy) # ``app`` comes from ``instance run`` magic.
sys.exit(rc and 0 or -1)
|
<commit_before><commit_msg>Upgrade procedure for EDRN portal<commit_after>
|
# encoding: utf-8
# Copyright 2010 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# Upgrade an existing installation of the EDRN public portal.
#
# Execute with a Zope instance's "run" command, ie:
# bin/instance-debug run support/upgrade.py
#
# Assumes that the instance already has a previous edition of the EDRN portal installed.
_adminUser = 'admin' # Name of the Zope administrative user
_policy = 'edrnsite.policy' # Name of the policy that orchestrates everything
_siteID = 'edrn' # Object ID of the PloneSite object in the Zope app server
from AccessControl.SecurityManagement import newSecurityManager, noSecurityManager
from Products.CMFCore.utils import getToolByName
from Testing import makerequest
from zope.app.component.hooks import setSite
import transaction, sys
def main(app, siteID, adminUser, policy):
# Get a test request installed.
app = makerequest.makerequest(app)
# Set up security.
acl_users = app.acl_users
user = acl_users.getUser(adminUser)
if user:
user = user.__of__(acl_users)
newSecurityManager(None, user)
else:
raise Exception('Admin user "%s" does not exist' % adminUser)
# Get the portal.
portal = getattr(app, siteID)
setSite(portal)
# Disable CacheFu. If we don't, the CMF Squid Tool will start a purge thread, and that
# purge thread isn't a daemon thread (it probably should be). Since it's not a daemon,
# we won't ever terminate.
cacheTool = getToolByName(portal, 'portal_cache_settings')
cacheTool.setEnabled(False)
cacheTool.setDomains([])
# Upgrade the portal.
qi = getToolByName(portal, 'portal_quickinstaller')
    qi.upgradeProduct(policy)
# Commit everything and shut down.
transaction.commit()
noSecurityManager()
return True
if __name__ == '__main__':
rc = main(app, _siteID, _adminUser, _policy) # ``app`` comes from ``instance run`` magic.
sys.exit(rc and 0 or -1)
|
Upgrade procedure for EDRN portal# encoding: utf-8
# Copyright 2010 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# Upgrade an existing installation of the EDRN public portal.
#
# Execute with a Zope instance's "run" command, ie:
# bin/instance-debug run support/upgrade.py
#
# Assumes that the instance already has a previous edition of the EDRN portal installed.
_adminUser = 'admin' # Name of the Zope administrative user
_policy = 'edrnsite.policy' # Name of the policy that orchestrates everything
_siteID = 'edrn' # Object ID of the PloneSite object in the Zope app server
from AccessControl.SecurityManagement import newSecurityManager, noSecurityManager
from Products.CMFCore.utils import getToolByName
from Testing import makerequest
from zope.app.component.hooks import setSite
import transaction, sys
def main(app, siteID, adminUser, policy):
# Get a test request installed.
app = makerequest.makerequest(app)
# Set up security.
acl_users = app.acl_users
user = acl_users.getUser(adminUser)
if user:
user = user.__of__(acl_users)
newSecurityManager(None, user)
else:
raise Exception('Admin user "%s" does not exist' % adminUser)
# Get the portal.
portal = getattr(app, siteID)
setSite(portal)
# Disable CacheFu. If we don't, the CMF Squid Tool will start a purge thread, and that
# purge thread isn't a daemon thread (it probably should be). Since it's not a daemon,
# we won't ever terminate.
cacheTool = getToolByName(portal, 'portal_cache_settings')
cacheTool.setEnabled(False)
cacheTool.setDomains([])
# Upgrade the portal.
qi = getToolByName(portal, 'portal_quickinstaller')
    qi.upgradeProduct(policy)
# Commit everything and shut down.
transaction.commit()
noSecurityManager()
return True
if __name__ == '__main__':
rc = main(app, _siteID, _adminUser, _policy) # ``app`` comes from ``instance run`` magic.
sys.exit(rc and 0 or -1)
|
<commit_before><commit_msg>Upgrade procedure for EDRN portal<commit_after># encoding: utf-8
# Copyright 2010 California Institute of Technology. ALL RIGHTS
# RESERVED. U.S. Government Sponsorship acknowledged.
#
# Upgrade an existing installation of the EDRN public portal.
#
# Execute with a Zope instance's "run" command, ie:
# bin/instance-debug run support/upgrade.py
#
# Assumes that the instance already has a previous edition of the EDRN portal installed.
_adminUser = 'admin' # Name of the Zope administrative user
_policy = 'edrnsite.policy' # Name of the policy that orchestrates everything
_siteID = 'edrn' # Object ID of the PloneSite object in the Zope app server
from AccessControl.SecurityManagement import newSecurityManager, noSecurityManager
from Products.CMFCore.utils import getToolByName
from Testing import makerequest
from zope.app.component.hooks import setSite
import transaction, sys
def main(app, siteID, adminUser, policy):
# Get a test request installed.
app = makerequest.makerequest(app)
# Set up security.
acl_users = app.acl_users
user = acl_users.getUser(adminUser)
if user:
user = user.__of__(acl_users)
newSecurityManager(None, user)
else:
raise Exception('Admin user "%s" does not exist' % adminUser)
# Get the portal.
portal = getattr(app, siteID)
setSite(portal)
# Disable CacheFu. If we don't, the CMF Squid Tool will start a purge thread, and that
# purge thread isn't a daemon thread (it probably should be). Since it's not a daemon,
# we won't ever terminate.
cacheTool = getToolByName(portal, 'portal_cache_settings')
cacheTool.setEnabled(False)
cacheTool.setDomains([])
# Upgrade the portal.
qi = getToolByName(portal, 'portal_quickinstaller')
    qi.upgradeProduct(policy)
# Commit everything and shut down.
transaction.commit()
noSecurityManager()
return True
if __name__ == '__main__':
rc = main(app, _siteID, _adminUser, _policy) # ``app`` comes from ``instance run`` magic.
sys.exit(rc and 0 or -1)
|
|
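As a possible hardening step, the script could confirm the policy product is still registered before committing the transaction. A sketch that reuses the `policy` argument, under the assumption that this portal's quick-installer is the classic CMFQuickInstallerTool, which exposes isProductInstalled; it would be called between upgradeProduct and transaction.commit():

def verify_upgrade(portal, policy):
    # Fail loudly if the policy product vanished during the upgrade, so the
    # transaction is never committed in a broken state.
    qi = getToolByName(portal, 'portal_quickinstaller')
    if not qi.isProductInstalled(policy):
        raise Exception('Product "%s" not installed after upgrade' % policy)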
c74c7f7799c9f4ebf0930bfc3b83226b56eb7fcc
|
tools/testDevice/testDevice.py
|
tools/testDevice/testDevice.py
|
#!/usr/bin/python
#
# Copyright 2015 - 2016 Boling Consulting Solutions, bcsw.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, Response
from xrd import Element, XRD, Link
app = Flask(__name__)
@app.route('/')
def index():
return "This is a test program to simulate a RESTCONF capable device.<br\>"
@app.route('/.well-known/host-meta', methods=['GET'])
def get_host_meta():
"""
This function services the well-known host-meta XRD data for RESTCONF
API root discovery.
"""
xrd_obj = XRD()
# Add a few extra elements and links before RESTCONF to help make sure
# the parsing/XPATH is correct
xrd_obj.elements.append(Element('hm:Host', 'testDevice'))
xrd_obj.links.append(Link(rel='license', href='http://www.apache.org/licenses/LICENSE-2.0'))
xrd_obj.links.append(Link(rel='author', href='http://bcsw.net'))
# Add the link for RESTCONF
xrd_obj.links.append(Link(rel='restconf', href='top/restconf'))
# Add some extra links here as well
xrd_obj.links.append(Link(rel='testPath', href='this/does/not/exist'))
xrd_obj.links.append(Link(rel='http://oexchange.org/spec/0.8/rel/resident-target',
type_='application/xrd+xml',
href='http://twitter.com/oexchange.xrd'))
# Convert to XML, pretty-print it to aid in debugging
xrd_doc = xrd_obj.to_xml()
return Response(xrd_doc.toprettyxml(indent=' '), mimetype='application/xrd+xml')
@app.route('/config/reset', methods=['POST'])
def do_reset():
"""
This url (/config/reset) can be called to reset configurable parameters
to their defaults.
This allows you to run the server once and call various 'config/set/*' pages
with variables to alter the behaviour (for a particular test) and then call
this to reset items back to normal so you can do more tests.
"""
pass # TODO: Need to implement
if __name__ == '__main__':
app.run(debug=True)
|
Test RESTCONF device (for XRD and eventually other debugging purposes)
|
Test RESTCONF device (for XRD and eventually other debugging purposes)
|
Python
|
apache-2.0
|
cboling/onos-restconf-providers,cboling/onos-restconf-providers,cboling/onos-restconf-providers
|
Test RESTCONF device (for XRD and eventually other debugging purposes)
|
#!/usr/bin/python
#
# Copyright 2015 - 2016 Boling Consulting Solutions, bcsw.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, Response
from xrd import Element, XRD, Link
app = Flask(__name__)
@app.route('/')
def index():
return "This is a test program to simulate a RESTCONF capable device.<br\>"
@app.route('/.well-known/host-meta', methods=['GET'])
def get_host_meta():
"""
This function services the well-known host-meta XRD data for RESTCONF
API root discovery.
"""
xrd_obj = XRD()
# Add a few extra elements and links before RESTCONF to help make sure
# the parsing/XPATH is correct
xrd_obj.elements.append(Element('hm:Host', 'testDevice'))
xrd_obj.links.append(Link(rel='license', href='http://www.apache.org/licenses/LICENSE-2.0'))
xrd_obj.links.append(Link(rel='author', href='http://bcsw.net'))
# Add the link for RESTCONF
xrd_obj.links.append(Link(rel='restconf', href='top/restconf'))
# Add some extra links here as well
xrd_obj.links.append(Link(rel='testPath', href='this/does/not/exist'))
xrd_obj.links.append(Link(rel='http://oexchange.org/spec/0.8/rel/resident-target',
type_='application/xrd+xml',
href='http://twitter.com/oexchange.xrd'))
# Convert to XML, pretty-print it to aid in debugging
xrd_doc = xrd_obj.to_xml()
return Response(xrd_doc.toprettyxml(indent=' '), mimetype='application/xrd+xml')
@app.route('/config/reset', methods=['POST'])
def do_reset():
"""
This url (/config/reset) can be called to reset configurable parameters
to their defaults.
This allows you to run the server once and call various 'config/set/*' pages
with variables to alter the behaviour (for a particular test) and then call
this to reset items back to normal so you can do more tests.
"""
pass # TODO: Need to implement
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before><commit_msg>Test RESTCONF device (for XRD and eventually other debugging purposes)<commit_after>
|
#!/usr/bin/python
#
# Copyright 2015 - 2016 Boling Consulting Solutions, bcsw.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, Response
from xrd import Element, XRD, Link
app = Flask(__name__)
@app.route('/')
def index():
return "This is a test program to simulate a RESTCONF capable device.<br\>"
@app.route('/.well-known/host-meta', methods=['GET'])
def get_host_meta():
"""
This function services the well-known host-meta XRD data for RESTCONF
API root discovery.
"""
xrd_obj = XRD()
# Add a few extra elements and links before RESTCONF to help make sure
# the parsing/XPATH is correct
xrd_obj.elements.append(Element('hm:Host', 'testDevice'))
xrd_obj.links.append(Link(rel='license', href='http://www.apache.org/licenses/LICENSE-2.0'))
xrd_obj.links.append(Link(rel='author', href='http://bcsw.net'))
# Add the link for RESTCONF
xrd_obj.links.append(Link(rel='restconf', href='top/restconf'))
# Add some extra links here as well
xrd_obj.links.append(Link(rel='testPath', href='this/does/not/exist'))
xrd_obj.links.append(Link(rel='http://oexchange.org/spec/0.8/rel/resident-target',
type_='application/xrd+xml',
href='http://twitter.com/oexchange.xrd'))
# Convert to XML, pretty-print it to aid in debugging
xrd_doc = xrd_obj.to_xml()
return Response(xrd_doc.toprettyxml(indent=' '), mimetype='application/xrd+xml')
@app.route('/config/reset', methods=['POST'])
def do_reset():
"""
This url (/config/reset) can be called to reset configurable parameters
to their defaults.
This allows you to run the server once and call various 'config/set/*' pages
with variables to alter the behaviour (for a particular test) and then call
this to reset items back to normal so you can do more tests.
"""
pass # TODO: Need to implement
if __name__ == '__main__':
app.run(debug=True)
|
Test RESTCONF device (for XRD and eventually other debugging purposes)#!/usr/bin/python
#
# Copyright 2015 - 2016 Boling Consulting Solutions, bcsw.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, Response
from xrd import Element, XRD, Link
app = Flask(__name__)
@app.route('/')
def index():
return "This is a test program to simulate a RESTCONF capable device.<br\>"
@app.route('/.well-known/host-meta', methods=['GET'])
def get_host_meta():
"""
This function services the well-known host-meta XRD data for RESTCONF
API root discovery.
"""
xrd_obj = XRD()
# Add a few extra elements and links before RESTCONF to help make sure
# the parsing/XPATH is correct
xrd_obj.elements.append(Element('hm:Host', 'testDevice'))
xrd_obj.links.append(Link(rel='license', href='http://www.apache.org/licenses/LICENSE-2.0'))
xrd_obj.links.append(Link(rel='author', href='http://bcsw.net'))
# Add the link for RESTCONF
xrd_obj.links.append(Link(rel='restconf', href='top/restconf'))
# Add some extra links here as well
xrd_obj.links.append(Link(rel='testPath', href='this/does/not/exist'))
xrd_obj.links.append(Link(rel='http://oexchange.org/spec/0.8/rel/resident-target',
type_='application/xrd+xml',
href='http://twitter.com/oexchange.xrd'))
# Convert to XML, pretty-print it to aid in debugging
xrd_doc = xrd_obj.to_xml()
return Response(xrd_doc.toprettyxml(indent=' '), mimetype='application/xrd+xml')
@app.route('/config/reset', methods=['POST'])
def do_reset():
"""
This url (/config/reset) can be called to reset configurable parameters
to their defaults.
This allows you to run the server once and call various 'config/set/*' pages
with variables to alter the behaviour (for a particular test) and then call
this to reset items back to normal so you can do more tests.
"""
pass # TODO: Need to implement
if __name__ == '__main__':
app.run(debug=True)
|
<commit_before><commit_msg>Test RESTCONF device (for XRD and eventually other debugging purposes)<commit_after>#!/usr/bin/python
#
# Copyright 2015 - 2016 Boling Consulting Solutions, bcsw.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from flask import Flask, Response
from xrd import Element, XRD, Link
app = Flask(__name__)
@app.route('/')
def index():
return "This is a test program to simulate a RESTCONF capable device.<br\>"
@app.route('/.well-known/host-meta', methods=['GET'])
def get_host_meta():
"""
This function services the well-known host-meta XRD data for RESTCONF
API root discovery.
"""
xrd_obj = XRD()
# Add a few extra elements and links before RESTCONF to help make sure
# the parsing/XPATH is correct
xrd_obj.elements.append(Element('hm:Host', 'testDevice'))
xrd_obj.links.append(Link(rel='license', href='http://www.apache.org/licenses/LICENSE-2.0'))
xrd_obj.links.append(Link(rel='author', href='http://bcsw.net'))
# Add the link for RESTCONF
xrd_obj.links.append(Link(rel='restconf', href='top/restconf'))
# Add some extra links here as well
xrd_obj.links.append(Link(rel='testPath', href='this/does/not/exist'))
xrd_obj.links.append(Link(rel='http://oexchange.org/spec/0.8/rel/resident-target',
type_='application/xrd+xml',
href='http://twitter.com/oexchange.xrd'))
# Convert to XML, pretty-print it to aid in debugging
xrd_doc = xrd_obj.to_xml()
return Response(xrd_doc.toprettyxml(indent=' '), mimetype='application/xrd+xml')
@app.route('/config/reset', methods=['POST'])
def do_reset():
"""
This url (/config/reset) can be called to reset configurable parameters
to their defaults.
This allows you to run the server once and call various 'config/set/*' pages
with variables to alter the behaviour (for a particular test) and then call
this to reset items back to normal so you can do more tests.
"""
pass # TODO: Need to implement
if __name__ == '__main__':
app.run(debug=True)
|
|
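For exercising the endpoint above from the client side, RESTCONF API-root discovery boils down to fetching host-meta and reading the href of the Link whose rel is "restconf". A sketch that assumes the Flask default port 5000 and the standard XRD 1.0 namespace:

import requests
import xml.etree.ElementTree as ET

XRD_NS = '{http://docs.oasis-open.org/ns/xri/xrd-1.0}'

def discover_restconf_root(base='http://127.0.0.1:5000'):
    # GET the well-known document served by the test device above and pull
    # out the RESTCONF API root advertised in its Link elements.
    resp = requests.get(base + '/.well-known/host-meta')
    resp.raise_for_status()
    xrd = ET.fromstring(resp.content)
    for link in xrd.findall(XRD_NS + 'Link'):
        if link.get('rel') == 'restconf':
            return link.get('href')  # e.g. 'top/restconf'
    return None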
f4e35be12b0c0af40f7a9e18871526c7f482b5be
|
python/examples/bspline_pickle.py
|
python/examples/bspline_pickle.py
|
###################################################################################
# Pickle/unpickle functionality for the BSpline class. #
# Works by creating a named temporary file which the BSpline is saved to. #
# The temp file is then read and the data is returned to Pickle. #
# When unpickling the data we get from Pickle is written to a temporary file #
# which is then read by the SPLINTER backend. #
# #
# This is a hack and tested for 5 minutes before I went to bed, so use with care. #
###################################################################################
import pickle
import copyreg
import tempfile
import splinter
# TODO: Change me
splinter.load("/home/awenhaug/projects/splinter/build/splinter-python/lib/linux/x86-64/libsplinter-3-0.so")
def constructor(serialized_data) -> splinter.BSpline:
print(f"Unpickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, "wb") as f:
f.write(serialized_data)
return splinter.BSpline(temp.name)
def reducer(bspline: splinter.BSpline):
print(f"Pickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
bspline.save(temp.name)
with open(temp.name, "rb") as f:
data = f.read()
return constructor, (data, )
# Register reducer as the reducer for objects of type BSpline
copyreg.pickle(splinter.BSpline, reducer)
xs = list(range(10))
ys = [4.1*x**3 - 1.3*x**2 for x in xs]
filename = "bspline_pickle_example.p"
try:
bspline = pickle.load(open(filename, "rb"))
print(f"Loaded BSpline from {filename}")
except FileNotFoundError:
bspline = splinter.BSplineBuilder(xs, ys).build()
pickle.dump(bspline, open(filename, "wb"))
print(f"Saved BSpline to {filename}")
print(bspline.eval([(x1+x0)/2 for x0, x1 in zip(xs, xs[1:])]))
|
Add example showing how to pickle/unpickle a BSpline object
|
Add example showing how to pickle/unpickle a BSpline object
|
Python
|
mpl-2.0
|
bgrimstad/splinter,bgrimstad/splinter,bgrimstad/splinter,bgrimstad/splinter,bgrimstad/splinter
|
Add example showing how to pickle/unpickle a BSpline object
|
###################################################################################
# Pickle/unpickle functionality for the BSpline class. #
# Works by creating a named temporary file which the BSpline is saved to. #
# The temp file is then read and the data is returned to Pickle. #
# When unpickling the data we get from Pickle is written to a temporary file #
# which is then read by the SPLINTER backend. #
# #
# This is a hack and tested for 5 minutes before I went to bed, so use with care. #
###################################################################################
import pickle
import copyreg
import tempfile
import splinter
# TODO: Change me
splinter.load("/home/awenhaug/projects/splinter/build/splinter-python/lib/linux/x86-64/libsplinter-3-0.so")
def constructor(serialized_data) -> splinter.BSpline:
print(f"Unpickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, "wb") as f:
f.write(serialized_data)
return splinter.BSpline(temp.name)
def reducer(bspline: splinter.BSpline):
print(f"Pickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
bspline.save(temp.name)
with open(temp.name, "rb") as f:
data = f.read()
return constructor, (data, )
# Register reducer as the reducer for objects of type BSpline
copyreg.pickle(splinter.BSpline, reducer)
xs = list(range(10))
ys = [4.1*x**3 - 1.3*x**2 for x in xs]
filename = "bspline_pickle_example.p"
try:
bspline = pickle.load(open(filename, "rb"))
print(f"Loaded BSpline from {filename}")
except FileNotFoundError:
bspline = splinter.BSplineBuilder(xs, ys).build()
pickle.dump(bspline, open(filename, "wb"))
print(f"Saved BSpline to {filename}")
print(bspline.eval([(x1+x0)/2 for x0, x1 in zip(xs, xs[1:])]))
|
<commit_before><commit_msg>Add example showing how to pickle/unpickle a BSpline object<commit_after>
|
###################################################################################
# Pickle/unpickle functionality for the BSpline class. #
# Works by creating a named temporary file which the BSpline is saved to. #
# The temp file is then read and the data is returned to Pickle. #
# When unpickling the data we get from Pickle is written to a temporary file #
# which is then read by the SPLINTER backend. #
# #
# This is a hack and tested for 5 minutes before I went to bed, so use with care. #
###################################################################################
import pickle
import copyreg
import tempfile
import splinter
# TODO: Change me
splinter.load("/home/awenhaug/projects/splinter/build/splinter-python/lib/linux/x86-64/libsplinter-3-0.so")
def constructor(serialized_data) -> splinter.BSpline:
print(f"Unpickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, "wb") as f:
f.write(serialized_data)
return splinter.BSpline(temp.name)
def reducer(bspline: splinter.BSpline):
print(f"Pickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
bspline.save(temp.name)
with open(temp.name, "rb") as f:
data = f.read()
return constructor, (data, )
# Register reducer as the reducer for objects of type BSpline
copyreg.pickle(splinter.BSpline, reducer)
xs = list(range(10))
ys = [4.1*x**3 - 1.3*x**2 for x in xs]
filename = "bspline_pickle_example.p"
try:
bspline = pickle.load(open(filename, "rb"))
print(f"Loaded BSpline from {filename}")
except FileNotFoundError:
bspline = splinter.BSplineBuilder(xs, ys).build()
pickle.dump(bspline, open(filename, "wb"))
print(f"Saved BSpline to {filename}")
print(bspline.eval([(x1+x0)/2 for x0, x1 in zip(xs, xs[1:])]))
|
Add example showing how to pickle/unpickle a BSpline object###################################################################################
# Pickle/unpickle functionality for the BSpline class. #
# Works by creating a named temporary file which the BSpline is saved to. #
# The temp file is then read and the data is returned to Pickle. #
# When unpickling the data we get from Pickle is written to a temporary file #
# which is then read by the SPLINTER backend. #
# #
# This is a hack and tested for 5 minutes before I went to bed, so use with care. #
###################################################################################
import pickle
import copyreg
import tempfile
import splinter
# TODO: Change me
splinter.load("/home/awenhaug/projects/splinter/build/splinter-python/lib/linux/x86-64/libsplinter-3-0.so")
def constructor(serialized_data) -> splinter.BSpline:
print(f"Unpickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, "wb") as f:
f.write(serialized_data)
return splinter.BSpline(temp.name)
def reducer(bspline: splinter.BSpline):
print(f"Pickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
bspline.save(temp.name)
with open(temp.name, "rb") as f:
data = f.read()
return constructor, (data, )
# Register reducer as the reducer for objects of type BSpline
copyreg.pickle(splinter.BSpline, reducer)
xs = list(range(10))
ys = [4.1*x**3 - 1.3*x**2 for x in xs]
filename = "bspline_pickle_example.p"
try:
bspline = pickle.load(open(filename, "rb"))
print(f"Loaded BSpline from {filename}")
except FileNotFoundError:
bspline = splinter.BSplineBuilder(xs, ys).build()
pickle.dump(bspline, open(filename, "wb"))
print(f"Saved BSpline to {filename}")
print(bspline.eval([(x1+x0)/2 for x0, x1 in zip(xs, xs[1:])]))
|
<commit_before><commit_msg>Add example showing how to pickle/unpickle a BSpline object<commit_after>###################################################################################
# Pickle/unpickle functionality for the BSpline class. #
# Works by creating a named temporary file which the BSpline is saved to. #
# The temp file is then read and the data is returned to Pickle. #
# When unpickling the data we get from Pickle is written to a temporary file #
# which is then read by the SPLINTER backend. #
# #
# This is a hack and tested for 5 minutes before I went to bed, so use with care. #
###################################################################################
import pickle
import copyreg
import tempfile
import splinter
# TODO: Change me
splinter.load("/home/awenhaug/projects/splinter/build/splinter-python/lib/linux/x86-64/libsplinter-3-0.so")
def constructor(serialized_data) -> splinter.BSpline:
print(f"Unpickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
with open(temp.name, "wb") as f:
f.write(serialized_data)
return splinter.BSpline(temp.name)
def reducer(bspline: splinter.BSpline):
print(f"Pickling BSpline")
with tempfile.NamedTemporaryFile() as temp:
bspline.save(temp.name)
with open(temp.name, "rb") as f:
data = f.read()
return constructor, (data, )
# Register reducer as the reducer for objects of type BSpline
copyreg.pickle(splinter.BSpline, reducer)
xs = list(range(10))
ys = [4.1*x**3 - 1.3*x**2 for x in xs]
filename = "bspline_pickle_example.p"
try:
bspline = pickle.load(open(filename, "rb"))
print(f"Loaded BSpline from {filename}")
except FileNotFoundError:
bspline = splinter.BSplineBuilder(xs, ys).build()
pickle.dump(bspline, open(filename, "wb"))
print(f"Saved BSpline to {filename}")
print(bspline.eval([(x1+x0)/2 for x0, x1 in zip(xs, xs[1:])]))
|
|
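Because the reducer above is registered through copyreg, it is picked up not only when writing pickle files but also by in-memory pickling and by the copy module, which consults copyreg.dispatch_table. A small follow-on sketch, assuming it runs in the same session as the script above (bspline already built and the reducer registered):

import copy
import pickle

blob = pickle.dumps(bspline)     # bytes round trip, no .p file involved
clone = pickle.loads(blob)       # reconstructed via constructor() above
twin = copy.deepcopy(bspline)    # deepcopy also goes through the reducer
x = [4.5]
assert clone.eval(x) == bspline.eval(x) == twin.eval(x)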
b0b2e6d0fe5656825bc4ded8314297983f3401d9
|
utils.py
|
utils.py
|
#!/usr/bin/env python
import argparse
import sys
def parse_basic_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--input', '-i', metavar='FILE',
default=sys.stdin, type=argparse.FileType('r'),
help='the file to process (default: stdin)',
)
parser.add_argument(
'--output', '-o', metavar='FILE',
default=sys.stdout,
type=argparse.FileType('w'),
help='the file to write to (default: stdout)',
)
    return parser.parse_args(args)
|
Add basic argument parser that will be used in basic srt tools
|
Add basic argument parser that will be used in basic srt tools
|
Python
|
mit
|
cdown/srt
|
Add basic argument parser that will be used in basic srt tools
|
#!/usr/bin/env python
import argparse
import sys
def parse_basic_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--input', '-i', metavar='FILE',
default=sys.stdin, type=argparse.FileType('r'),
help='the file to process (default: stdin)',
)
parser.add_argument(
'--output', '-o', metavar='FILE',
default=sys.stdout,
type=argparse.FileType('w'),
help='the file to write to (default: stdout)',
)
    return parser.parse_args(args)
|
<commit_before><commit_msg>Add basic argument parser that will be used in basic srt tools<commit_after>
|
#!/usr/bin/env python
import argparse
import sys
def parse_basic_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--input', '-i', metavar='FILE',
default=sys.stdin, type=argparse.FileType('r'),
help='the file to process (default: stdin)',
)
parser.add_argument(
'--output', '-o', metavar='FILE',
default=sys.stdout,
type=argparse.FileType('w'),
help='the file to write to (default: stdout)',
)
    return parser.parse_args(args)
|
Add basic argument parser that will be used in basic srt tools#!/usr/bin/env python
import argparse
import sys
def parse_basic_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--input', '-i', metavar='FILE',
default=sys.stdin, type=argparse.FileType('r'),
help='the file to process (default: stdin)',
)
parser.add_argument(
'--output', '-o', metavar='FILE',
default=sys.stdout,
type=argparse.FileType('w'),
help='the file to write to (default: stdout)',
)
    return parser.parse_args(args)
|
<commit_before><commit_msg>Add basic argument parser that will be used in basic srt tools<commit_after>#!/usr/bin/env python
import argparse
import sys
def parse_basic_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'--input', '-i', metavar='FILE',
default=sys.stdin, type=argparse.FileType('r'),
help='the file to process (default: stdin)',
)
parser.add_argument(
'--output', '-o', metavar='FILE',
default=sys.stdout,
type=argparse.FileType('w'),
help='the file to write to (default: stdout)',
)
    return parser.parse_args(args)
|
|
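For context, a tool built on this helper stays very small. A sketch of a pass-through srt filter; the import name `utils` is an assumption about what this module ships as:

#!/usr/bin/env python
import utils

def main():
    args = utils.parse_basic_args()
    # With no flags this copies stdin to stdout; --input/--output override.
    for line in args.input:
        args.output.write(line)

if __name__ == '__main__':
    main()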
bdd8a51d29da86d764bf472219dbe17eea3f3674
|
tests/gl_test_2.py
|
tests/gl_test_2.py
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
Test two windows drawing GL with different contexts.
|
Test two windows drawing GL with different contexts.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@45 14d46d22-621c-0410-bb3d-6f67920f7d95
|
Python
|
bsd-3-clause
|
regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations,regular/pyglet-avbin-optimizations
|
Test two windows drawing GL with different contexts.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@45 14d46d22-621c-0410-bb3d-6f67920f7d95
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
<commit_before><commit_msg>Test two windows drawing GL with different contexts.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@45 14d46d22-621c-0410-bb3d-6f67920f7d95<commit_after>
|
#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
Test two windows drawing GL with different contexts.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@45 14d46d22-621c-0410-bb3d-6f67920f7d95#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
<commit_before><commit_msg>Test two windows drawing GL with different contexts.
git-svn-id: d4fdfcd4de20a449196f78acc655f735742cd30d@45 14d46d22-621c-0410-bb3d-6f67920f7d95<commit_after>#!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import pyglet.window
from pyglet.window.event import *
import time
from pyglet.GL.VERSION_1_1 import *
from pyglet.GLU.VERSION_1_1 import *
from pyglet import clock
factory = pyglet.window.WindowFactory()
factory.config._attributes['doublebuffer'] = 1
class ExitHandler(object):
running = True
def on_close(self):
self.running = False
def on_keypress(self, symbol, modifiers):
if symbol == pyglet.window.key.K_ESCAPE:
self.running = False
return EVENT_UNHANDLED
exit_handler = ExitHandler()
def setup():
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60., 1., 1., 100.)
glMatrixMode(GL_MODELVIEW)
glClearColor(1, 1, 1, 1)
glColor4f(.5, .5, .5, .5)
def draw():
global r
glClear(GL_COLOR_BUFFER_BIT)
glLoadIdentity()
r += 1
if r > 360: r = 0
glRotatef(r, 0, 0, 1)
glBegin(GL_QUADS)
glVertex3f(-1., -1., -5.)
glVertex3f(-1., 1., -5.)
glVertex3f(1., 1., -5.)
glVertex3f(1., -1., -5.)
glEnd()
w1 = factory.create(width=200, height=200)
w1.push_handlers(exit_handler)
w1.switch_to()
setup()
c = clock.Clock()
w2 = factory.create(width=400, height=400)
w2.push_handlers(exit_handler)
w2.switch_to()
setup()
r = 0
while exit_handler.running:
c.set_fps(60)
w1.switch_to()
w1.dispatch_events()
draw()
w1.flip()
w2.switch_to()
w2.dispatch_events()
draw()
w2.flip()
|
|
79a451ca7ff1e08bf4b126e8a4565166c203227e
|
scripts/extract_syscall.py
|
scripts/extract_syscall.py
|
#! /usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
PREAMBULE = """
#include <python2.7/Python.h>
#include "PythonBindings.h"
#include "Registers.h"
#include "asm/unistd_64.h"
void initLinux64Env(PyObject *idLinux64ClassDict)
{\
"""
SMT = ' PyDict_SetItemString(idLinux64ClassDict, "%s", PyInt_FromLong(%s));'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file",
help="this file must contains the syscalls definitions",
type=str)
args = parser.parse_args()
regex = re.compile(r"(__NR_\w+)")
print(PREAMBULE)
with open(args.file) as hfile:
for match in regex.finditer(hfile.read()):
name = match.groups()[0]
print(SMT % (name[5:].upper(), name))
print("}")
|
Create a script that parses asm/unistd_64.h to fetch syscall IDs.
|
Create a script that parses asm/unistd_64.h to fetch syscall IDs.
|
Python
|
apache-2.0
|
JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton
|
Create a script that parses asm/unistd_64.h to fetch syscall IDs.
|
#! /usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
PREAMBULE = """
#include <python2.7/Python.h>
#include "PythonBindings.h"
#include "Registers.h"
#include "asm/unistd_64.h"
void initLinux64Env(PyObject *idLinux64ClassDict)
{\
"""
SMT = ' PyDict_SetItemString(idLinux64ClassDict, "%s", PyInt_FromLong(%s));'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file",
help="this file must contains the syscalls definitions",
type=str)
args = parser.parse_args()
regex = re.compile(r"(__NR_\w+)")
print(PREAMBULE)
with open(args.file) as hfile:
for match in regex.finditer(hfile.read()):
name = match.groups()[0]
print(SMT % (name[5:].upper(), name))
print("}")
|
<commit_before><commit_msg>Create a script that parses asm/unistd_64.h to fetch syscall IDs.<commit_after>
|
#! /usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
PREAMBULE = """
#include <python2.7/Python.h>
#include "PythonBindings.h"
#include "Registers.h"
#include "asm/unistd_64.h"
void initLinux64Env(PyObject *idLinux64ClassDict)
{\
"""
SMT = ' PyDict_SetItemString(idLinux64ClassDict, "%s", PyInt_FromLong(%s));'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file",
help="this file must contains the syscalls definitions",
type=str)
args = parser.parse_args()
regex = re.compile(r"(__NR_\w+)")
print(PREAMBULE)
with open(args.file) as hfile:
for match in regex.finditer(hfile.read()):
name = match.groups()[0]
print(SMT % (name[5:].upper(), name))
print("}")
|
Create a script that parses asm/unistd_64.h to fetch syscall IDs.#! /usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
PREAMBULE = """
#include <python2.7/Python.h>
#include "PythonBindings.h"
#include "Registers.h"
#include "asm/unistd_64.h"
void initLinux64Env(PyObject *idLinux64ClassDict)
{\
"""
SMT = ' PyDict_SetItemString(idLinux64ClassDict, "%s", PyInt_FromLong(%s));'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file",
help="this file must contains the syscalls definitions",
type=str)
args = parser.parse_args()
regex = re.compile(r"(__NR_\w+)")
print(PREAMBULE)
with open(args.file) as hfile:
for match in regex.finditer(hfile.read()):
name = match.groups()[0]
print(SMT % (name[5:].upper(), name))
print("}")
|
<commit_before><commit_msg>Create a script that parses asm/unistd_64.h to fetch syscall IDs.<commit_after>#! /usr/bin/env python
from __future__ import print_function
import argparse
import sys
import re
PREAMBULE = """
#include <python2.7/Python.h>
#include "PythonBindings.h"
#include "Registers.h"
#include "asm/unistd_64.h"
void initLinux64Env(PyObject *idLinux64ClassDict)
{\
"""
SMT = ' PyDict_SetItemString(idLinux64ClassDict, "%s", PyInt_FromLong(%s));'
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("file",
help="this file must contains the syscalls definitions",
type=str)
args = parser.parse_args()
regex = re.compile(r"(__NR_\w+)")
print(PREAMBULE)
with open(args.file) as hfile:
for match in regex.finditer(hfile.read()):
name = match.groups()[0]
print(SMT % (name[5:].upper(), name))
print("}")
|
|
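To make the transformation concrete, here is the core loop run against a two-line header fragment (the defines are illustrative); each __NR_ match becomes one PyDict_SetItemString statement emitted between the preamble and the closing brace:

import re

sample = """
#define __NR_read 0
#define __NR_write 1
"""
for match in re.finditer(r"(__NR_\w+)", sample):
    name = match.group(1)
    print('    PyDict_SetItemString(idLinux64ClassDict, "%s", PyInt_FromLong(%s));'
          % (name[5:].upper(), name))
# prints:
#     PyDict_SetItemString(idLinux64ClassDict, "READ", PyInt_FromLong(__NR_read));
#     PyDict_SetItemString(idLinux64ClassDict, "WRITE", PyInt_FromLong(__NR_write));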
9c4224a13c38e319dd10be6964f36b6f0ae12553
|
python/ember/examples/example_cylindrical_inward.py
|
python/ember/examples/example_cylindrical_inward.py
|
#!/usr/bin/env python
"""
An inwardly-propagating, cylindrical, strained, lean flame.
The flame radius, defined by the centroid of the heat release
rate, is specified. The stagnation surface is a cylinder located
at a greater radius than the flame (on the burned side).
In this configuration, the curvature and stretching of the
flame can be varied independently.
"""
from ember import *
output = 'run/ex_cylindrical_inward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_inward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=True,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.006),
StrainParameters(initial=200,
final=200),
PositionControl(xInitial=0.002,
xFinal=0.002),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
|
Add example of an inwardly-propagating cylindrical flame
|
Add example of an inwardly-propagating cylindrical flame
|
Python
|
mit
|
speth/ember,speth/ember,speth/ember
|
Add example of an inwardly-propagating cylindrical flame
|
#!/usr/bin/env python
"""
An inwardly-propagating, cylindrical, strained, lean flame.
The flame radius, defined by the centroid of the heat release
rate, is specified. The stagnation surface is a cylinder located
at a greater radius than the flame (on the burned side).
In this configuration, the curvature and stretching of the
flame can be varied independently.
"""
from ember import *
output = 'run/ex_cylindrical_inward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_inward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=True,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.006),
StrainParameters(initial=200,
final=200),
PositionControl(xInitial=0.002,
xFinal=0.002),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
|
<commit_before><commit_msg>Add example of an inwardly-propagating cylindrical flame<commit_after>
|
#!/usr/bin/env python
"""
An inwardly-propagating, cylindrical, strained, lean flame.
The flame radius, defined by the centroid of the heat release
rate, is specified. The stagnation surface is a cylinder located
at a greater radius than the flame (on the burned side).
In this configuration, the curvature and stretching of the
flame can be varied independently.
"""
from ember import *
output = 'run/ex_cylindrical_inward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_inward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=True,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.006),
StrainParameters(initial=200,
final=200),
PositionControl(xInitial=0.002,
xFinal=0.002),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
|
Add example of an inwardly-propagating cylindrical flame#!/usr/bin/env python
"""
An inwardly-propagating, cylindrical, strained, lean flame.
The flame radius, defined by the centroid of the heat release
rate, is specified. The stagnation surface is a cylinder located
at a greater radius than the flame (on the burned side).
In this configuration, the curvature and stretching of the
flame can be varied independently.
"""
from ember import *
output = 'run/ex_cylindrical_inward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_inward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=True,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.006),
StrainParameters(initial=200,
final=200),
PositionControl(xInitial=0.002,
xFinal=0.002),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
|
<commit_before><commit_msg>Add example of an inwardly-propagating cylindrical flame<commit_after>#!/usr/bin/env python
"""
An inwardly-propagating, cylindrical, strained, lean flame.
The flame radius, defined by the centroid of the heat release
rate, is specified. The stagnation surface is a cylinder located
at a greater radius than the flame (on the burned side).
In this configuration, the curvature and stretching of the
flame can be varied independently.
"""
from ember import *
output = 'run/ex_cylindrical_inward'
conf = Config(
Paths(outputDir=output,
# logFile='ex_cylindrical_inward.log'
),
Chemistry(mechanismFile='gri30.xml'),
General(flameGeometry='cylindrical',
unburnedLeft=True,
fixedLeftLocation=True,
nThreads=4),
InitialCondition(fuel='CH4:0.5, H2:0.5',
equivalenceRatio=0.60,
xLeft=0.0,
xRight=0.006),
StrainParameters(initial=200,
final=200),
PositionControl(xInitial=0.002,
xFinal=0.002),
TerminationCondition(tEnd=10,
measurement='dTdt'),
Times(profileStepInterval=10,
regridStepInterval=10),
)
if __name__ == '__main__':
conf.run()
|
|
6413bf641060658bd057c330a4779d8460250d8b
|
tests/grammar_term-nonterm_test/TerminalHaveTest.py
|
tests/grammar_term-nonterm_test/TerminalHaveTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Grammar
class TempClass:
pass
class TerminalAddingTest(TestCase):
def test_haveTermEmpty(self):
gr = Grammar()
self.assertFalse(gr.have_term(TempClass))
self.assertFalse(gr.have_term(1))
self.assertFalse(gr.have_term('asdf'))
def test_haveTermClass(self):
gr = Grammar()
gr.add_term(TempClass)
self.assertTrue(gr.have_term(TempClass))
def test_haveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term([0, 'asdf']))
def test_dontHaveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term([TempClass, 'a']))
def test_haveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term((0, 'asdf')))
def test_dontHaveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term((TempClass, 'a')))
|
Add test for have_term method if an array is passed
|
Add test for have_term method if an array is passed
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add test for have_term method if an array is passed
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Grammar
class TempClass:
pass
class TerminalAddingTest(TestCase):
def test_haveTermEmpty(self):
gr = Grammar()
self.assertFalse(gr.have_term(TempClass))
self.assertFalse(gr.have_term(1))
self.assertFalse(gr.have_term('asdf'))
def test_haveTermClass(self):
gr = Grammar()
gr.add_term(TempClass)
self.assertTrue(gr.have_term(TempClass))
def test_haveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term([0, 'asdf']))
def test_dontHaveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term([TempClass, 'a']))
def test_haveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term((0, 'asdf')))
def test_dontHaveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term((TempClass, 'a')))
|
<commit_before><commit_msg>Add test for have_term method if an array is passed<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Grammar
class TempClass:
pass
class TerminalAddingTest(TestCase):
def test_haveTermEmpty(self):
gr = Grammar()
self.assertFalse(gr.have_term(TempClass))
self.assertFalse(gr.have_term(1))
self.assertFalse(gr.have_term('asdf'))
def test_haveTermClass(self):
gr = Grammar()
gr.add_term(TempClass)
self.assertTrue(gr.have_term(TempClass))
def test_haveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term([0, 'asdf']))
def test_dontHaveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term([TempClass, 'a']))
def test_haveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term((0, 'asdf')))
def test_dontHaveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term((TempClass, 'a')))
|
Add test for have_term method if an array is passed#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Grammar
class TempClass:
pass
class TerminalAddingTest(TestCase):
def test_haveTermEmpty(self):
gr = Grammar()
self.assertFalse(gr.have_term(TempClass))
self.assertFalse(gr.have_term(1))
self.assertFalse(gr.have_term('asdf'))
def test_haveTermClass(self):
gr = Grammar()
gr.add_term(TempClass)
self.assertTrue(gr.have_term(TempClass))
def test_haveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term([0, 'asdf']))
def test_dontHaveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term([TempClass, 'a']))
def test_haveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term((0, 'asdf')))
def test_dontHaveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term((TempClass, 'a')))
|
<commit_before><commit_msg>Add test for have_term method if array is passed<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase
from grammpy import Grammar
class TempClass:
pass
class TerminalAddingTest(TestCase):
def test_haveTermEmpty(self):
gr = Grammar()
self.assertFalse(gr.have_term(TempClass))
self.assertFalse(gr.have_term(1))
self.assertFalse(gr.have_term('asdf'))
def test_haveTermClass(self):
gr = Grammar()
gr.add_term(TempClass)
self.assertTrue(gr.have_term(TempClass))
def test_haveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term([0, 'asdf']))
def test_dontHaveTermArray(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term([TempClass, 'a']))
def test_haveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertTrue(gr.have_term((0, 'asdf')))
def test_dontHaveTermTuple(self):
gr = Grammar()
gr.add_term([TempClass, 0, 'asdf'])
self.assertFalse(gr.have_term((TempClass, 'a')))
|
|
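The tests above exercise an add_term/have_term pair that accepts either a single terminal or a list/tuple of terminals. A minimal sketch of a class that would satisfy exactly these tests follows; it is hypothetical and not grammpy's actual Grammar implementation.

class Grammar:
    def __init__(self):
        self._terms = set()

    def add_term(self, term):
        # Lists and tuples are treated as collections of terminals.
        terms = term if isinstance(term, (list, tuple)) else [term]
        for t in terms:
            self._terms.add(t)

    def have_term(self, term):
        # True only if *every* requested terminal is present.
        terms = term if isinstance(term, (list, tuple)) else [term]
        return all(t in self._terms for t in terms)
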
7a5af8d2c60327a76ae96572e5ee807be299e347
|
django_mc/migration_operations.py
|
django_mc/migration_operations.py
|
from django.db import migrations
ALL_REGIONS = object()
class _ManageComponentTypeInRegions(migrations.RunPython):
"""
Requires all migrations of ``django.contrib.contenttypes`` to be applied.
"""
def __init__(self, component_app_label, component_model,
regions=ALL_REGIONS):
self.component_app_label = component_app_label
self.component_model = component_model
self.regions = regions
super(_ManageComponentTypeInRegions, self).__init__(
self.run_forwards,
self.run_backwards)
def get_content_type_model(self, apps):
return apps.get_model('contenttypes', 'ContentType')
def get_region_model(self, apps):
return apps.get_model('django_mc', 'Region')
def get_component_content_type(self, apps):
ContentType = self.get_content_type_model(apps)
content_type, created = ContentType.objects.get_or_create(
app_label=self.component_app_label,
model=self.component_model)
return content_type
def get_regions(self, apps):
Region = self.get_region_model(apps)
if self.regions is ALL_REGIONS:
return Region.objects.all()
else:
return Region.objects.filter(slug__in=self.regions)
def add_to_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.add(content_type)
def remove_from_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.remove(content_type)
def run_forwards(self):
raise NotImplementedError()
def run_backwards(self):
raise NotImplementedError()
class AddComponentTypeToRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.add_to_regions(apps)
def run_backwards(self, apps, schema_editor):
self.remove_from_regions(apps)
class RemoveComponentTypeFromRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.remove_from_regions(apps)
def run_backwards(self, apps, schema_editor):
self.add_to_regions(apps)
|
Add migration operation for easy add/remove component types in regions
|
Add migration operation for easy add/remove component types in regions
|
Python
|
bsd-3-clause
|
team23/django_mc
|
Add migration operation for easy add/remove component types in regions
|
from django.db import migrations
ALL_REGIONS = object()
class _ManageComponentTypeInRegions(migrations.RunPython):
"""
Requires all migrations of ``django.contrib.contenttypes`` to be applied.
"""
def __init__(self, component_app_label, component_model,
regions=ALL_REGIONS):
self.component_app_label = component_app_label
self.component_model = component_model
self.regions = regions
super(_ManageComponentTypeInRegions, self).__init__(
self.run_forwards,
self.run_backwards)
def get_content_type_model(self, apps):
return apps.get_model('contenttypes', 'ContentType')
def get_region_model(self, apps):
return apps.get_model('django_mc', 'Region')
def get_component_content_type(self, apps):
ContentType = self.get_content_type_model(apps)
content_type, created = ContentType.objects.get_or_create(
app_label=self.component_app_label,
model=self.component_model)
return content_type
def get_regions(self, apps):
Region = self.get_region_model(apps)
if self.regions is ALL_REGIONS:
return Region.objects.all()
else:
return Region.objects.filter(slug__in=self.regions)
def add_to_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.add(content_type)
def remove_from_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.remove(content_type)
def run_forwards(self):
raise NotImplementedError()
def run_backwards(self):
raise NotImplementedError()
class AddComponentTypeToRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.add_to_regions(apps)
def run_backwards(self, apps, schema_editor):
self.remove_from_regions(apps)
class RemoveComponentTypeFromRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.remove_from_regions(apps)
def run_backwards(self, apps, schema_editor):
self.add_to_regions(apps)
|
<commit_before><commit_msg>Add migration operation for easy add/remove component types in regions<commit_after>
|
from django.db import migrations
ALL_REGIONS = object()
class _ManageComponentTypeInRegions(migrations.RunPython):
"""
Requires all migrations of ``django.contrib.contenttypes`` to be applied.
"""
def __init__(self, component_app_label, component_model,
regions=ALL_REGIONS):
self.component_app_label = component_app_label
self.component_model = component_model
self.regions = regions
super(_ManageComponentTypeInRegions, self).__init__(
self.run_forwards,
self.run_backwards)
def get_content_type_model(self, apps):
return apps.get_model('contenttypes', 'ContentType')
def get_region_model(self, apps):
return apps.get_model('django_mc', 'Region')
def get_component_content_type(self, apps):
ContentType = self.get_content_type_model(apps)
content_type, created = ContentType.objects.get_or_create(
app_label=self.component_app_label,
model=self.component_model)
return content_type
def get_regions(self, apps):
Region = self.get_region_model(apps)
if self.regions is ALL_REGIONS:
return Region.objects.all()
else:
return Region.objects.filter(slug__in=self.regions)
def add_to_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.add(content_type)
def remove_from_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.remove(content_type)
def run_forwards(self):
raise NotImplementedError()
def run_backwards(self):
raise NotImplementedError()
class AddComponentTypeToRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.add_to_regions(apps)
def run_backwards(self, apps, schema_editor):
self.remove_from_regions(apps)
class RemoveComponentTypeFromRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.remove_from_regions(apps)
def run_backwards(self, apps, schema_editor):
self.add_to_regions(apps)
|
Add migration operation for easy add/remove component types in regionsfrom django.db import migrations
ALL_REGIONS = object()
class _ManageComponentTypeInRegions(migrations.RunPython):
"""
Requires all migrations of ``django.contrib.contenttypes`` to be applied.
"""
def __init__(self, component_app_label, component_model,
regions=ALL_REGIONS):
self.component_app_label = component_app_label
self.component_model = component_model
self.regions = regions
super(_ManageComponentTypeInRegions, self).__init__(
self.run_forwards,
self.run_backwards)
def get_content_type_model(self, apps):
return apps.get_model('contenttypes', 'ContentType')
def get_region_model(self, apps):
return apps.get_model('django_mc', 'Region')
def get_component_content_type(self, apps):
ContentType = self.get_content_type_model(apps)
content_type, created = ContentType.objects.get_or_create(
app_label=self.component_app_label,
model=self.component_model)
return content_type
def get_regions(self, apps):
Region = self.get_region_model(apps)
if self.regions is ALL_REGIONS:
return Region.objects.all()
else:
return Region.objects.filter(slug__in=self.regions)
def add_to_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.add(content_type)
def remove_from_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.remove(content_type)
def run_forwards(self):
raise NotImplementedError()
def run_backwards(self):
raise NotImplementedError()
class AddComponentTypeToRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.add_to_regions(apps)
def run_backwards(self, apps, schema_editor):
self.remove_from_regions(apps)
class RemoveComponentTypeFromRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.remove_from_regions(apps)
def run_backwards(self, apps, schema_editor):
self.add_to_regions(apps)
|
<commit_before><commit_msg>Add migration operation for easy add/remove component types in regions<commit_after>from django.db import migrations
ALL_REGIONS = object()
class _ManageComponentTypeInRegions(migrations.RunPython):
"""
Requires all migrations of ``django.contrib.contenttypes`` to be applied.
"""
def __init__(self, component_app_label, component_model,
regions=ALL_REGIONS):
self.component_app_label = component_app_label
self.component_model = component_model
self.regions = regions
super(_ManageComponentTypeInRegions, self).__init__(
self.run_forwards,
self.run_backwards)
def get_content_type_model(self, apps):
return apps.get_model('contenttypes', 'ContentType')
def get_region_model(self, apps):
return apps.get_model('django_mc', 'Region')
def get_component_content_type(self, apps):
ContentType = self.get_content_type_model(apps)
content_type, created = ContentType.objects.get_or_create(
app_label=self.component_app_label,
model=self.component_model)
return content_type
def get_regions(self, apps):
Region = self.get_region_model(apps)
if self.regions is ALL_REGIONS:
return Region.objects.all()
else:
return Region.objects.filter(slug__in=self.regions)
def add_to_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.add(content_type)
def remove_from_regions(self, apps):
content_type = self.get_component_content_type(apps)
for region in self.get_regions(apps):
region.available_components.remove(content_type)
def run_forwards(self):
raise NotImplementedError()
def run_backwards(self):
raise NotImplementedError()
class AddComponentTypeToRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.add_to_regions(apps)
def run_backwards(self, apps, schema_editor):
self.remove_from_regions(apps)
class RemoveComponentTypeFromRegions(_ManageComponentTypeInRegions):
def run_forwards(self, apps, schema_editor):
self.remove_from_regions(apps)
def run_backwards(self, apps, schema_editor):
self.add_to_regions(apps)
|
|
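A usage sketch for the operations above, as they might appear in a downstream project's migration. The app label 'myapp', model name 'teasercomponent', the django_mc migration name, and the region slugs are illustrative assumptions, not values taken from django_mc itself.

from django.db import migrations

from django_mc.migration_operations import AddComponentTypeToRegions


class Migration(migrations.Migration):

    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('django_mc', '0001_initial'),      # assumed migration name
        ('myapp', '0002_teasercomponent'),  # hypothetical component app
    ]

    operations = [
        # Make the component type available in every region ...
        AddComponentTypeToRegions('myapp', 'teasercomponent'),
        # ... or restrict it to selected regions by slug:
        # AddComponentTypeToRegions('myapp', 'teasercomponent',
        #                           regions=['sidebar', 'footer']),
    ]
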
548bcc45c4e8c98cd4f8ae8722f422f648b7f83b
|
winsys/extras/isapi_monitor.py
|
winsys/extras/isapi_monitor.py
|
import win32traceutil
import monitor_directory
import isapi_wsgi
def __ExtensionFactory__ ():
return isapi_wsgi.ISAPISimpleHandler (monitor_directory.App ())
def set_auth (params, options, target_dir):
#
# Make sure directory authentication is:
# - Anonymous
# - NTLM
#
target_dir.AuthFlags = 5
target_dir.SetInfo ()
if __name__ == '__main__':
from isapi.install import *
params = ISAPIParameters ()
sm = [
ScriptMapParams (Extension="*", Flags=0)
]
vd = VirtualDirParameters (
Name="monitor",
Description = "Monitor Directory",
ScriptMaps = sm,
ScriptMapUpdate = "replace",
PostInstall=set_auth
)
params.VirtualDirs = [vd]
HandleCommandLine (params)
|
Set up ISAPI link for directory monitor
|
Set up ISAPI link for directory monitor
added isapi_monitor.py
|
Python
|
mit
|
one2pret/winsys,one2pret/winsys
|
Set up ISAPI link for directory monitor
added isapi_monitor.py
|
import win32traceutil
import monitor_directory
import isapi_wsgi
def __ExtensionFactory__ ():
return isapi_wsgi.ISAPISimpleHandler (monitor_directory.App ())
def set_auth (params, options, target_dir):
#
# Make sure directory authentication is:
# - Anonymous
# - NTLM
#
target_dir.AuthFlags = 5
target_dir.SetInfo ()
if __name__ == '__main__':
from isapi.install import *
params = ISAPIParameters ()
sm = [
ScriptMapParams (Extension="*", Flags=0)
]
vd = VirtualDirParameters (
Name="monitor",
Description = "Monitor Directory",
ScriptMaps = sm,
ScriptMapUpdate = "replace",
PostInstall=set_auth
)
params.VirtualDirs = [vd]
HandleCommandLine (params)
|
<commit_before><commit_msg>Set up ISAPI link for directory monitor
added isapi_monitor.py<commit_after>
|
import win32traceutil
import monitor_directory
import isapi_wsgi
def __ExtensionFactory__ ():
return isapi_wsgi.ISAPISimpleHandler (monitor_directory.App ())
def set_auth (params, options, target_dir):
#
# Make sure directory authentication is:
# - Anonymous
# - NTLM
#
target_dir.AuthFlags = 5
target_dir.SetInfo ()
if __name__ == '__main__':
from isapi.install import *
params = ISAPIParameters ()
sm = [
ScriptMapParams (Extension="*", Flags=0)
]
vd = VirtualDirParameters (
Name="monitor",
Description = "Monitor Directory",
ScriptMaps = sm,
ScriptMapUpdate = "replace",
PostInstall=set_auth
)
params.VirtualDirs = [vd]
HandleCommandLine (params)
|
Set up ISAPI link for directory monitor
added isapi_monitor.pyimport win32traceutil
import monitor_directory
import isapi_wsgi
def __ExtensionFactory__ ():
return isapi_wsgi.ISAPISimpleHandler (monitor_directory.App ())
def set_auth (params, options, target_dir):
#
# Make sure directory authentication is:
# - Anonymous
# - NTLM
#
target_dir.AuthFlags = 5
target_dir.SetInfo ()
if __name__ == '__main__':
from isapi.install import *
params = ISAPIParameters ()
sm = [
ScriptMapParams (Extension="*", Flags=0)
]
vd = VirtualDirParameters (
Name="monitor",
Description = "Monitor Directory",
ScriptMaps = sm,
ScriptMapUpdate = "replace",
PostInstall=set_auth
)
params.VirtualDirs = [vd]
HandleCommandLine (params)
|
<commit_before><commit_msg>Set up ISAPI link for directory monitor
added isapi_monitor.py<commit_after>import win32traceutil
import monitor_directory
import isapi_wsgi
def __ExtensionFactory__ ():
return isapi_wsgi.ISAPISimpleHandler (monitor_directory.App ())
def set_auth (params, options, target_dir):
#
# Make sure directory authentication is:
# - Anonymous
# - NTLM
#
target_dir.AuthFlags = 5
target_dir.SetInfo ()
if __name__ == '__main__':
from isapi.install import *
params = ISAPIParameters ()
sm = [
ScriptMapParams (Extension="*", Flags=0)
]
vd = VirtualDirParameters (
Name="monitor",
Description = "Monitor Directory",
ScriptMaps = sm,
ScriptMapUpdate = "replace",
PostInstall=set_auth
)
params.VirtualDirs = [vd]
HandleCommandLine (params)
|
|
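The script above is driven by isapi.install.HandleCommandLine, which dispatches on sys.argv, so deployment is typically `python isapi_monitor.py install` and removal `python isapi_monitor.py remove`. The AuthFlags value it writes decomposes into the usual IIS metabase constants, as this small check illustrates (the constant values are the documented MD_AUTH_* ones):

# MD_AUTH_ANONYMOUS = 0x1, MD_AUTH_BASIC = 0x2, MD_AUTH_NT = 0x4
AUTH_ANONYMOUS = 0x1
AUTH_NT = 0x4
assert AUTH_ANONYMOUS | AUTH_NT == 5  # anonymous + NTLM, matching set_auth
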
b11bd211a117b695f2a1a2aa09763f4332e37ace
|
tests/ratings/test_rating_signals.py
|
tests/ratings/test_rating_signals.py
|
import pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
|
Add test for rating signals
|
Add test for rating signals
|
Python
|
agpl-3.0
|
liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4,liqd/adhocracy4
|
Add test for rating signals
|
import pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
|
<commit_before><commit_msg>Add test for rating signals<commit_after>
|
import pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
|
Add test for rating signalsimport pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
|
<commit_before><commit_msg>Add test for rating signals<commit_after>import pytest
from django.core.exceptions import ObjectDoesNotExist
from adhocracy4.ratings import models
@pytest.mark.django_db
def test_delete_of_content_object(rating):
question = rating.content_object
question.delete()
with pytest.raises(ObjectDoesNotExist):
models.Rating.objects.get(id=rating.id)
|
|
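The test above implies a signal receiver that cleans up ratings when their content object is deleted. A minimal sketch of such a receiver follows; it is illustrative rather than adhocracy4's actual signal module, and the Rating field names (content_type, object_pk) are assumptions about the generic foreign key.

from django.contrib.contenttypes.models import ContentType
from django.db.models.signals import post_delete
from django.dispatch import receiver

from adhocracy4.ratings.models import Rating


@receiver(post_delete)
def delete_ratings_of_content_object(sender, instance, **kwargs):
    if sender is Rating:  # don't recurse when ratings themselves are deleted
        return
    content_type = ContentType.objects.get_for_model(sender)
    Rating.objects.filter(
        content_type=content_type,
        object_pk=instance.pk,  # field names assumed; adjust to the model
    ).delete()
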
5be2a1721f345afae9c92dfd2d999fdc6393a38a
|
tests/test_mailparsers_accepted_upload.py
|
tests/test_mailparsers_accepted_upload.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import AcceptedUploadParser as p
class TestMailParserAcceptedUpload(unittest.TestCase):
def setUp(self):
self.body = [
'-----BEGIN PGP SIGNED MESSAGE-----',
'Hash: SHA1',
'',
'Format: 1.7',
'Date: Thu, 03 Apr 2008 11:45:26 +0100',
'Source: haskell-irc',
'Binary: libghc6-irc-dev libghc6-irc-doc',
'Architecture: source i386 all',
'Version: 0.4.2-1',
'Distribution: unstable',
'Urgency: low',
'Maintainer: Chris Lamb <chris@chris-lamb.co.uk>',
'Changed-By: Chris Lamb <chris@chris-lamb.co.uk>',
'Description: ',
' libghc6-irc-dev - GHC 6 libraries for the Haskell IRC library',
' libghc6-irc-doc - GHC 6 libraries for the Haskell IRC library (documentation)',
'Changes: ',
# etc.
]
def testSimple(self):
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'haskell-irc')
self.assertEqual(msg.version, '0.4.2-1')
self.assertEqual(msg.distribution, 'unstable')
self.assertEqual(msg.urgency, 'low')
self.assertEqual(msg.by, 'Chris Lamb <chris@chris-lamb.co.uk>')
self.assertEqual(msg.closes, None)
def testCloses(self):
self.body.append('Closes: 123456 456123')
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.closes, [123456, 456123])
if __name__ == "__main__":
unittest.main()
|
Add some tests for the AcceptedUpload parser
|
Add some tests for the AcceptedUpload parser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>
|
Python
|
agpl-3.0
|
lamby/debian-devel-changes-bot,xtaran/debian-devel-changes-bot,xtaran/debian-devel-changes-bot,sebastinas/debian-devel-changes-bot,lamby/debian-devel-changes-bot,lamby/debian-devel-changes-bot
|
Add some tests for the AcceptedUpload parser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import AcceptedUploadParser as p
class TestMailParserAcceptedUpload(unittest.TestCase):
def setUp(self):
self.body = [
'-----BEGIN PGP SIGNED MESSAGE-----',
'Hash: SHA1',
'',
'Format: 1.7',
'Date: Thu, 03 Apr 2008 11:45:26 +0100',
'Source: haskell-irc',
'Binary: libghc6-irc-dev libghc6-irc-doc',
'Architecture: source i386 all',
'Version: 0.4.2-1',
'Distribution: unstable',
'Urgency: low',
'Maintainer: Chris Lamb <chris@chris-lamb.co.uk>',
'Changed-By: Chris Lamb <chris@chris-lamb.co.uk>',
'Description: ',
' libghc6-irc-dev - GHC 6 libraries for the Haskell IRC library',
' libghc6-irc-doc - GHC 6 libraries for the Haskell IRC library (documentation)',
'Changes: ',
# etc.
]
def testSimple(self):
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'haskell-irc')
self.assertEqual(msg.version, '0.4.2-1')
self.assertEqual(msg.distribution, 'unstable')
self.assertEqual(msg.urgency, 'low')
self.assertEqual(msg.by, 'Chris Lamb <chris@chris-lamb.co.uk>')
self.assertEqual(msg.closes, None)
def testCloses(self):
self.body.append('Closes: 123456 456123')
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.closes, [123456, 456123])
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some tests for the AcceptedUpload parser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk><commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import AcceptedUploadParser as p
class TestMailParserAcceptedUpload(unittest.TestCase):
def setUp(self):
self.body = [
'-----BEGIN PGP SIGNED MESSAGE-----',
'Hash: SHA1',
'',
'Format: 1.7',
'Date: Thu, 03 Apr 2008 11:45:26 +0100',
'Source: haskell-irc',
'Binary: libghc6-irc-dev libghc6-irc-doc',
'Architecture: source i386 all',
'Version: 0.4.2-1',
'Distribution: unstable',
'Urgency: low',
'Maintainer: Chris Lamb <chris@chris-lamb.co.uk>',
'Changed-By: Chris Lamb <chris@chris-lamb.co.uk>',
'Description: ',
' libghc6-irc-dev - GHC 6 libraries for the Haskell IRC library',
' libghc6-irc-doc - GHC 6 libraries for the Haskell IRC library (documentation)',
'Changes: ',
# etc.
]
def testSimple(self):
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'haskell-irc')
self.assertEqual(msg.version, '0.4.2-1')
self.assertEqual(msg.distribution, 'unstable')
self.assertEqual(msg.urgency, 'low')
self.assertEqual(msg.by, 'Chris Lamb <chris@chris-lamb.co.uk>')
self.assertEqual(msg.closes, None)
def testCloses(self):
self.body.append('Closes: 123456 456123')
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.closes, [123456, 456123])
if __name__ == "__main__":
unittest.main()
|
Add some tests for the AcceptedUpload parser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import AcceptedUploadParser as p
class TestMailParserAcceptedUpload(unittest.TestCase):
def setUp(self):
self.body = [
'-----BEGIN PGP SIGNED MESSAGE-----',
'Hash: SHA1',
'',
'Format: 1.7',
'Date: Thu, 03 Apr 2008 11:45:26 +0100',
'Source: haskell-irc',
'Binary: libghc6-irc-dev libghc6-irc-doc',
'Architecture: source i386 all',
'Version: 0.4.2-1',
'Distribution: unstable',
'Urgency: low',
'Maintainer: Chris Lamb <chris@chris-lamb.co.uk>',
'Changed-By: Chris Lamb <chris@chris-lamb.co.uk>',
'Description: ',
' libghc6-irc-dev - GHC 6 libraries for the Haskell IRC library',
' libghc6-irc-doc - GHC 6 libraries for the Haskell IRC library (documentation)',
'Changes: ',
# etc.
]
def testSimple(self):
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'haskell-irc')
self.assertEqual(msg.version, '0.4.2-1')
self.assertEqual(msg.distribution, 'unstable')
self.assertEqual(msg.urgency, 'low')
self.assertEqual(msg.by, 'Chris Lamb <chris@chris-lamb.co.uk>')
self.assertEqual(msg.closes, None)
def testCloses(self):
self.body.append('Closes: 123456 456123')
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.closes, [123456, 456123])
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add some tests for the AcceptedUpload parser
Signed-off-by: Chris Lamb <711c73f64afdce07b7e38039a96d2224209e9a6c@chris-lamb.co.uk><commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from DebianChangesBot.mailparsers import AcceptedUploadParser as p
class TestMailParserAcceptedUpload(unittest.TestCase):
def setUp(self):
self.body = [
'-----BEGIN PGP SIGNED MESSAGE-----',
'Hash: SHA1',
'',
'Format: 1.7',
'Date: Thu, 03 Apr 2008 11:45:26 +0100',
'Source: haskell-irc',
'Binary: libghc6-irc-dev libghc6-irc-doc',
'Architecture: source i386 all',
'Version: 0.4.2-1',
'Distribution: unstable',
'Urgency: low',
'Maintainer: Chris Lamb <chris@chris-lamb.co.uk>',
'Changed-By: Chris Lamb <chris@chris-lamb.co.uk>',
'Description: ',
' libghc6-irc-dev - GHC 6 libraries for the Haskell IRC library',
' libghc6-irc-doc - GHC 6 libraries for the Haskell IRC library (documentation)',
'Changes: ',
# etc.
]
def testSimple(self):
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.package, 'haskell-irc')
self.assertEqual(msg.version, '0.4.2-1')
self.assertEqual(msg.distribution, 'unstable')
self.assertEqual(msg.urgency, 'low')
self.assertEqual(msg.by, 'Chris Lamb <chris@chris-lamb.co.uk>')
self.assertEqual(msg.closes, None)
def testCloses(self):
self.body.append('Closes: 123456 456123')
msg = p.parse({}, self.body)
self.assert_(msg)
self.assertEqual(msg.closes, [123456, 456123])
if __name__ == "__main__":
unittest.main()
|
|
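The testCloses case implies that the parser turns a 'Closes:' line into a list of ints, or None when the field is absent. A standalone sketch of just that piece (the real AcceptedUploadParser handles the full .changes format, PGP framing included):

def parse_closes(body):
    for line in body:
        if line.startswith('Closes:'):
            return [int(n) for n in line.split(':', 1)[1].split()]
    return None

assert parse_closes(['Closes: 123456 456123']) == [123456, 456123]
assert parse_closes(['Urgency: low']) is None
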
49dd2d8bdd6bceea79f7799be41844f01ba4df1e
|
txircd/modules/extra/stats_onlineopers.py
|
txircd/modules/extra/stats_onlineopers.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsOnlineOpers(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsOnlineOpers"
def actions(self):
return [ ("statsruntype-onlineopers", 10, self.listOnlineOpers) ]
def listOnlineOpers(self):
info = {}
for user in self.ircd.users.itervalues():
if self.ircd.runActionUntilValue("userhasoperpermission", user, "", users=[user]):
info[user.nick] = "{} ({}@{}) Idle: {} secs".format(user.nick, user.ident, user.host(), int((now() - user.idleSince).total_seconds()))
return info
statsOnlineOpers = StatsOnlineOpers()
|
Add STATS type to display online opers
|
Add STATS type to display online opers
|
Python
|
bsd-3-clause
|
Heufneutje/txircd
|
Add STATS type to display online opers
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsOnlineOpers(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsOnlineOpers"
def actions(self):
return [ ("statsruntype-onlineopers", 10, self.listOnlineOpers) ]
def listOnlineOpers(self):
info = {}
for user in self.ircd.users.itervalues():
if self.ircd.runActionUntilValue("userhasoperpermission", user, "", users=[user]):
info[user.nick] = "{} ({}@{}) Idle: {} secs".format(user.nick, user.ident, user.host(), int((now() - user.idleSince).total_seconds()))
return info
statsOnlineOpers = StatsOnlineOpers()
|
<commit_before><commit_msg>Add STATS type to display online opers<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsOnlineOpers(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsOnlineOpers"
def actions(self):
return [ ("statsruntype-onlineopers", 10, self.listOnlineOpers) ]
def listOnlineOpers(self):
info = {}
for user in self.ircd.users.itervalues():
if self.ircd.runActionUntilValue("userhasoperpermission", user, "", users=[user]):
info[user.nick] = "{} ({}@{}) Idle: {} secs".format(user.nick, user.ident, user.host(), int((now() - user.idleSince).total_seconds()))
return info
statsOnlineOpers = StatsOnlineOpers()
|
Add STATS type to display online opersfrom twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsOnlineOpers(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsOnlineOpers"
def actions(self):
return [ ("statsruntype-onlineopers", 10, self.listOnlineOpers) ]
def listOnlineOpers(self):
info = {}
for user in self.ircd.users.itervalues():
if self.ircd.runActionUntilValue("userhasoperpermission", user, "", users=[user]):
info[user.nick] = "{} ({}@{}) Idle: {} secs".format(user.nick, user.ident, user.host(), int((now() - user.idleSince).total_seconds()))
return info
statsOnlineOpers = StatsOnlineOpers()
|
<commit_before><commit_msg>Add STATS type to display online opers<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from txircd.utils import now
from zope.interface import implements
class StatsOnlineOpers(ModuleData):
implements(IPlugin, IModuleData)
name = "StatsOnlineOpers"
def actions(self):
return [ ("statsruntype-onlineopers", 10, self.listOnlineOpers) ]
def listOnlineOpers(self):
info = {}
for user in self.ircd.users.itervalues():
if self.ircd.runActionUntilValue("userhasoperpermission", user, "", users=[user]):
info[user.nick] = "{} ({}@{}) Idle: {} secs".format(user.nick, user.ident, user.host(), int((now() - user.idleSince).total_seconds()))
return info
statsOnlineOpers = StatsOnlineOpers()
|
|
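The reply values built by listOnlineOpers have the shape reproduced below; the nick, ident, host, and idle time are made-up sample values, and the format string is copied from the module above.

from datetime import datetime, timedelta

now = datetime.now()
idle_since = now - timedelta(seconds=42)
line = "{} ({}@{}) Idle: {} secs".format(
    "oper1", "ident", "host.example",
    int((now - idle_since).total_seconds()))
assert line == "oper1 (ident@host.example) Idle: 42 secs"
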
372e2d788e13bd1825edc2bdb31dfd4dda5353cb
|
kolibri/content/utils/channels.py
|
kolibri/content/utils/channels.py
|
import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
return valid_db_names
|
import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir, return_full_dir=False):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
if not return_full_dir:
return valid_db_names
else:
full_dir_template = os.path.join(content_database_dir, "{}.sqlite3")
return [full_dir_template.format(f) for f in valid_db_names]
|
Extend get_channel_id_* to return the full path to the exported DBs.
|
Extend get_channel_id_* to return the full path to the exported DBs.
|
Python
|
mit
|
learningequality/kolibri,jtamiace/kolibri,indirectlylit/kolibri,MingDai/kolibri,jtamiace/kolibri,ralphiee22/kolibri,learningequality/kolibri,mrpau/kolibri,mrpau/kolibri,lyw07/kolibri,aronasorman/kolibri,learningequality/kolibri,christianmemije/kolibri,jtamiace/kolibri,jonboiser/kolibri,jayoshih/kolibri,jtamiace/kolibri,ralphiee22/kolibri,66eli77/kolibri,mrpau/kolibri,rtibbles/kolibri,lyw07/kolibri,jayoshih/kolibri,whitzhu/kolibri,benjaoming/kolibri,benjaoming/kolibri,aronasorman/kolibri,DXCanas/kolibri,christianmemije/kolibri,whitzhu/kolibri,mrpau/kolibri,MingDai/kolibri,DXCanas/kolibri,66eli77/kolibri,jamalex/kolibri,jayoshih/kolibri,aronasorman/kolibri,lyw07/kolibri,whitzhu/kolibri,benjaoming/kolibri,indirectlylit/kolibri,MingDai/kolibri,indirectlylit/kolibri,learningequality/kolibri,benjaoming/kolibri,whitzhu/kolibri,66eli77/kolibri,jonboiser/kolibri,MingDai/kolibri,rtibbles/kolibri,jonboiser/kolibri,indirectlylit/kolibri,66eli77/kolibri,aronasorman/kolibri,rtibbles/kolibri,jayoshih/kolibri,ralphiee22/kolibri,rtibbles/kolibri,DXCanas/kolibri,christianmemije/kolibri,jamalex/kolibri,jamalex/kolibri,jonboiser/kolibri,ralphiee22/kolibri,christianmemije/kolibri,lyw07/kolibri,jamalex/kolibri,DXCanas/kolibri
|
import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
return valid_db_names
Extend get_channel_id_* to return the full path to the exported DBs.
|
import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir, return_full_dir=False):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
if not return_full_dir:
return valid_db_names
else:
full_dir_template = os.path.join(content_database_dir, "{}.sqlite3")
return [full_dir_template.format(f) for f in valid_db_names]
|
<commit_before>import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
return valid_db_names
<commit_msg>Extend get_channel_id_* to return the full path to the exported DBs.<commit_after>
|
import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir, return_full_dir=False):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
if not return_full_dir:
return valid_db_names
else:
full_dir_template = os.path.join(content_database_dir, "{}.sqlite3")
return [full_dir_template.format(f) for f in valid_db_names]
|
import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
return valid_db_names
Extend get_channel_id_* to return the full path to the exported DBs.import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir, return_full_dir=False):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
if not return_full_dir:
return valid_db_names
else:
full_dir_template = os.path.join(content_database_dir, "{}.sqlite3")
return [full_dir_template.format(f) for f in valid_db_names]
|
<commit_before>import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
return valid_db_names
<commit_msg>Extend get_channel_id_* to return the full path to the exported DBs.<commit_after>import fnmatch
import logging as logger
import os
import uuid
logging = logger.getLogger(__name__)
def _is_valid_hex_uuid(uuid_to_test):
try:
uuid_obj = uuid.UUID(uuid_to_test)
except ValueError:
return False
return uuid_to_test == uuid_obj.hex
def get_channel_id_list_from_scanning_content_database_dir(content_database_dir, return_full_dir=False):
"""
Returns a list of channel IDs for the channel databases that exist in a content database directory.
"""
db_list = fnmatch.filter(os.listdir(content_database_dir), '*.sqlite3')
db_names = [db.split('.sqlite3', 1)[0] for db in db_list]
valid_db_names = [name for name in db_names if _is_valid_hex_uuid(name)]
invalid_db_names = set(db_names) - set(valid_db_names)
if invalid_db_names:
logging.warning("Ignoring databases in content database directory '{directory}' with invalid names: {names}"
.format(directory=content_database_dir, names=invalid_db_names))
if not return_full_dir:
return valid_db_names
else:
full_dir_template = os.path.join(content_database_dir, "{}.sqlite3")
return [full_dir_template.format(f) for f in valid_db_names]
|
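A usage sketch for the extended helper: with return_full_dir=True the caller gets full .sqlite3 paths instead of bare channel IDs. The snippet fabricates a content directory with one validly named database file, assuming kolibri is importable.

import os
import tempfile
import uuid

from kolibri.content.utils.channels import (
    get_channel_id_list_from_scanning_content_database_dir)

content_dir = tempfile.mkdtemp()
channel_id = uuid.uuid4().hex
open(os.path.join(content_dir, channel_id + '.sqlite3'), 'w').close()

assert get_channel_id_list_from_scanning_content_database_dir(
    content_dir) == [channel_id]
assert get_channel_id_list_from_scanning_content_database_dir(
    content_dir, return_full_dir=True) \
    == [os.path.join(content_dir, channel_id + '.sqlite3')]
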
dac42830664dd1dd4b636c942b730a18f81663b2
|
myhdl/test/bugs/test_issue_180.py
|
myhdl/test/bugs/test_issue_180.py
|
from __future__ import absolute_import
from myhdl import *
times_called = 0
@block
def Demonstration():
a = Signal(False)
@instance
def poke_loop():
yield delay(1)
a.next = not a
@always(a)
def comb_loop():
global times_called
times_called += 1
a.next = not a
assert times_called < 1001
return (comb_loop, poke_loop)
def test_issue_180():
demo_inst = Demonstration()
demo_inst.config_sim(trace=True)
sim = Simulation(demo_inst)
sim.run(10)
|
Add a test case for combinatorial loops
|
Add a test case for combinatorial loops
|
Python
|
lgpl-2.1
|
juhasch/myhdl,juhasch/myhdl,juhasch/myhdl
|
Add a test case for combinatorial loops
|
from __future__ import absolute_import
from myhdl import *
times_called = 0
@block
def Demonstration():
a = Signal(False)
@instance
def poke_loop():
yield delay(1)
a.next = not a
@always(a)
def comb_loop():
global times_called
times_called += 1
a.next = not a
assert times_called < 1001
return (comb_loop, poke_loop)
def test_issue_180():
demo_inst = Demonstration()
demo_inst.config_sim(trace=True)
sim = Simulation(demo_inst)
sim.run(10)
|
<commit_before><commit_msg>Add a test case for combinatorial loops<commit_after>
|
from __future__ import absolute_import
from myhdl import *
times_called = 0
@block
def Demonstration():
a = Signal(False)
@instance
def poke_loop():
yield delay(1)
a.next = not a
@always(a)
def comb_loop():
global times_called
times_called += 1
a.next = not a
assert times_called < 1001
return (comb_loop, poke_loop)
def test_issue_180():
demo_inst = Demonstration()
demo_inst.config_sim(trace=True)
sim = Simulation(demo_inst)
sim.run(10)
|
Add a test case for combinatorial loopsfrom __future__ import absolute_import
from myhdl import *
times_called = 0
@block
def Demonstration():
a = Signal(False)
@instance
def poke_loop():
yield delay(1)
a.next = not a
@always(a)
def comb_loop():
global times_called
times_called += 1
a.next = not a
assert times_called < 1001
return (comb_loop, poke_loop)
def test_issue_180():
demo_inst = Demonstration()
demo_inst.config_sim(trace=True)
sim = Simulation(demo_inst)
sim.run(10)
|
<commit_before><commit_msg>Add a test case for combinatorial loops<commit_after>from __future__ import absolute_import
from myhdl import *
times_called = 0
@block
def Demonstration():
a = Signal(False)
@instance
def poke_loop():
yield delay(1)
a.next = not a
@always(a)
def comb_loop():
global times_called
times_called += 1
a.next = not a
assert times_called < 1001
return (comb_loop, poke_loop)
def test_issue_180():
demo_inst = Demonstration()
demo_inst.config_sim(trace=True)
sim = Simulation(demo_inst)
sim.run(10)
|
|
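The runaway behaviour in the test comes from comb_loop being sensitive to `a` while also driving `a`, so each poke re-triggers it until the assert fires. For contrast, a loop-free variant drives a different signal and settles after one evaluation per poke (a sketch using the same block API):

from myhdl import block, always

@block
def LoopFree(a, b):
    @always(a)
    def comb():
        b.next = not a  # drives b, not a, so no feedback loop
    return comb
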
eeaba2ee8a0e0717a11624781445ad7c3a8d854d
|
examples/unpFromLattice.py
|
examples/unpFromLattice.py
|
import uci.BorisUpdater as BorisUpdater
import uci.Ptcls as Ptcls
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Sr88 ions.
atomic_unit = 1.66053892e-27
ion_mass = 87.9056 * atomic_unit
# initialize particles
n_wells = 100
n_ions = n_wells**3
ptcls = Ptcls.Ptcls()
ptcls.set_nptcls(2 * n_ions)
# ions
lattice_spacing = 1.0e-6
xmin = -0.5 * (n_wells - 1) * lattice_spacing
for i in range(n_ions):
ptcls.x()[i] = xmin + lattice_spacing * ((i / (n_wells**0)) % n_wells)
ptcls.y()[i] = xmin + lattice_spacing * ((i / (n_wells**1)) % n_wells)
ptcls.z()[i] = xmin + lattice_spacing * ((i / (n_wells**2)) % n_wells)
ptcls.ptclList[3:6,:n_ions] = 0
ptcls.ptclList[6,:n_ions] = fund_charge
ptcls.ptclList[7,:n_ions] = ion_mass
#electrons
ptcls.ptclList[0:3,n_ions:] = np.random.normal(0.0, 0.5 * abs(xmin),
ptcls.ptclList[0:3,n_ions:].shape)
electron_temperature = 300.0
electron_mass = 9.10938291e-31
kB = 1.3806e-23
vThermal = np.sqrt(kB * electron_temperature / electron_mass)
ptcls.ptclList[3:6,n_ions:] = np.random.normal(0.0, vThermal,
ptcls.ptclList[3:6,n_ions:].shape)
ptcls.ptclList[6,n_ions:] = -fund_charge
ptcls.ptclList[7,n_ions:] = electron_mass
|
Create initial state for UNP from lattice simulation.
|
Create initial state for UNP from lattice simulation.
|
Python
|
mit
|
hosseinsadeghi/ultracold-ions,Tech-XCorp/ultracold-ions,Tech-XCorp/ultracold-ions,hosseinsadeghi/ultracold-ions
|
Create initial state for UNP from lattice simulation.
|
import uci.BorisUpdater as BorisUpdater
import uci.Ptcls as Ptcls
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Sr88 ions.
atomic_unit = 1.66053892e-27
ion_mass = 87.9056 * atomic_unit
# initialize particles
n_wells = 100
n_ions = n_wells**3
ptcls = Ptcls.Ptcls()
ptcls.set_nptcls(2 * n_ions)
# ions
lattice_spacing = 1.0e-6
xmin = -0.5 * (n_wells - 1) * lattice_spacing
for i in range(n_ions):
ptcls.x()[i] = xmin + lattice_spacing * ((i / (n_wells**0)) % n_wells)
ptcls.y()[i] = xmin + lattice_spacing * ((i / (n_wells**1)) % n_wells)
ptcls.z()[i] = xmin + lattice_spacing * ((i / (n_wells**2)) % n_wells)
ptcls.ptclList[3:6,:n_ions] = 0
ptcls.ptclList[6,:n_ions] = fund_charge
ptcls.ptclList[7,:n_ions] = ion_mass
#electrons
ptcls.ptclList[0:3,n_ions:] = np.random.normal(0.0, 0.5 * abs(xmin),
ptcls.ptclList[0:3,n_ions:].shape)
electron_temperature = 300.0
electron_mass = 9.10938291e-31
kB = 1.3806e-23
vThermal = np.sqrt(kB * electron_temperature / electron_mass)
ptcls.ptclList[3:6,n_ions:] = np.random.normal(0.0, vThermal,
ptcls.ptclList[3:6,n_ions:].shape)
ptcls.ptclList[6,n_ions:] = -fund_charge
ptcls.ptclList[7,n_ions:] = electron_mass
|
<commit_before><commit_msg>Create initial state for UNP from lattice simulation.<commit_after>
|
import uci.BorisUpdater as BorisUpdater
import uci.Ptcls as Ptcls
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Sr88 ions.
atomic_unit = 1.66053892e-27
ion_mass = 87.9056 * atomic_unit
# initialize particles
n_wells = 100
n_ions = n_wells**3
ptcls = Ptcls.Ptcls()
ptcls.set_nptcls(2 * n_ions)
# ions
lattice_spacing = 1.0e-6
xmin = -0.5 * (n_wells - 1) * lattice_spacing
for i in range(n_ions):
ptcls.x()[i] = xmin + lattice_spacing * ((i / (n_wells**0)) % n_wells)
ptcls.y()[i] = xmin + lattice_spacing * ((i / (n_wells**1)) % n_wells)
ptcls.z()[i] = xmin + lattice_spacing * ((i / (n_wells**2)) % n_wells)
ptcls.ptclList[3:6,:n_ions] = 0
ptcls.ptclList[6,:n_ions] = fund_charge
ptcls.ptclList[7,:n_ions] = ion_mass
#electrons
ptcls.ptclList[0:3,n_ions:] = np.random.normal(0.0, 0.5 * abs(xmin),
ptcls.ptclList[0:3,n_ions:].shape)
electron_temperature = 300.0
electron_mass = 9.10938291e-31
kB = 1.3806e-23
vThermal = np.sqrt(kB * electron_temperature / electron_mass)
ptcls.ptclList[3:6,n_ions:] = np.random.normal(0.0, vThermal,
ptcls.ptclList[3:6,n_ions:].shape)
ptcls.ptclList[6,n_ions:] = -fund_charge
ptcls.ptclList[7,n_ions:] = electron_mass
|
Create initial state for UNP from lattice simulation.import uci.BorisUpdater as BorisUpdater
import uci.Ptcls as Ptcls
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Sr88 ions.
atomic_unit = 1.66053892e-27
ion_mass = 87.9056 * atomic_unit
# initialize particles
n_wells = 100
n_ions = n_wells**3
ptcls = Ptcls.Ptcls()
ptcls.set_nptcls(2 * n_ions)
# ions
lattice_spacing = 1.0e-6
xmin = -0.5 * (n_wells - 1) * lattice_spacing
for i in range(n_ions):
ptcls.x()[i] = xmin + lattice_spacing * ((i / (n_wells**0)) % n_wells)
ptcls.y()[i] = xmin + lattice_spacing * ((i / (n_wells**1)) % n_wells)
ptcls.z()[i] = xmin + lattice_spacing * ((i / (n_wells**2)) % n_wells)
ptcls.ptclList[3:6,:n_ions] = 0
ptcls.ptclList[6,:n_ions] = fund_charge
ptcls.ptclList[7,:n_ions] = ion_mass
#electrons
ptcls.ptclList[0:3,n_ions:] = np.random.normal(0.0, 0.5 * abs(xmin),
ptcls.ptclList[0:3,n_ions:].shape)
electron_temperature = 300.0
electron_mass = 9.10938291e-31
kB = 1.3806e-23
vThermal = np.sqrt(kB * electron_temperature / electron_mass)
ptcls.ptclList[3:6,n_ions:] = np.random.normal(0.0, vThermal,
ptcls.ptclList[3:6,n_ions:].shape)
ptcls.ptclList[6,n_ions:] = -fund_charge
ptcls.ptclList[7,n_ions:] = electron_mass
|
<commit_before><commit_msg>Create initial state for UNP from lattice simulation.<commit_after>import uci.BorisUpdater as BorisUpdater
import uci.Ptcls as Ptcls
import numpy as np
# Some helpful constants.
fund_charge = 1.602176565e-19
# Mass of Sr88 ions.
atomic_unit = 1.66053892e-27
ion_mass = 87.9056 * atomic_unit
# initialize particles
n_wells = 100
n_ions = n_wells**3
ptcls = Ptcls.Ptcls()
ptcls.set_nptcls(2 * n_ions)
# ions
lattice_spacing = 1.0e-6
xmin = -0.5 * (n_wells - 1) * lattice_spacing
for i in range(n_ions):
ptcls.x()[i] = xmin + lattice_spacing * ((i / (n_wells**0)) % n_wells)
ptcls.y()[i] = xmin + lattice_spacing * ((i / (n_wells**1)) % n_wells)
ptcls.z()[i] = xmin + lattice_spacing * ((i / (n_wells**2)) % n_wells)
ptcls.ptclList[3:6,:n_ions] = 0
ptcls.ptclList[6,:n_ions] = fund_charge
ptcls.ptclList[7,:n_ions] = ion_mass
#electrons
ptcls.ptclList[0:3,n_ions:] = np.random.normal(0.0, 0.5 * abs(xmin),
ptcls.ptclList[0:3,n_ions:].shape)
electron_temperature = 300.0
electron_mass = 9.10938291e-31
kB = 1.3806e-23
vThermal = np.sqrt(kB * electron_temperature / electron_mass)
ptcls.ptclList[3:6,n_ions:] = np.random.normal(0.0, vThermal,
ptcls.ptclList[3:6,n_ions:].shape)
ptcls.ptclList[6,n_ions:] = -fund_charge
ptcls.ptclList[7,n_ions:] = electron_mass
|
|
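The index arithmetic above relies on Python 2 integer division (i / n_wells**k truncates). A version-independent, vectorized way to build the same cubic lattice with numpy is sketched below, reusing the script's n_wells and lattice_spacing values.

import numpy as np

n_wells = 100
lattice_spacing = 1.0e-6
xmin = -0.5 * (n_wells - 1) * lattice_spacing

i = np.arange(n_wells**3)
x = xmin + lattice_spacing * (i % n_wells)
y = xmin + lattice_spacing * ((i // n_wells) % n_wells)
z = xmin + lattice_spacing * ((i // n_wells**2) % n_wells)
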
ff49b55303ef4df360add9659067ac60c700d611
|
ass3/hw3.py
|
ass3/hw3.py
|
import numpy as np
from scipy.special import comb
def hoeffding_inequality_sample_size_needed(probability, error, num_hypothesis):
return np.log(probability/2/num_hypothesis)/-2/error**2
def question_one_two_three():
return hoeffding_inequality_sample_size_needed(0.03, 0.05, 1), hoeffding_inequality_sample_size_needed(0.03, 0.05, 10), hoeffding_inequality_sample_size_needed(0.03, 0.05, 100)
ans1 = 'b'
ans2 = 'c'
ans3 = 'd'
ans4 = 'a'
ans5 = 'e'
ans5 = 'c'
# find the growth function of the two interval hypothesis set
def growth_function_two_interval(n):
return first_black(n)
def first_black(n):
total = 0
for i in range(n-3):
total += first_white(n-i-1)
return total
def first_white(n):
total = 0
for i in range(n-2):
total += second_black(n-i-1)
return total
def second_black(n):
# total = 0
# for i in range(n-1):
# total += 1
return n - 1
def growth_function_two_interval_analytical(n):
return sum([ comb(n-i-1, 2) for i in range(1, n-3+1) ])
def question_seven():
return [ (growth_function_two_interval(n), growth_function_two_interval_analytical(n)) for n in range(4, 14) ]
def main():
# print(question_one_two_three())
print(question_seven())
if __name__ == '__main__':
main()
|
Add ass3 work until question 7, Come up with both iterative and analytical growth function for the two interval problem
|
Add ass3 work until question 7, Come up with both iterative and analytical growth function for the two interval problem
|
Python
|
mit
|
zhiyanfoo/caltech-machine-learning
|
Add ass3 work until question 7, Come up with both iterative and analytical growth function for the two interval problem
|
import numpy as np
from scipy.special import comb
def hoeffding_inequality_sample_size_needed(probability, error, num_hypothesis):
return np.log(probability/2/num_hypothesis)/-2/error**2
def question_one_two_three():
return hoeffding_inequality_sample_size_needed(0.03, 0.05, 1), hoeffding_inequality_sample_size_needed(0.03, 0.05, 10), hoeffding_inequality_sample_size_needed(0.03, 0.05, 100)
ans1 = 'b'
ans2 = 'c'
ans3 = 'd'
ans4 = 'a'
ans5 = 'e'
ans5 = 'c'
# find the growth function of the two interval hypothesis set
def growth_function_two_interval(n):
return first_black(n)
def first_black(n):
total = 0
for i in range(n-3):
total += first_white(n-i-1)
return total
def first_white(n):
total = 0
for i in range(n-2):
total += second_black(n-i-1)
return total
def second_black(n):
# total = 0
# for i in range(n-1):
# total += 1
return n - 1
def growth_function_two_interval_analytical(n):
return sum([ comb(n-i-1, 2) for i in range(1, n-3+1) ])
def question_seven():
return [ (growth_function_two_interval(n), growth_function_two_interval_analytical(n)) for n in range(4, 14) ]
def main():
# print(question_one_two_three())
print(question_seven())
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add ass3 work until question 7, Come up with both iterative and analytical growth function for the two interval problem<commit_after>
|
import numpy as np
from scipy.special import comb
def hoeffding_inequality_sample_size_needed(probability, error, num_hypothesis):
return np.log(probability/2/num_hypothesis)/-2/error**2
def question_one_two_three():
return hoeffding_inequality_sample_size_needed(0.03, 0.05, 1), hoeffding_inequality_sample_size_needed(0.03, 0.05, 10), hoeffding_inequality_sample_size_needed(0.03, 0.05, 100)
ans1 = 'b'
ans2 = 'c'
ans3 = 'd'
ans4 = 'a'
ans5 = 'e'
ans5 = 'c'
# find the growth function of the two interval hypothesis set
def growth_function_two_interval(n):
return first_black(n)
def first_black(n):
total = 0
for i in range(n-3):
total += first_white(n-i-1)
return total
def first_white(n):
total = 0
for i in range(n-2):
total += second_black(n-i-1)
return total
def second_black(n):
# total = 0
# for i in range(n-1):
# total += 1
return n - 1
def growth_function_two_interval_analytical(n):
return sum([ comb(n-i-1, 2) for i in range(1, n-3+1) ])
def question_seven():
return [ (growth_function_two_interval(n), growth_function_two_interval_analytical(n)) for n in range(4, 14) ]
def main():
# print(question_one_two_three())
print(question_seven())
if __name__ == '__main__':
main()
|
Add ass3 work until question 7, Come up with both iterative and analytical growth function for the two interval problemimport numpy as np
from scipy.special import comb
def hoeffding_inequality_sample_size_needed(probability, error, num_hypothesis):
return np.log(probability/2/num_hypothesis)/-2/error**2
def question_one_two_three():
return hoeffding_inequality_sample_size_needed(0.03, 0.05, 1), hoeffding_inequality_sample_size_needed(0.03, 0.05, 10), hoeffding_inequality_sample_size_needed(0.03, 0.05, 100)
ans1 = 'b'
ans2 = 'c'
ans3 = 'd'
ans4 = 'a'
ans5 = 'e'
ans5 = 'c'
# find the growth function of the two interval hypothesis set
def growth_function_two_interval(n):
return first_black(n)
def first_black(n):
total = 0
for i in range(n-3):
total += first_white(n-i-1)
return total
def first_white(n):
total = 0
for i in range(n-2):
total += second_black(n-i-1)
return total
def second_black(n):
# total = 0
# for i in range(n-1):
# total += 1
return n - 1
def growth_function_two_interval_analytical(n):
return sum([ comb(n-i-1, 2) for i in range(1, n-3+1) ])
def question_seven():
return [ (growth_function_two_interval(n), growth_function_two_interval_analytical(n)) for n in range(4, 14) ]
def main():
# print(question_one_two_three())
print(question_seven())
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add ass3 work until question 7, Come up with both iterative and analytical growth function for the two interval problem<commit_after>import numpy as np
from scipy.special import comb
def hoeffding_inequality_sample_size_needed(probability, error, num_hypothesis):
return np.log(probability/2/num_hypothesis)/-2/error**2
def question_one_two_three():
return hoeffding_inequality_sample_size_needed(0.03, 0.05, 1), hoeffding_inequality_sample_size_needed(0.03, 0.05, 10), hoeffding_inequality_sample_size_needed(0.03, 0.05, 100)
ans1 = 'b'
ans2 = 'c'
ans3 = 'd'
ans4 = 'a'
ans5 = 'e'
ans5 = 'c'
# find the growth function of the two interval hypothesis set
def growth_function_two_interval(n):
return first_black(n)
def first_black(n):
total = 0
for i in range(n-3):
total += first_white(n-i-1)
return total
def first_white(n):
total = 0
for i in range(n-2):
total += second_black(n-i-1)
return total
def second_black(n):
# total = 0
# for i in range(n-1):
# total += 1
return n - 1
def growth_function_two_interval_analytical(n):
return sum([ comb(n-i-1, 2) for i in range(1, n-3+1) ])
def question_seven():
return [ (growth_function_two_interval(n), growth_function_two_interval_analytical(n)) for n in range(4, 14) ]
def main():
# print(question_one_two_three())
print(question_seven())
if __name__ == '__main__':
main()
|
|
f0bb66143cf8f48026beaf4a98dcc553a7f94d7f
|
tt/eqtools.py
|
tt/eqtools.py
|
"""
A module for extracting and manipulating information from Boolean equations.
"""
from tt.utils import without_spaces
eq_transform_sym_dict = {
"~" : ["not", "NOT", "~", "!"],
"&" : ["and", "AND", "&", "&&", "/\\"],
"|" : ["or", "OR", "|", "||", "\\/"]
}
def transform_eq_to_generic_schema(raw_eq):
"""Receives a user-specified Boolean equation and attempts to transform it
to the generic Python boolean schema, using keywords and, or, not.
Currently unsupported:
nand
nor
xor
xnor
"""
transformed_eq = raw_eq
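    # replace every alternative operator spelling with tt's canonical one-character symbol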
for tt_schema_sym, other_schema_sym_list in eq_transform_sym_dict.items():
for sym in other_schema_sym_list:
if sym in transformed_eq:
transformed_eq = transformed_eq.replace(sym, tt_schema_sym)
return without_spaces(transformed_eq)
def extract_expr_from_eq(eq):
pass
def extract_eq_symbols(eq):
"""Returns a list of the symbols in the passed Boolean equation.
All symbols are assumed to be uppercase and one character.
All other characters are discarded in the processing of the equation.
The first symbol in the list is the result of the equation.
"""
# TODO: more advanced logic in determining valid symbols
return [sym for sym in eq if sym.isalnum() and sym.isupper()]
def extract_eq_intermediates(eq):
pass
def generate_eq_inputs(symbol_list):
for sym in symbol_list:
pass
|
Add basic functionality for transforming boolean equation to tt schema
|
Add basic functionality for transforming boolean equation to tt schema
|
Python
|
mit
|
welchbj/tt,welchbj/tt,welchbj/tt
|
Add basic functionality for transforming boolean equation to tt schema
|
"""
A module for extracting and manipulating information from Boolean equations.
"""
from tt.utils import without_spaces
eq_transform_sym_dict = {
"~" : ["not", "NOT", "~", "!"],
"&" : ["and", "AND", "&", "&&", "/\\"],
"|" : ["or", "OR", "|", "||", "\\/"]
}
def transform_eq_to_generic_schema(raw_eq):
"""Receives a user-specified Boolean equation and attempts to transform it
to the generic Python boolean schema, using keywords and, or, not.
Currently unsupported:
nand
nor
xor
xnor
"""
transformed_eq = raw_eq
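    # replace every alternative operator spelling with tt's canonical one-character symbol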
for tt_schema_sym, other_schema_sym_list in eq_transform_sym_dict.items():
for sym in other_schema_sym_list:
if sym in transformed_eq:
transformed_eq = transformed_eq.replace(sym, tt_schema_sym)
return without_spaces(transformed_eq)
def extract_expr_from_eq(eq):
pass
def extract_eq_symbols(eq):
"""Returns a list of the symbols in the passed Boolean equation.
All symbols are assumed to be uppercase and one character.
All other characters are discarded in the processing of the equation.
The first symbol in the list is the result of the equation.
"""
# TODO: more advanced logic in determining valid symbols
return [sym for sym in eq if sym.isalnum() and sym.isupper()]
def extract_eq_intermediates(eq):
pass
def generate_eq_inputs(symbol_list):
for sym in symbol_list:
pass
|
<commit_before><commit_msg>Add basic functionality for transforming boolean equation to tt schema<commit_after>
|
"""
A module for extracting and manipulating information from Boolean equations.
"""
from tt.utils import without_spaces
eq_transform_sym_dict = {
"~" : ["not", "NOT", "~", "!"],
"&" : ["and", "AND", "&", "&&", "/\\"],
"|" : ["or", "OR", "|", "||", "\\/"]
}
def transform_eq_to_generic_schema(raw_eq):
"""Receives a user-specified Boolean equation and attempts to transform it
to the generic Python boolean schema, using keywords and, or, not.
Currently unsupported:
nand
nor
xor
xnor
"""
transformed_eq = raw_eq
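    # replace every alternative operator spelling with tt's canonical one-character symbol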
for tt_schema_sym, other_schema_sym_list in eq_transform_sym_dict.items():
for sym in other_schema_sym_list:
if sym in transformed_eq:
transformed_eq = transformed_eq.replace(sym, tt_schema_sym)
return without_spaces(transformed_eq)
def extract_expr_from_eq(eq):
pass
def extract_eq_symbols(eq):
"""Returns a list of the symbols in the passed Boolean equation.
All symbols are assumed to be uppercase and one character.
All other characters are discarded in the processing of the equation.
The first symbol in the list is the result of the equation.
"""
# TODO: more advanced logic in determining valid symbols
return [sym for sym in eq if sym.isalnum() and sym.isupper()]
def extract_eq_intermediates(eq):
pass
def generate_eq_inputs(symbol_list):
for sym in symbol_list:
pass
|
Add basic functionality for transforming boolean equation to tt schema"""
A module for extracting and manipulating information from Boolean equations.
"""
from tt.utils import without_spaces
eq_transform_sym_dict = {
"~" : ["not", "NOT", "~", "!"],
"&" : ["and", "AND", "&", "&&", "/\\"],
"|" : ["or", "OR", "|", "||", "\\/"]
}
def transform_eq_to_generic_schema(raw_eq):
"""Receives a user-specified Boolean equation and attempts to transform it
to the generic Python boolean schema, using keywords and, or, not.
Currently unsupported:
nand
nor
xor
xnor
"""
transformed_eq = raw_eq
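    # replace every alternative operator spelling with tt's canonical one-character symbol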
for tt_schema_sym, other_schema_sym_list in eq_transform_sym_dict.items():
for sym in other_schema_sym_list:
if sym in transformed_eq:
transformed_eq = transformed_eq.replace(sym, tt_schema_sym)
return without_spaces(transformed_eq)
def extract_expr_from_eq(eq):
pass
def extract_eq_symbols(eq):
"""Returns a list of the symbols in the passed Boolean equation.
All symbols are assumed to be uppercase and one character.
All other characters are discarded in the processing of the equation.
The first symbol in the list is the result of the equation.
"""
# TODO: more advanced logic in determining valid symbols
return [sym for sym in eq if sym.isalnum() and sym.isupper()]
def extract_eq_intermediates(eq):
pass
def generate_eq_inputs(symbol_list):
for sym in symbol_list:
pass
|
<commit_before><commit_msg>Add basic functionality for transforming boolean equation to tt schema<commit_after>"""
A module for extracting and manipulating information from Boolean equations.
"""
from tt.utils import without_spaces
eq_transform_sym_dict = {
"~" : ["not", "NOT", "~", "!"],
"&" : ["and", "AND", "&", "&&", "/\\"],
"|" : ["or", "OR", "|", "||", "\\/"]
}
def transform_eq_to_generic_schema(raw_eq):
"""Receives a user-specified Boolean equation and attempts to transform it
to the generic Python boolean schema, using keywords and, or, not.
Currently unsupported:
nand
nor
xor
xnor
"""
transformed_eq = raw_eq
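    # replace every alternative operator spelling with tt's canonical one-character symbol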
for tt_schema_sym, other_schema_sym_list in eq_transform_sym_dict.items():
for sym in other_schema_sym_list:
if sym in transformed_eq:
transformed_eq = transformed_eq.replace(sym, tt_schema_sym)
return without_spaces(transformed_eq)
def extract_expr_from_eq(eq):
pass
def extract_eq_symbols(eq):
"""Returns a list of the symbols in the passed Boolean equation.
All symbols are assumed to be uppercase and one character.
All other characters are discarded in the processing of the equation.
The first symbol in the list is the result of the equation.
"""
# TODO: more advanced logic in determining valid symbols
return [sym for sym in eq if sym.isalnum() and sym.isupper()]
def extract_eq_intermediates(eq):
pass
def generate_eq_inputs(symbol_list):
for sym in symbol_list:
pass
|
|
cfcabcafeddd851659cacd48265686e20492a657
|
scripts/get_mems.py
|
scripts/get_mems.py
|
import argparse
import os
parser = argparse.ArgumentParser(description='Collect memories from a corpus')
parser.add_argument('-c', '--corpus_dir', default='hp_corpus',
type=str, help='Path to corpus directory')
parser.add_argument('-n', '--name', default='harry potter',
type=str, help='Character name')
parser.add_argument('-m', '--mood_dir', default='mood_files',
type=str, help='Path to mood file directory')
parser.add_argument('-i', '--images', action='store_true',
help='Get images')
parser.add_argument('-s', '--save_dir', default='memories')
args = parser.parse_args()
print('Extracting memories for {} from text in directory:'.format(args.name))
print(os.path.abspath(args.corpus_dir))
print()
print('Mood files are found in directory:')
print(os.path.abspath(args.mood_dir))
print()
print('Get images is set to '+str(args.images))
print()
print('Memories will be saved to:')
print(os.path.abspath(args.save_dir))
print()
if not os.path.isdir(os.path.abspath(args.save_dir)):
os.makedirs(os.path.abspath(args.save_dir))
import pensieve
corpus = pensieve.Corpus(corpus_dir=os.path.abspath(args.corpus_dir))
corpus.gather_corpus_memories(char_name=args.name.title().split(),
save=os.path.abspath(args.save_dir),
get_img=args.images)
|
Add script to get all memories
|
Add script to get all memories
|
Python
|
apache-2.0
|
CDIPS-AI-2017/pensieve
|
Add script to get all memories
|
import argparse
import os
parser = argparse.ArgumentParser(description='Collect memories from a corpus')
parser.add_argument('-c', '--corpus_dir', default='hp_corpus',
type=str, help='Path to corpus directory')
parser.add_argument('-n', '--name', default='harry potter',
type=str, help='Character name')
parser.add_argument('-m', '--mood_dir', default='mood_files',
type=str, help='Path to mood file directory')
parser.add_argument('-i', '--images', action='store_true',
help='Get images')
parser.add_argument('-s', '--save_dir', default='memories')
args = parser.parse_args()
print('Extracting memories for {} from text in directory:'.format(args.name))
print(os.path.abspath(args.corpus_dir))
print()
print('Mood files are found in directory:')
print(os.path.abspath(args.mood_dir))
print()
print('Get images is set to '+str(args.images))
print()
print('Memories will be saved to:')
print(os.path.abspath(args.save_dir))
print()
if not os.path.isdir(os.path.abspath(args.save_dir)):
os.makedirs(os.path.abspath(args.save_dir))
import pensieve
corpus = pensieve.Corpus(corpus_dir=os.path.abspath(args.corpus_dir))
corpus.gather_corpus_memories(char_name=args.name.title().split(),
save=os.path.abspath(args.save_dir),
get_img=args.images)
|
<commit_before><commit_msg>Add script to get all memories<commit_after>
|
import argparse
import os
parser = argparse.ArgumentParser(description='Collect memories from a corpus')
parser.add_argument('-c', '--corpus_dir', default='hp_corpus',
type=str, help='Path to corpus directory')
parser.add_argument('-n', '--name', default='harry potter',
type=str, help='Character name')
parser.add_argument('-m', '--mood_dir', default='mood_files',
type=str, help='Path to mood file directory')
parser.add_argument('-i', '--images', action='store_true',
help='Get images')
parser.add_argument('-s', '--save_dir', default='memories')
args = parser.parse_args()
print('Extracting memories for {} from text in directory:'.format(args.name))
print(os.path.abspath(args.corpus_dir))
print()
print('Mood files are found in directory:')
print(os.path.abspath(args.mood_dir))
print()
print('Get images is set to '+str(args.images))
print()
print('Memories will be saved to:')
print(os.path.abspath(args.save_dir))
print()
if not os.path.isdir(os.path.abspath(args.save_dir)):
os.makedirs(os.path.abspath(args.save_dir))
import pensieve
corpus = pensieve.Corpus(corpus_dir=os.path.abspath(args.corpus_dir))
corpus.gather_corpus_memories(char_name=args.name.title().split(),
save=os.path.abspath(args.save_dir),
get_img=args.images)
|
Add script to get all memoriesimport argparse
import os
parser = argparse.ArgumentParser(description='Collect memories from a corpus')
parser.add_argument('-c', '--corpus_dir', default='hp_corpus',
type=str, help='Path to corpus directory')
parser.add_argument('-n', '--name', default='harry potter',
type=str, help='Character name')
parser.add_argument('-m', '--mood_dir', default='mood_files',
type=str, help='Path to mood file directory')
parser.add_argument('-i', '--images', action='store_true',
help='Get images')
parser.add_argument('-s', '--save_dir', default='memories')
args = parser.parse_args()
print('Extracting memories for {} from text in directory:'.format(args.name))
print(os.path.abspath(args.corpus_dir))
print()
print('Mood files are found in directory:')
print(os.path.abspath(args.mood_dir))
print()
print('Get images is set to '+str(args.images))
print()
print('Memories will be saved to:')
print(os.path.abspath(args.save_dir))
print()
if not os.path.isdir(os.path.abspath(args.save_dir)):
os.makedirs(os.path.abspath(args.save_dir))
import pensieve
corpus = pensieve.Corpus(corpus_dir=os.path.abspath(args.corpus_dir))
corpus.gather_corpus_memories(char_name=args.name.title().split(),
save=os.path.abspath(args.save_dir),
get_img=args.images)
|
<commit_before><commit_msg>Add script to get all memories<commit_after>import argparse
import os
parser = argparse.ArgumentParser(description='Collect memories from a corpus')
parser.add_argument('-c', '--corpus_dir', default='hp_corpus',
type=str, help='Path to corpus directory')
parser.add_argument('-n', '--name', default='harry potter',
type=str, help='Character name')
parser.add_argument('-m', '--mood_dir', default='mood_files',
type=str, help='Path to mood file directory')
parser.add_argument('-i', '--images', action='store_true',
help='Get images')
parser.add_argument('-s', '--save_dir', default='memories')
args = parser.parse_args()
print('Extracting memories for {} from text in directory:'.format(args.name))
print(os.path.abspath(args.corpus_dir))
print()
print('Mood files are found in directory:')
print(os.path.abspath(args.mood_dir))
print()
print('Get images is set to '+str(args.images))
print()
print('Memories will be saved to:')
print(os.path.abspath(args.save_dir))
print()
if not os.path.isdir(os.path.abspath(args.save_dir)):
os.makedirs(os.path.abspath(args.save_dir))
import pensieve
corpus = pensieve.Corpus(corpus_dir=os.path.abspath(args.corpus_dir))
corpus.gather_corpus_memories(char_name=args.name.title().split(),
save=os.path.abspath(args.save_dir),
get_img=args.images)
|
|
58bb4cb3a0974e3a433cbd903b15e95efba6acca
|
apps/splash/migrations/0006_auto_20151213_0309.py
|
apps/splash/migrations/0006_auto_20151213_0309.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('splash', '0005_auto_20150422_2236'),
]
operations = [
migrations.AlterField(
model_name='splashevent',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created'),
),
migrations.AlterField(
model_name='splashevent',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
]
|
Add migrations to models from django18 upgrade
|
Add migrations to models from django18 upgrade
|
Python
|
mit
|
dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4,dotKom/onlineweb4
|
Add migrations to models from django18 upgrade
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('splash', '0005_auto_20150422_2236'),
]
operations = [
migrations.AlterField(
model_name='splashevent',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created'),
),
migrations.AlterField(
model_name='splashevent',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
]
|
<commit_before><commit_msg>Add migrations to models from django18 upgrade<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('splash', '0005_auto_20150422_2236'),
]
operations = [
migrations.AlterField(
model_name='splashevent',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created'),
),
migrations.AlterField(
model_name='splashevent',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
]
|
Add migrations to models from django18 upgrade# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('splash', '0005_auto_20150422_2236'),
]
operations = [
migrations.AlterField(
model_name='splashevent',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created'),
),
migrations.AlterField(
model_name='splashevent',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
]
|
<commit_before><commit_msg>Add migrations to models from django18 upgrade<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('splash', '0005_auto_20150422_2236'),
]
operations = [
migrations.AlterField(
model_name='splashevent',
name='created',
field=django_extensions.db.fields.CreationDateTimeField(auto_now_add=True, verbose_name='created'),
),
migrations.AlterField(
model_name='splashevent',
name='modified',
field=django_extensions.db.fields.ModificationDateTimeField(auto_now=True, verbose_name='modified'),
),
]
|
|
868b4edfbf85aaee096021580146419904476e7d
|
snapshot_archive.py
|
snapshot_archive.py
|
#!/usr/bin/env python
"""
Crawl the SOLr indexes to get all dataset documents for a particular project
and extract enough information to create a snapshot of the current state.
Querying SOLr directly should work better than via the esgf search api.
Currently this script assumes dataset versions are not tampered with and that
it is sufficient to extract these values from each dataset:
1. instance_id
2. data_node
3. index_node
4. size
5. replica (True or False)
6. timestamp
"""
import sys
from xml.etree import ElementTree as ET
import urllib2
import json
DUMP_PROPERTIES = ['instance_id', 'data_node', 'index_node', 'size',
'replica', 'timestamp']
SOLR_CORE = 'datasets'
BATCH_SIZE = 500
SHARDS_XML = '/esg/config/esgf_shards_static.xml'
ESGF_WHITELIST_NS = "http://www.esgf.org/whitelist"
def get_shards(shards_file=SHARDS_XML):
shards_xml = ET.parse(open(shards_file))
for elem in shards_xml.findall('.//{%s}value' % ESGF_WHITELIST_NS):
yield elem.text
def make_query(shard, core, project, properties, start, rows=BATCH_SIZE):
url = ('http://{shard}/{core}/select?'
'q=project:{project}&fl={properties}'
'&wt=json&start={start}&rows={rows}'.format(
core=core,
shard=shard,
project=project,
properties=','.join(properties),
rows=rows, start=start))
return url
def iter_docs(shard, project, properties, rows=BATCH_SIZE):
start = 0
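    # page through the results `rows` documents at a time until `start` passes numFound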
while 1:
url = make_query(shard, SOLR_CORE, project, properties, start, rows)
response = urllib2.urlopen(url)
resp_json = json.load(response)
for doc in resp_json['response']['docs']:
yield doc
num_found = resp_json['response']['numFound']
if start == 0:
print '### Num found = %s' % num_found
start += rows
if start > num_found:
return
def main(argv=sys.argv):
project, outfile = argv[1:]
shards = get_shards()
# Override for debug
#shards = ['localhost:8984/solr']
with open(outfile, 'w') as fh:
for shard in shards:
print '\n## Querying shard %s' % shard
print >>fh, '#', '\t'.join(DUMP_PROPERTIES)
for i, result in enumerate(iter_docs(shard, project, DUMP_PROPERTIES)):
print >>fh, '\t'.join(str(result[x]) for x in DUMP_PROPERTIES)
if i % BATCH_SIZE == 0:
print '[%d]' % (i, ),
sys.stdout.flush()
if __name__ == '__main__':
main()
|
Add script for extracting SOLr data
|
Add script for extracting SOLr data
|
Python
|
bsd-3-clause
|
stephenpascoe/esgf-analytics
|
Add script for extracting SOLr data
|
#!/usr/bin/env python
"""
Crawl the SOLr indexes to get all dataset documents for a particular project
and extract enough information to create a snapshot of the current state.
Querying SOLr directly should work better than via the esgf search api.
Currently this script assumes dataset versions are not tampered with and that
it is sufficient to extract these values from each dataset:
1. instance_id
2. data_node
3. index_node
4. size
5. replica (True or False)
6. timestamp
"""
import sys
from xml.etree import ElementTree as ET
import urllib2
import json
DUMP_PROPERTIES = ['instance_id', 'data_node', 'index_node', 'size',
'replica', 'timestamp']
SOLR_CORE = 'datasets'
BATCH_SIZE = 500
SHARDS_XML = '/esg/config/esgf_shards_static.xml'
ESGF_WHITELIST_NS = "http://www.esgf.org/whitelist"
def get_shards(shards_file=SHARDS_XML):
shards_xml = ET.parse(open(shards_file))
for elem in shards_xml.findall('.//{%s}value' % ESGF_WHITELIST_NS):
yield elem.text
def make_query(shard, core, project, properties, start, rows=BATCH_SIZE):
url = ('http://{shard}/{core}/select?'
'q=project:{project}&fl={properties}'
'&wt=json&start={start}&rows={rows}'.format(
core=core,
shard=shard,
project=project,
properties=','.join(properties),
rows=rows, start=start))
return url
def iter_docs(shard, project, properties, rows=BATCH_SIZE):
start = 0
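    # page through the results `rows` documents at a time until `start` passes numFound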
while 1:
url = make_query(shard, SOLR_CORE, project, properties, start, rows)
response = urllib2.urlopen(url)
resp_json = json.load(response)
for doc in resp_json['response']['docs']:
yield doc
num_found = resp_json['response']['numFound']
if start == 0:
print '### Num found = %s' % num_found
start += rows
if start > num_found:
return
def main(argv=sys.argv):
project, outfile = argv[1:]
shards = get_shards()
# Override for debug
#shards = ['localhost:8984/solr']
with open(outfile, 'w') as fh:
for shard in shards:
print '\n## Querying shard %s' % shard
print >>fh, '#', '\t'.join(DUMP_PROPERTIES)
for i, result in enumerate(iter_docs(shard, project, DUMP_PROPERTIES)):
print >>fh, '\t'.join(str(result[x]) for x in DUMP_PROPERTIES)
if i % BATCH_SIZE == 0:
print '[%d]' % (i, ),
sys.stdout.flush()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for extracting SOLr data<commit_after>
|
#!/usr/bin/env python
"""
Crawl the SOLr indexes to get all dataset documents for a particular project
and extract enough information to create a snapshot of the current state.
Querying SOLr directly should work better than via the esgf search api.
Currently this script assumes dataset versions are not tampered with and that
it is sufficient to extract these values from each dataset:
1. instance_id
2. data_node
3. index_node
4. size
5. replica (True or False)
6. timestamp
"""
import sys
from xml.etree import ElementTree as ET
import urllib2
import json
DUMP_PROPERTIES = ['instance_id', 'data_node', 'index_node', 'size',
'replica', 'timestamp']
SOLR_CORE = 'datasets'
BATCH_SIZE = 500
SHARDS_XML = '/esg/config/esgf_shards_static.xml'
ESGF_WHITELIST_NS = "http://www.esgf.org/whitelist"
def get_shards(shards_file=SHARDS_XML):
shards_xml = ET.parse(open(shards_file))
for elem in shards_xml.findall('.//{%s}value' % ESGF_WHITELIST_NS):
yield elem.text
def make_query(shard, core, project, properties, start, rows=BATCH_SIZE):
url = ('http://{shard}/{core}/select?'
'q=project:{project}&fl={properties}'
'&wt=json&start={start}&rows={rows}'.format(
core=core,
shard=shard,
project=project,
properties=','.join(properties),
rows=rows, start=start))
return url
def iter_docs(shard, project, properties, rows=BATCH_SIZE):
start = 0
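    # page through the results `rows` documents at a time until `start` passes numFound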
while 1:
url = make_query(shard, SOLR_CORE, project, properties, start, rows)
response = urllib2.urlopen(url)
resp_json = json.load(response)
for doc in resp_json['response']['docs']:
yield doc
num_found = resp_json['response']['numFound']
if start == 0:
print '### Num found = %s' % num_found
start += rows
if start > num_found:
return
def main(argv=sys.argv):
project, outfile = argv[1:]
shards = get_shards()
# Override for debug
#shards = ['localhost:8984/solr']
with open(outfile, 'w') as fh:
for shard in shards:
print '\n## Querying shard %s' % shard
print >>fh, '#', '\t'.join(DUMP_PROPERTIES)
for i, result in enumerate(iter_docs(shard, project, DUMP_PROPERTIES)):
print >>fh, '\t'.join(str(result[x]) for x in DUMP_PROPERTIES)
if i % BATCH_SIZE == 0:
print '[%d]' % (i, ),
sys.stdout.flush()
if __name__ == '__main__':
main()
|
Add script for extracting SOLr data#!/usr/bin/env python
"""
Crawl the SOLr indexes to get all dataset documents for a particular project
and extract enough information to create a snapshot of the current state.
Querying SOLr directly should work better than via the esgf search api.
Currently this script assumes dataset versions are not tampered with and that
it is sufficient to extract these values from each dataset:
1. instance_id
2. data_node
3. index_node
4. size
5. replica (True or False)
6. timestamp
"""
import sys
from xml.etree import ElementTree as ET
import urllib2
import json
DUMP_PROPERTIES = ['instance_id', 'data_node', 'index_node', 'size',
'replica', 'timestamp']
SOLR_CORE = 'datasets'
BATCH_SIZE = 500
SHARDS_XML = '/esg/config/esgf_shards_static.xml'
ESGF_WHITELIST_NS = "http://www.esgf.org/whitelist"
def get_shards(shards_file=SHARDS_XML):
shards_xml = ET.parse(open(shards_file))
for elem in shards_xml.findall('.//{%s}value' % ESGF_WHITELIST_NS):
yield elem.text
def make_query(shard, core, project, properties, start, rows=BATCH_SIZE):
url = ('http://{shard}/{core}/select?'
'q=project:{project}&fl={properties}'
'&wt=json&start={start}&rows={rows}'.format(
core=core,
shard=shard,
project=project,
properties=','.join(properties),
rows=rows, start=start))
return url
def iter_docs(shard, project, properties, rows=BATCH_SIZE):
start = 0
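    # page through the results `rows` documents at a time until `start` passes numFound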
while 1:
url = make_query(shard, SOLR_CORE, project, properties, start, rows)
response = urllib2.urlopen(url)
resp_json = json.load(response)
for doc in resp_json['response']['docs']:
yield doc
num_found = resp_json['response']['numFound']
if start == 0:
print '### Num found = %s' % num_found
start += rows
if start > num_found:
return
def main(argv=sys.argv):
project, outfile = argv[1:]
shards = get_shards()
# Override for debug
#shards = ['localhost:8984/solr']
with open(outfile, 'w') as fh:
for shard in shards:
print '\n## Querying shard %s' % shard
print >>fh, '#', '\t'.join(DUMP_PROPERTIES)
for i, result in enumerate(iter_docs(shard, project, DUMP_PROPERTIES)):
print >>fh, '\t'.join(str(result[x]) for x in DUMP_PROPERTIES)
if i % BATCH_SIZE == 0:
print '[%d]' % (i, ),
sys.stdout.flush()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for extracting SOLr data<commit_after>#!/usr/bin/env python
"""
Crawl the SOLr indexes to get all dataset documents for a particular project
and extract enough information to create a snapshot of the current state.
Querying SOLr directly should work better than via the esgf search api.
Currently this script assumes dataset versions are not tampered with and that
it is sufficient to extract these values from each dataset:
1. instance_id
2. data_node
3. index_node
4. size
5. replica (True or False)
6. timestamp
"""
import sys
from xml.etree import ElementTree as ET
import urllib2
import json
DUMP_PROPERTIES = ['instance_id', 'data_node', 'index_node', 'size',
'replica', 'timestamp']
SOLR_CORE = 'datasets'
BATCH_SIZE = 500
SHARDS_XML = '/esg/config/esgf_shards_static.xml'
ESGF_WHITELIST_NS = "http://www.esgf.org/whitelist"
def get_shards(shards_file=SHARDS_XML):
shards_xml = ET.parse(open(shards_file))
for elem in shards_xml.findall('.//{%s}value' % ESGF_WHITELIST_NS):
yield elem.text
def make_query(shard, core, project, properties, start, rows=BATCH_SIZE):
url = ('http://{shard}/{core}/select?'
'q=project:{project}&fl={properties}'
'&wt=json&start={start}&rows={rows}'.format(
core=core,
shard=shard,
project=project,
properties=','.join(properties),
rows=rows, start=start))
return url
def iter_docs(shard, project, properties, rows=BATCH_SIZE):
start = 0
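    # page through the results `rows` documents at a time until `start` passes numFound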
while 1:
url = make_query(shard, SOLR_CORE, project, properties, start, rows)
response = urllib2.urlopen(url)
resp_json = json.load(response)
for doc in resp_json['response']['docs']:
yield doc
num_found = resp_json['response']['numFound']
if start == 0:
print '### Num found = %s' % num_found
start += rows
if start > num_found:
return
def main(argv=sys.argv):
project, outfile = argv[1:]
shards = get_shards()
# Override for debug
#shards = ['localhost:8984/solr']
with open(outfile, 'w') as fh:
for shard in shards:
print '\n## Querying shard %s' % shard
print >>fh, '#', '\t'.join(DUMP_PROPERTIES)
for i, result in enumerate(iter_docs(shard, project, DUMP_PROPERTIES)):
print >>fh, '\t'.join(str(result[x]) for x in DUMP_PROPERTIES)
if i % BATCH_SIZE == 0:
print '[%d]' % (i, ),
sys.stdout.flush()
if __name__ == '__main__':
main()
|
|
fbaa823a58d20e6a6755382139d66bb962b4f181
|
fix_tags.py
|
fix_tags.py
|
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, word, speaker_turn, note
import argparse
import os
import re
import sys
if __name__ == '__main__':
folia = '/home/jvdzwaan/data/embem-annotatie/vinc001pefr02_01.xml'
tag = '/home/jvdzwaan/data/embem-annotatie-tag-fix/vinc001pefr02/vinc001pefr02_01__act-01.tag'
#tag = '/home/jvdzwaan/data/kaf2folia/vinc001pefr02/vinc001pefr02_01__act-01.tag'
out_dir = '/home/jvdzwaan/data/embem-annotatie-tag-fix/'
d, file_name = os.path.split(tag)
out_file = os.path.join(out_dir, file_name)
print 'Saving file to', out_file
# read the tag file into memory
with codecs.open(tag, 'rb', 'utf-8') as f:
tag_lines = f.readlines()
print 'Found {} tags'.format(len(tag_lines))
tags = iter(tag_lines)
curr_tag = None
# word ids that start with lg-added must be replaced
reg = r'^lg-added-\d+'
word_tag = '{http://ilk.uvt.nl/folia}w'
# Load folia document
context = etree.iterparse(folia, events=('end',), tag=word_tag)
# open file for output
with codecs.open(out_file, 'wb', 'utf-8') as f:
for event, elem in context:
if not curr_tag:
try:
curr_tag = tags.next()
parts = curr_tag.split('\t')
tag_id = parts[0]
tag_word = parts[1]
print 'looking for', tag_id
except StopIteration:
sys.exit()
if event == 'end':
word_xml = BeautifulSoup(etree.tostring(elem), 'xml')
w = word_xml.find(word)
w_id = w.attrs.get('xml:id')
w_word = w.t.string
match_part = re.sub(reg, '', tag_id)
#if re.match(reg, tag_id):
# print 'tag_id matches regex', tag_id, w_id
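            # a word matches when its id ends with tag_id minus the 'lg-added-<n>' prefix and its text equals the tag's word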
if w_id.endswith(match_part) and w_word == tag_word:
print 'found match:'
print w_id, w_word
print tag_id, tag_word
#print '{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8'))
f.write('{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8')).decode('utf-8'))
curr_tag = None
|
Add a script that fixes tag files
|
Add a script that fixes tag files
The kaf files used for annotation were generated using folia files
that contain strange ids (starting with lg-added). Unfortunately, these
folia files were not saved. Therefore, the word ids in the kaf files
must be updated to the word ids in the new folia files. This script does
that for hardcoded input.
The next step is to turn these parameters into command line arguments
and to call this script with a bash script that runs the fix_tags script
for all tag files in a directory.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add a script that fixes tag files
The kaf files used for annotation were generated using folia files
that contain strange ids (starting with lg-added). Unfortunately, these
folia files were not saved. Therefore, the word ids in the kaf files
must be updated to the word ids in the new folia files. This script does
that for hardcoded input.
The next step is to turn these parameters into command line arguments
and to call this script with a bash script that runs the fix_tags script
for all tag files in a directory.
|
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, word, speaker_turn, note
import argparse
import os
import re
import sys
if __name__ == '__main__':
folia = '/home/jvdzwaan/data/embem-annotatie/vinc001pefr02_01.xml'
tag = '/home/jvdzwaan/data/embem-annotatie-tag-fix/vinc001pefr02/vinc001pefr02_01__act-01.tag'
#tag = '/home/jvdzwaan/data/kaf2folia/vinc001pefr02/vinc001pefr02_01__act-01.tag'
out_dir = '/home/jvdzwaan/data/embem-annotatie-tag-fix/'
d, file_name = os.path.split(tag)
out_file = os.path.join(out_dir, file_name)
print 'Saving file to', out_file
# read the tag file into memory
with codecs.open(tag, 'rb', 'utf-8') as f:
tag_lines = f.readlines()
print 'Found {} tags'.format(len(tag_lines))
tags = iter(tag_lines)
curr_tag = None
# word ids that start with lg-added must be replaced
reg = r'^lg-added-\d+'
word_tag = '{http://ilk.uvt.nl/folia}w'
# Load folia document
context = etree.iterparse(folia, events=('end',), tag=word_tag)
# open file for output
with codecs.open(out_file, 'wb', 'utf-8') as f:
for event, elem in context:
if not curr_tag:
try:
curr_tag = tags.next()
parts = curr_tag.split('\t')
tag_id = parts[0]
tag_word = parts[1]
print 'looking for', tag_id
except StopIteration:
sys.exit()
if event == 'end':
word_xml = BeautifulSoup(etree.tostring(elem), 'xml')
w = word_xml.find(word)
w_id = w.attrs.get('xml:id')
w_word = w.t.string
match_part = re.sub(reg, '', tag_id)
#if re.match(reg, tag_id):
# print 'tag_id matches regex', tag_id, w_id
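            # a word matches when its id ends with tag_id minus the 'lg-added-<n>' prefix and its text equals the tag's word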
if w_id.endswith(match_part) and w_word == tag_word:
print 'found match:'
print w_id, w_word
print tag_id, tag_word
#print '{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8'))
f.write('{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8')).decode('utf-8'))
curr_tag = None
|
<commit_before><commit_msg>Add a script that fixes tag files
The kaf files used for annotation were generated using folia files
that contain strange ids (starting with lg-added). Unfortunately, these
folia files were not saved. Therefore, the word ids in the kaf files
must be updated to the word ids in the new folia files. This script does
that for hardcoded input.
The next step is to turn these parameters into command line arguments
and to call this script with a bash script that runs the fix_tags script
for all tag files in a directory.<commit_after>
|
import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, word, speaker_turn, note
import argparse
import os
import re
import sys
if __name__ == '__main__':
folia = '/home/jvdzwaan/data/embem-annotatie/vinc001pefr02_01.xml'
tag = '/home/jvdzwaan/data/embem-annotatie-tag-fix/vinc001pefr02/vinc001pefr02_01__act-01.tag'
#tag = '/home/jvdzwaan/data/kaf2folia/vinc001pefr02/vinc001pefr02_01__act-01.tag'
out_dir = '/home/jvdzwaan/data/embem-annotatie-tag-fix/'
d, file_name = os.path.split(tag)
out_file = os.path.join(out_dir, file_name)
print 'Saving file to', out_file
# read the tag file into memory
with codecs.open(tag, 'rb', 'utf-8') as f:
tag_lines = f.readlines()
print 'Found {} tags'.format(len(tag_lines))
tags = iter(tag_lines)
curr_tag = None
# word ids that start with lg-added must be replaced
reg = r'^lg-added-\d+'
word_tag = '{http://ilk.uvt.nl/folia}w'
# Load folia document
context = etree.iterparse(folia, events=('end',), tag=word_tag)
# open file for output
with codecs.open(out_file, 'wb', 'utf-8') as f:
for event, elem in context:
if not curr_tag:
try:
curr_tag = tags.next()
parts = curr_tag.split('\t')
tag_id = parts[0]
tag_word = parts[1]
print 'looking for', tag_id
except StopIteration:
sys.exit()
if event == 'end':
word_xml = BeautifulSoup(etree.tostring(elem), 'xml')
w = word_xml.find(word)
w_id = w.attrs.get('xml:id')
w_word = w.t.string
match_part = re.sub(reg, '', tag_id)
#if re.match(reg, tag_id):
# print 'tag_id matches regex', tag_id, w_id
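            # a word matches when its id ends with tag_id minus the 'lg-added-<n>' prefix and its text equals the tag's word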
if w_id.endswith(match_part) and w_word == tag_word:
print 'found match:'
print w_id, w_word
print tag_id, tag_word
#print '{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8'))
f.write('{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8')).decode('utf-8'))
curr_tag = None
|
Add a script that fixes tag files
The kaf files used for annotation were generated using folia files
that contain strange ids (starting with lg-added). Unfortunately, these
folia files were not saved. Therefore, the word ids in the kaf files
must be updated to the word ids in the new folia files. This script does
that for hardcoded input.
The next step is to turn these parameters into command line arguments
and to call this script with a bash script that runs the fix_tags script
for all tag files in a directory.import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, word, speaker_turn, note
import argparse
import os
import re
import sys
if __name__ == '__main__':
folia = '/home/jvdzwaan/data/embem-annotatie/vinc001pefr02_01.xml'
tag = '/home/jvdzwaan/data/embem-annotatie-tag-fix/vinc001pefr02/vinc001pefr02_01__act-01.tag'
#tag = '/home/jvdzwaan/data/kaf2folia/vinc001pefr02/vinc001pefr02_01__act-01.tag'
out_dir = '/home/jvdzwaan/data/embem-annotatie-tag-fix/'
d, file_name = os.path.split(tag)
out_file = os.path.join(out_dir, file_name)
print 'Saving file to', out_file
# read the tag file into memory
with codecs.open(tag, 'rb', 'utf-8') as f:
tag_lines = f.readlines()
print 'Found {} tags'.format(len(tag_lines))
tags = iter(tag_lines)
curr_tag = None
# word ids that start with lg-added must be replaced
reg = r'^lg-added-\d+'
word_tag = '{http://ilk.uvt.nl/folia}w'
# Load folia document
context = etree.iterparse(folia, events=('end',), tag=word_tag)
# open file for output
with codecs.open(out_file, 'wb', 'utf-8') as f:
for event, elem in context:
if not curr_tag:
try:
curr_tag = tags.next()
parts = curr_tag.split('\t')
tag_id = parts[0]
tag_word = parts[1]
print 'looking for', tag_id
except StopIteration:
sys.exit()
if event == 'end':
word_xml = BeautifulSoup(etree.tostring(elem), 'xml')
w = word_xml.find(word)
w_id = w.attrs.get('xml:id')
w_word = w.t.string
match_part = re.sub(reg, '', tag_id)
#if re.match(reg, tag_id):
# print 'tag_id matches regex', tag_id, w_id
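            # a word matches when its id ends with tag_id minus the 'lg-added-<n>' prefix and its text equals the tag's word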
if w_id.endswith(match_part) and w_word == tag_word:
print 'found match:'
print w_id, w_word
print tag_id, tag_word
#print '{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8'))
f.write('{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8')).decode('utf-8'))
curr_tag = None
|
<commit_before><commit_msg>Add a script that fixes tag files
The kaf files used for annotation were generated using folia files
that contain strange ids (starting with lg-added). Unfortunately, these
folia files were not saved. Therefore, the word ids in the kaf files
must be updated to the word ids in the new folia files. This script does
that for hardcoded input.
The next step is to turn these parameters into command line arguments
and to call this script with a bash script that runs the fix_tags script
for all tag files in a directory.<commit_after>import codecs
from lxml import etree
from bs4 import BeautifulSoup
from emotools.bs4_helpers import act, sentence, word, speaker_turn, note
import argparse
import os
import re
import sys
if __name__ == '__main__':
folia = '/home/jvdzwaan/data/embem-annotatie/vinc001pefr02_01.xml'
tag = '/home/jvdzwaan/data/embem-annotatie-tag-fix/vinc001pefr02/vinc001pefr02_01__act-01.tag'
#tag = '/home/jvdzwaan/data/kaf2folia/vinc001pefr02/vinc001pefr02_01__act-01.tag'
out_dir = '/home/jvdzwaan/data/embem-annotatie-tag-fix/'
d, file_name = os.path.split(tag)
out_file = os.path.join(out_dir, file_name)
print 'Saving file to', out_file
# read the tag file into memory
with codecs.open(tag, 'rb', 'utf-8') as f:
tag_lines = f.readlines()
print 'Found {} tags'.format(len(tag_lines))
tags = iter(tag_lines)
curr_tag = None
# word ids that start with lg-added must be replaced
reg = r'^lg-added-\d+'
word_tag = '{http://ilk.uvt.nl/folia}w'
# Load folia document
context = etree.iterparse(folia, events=('end',), tag=word_tag)
# open file for output
with codecs.open(out_file, 'wb', 'utf-8') as f:
for event, elem in context:
if not curr_tag:
try:
curr_tag = tags.next()
parts = curr_tag.split('\t')
tag_id = parts[0]
tag_word = parts[1]
print 'looking for', tag_id
except StopIteration:
sys.exit()
if event == 'end':
word_xml = BeautifulSoup(etree.tostring(elem), 'xml')
w = word_xml.find(word)
w_id = w.attrs.get('xml:id')
w_word = w.t.string
match_part = re.sub(reg, '', tag_id)
#if re.match(reg, tag_id):
# print 'tag_id matches regex', tag_id, w_id
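            # a word matches when its id ends with tag_id minus the 'lg-added-<n>' prefix and its text equals the tag's word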
if w_id.endswith(match_part) and w_word == tag_word:
print 'found match:'
print w_id, w_word
print tag_id, tag_word
#print '{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8'))
f.write('{}\t{}'.format(w_id, '\t'.join(parts[1:]).encode('utf-8')).decode('utf-8'))
curr_tag = None
|
|
bc927060950dafdc9b1f3d401261f79b108a2bf1
|
tests/strings/string_format_d_simple.py
|
tests/strings/string_format_d_simple.py
|
a = 1.123456
b = 10
c = -30
d = 34
e = 123.456
f = 19892122
# form 0
s = "b=%d" % b
print s
# form 1
s = "b,c,d=%d+%d+%d" % (b,c,d)
print s
# form 2
#s = "b=%(b)0d and c=%(c)d and d=%(d)d" % { 'b':b,'c':c,'d':d }
print s
# width,flags
#s = "e=%020d e=%+d e=%20d e=%-20d (e=%- 20d)" % (e,e,e,e,e)
print s
|
Add a simplified test that works
|
Add a simplified test that works
|
Python
|
mit
|
qsnake/py2js,buchuki/pyjaco,qsnake/py2js,mattpap/py2js,buchuki/pyjaco,buchuki/pyjaco,chrivers/pyjaco,chrivers/pyjaco,mattpap/py2js,chrivers/pyjaco
|
Add a simplified test that works
|
a = 1.123456
b = 10
c = -30
d = 34
e = 123.456
f = 19892122
# form 0
s = "b=%d" % b
print s
# form 1
s = "b,c,d=%d+%d+%d" % (b,c,d)
print s
# form 2
#s = "b=%(b)0d and c=%(c)d and d=%(d)d" % { 'b':b,'c':c,'d':d }
print s
# width,flags
#s = "e=%020d e=%+d e=%20d e=%-20d (e=%- 20d)" % (e,e,e,e,e)
print s
|
<commit_before><commit_msg>Add a simplified test that works<commit_after>
|
a = 1.123456
b = 10
c = -30
d = 34
e = 123.456
f = 19892122
# form 0
s = "b=%d" % b
print s
# form 1
s = "b,c,d=%d+%d+%d" % (b,c,d)
print s
# form 2
#s = "b=%(b)0d and c=%(c)d and d=%(d)d" % { 'b':b,'c':c,'d':d }
print s
# width,flags
#s = "e=%020d e=%+d e=%20d e=%-20d (e=%- 20d)" % (e,e,e,e,e)
print s
|
Add a simplified test that works
a = 1.123456
b = 10
c = -30
d = 34
e = 123.456
f = 19892122
# form 0
s = "b=%d" % b
print s
# form 1
s = "b,c,d=%d+%d+%d" % (b,c,d)
print s
# form 2
#s = "b=%(b)0d and c=%(c)d and d=%(d)d" % { 'b':b,'c':c,'d':d }
print s
# width,flags
#s = "e=%020d e=%+d e=%20d e=%-20d (e=%- 20d)" % (e,e,e,e,e)
print s
|
<commit_before><commit_msg>Add a simplified test that works<commit_after>
a = 1.123456
b = 10
c = -30
d = 34
e = 123.456
f = 19892122
# form 0
s = "b=%d" % b
print s
# form 1
s = "b,c,d=%d+%d+%d" % (b,c,d)
print s
# form 2
#s = "b=%(b)0d and c=%(c)d and d=%(d)d" % { 'b':b,'c':c,'d':d }
print s
# width,flags
#s = "e=%020d e=%+d e=%20d e=%-20d (e=%- 20d)" % (e,e,e,e,e)
print s
|
|
f7aabc3a0abf0f45d55b31e01fd487d8fc7dc4b4
|
cinder/tests/functional/__init__.py
|
cinder/tests/functional/__init__.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import objects
# NOTE(e0ne): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
|
Fix cinder functional tests job
|
Fix cinder functional tests job
We have to register cinder objects to run functional tests.
Closes-Bug: #1674627
Change-Id: Ib385382878185c958ea813e324b0e88961662a3a
|
Python
|
apache-2.0
|
mahak/cinder,eharney/cinder,mahak/cinder,openstack/cinder,j-griffith/cinder,eharney/cinder,j-griffith/cinder,openstack/cinder,phenoxim/cinder,Datera/cinder,phenoxim/cinder,Datera/cinder
|
Fix cinder functional tests job
We have to register cinder objects to run functional tests.
Closes-Bug: #1674627
Change-Id: Ib385382878185c958ea813e324b0e88961662a3a
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import objects
# NOTE(e0ne): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
|
<commit_before><commit_msg>Fix cinder functional tests job
We have to register cinder objects to run functional tests.
Closes-Bug: #1674627
Change-Id: Ib385382878185c958ea813e324b0e88961662a3a<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import objects
# NOTE(e0ne): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
|
Fix cinder functional tests job
We have to register cinder objects to run functional tests.
Closes-Bug: #1674627
Change-Id: Ib385382878185c958ea813e324b0e88961662a3a# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import objects
# NOTE(e0ne): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
|
<commit_before><commit_msg>Fix cinder functional tests job
We have to register cinder objects to run functional tests.
Closes-Bug: #1674627
Change-Id: Ib385382878185c958ea813e324b0e88961662a3a<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import objects
# NOTE(e0ne): Make sure we have all of the objects loaded. We do this
# at module import time, because we may be using mock decorators in our
# tests that run at import time.
objects.register_all()
|
|
d7cdb10b11c950be7aee7e7bbddb9b164512ab8a
|
src/nodeconductor_openstack/openstack_tenant/migrations/0025_copy_certifications_from_existing_settings.py
|
src/nodeconductor_openstack/openstack_tenant/migrations/0025_copy_certifications_from_existing_settings.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
def copy_certifications_from_openstack_settings_to_openstack_tenant_settings(apps, schema_editor):
ServiceSettings = apps.get_model('structure', 'ServiceSettings')
Tenant = apps.get_model('openstack', 'Tenant')
tenant_content_type = ContentType.objects.get_for_model(Tenant)
openstack_tenant_settings = ServiceSettings.objects.filter(type='OpenStackTenant')
for settings in openstack_tenant_settings.iterator():
if settings.content_type_id != tenant_content_type.id:
continue
try:
# GenericRelation is not available in migration, thus the tenant has to be accessed directly through object_id
tenant = Tenant.objects.get(pk=settings.object_id)
except Tenant.DoesNotExist:
continue
else:
admin_settings = tenant.service_project_link.service.settings
settings.certifications.clear()
settings.certifications.add(*admin_settings.certifications.all())
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0024_add_backup_size'),
]
operations = [
migrations.RunPython(copy_certifications_from_openstack_settings_to_openstack_tenant_settings),
]
|
Migrate certification for existing settings
|
Migrate certification for existing settings
- [WAL-622]
Copy certifications from existing openstack service settings to
existing openstack_tenant service settings.
|
Python
|
mit
|
opennode/nodeconductor-openstack
|
Migrate certification for existing settings
- [WAL-622]
Copy certifications from existing openstack service settings to
existing openstack_tenant service settings.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
def copy_certifications_from_openstack_settings_to_openstack_tenant_settings(apps, schema_editor):
ServiceSettings = apps.get_model('structure', 'ServiceSettings')
Tenant = apps.get_model('openstack', 'Tenant')
tenant_content_type = ContentType.objects.get_for_model(Tenant)
openstack_tenant_settings = ServiceSettings.objects.filter(type='OpenStackTenant')
for settings in openstack_tenant_settings.iterator():
if settings.content_type_id != tenant_content_type.id:
continue
try:
# GenericRelation is not available in migration, thus the tenant has to be accessed directly through object_id
tenant = Tenant.objects.get(pk=settings.object_id)
except Tenant.DoesNotExist:
continue
else:
admin_settings = tenant.service_project_link.service.settings
settings.certifications.clear()
settings.certifications.add(*admin_settings.certifications.all())
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0024_add_backup_size'),
]
operations = [
migrations.RunPython(copy_certifications_from_openstack_settings_to_openstack_tenant_settings),
]
|
<commit_before><commit_msg>Migrate certification for existing settings
- [WAL-622]
Copy certifications from existing openstack service settings to
existing openstack_tenant service settings.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
def copy_certifications_from_openstack_settings_to_openstack_tenant_settings(apps, schema_editor):
ServiceSettings = apps.get_model('structure', 'ServiceSettings')
Tenant = apps.get_model('openstack', 'Tenant')
tenant_content_type = ContentType.objects.get_for_model(Tenant)
openstack_tenant_settings = ServiceSettings.objects.filter(type='OpenStackTenant')
for settings in openstack_tenant_settings.iterator():
if settings.content_type_id != tenant_content_type.id:
continue
try:
# GenericRelation is not available in migration, thus the tenant has to be accessed directly through object_id
tenant = Tenant.objects.get(pk=settings.object_id)
except Tenant.DoesNotExist:
continue
else:
admin_settings = tenant.service_project_link.service.settings
settings.certifications.clear()
settings.certifications.add(*admin_settings.certifications.all())
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0024_add_backup_size'),
]
operations = [
migrations.RunPython(copy_certifications_from_openstack_settings_to_openstack_tenant_settings),
]
|
Migrate certification for existing settings
- [WAL-622]
Copy certifications from existing openstack service settings to
existing openstack_tenant service settings.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
def copy_certifications_from_openstack_settings_to_openstack_tenant_settings(apps, schema_editor):
ServiceSettings = apps.get_model('structure', 'ServiceSettings')
Tenant = apps.get_model('openstack', 'Tenant')
tenant_content_type = ContentType.objects.get_for_model(Tenant)
openstack_tenant_settings = ServiceSettings.objects.filter(type='OpenStackTenant')
for settings in openstack_tenant_settings.iterator():
if settings.content_type_id != tenant_content_type.id:
continue
try:
# GenericRelation is not available in migration, thus the tenant has to be accessed directly through object_id
tenant = Tenant.objects.get(pk=settings.object_id)
except Tenant.DoesNotExist:
continue
else:
admin_settings = tenant.service_project_link.service.settings
settings.certifications.clear()
settings.certifications.add(*admin_settings.certifications.all())
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0024_add_backup_size'),
]
operations = [
migrations.RunPython(copy_certifications_from_openstack_settings_to_openstack_tenant_settings),
]
|
<commit_before><commit_msg>Migrate certification for existing settings
- [WAL-622]
Copy certifications from existing openstack service settings to
existing openstack_tenant service settings.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.db import migrations
def copy_certifications_from_openstack_settings_to_openstack_tenant_settings(apps, schema_editor):
ServiceSettings = apps.get_model('structure', 'ServiceSettings')
Tenant = apps.get_model('openstack', 'Tenant')
tenant_content_type = ContentType.objects.get_for_model(Tenant)
openstack_tenant_settings = ServiceSettings.objects.filter(type='OpenStackTenant')
for settings in openstack_tenant_settings.iterator():
if settings.content_type_id != tenant_content_type.id:
continue
try:
# GenericRelation is not available in migration, thus the tenant has to be accessed directly through object_id
tenant = Tenant.objects.get(pk=settings.object_id)
except Tenant.DoesNotExist:
continue
else:
admin_settings = tenant.service_project_link.service.settings
settings.certifications.clear()
settings.certifications.add(*admin_settings.certifications.all())
class Migration(migrations.Migration):
dependencies = [
('openstack_tenant', '0024_add_backup_size'),
]
operations = [
migrations.RunPython(copy_certifications_from_openstack_settings_to_openstack_tenant_settings),
]
|
|
921f4e12bd04fb1c6b625833c0d7353ff52a5953
|
derrida/interventions/iiif_urls.py
|
derrida/interventions/iiif_urls.py
|
from django.conf.urls import url
from .views import ManifestList, ManifestDetail, CanvasDetail, \
CanvasAutocomplete
# override the default djiffy views to require view permissions
# in order to access digitized content
urlpatterns = [
url(r'^$', ManifestList.as_view(), name='list'),
url(r'^(?P<id>[^/]+)/$', ManifestDetail.as_view(), name='manifest'),
url(r'^(?P<manifest_id>[^/]+)/canvases/(?P<id>[^/]+)/$',
CanvasDetail.as_view(), name='canvas'),
url(r'^canvas/autocomplete/$', CanvasAutocomplete.as_view(),
name='canvas-autocomplete'),
]
|
Update intervention language editing to use list passed in via context
|
Update intervention language editing to use list passed in via context
|
Python
|
apache-2.0
|
Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django,Princeton-CDH/derrida-django
|
Update intervention language editing to use list passed in via context
|
from django.conf.urls import url
from .views import ManifestList, ManifestDetail, CanvasDetail, \
CanvasAutocomplete
# override the default djiffy views to require view permissions
# in order to access digitized content
urlpatterns = [
url(r'^$', ManifestList.as_view(), name='list'),
url(r'^(?P<id>[^/]+)/$', ManifestDetail.as_view(), name='manifest'),
url(r'^(?P<manifest_id>[^/]+)/canvases/(?P<id>[^/]+)/$',
CanvasDetail.as_view(), name='canvas'),
url(r'^canvas/autocomplete/$', CanvasAutocomplete.as_view(),
name='canvas-autocomplete'),
]
|
<commit_before><commit_msg>Update intervention language editing to use list passed in via context<commit_after>
|
from django.conf.urls import url
from .views import ManifestList, ManifestDetail, CanvasDetail, \
CanvasAutocomplete
# override the default djiffy views to require view permissions
# in order to access digitized content
urlpatterns = [
url(r'^$', ManifestList.as_view(), name='list'),
url(r'^(?P<id>[^/]+)/$', ManifestDetail.as_view(), name='manifest'),
url(r'^(?P<manifest_id>[^/]+)/canvases/(?P<id>[^/]+)/$',
CanvasDetail.as_view(), name='canvas'),
url(r'^canvas/autocomplete/$', CanvasAutocomplete.as_view(),
name='canvas-autocomplete'),
]
|
Update intervention language editing to use list passed in via contextfrom django.conf.urls import url
from .views import ManifestList, ManifestDetail, CanvasDetail, \
CanvasAutocomplete
# override the default djiffy views to require view permissions
# in order to access digitized content
urlpatterns = [
url(r'^$', ManifestList.as_view(), name='list'),
url(r'^(?P<id>[^/]+)/$', ManifestDetail.as_view(), name='manifest'),
url(r'^(?P<manifest_id>[^/]+)/canvases/(?P<id>[^/]+)/$',
CanvasDetail.as_view(), name='canvas'),
url(r'^canvas/autocomplete/$', CanvasAutocomplete.as_view(),
name='canvas-autocomplete'),
]
|
<commit_before><commit_msg>Update intervention language editing to use list passed in via context<commit_after>from django.conf.urls import url
from .views import ManifestList, ManifestDetail, CanvasDetail, \
CanvasAutocomplete
# override the default djiffy views to require view permissions
# in order to access digitized content
urlpatterns = [
url(r'^$', ManifestList.as_view(), name='list'),
url(r'^(?P<id>[^/]+)/$', ManifestDetail.as_view(), name='manifest'),
url(r'^(?P<manifest_id>[^/]+)/canvases/(?P<id>[^/]+)/$',
CanvasDetail.as_view(), name='canvas'),
url(r'^canvas/autocomplete/$', CanvasAutocomplete.as_view(),
name='canvas-autocomplete'),
]
|
|
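For context on "override the default djiffy views to require view permissions" in the record above: a hedged sketch of what such an override can look like with Django's PermissionRequiredMixin. The actual view classes live in .views and are not shown in this record; the djiffy base class and the permission codename below are assumptions.

# Hypothetical permission-gated override; names are illustrative only.
from django.contrib.auth.mixins import PermissionRequiredMixin
from djiffy.views import ManifestList as DjiffyManifestList

class ManifestList(PermissionRequiredMixin, DjiffyManifestList):
    # deny anonymous access to digitized content
    permission_required = 'djiffy.view_manifest'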
da8b3841903dd6b64a0adb0e36e791cbb35a9276
|
testCompletenessOfTokensAgainstUSFM.py
|
testCompletenessOfTokensAgainstUSFM.py
|
'''
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder
'''
import codecs
import os
fileList = os.listdir("usfm\\")
tokenFile = codecs.open("ROM-REV_Full.csv", mode='r', encoding="utf-8")
tokens = tokenFile.readlines()
# Sorting the tokens in the reverse order to prevent smaller words from being replaced before the longer ones.
tokens = sorted(tokens, key=len, reverse=True)
for fil in fileList:
b=fil.split(".")
bk = b[0]
if b[1]=="usfm":
f = codecs.open("usfm\\" + fil, mode = "r", encoding = "utf-8")
fc = f.read()
f.close()
#Replacing the matching tokens with ""
#We can modify this step to produce a usfm file translated
#into a new language if the translation of the tokens is available.
for token in tokens:
fc = fc.replace(token.strip("\n"), "")
o = codecs.open("out\\" + fil, mode="w", encoding="utf-8")
o.write(fc)
o.close()
|
Test Completeness of Tokens against the USFM files
|
Test Completeness of Tokens against the USFM files
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder
|
Python
|
mit
|
beniza/learningPython
|
Test Completeness of Tokens against the USFM files
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder
|
'''
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder
'''
import codecs
import os
fileList = os.listdir("usfm\\")
tokenFile = codecs.open("ROM-REV_Full.csv", mode='r', encoding="utf-8")
tokens = tokenFile.readlines()
# Sorting the tokens in the reverse order to prevent smaller words from being replaced before the longer ones.
tokens = sorted(tokens, key=len, reverse=True)
for fil in fileList:
b=fil.split(".")
bk = b[0]
if b[1]=="usfm":
f = codecs.open("usfm\\" + fil, mode = "r", encoding = "utf-8")
fc = f.read()
f.close()
#Replacing the matching tokens with ""
#We can modify this step to produce a usfm file translated
#into a new language if the translation of the tokens is available.
for token in tokens:
fc = fc.replace(token.strip("\n"), "")
o = codecs.open("out\\" + fil, mode="w", encoding="utf-8")
o.write(fc)
o.close()
|
<commit_before><commit_msg>Test Completeness of Tokens against the USFM files
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder<commit_after>
|
'''
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder
'''
import codecs
import os
fileList = os.listdir("usfm\\")
tokenFile = codecs.open("ROM-REV_Full.csv", mode='r', encoding="utf-8")
tokens = tokenFile.readlines()
# Sorting the tokens in the reverse order to prevent smaller words from being replaced before the longer ones.
tokens = sorted(tokens, key=len, reverse=True)
for fil in fileList:
b=fil.split(".")
bk = b[0]
if b[1]=="usfm":
f = codecs.open("usfm\\" + fil, mode = "r", encoding = "utf-8")
fc = f.read()
f.close()
#Replacing the matching tokens with ""
#We can modify this step to produce a usfm file translated
#into a new language if the translation of the tokens is available.
for token in tokens:
fc = fc.replace(token.strip("\n"), "")
o = codecs.open("out\\" + fil, mode="w", encoding="utf-8")
o.write(fc)
o.close()
|
Test Completeness of Tokens against the USFM files
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder'''
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder
'''
import codecs
import os
fileList = os.listdir("usfm\\")
tokenFile = codecs.open("ROM-REV_Full.csv", mode='r', encoding="utf-8")
tokens = tokenFile.readlines()
# Sorting the tokens in the reverse order to prevent smaller words from being replaced before the longer ones.
tokens = sorted(tokens, key=len, reverse=True)
for fil in fileList:
b=fil.split(".")
bk = b[0]
if b[1]=="usfm":
f = codecs.open("usfm\\" + fil, mode = "r", encoding = "utf-8")
fc = f.read()
f.close()
#Replacing the matching tokens with ""
#We can modify this step to produce a usfm file translated
#into a new language if the translation of the tokens is available.
for token in tokens:
fc = fc.replace(token.strip("\n"), "")
o = codecs.open("out\\" + fil, mode="w", encoding="utf-8")
o.write(fc)
o.close()
|
<commit_before><commit_msg>Test Completeness of Tokens against the USFM files
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder<commit_after>'''
This program tests the completeness of the token list against the usfm
files from the tokens created. This will help us detect any words or
phrases that are not correctly represented in the token list.
Input: usfm files (under the usfm folder)
token file (in this case ROM-REV_Full.csv)
Output: usfm files after replacing the words in token file with "". This
will get stored in the 'out' folder
'''
import codecs
import os
fileList = os.listdir("usfm\\")
tokenFile = codecs.open("ROM-REV_Full.csv", mode='r', encoding="utf-8")
tokens = tokenFile.readlines()
# Sorting the tokens in the reverse order to prevent smaller words from being replaced before the longer ones.
tokens = sorted(tokens, key=len, reverse=True)
for fil in fileList:
b=fil.split(".")
bk = b[0]
if b[1]=="usfm":
f = codecs.open("usfm\\" + fil, mode = "r", encoding = "utf-8")
fc = f.read()
f.close()
#Replacing the matching tokens with ""
#We can modify this step to produce a usfm file translated
#into a new language if the translation of the tokens is available.
for token in tokens:
fc = fc.replace(token.strip("\n"), "")
o = codecs.open("out\\" + fil, mode="w", encoding="utf-8")
o.write(fc)
o.close()
|
|
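Why the script above sorts tokens longest-first before replacing: shorter tokens would otherwise punch holes in longer phrases before those phrases can match. A standalone illustration:

# Standalone demo of the longest-first replacement order used above.
tokens = ["in the beginning", "the", "beginning"]
text = "in the beginning was the word"

shortest_first = text
for t in sorted(tokens, key=len):
    shortest_first = shortest_first.replace(t, "")

longest_first = text
for t in sorted(tokens, key=len, reverse=True):
    longest_first = longest_first.replace(t, "")

print(repr(shortest_first))  # the phrase token never matches once "the" is gone
print(repr(longest_first))   # phrase removed intact; leftovers show real gaps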
7dc6ad8b5f6553d3c1b503cfc55b8fd8264e0d2e
|
tests/gallery/test_raster_transform.py
|
tests/gallery/test_raster_transform.py
|
"""
Reproject a Raster using ST_Transform
=====================================
The `ST_Transform()` function (and a few others like `ST_SnapToGrid()`) can be used on
both `Geometry` and `Raster` types. In `GeoAlchemy2`, this function is only defined for
`Geometry` as it cannot be defined for several types at the same time. Thus using this
function on `Raster` requires minor tweaking.
This example uses SQLAlchemy core queries.
"""
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from geoalchemy2 import Geometry
from geoalchemy2 import Raster
metadata = MetaData()
table = Table(
"raster_table",
metadata,
Column("id", Integer, primary_key=True),
Column("geom", Geometry("POLYGON", 4326)),
Column("rast", Raster(srid=4326)),
)
def test_transform():
# Define the transform query for both the geometry and the raster in a naive way
wrong_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154)
])
# Check the query
assert str(wrong_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"ST_AsEWKB(" # <= Note that the raster is processed as a Geometry here
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
# Define the transform query for both the geometry and the raster in the correct way
correct_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154, type_=Raster)
])
# Check the query
assert str(correct_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"raster(" # <= This time the raster is correctly processed as a Raster
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
|
Add a test in the gallery to show how to deal with function that can return both Geometry and Raster
|
Add a test in the gallery to show how to deal with function that can return both Geometry and Raster
|
Python
|
mit
|
geoalchemy/geoalchemy2
|
Add a test in the gallery to show how to deal with function that can return both Geometry and Raster
|
"""
Reproject a Raster using ST_Transform
=====================================
The `ST_Transform()` function (and a few others like `ST_SnapToGrid()`) can be used on
both `Geometry` and `Raster` types. In `GeoAlchemy2`, this function is only defined for
`Geometry` as it cannot be defined for several types at the same time. Thus using this
function on `Raster` requires minor tweaking.
This example uses SQLAlchemy core queries.
"""
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from geoalchemy2 import Geometry
from geoalchemy2 import Raster
metadata = MetaData()
table = Table(
"raster_table",
metadata,
Column("id", Integer, primary_key=True),
Column("geom", Geometry("POLYGON", 4326)),
Column("rast", Raster(srid=4326)),
)
def test_transform():
# Define the transform query for both the geometry and the raster in a naive way
wrong_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154)
])
# Check the query
assert str(wrong_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"ST_AsEWKB(" # <= Note that the raster is processed as a Geometry here
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
# Define the transform query for both the geometry and the raster in the correct way
correct_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154, type_=Raster)
])
# Check the query
assert str(correct_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"raster(" # <= This time the raster is correctly processed as a Raster
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
|
<commit_before><commit_msg>Add a test in the gallery to show how to deal with function that can return both Geometry and Raster<commit_after>
|
"""
Reproject a Raster using ST_Transform
=====================================
The `ST_Transform()` function (and a few others like `ST_SnapToGrid()`) can be used on
both `Geometry` and `Raster` types. In `GeoAlchemy2`, this function is only defined for
`Geometry` as it cannot be defined for several types at the same time. Thus using this
function on `Raster` requires minor tweaking.
This example uses SQLAlchemy core queries.
"""
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from geoalchemy2 import Geometry
from geoalchemy2 import Raster
metadata = MetaData()
table = Table(
"raster_table",
metadata,
Column("id", Integer, primary_key=True),
Column("geom", Geometry("POLYGON", 4326)),
Column("rast", Raster(srid=4326)),
)
def test_transform():
# Define the transform query for both the geometry and the raster in a naive way
wrong_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154)
])
# Check the query
assert str(wrong_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"ST_AsEWKB(" # <= Note that the raster is processed as a Geometry here
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
# Define the transform query for both the geometry and the raster in the correct way
correct_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154, type_=Raster)
])
# Check the query
assert str(correct_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"raster(" # <= This time the raster is correctly processed as a Raster
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
|
Add a test in the gallery to show how to deal with function that can return both Geometry and Raster"""
Reproject a Raster using ST_Transform
=====================================
The `ST_Transform()` function (and a few others like `ST_SnapToGrid()`) can be used on
both `Geometry` and `Raster` types. In `GeoAlchemy2`, this function is only defined for
`Geometry` as it cannot be defined for several types at the same time. Thus using this
function on `Raster` requires minor tweaking.
This example uses SQLAlchemy core queries.
"""
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from geoalchemy2 import Geometry
from geoalchemy2 import Raster
metadata = MetaData()
table = Table(
"raster_table",
metadata,
Column("id", Integer, primary_key=True),
Column("geom", Geometry("POLYGON", 4326)),
Column("rast", Raster(srid=4326)),
)
def test_transform():
# Define the transform query for both the geometry and the raster in a naive way
wrong_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154)
])
# Check the query
assert str(wrong_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"ST_AsEWKB(" # <= Note that the raster is processed as a Geometry here
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
# Define the transform query for both the geometry and the raster in the correct way
correct_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154, type_=Raster)
])
# Check the query
assert str(correct_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"raster(" # <= This time the raster is correctly processed as a Raster
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
|
<commit_before><commit_msg>Add a test in the gallery to show how to deal with function that can return both Geometry and Raster<commit_after>"""
Reproject a Raster using ST_Transform
=====================================
The `ST_Transform()` function (and a few others like `ST_SnapToGrid()`) can be used on
both `Geometry` and `Raster` types. In `GeoAlchemy2`, this function is only defined for
`Geometry` as it cannot be defined for several types at the same time. Thus using this
function on `Raster` requires minor tweaking.
This example uses SQLAlchemy core queries.
"""
from sqlalchemy import Column
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import Table
from geoalchemy2 import Geometry
from geoalchemy2 import Raster
metadata = MetaData()
table = Table(
"raster_table",
metadata,
Column("id", Integer, primary_key=True),
Column("geom", Geometry("POLYGON", 4326)),
Column("rast", Raster(srid=4326)),
)
def test_transform():
# Define the transform query for both the geometry and the raster in a naive way
wrong_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154)
])
# Check the query
assert str(wrong_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"ST_AsEWKB(" # <= Note that the raster is processed as a Geometry here
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
# Define the transform query for both the geometry and the raster in the correct way
correct_query = select([
func.ST_Transform(table.c.geom, 2154),
func.ST_Transform(table.c.rast, 2154, type_=Raster)
])
# Check the query
assert str(correct_query) == (
"SELECT "
"ST_AsEWKB("
"ST_Transform(raster_table.geom, :ST_Transform_2)) AS \"ST_Transform_1\", "
"raster(" # <= This time the raster is correctly processed as a Raster
"ST_Transform(raster_table.rast, :ST_Transform_4)) AS \"ST_Transform_3\" \n"
"FROM raster_table"
)
|
|
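The `type_=Raster` trick in the test above is generic SQLAlchemy behaviour rather than anything raster-specific: any `func.<name>(...)` call accepts `type_` to declare the SQL function's return type. A minimal sketch with a made-up function name:

# Minimal sketch: forcing the return type of a generic SQL function.
from sqlalchemy import func
from sqlalchemy.types import DateTime

expr_untyped = func.my_server_fn('x')                # type defaults to NullType
expr_typed = func.my_server_fn('x', type_=DateTime)  # result handled as DateTime
print(expr_untyped.type, expr_typed.type)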
36ae1dbecaa821715cad982fd566ce9904a71a6f
|
waterbutler/server/handlers/folders.py
|
waterbutler/server/handlers/folders.py
|
import asyncio
from waterbutler.server import utils
from waterbutler.server.handlers import core
class FolderHandler(core.BaseHandler):
ACTION_MAP = {
'GET': 'create_folder',
}
@utils.coroutine
def prepare(self):
yield from super().prepare()
@utils.coroutine
def get(self):
"""Create a folder"""
self.set_status(201)
self.write((yield from self.provider.create_folder(**self.arguments)))
|
Add the folder creating endpoint
|
Add the folder creating endpoint
|
Python
|
apache-2.0
|
rafaeldelucena/waterbutler,chrisseto/waterbutler,Johnetordoff/waterbutler,kwierman/waterbutler,Ghalko/waterbutler,felliott/waterbutler,icereval/waterbutler,rdhyee/waterbutler,cosenal/waterbutler,TomBaxter/waterbutler,RCOSDP/waterbutler,CenterForOpenScience/waterbutler,hmoco/waterbutler
|
Add the folder creating endpoint
|
import asyncio
from waterbutler.server import utils
from waterbutler.server.handlers import core
class FolderHandler(core.BaseHandler):
ACTION_MAP = {
'GET': 'create_folder',
}
@utils.coroutine
def prepare(self):
yield from super().prepare()
@utils.coroutine
def get(self):
"""Create a folder"""
self.set_status(201)
self.write((yield from self.provider.create_folder(**self.arguments)))
|
<commit_before><commit_msg>Add the folder creating endpoint<commit_after>
|
import asyncio
from waterbutler.server import utils
from waterbutler.server.handlers import core
class FolderHandler(core.BaseHandler):
ACTION_MAP = {
'GET': 'create_folder',
}
@utils.coroutine
def prepare(self):
yield from super().prepare()
@utils.coroutine
def get(self):
"""Create a folder"""
self.set_status(201)
self.write((yield from self.provider.create_folder(**self.arguments)))
|
Add the folder creating endpointimport asyncio
from waterbutler.server import utils
from waterbutler.server.handlers import core
class FolderHandler(core.BaseHandler):
ACTION_MAP = {
'GET': 'create_folder',
}
@utils.coroutine
def prepare(self):
yield from super().prepare()
@utils.coroutine
def get(self):
"""Create a folder"""
self.set_status(201)
self.write((yield from self.provider.create_folder(**self.arguments)))
|
<commit_before><commit_msg>Add the folder creating endpoint<commit_after>import asyncio
from waterbutler.server import utils
from waterbutler.server.handlers import core
class FolderHandler(core.BaseHandler):
ACTION_MAP = {
'GET': 'create_folder',
}
@utils.coroutine
def prepare(self):
yield from super().prepare()
@utils.coroutine
def get(self):
"""Create a folder"""
self.set_status(201)
self.write((yield from self.provider.create_folder(**self.arguments)))
|
|
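The `@utils.coroutine` / `yield from` style above is the pre-Python-3.5 asyncio idiom. A hedged sketch of the native-coroutine spelling of the same handler (the BaseHandler/provider wiring is assumed to match the record; this is not waterbutler's actual code):

# Hypothetical async/await rewrite of the handler body above.
from waterbutler.server.handlers import core

class FolderHandler(core.BaseHandler):
    ACTION_MAP = {
        'GET': 'create_folder',
    }

    async def prepare(self):
        await super().prepare()

    async def get(self):
        """Create a folder"""
        self.set_status(201)
        self.write(await self.provider.create_folder(**self.arguments))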
aa206b740e5046444f6ea470f66ed9116c70a472
|
opps/core/models/publisher.py
|
opps/core/models/publisher.py
|
#!/usr/bin/env python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
class PublisherManager(models.Manager):
def all_published(self):
return super(PublisherManager, self).get_query_set().filter(
date_available__lte=datetime.now(), published=True)
class Publisher(models.Model):
date_insert = models.DateTimeField(_(u"Date insert"), auto_now_add=True)
date_update = models.DateTimeField(_(u"Date update"), auto_now=True)
date_available = models.DateTimeField(_(u"Date available"),
default=datetime.now, null=True)
published = models.BooleanField(_(u"Published"), default=False)
objects = PublisherManager()
kero = models.Manager()
class Meta:
abstract = True
def is_published(self):
return self.published and self.date_available <= datetime.now()
def save(self, *args, **kwargs):
self.date_update = datetime.now()
super(Publisher, self).save(*args, **kwargs)
|
Create Publisher models: basic publication architecture
|
Create Publisher models
basic publication architecture
|
Python
|
mit
|
jeanmask/opps,opps/opps,opps/opps,jeanmask/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,williamroot/opps,jeanmask/opps,YACOWS/opps,opps/opps,williamroot/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,opps/opps
|
Create Publisher models
basic publication architecture
|
#!/usr/bin/env python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
class PublisherManager(models.Manager):
def all_published(self):
return super(PublisherManager, self).get_query_set().filter(
date_available__lte=datetime.now(), published=True)
class Publisher(models.Model):
date_insert = models.DateTimeField(_(u"Date insert"), auto_now_add=True)
date_update = models.DateTimeField(_(u"Date update"), auto_now=True)
date_available = models.DateTimeField(_(u"Date available"),
default=datetime.now, null=True)
published = models.BooleanField(_(u"Published"), default=False)
objects = PublisherManager()
kero = models.Manager()
class Meta:
abstract = True
def is_published(self):
return self.published and self.date_available <= datetime.now()
def save(self, *args, **kwargs):
self.date_update = datetime.now()
super(Publisher, self).save(*args, **kwargs)
|
<commit_before><commit_msg>Create Publisher models
basic publication architecture<commit_after>
|
#!/usr/bin/env python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
class PublisherManager(models.Manager):
def all_published(self):
return super(PublisherManager, self).get_query_set().filter(
date_available__lte=datetime.now(), published=True)
class Publisher(models.Model):
date_insert = models.DateTimeField(_(u"Date insert"), auto_now_add=True)
date_update = models.DateTimeField(_(u"Date update"), auto_now=True)
date_available = models.DateTimeField(_(u"Date available"),
default=datetime.now, null=True)
published = models.BooleanField(_(u"Published"), default=False)
objects = PublisherManager()
kero = models.Manager()
class Meta:
abstract = True
def is_published(self):
return self.published and self.date_available <= datetime.now()
def save(self, *args, **kwargs):
self.date_update = datetime.now()
super(Publisher, self).save(*args, **kwargs)
|
Create Publisher models
basic publication architecture#!/usr/bin/env python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
class PublisherManager(models.Manager):
def all_published(self):
return super(PublisherManager, self).get_query_set().filter(
date_available__lte=datetime.now(), published=True)
class Publisher(models.Model):
date_insert = models.DateTimeField(_(u"Date insert"), auto_now_add=True)
date_update = models.DateTimeField(_(u"Date update"), auto_now=True)
date_available = models.DateTimeField(_(u"Date available"),
default=datetime.now, null=True)
published = models.BooleanField(_(u"Published"), default=False)
objects = PublisherManager()
kero = models.Manager()
class Meta:
abstract = True
def is_published(self):
return self.published and self.date_available <= datetime.now()
def save(self, *args, **kwargs):
self.date_update = datetime.now()
super(Publisher, self).save(*args, **kwargs)
|
<commit_before><commit_msg>Create Publisher models
basic publication architecture<commit_after>#!/usr/bin/env python
from django.db import models
from django.utils.translation import ugettext_lazy as _
from datetime import datetime
class PublisherManager(models.Manager):
def all_published(self):
return super(PublisherManager, self).get_query_set().filter(
date_available__lte=datetime.now(), published=True)
class Publisher(models.Model):
date_insert = models.DateTimeField(_(u"Date insert"), auto_now_add=True)
date_update = models.DateTimeField(_(u"Date update"), auto_now=True)
date_available = models.DateTimeField(_(u"Date available"),
default=datetime.now, null=True)
published = models.BooleanField(_(u"Published"), default=False)
objects = PublisherMnager()
kero = models.Manager()
class Meta:
abstract = True
def is_published(self):
return self.published and self.date_available <= datetime.now()
def save(self, *args, **kwargs):
self.date_update = datetime.now()
super(Publisher, self).save(*args, **kwargs)
|
|
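What the custom manager above buys in practice: any concrete model inheriting the abstract Publisher gets an `all_published()` queryset restricted to published rows whose date_available has passed. A small usage sketch (Article is hypothetical and assumes the Publisher class from the commit above is importable):

# Hypothetical concrete model built on the abstract Publisher above.
from django.db import models

class Article(Publisher):  # Publisher as defined in the commit above
    title = models.CharField(max_length=100)

# Article.objects.all_published() -> only rows with published=True
# and date_available <= datetime.now()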
72307bb21c403c76f59ecd93aab52a3b57da4e61
|
qgis_raster_transparency.py
|
qgis_raster_transparency.py
|
from qgis.core import QgsRasterTransparency
print 'Start'
active_layer = qgis.utils.iface.mapCanvas().currentLayer()
raster_transparency = active_layer.renderer().rasterTransparency()
ltr = QgsRasterTransparency.TransparentSingleValuePixel()
tr_list = []
ltr.min = 0
ltr.max = 0
ltr.percentTransparent = 50
tr_list.append(ltr)
active_layer.renderer().rasterTransparency().setTransparentSingleValuePixelList(tr_list)
active_layer.triggerRepaint()
print 'Finish'
|
Set transparency of QGIS Raster Layer
|
Set transparency of QGIS Raster Layer
Run in QGIS Python Console.
|
Python
|
mit
|
ismailsunni/scripts
|
Set transparency of QGIS Raster Layer
Run in QGIS Python Console.
|
from qgis.core import QgsRasterTransparency
print 'Start'
active_layer = qgis.utils.iface.mapCanvas().currentLayer()
raster_transparency = active_layer.renderer().rasterTransparency()
ltr = QgsRasterTransparency.TransparentSingleValuePixel()
tr_list = []
ltr.min = 0
ltr.max = 0
ltr.percentTransparent = 50
tr_list.append(ltr)
active_layer.renderer().rasterTransparency().setTransparentSingleValuePixelList(tr_list)
active_layer.triggerRepaint()
print 'Finish'
|
<commit_before><commit_msg>Set transparency of QGIS Raster Layer
Run in QGIS Python Console.<commit_after>
|
from qgis.core import QgsRasterTransparency
print 'Start'
active_layer = qgis.utils.iface.mapCanvas().currentLayer()
raster_transparency = active_layer.renderer().rasterTransparency()
ltr = QgsRasterTransparency.TransparentSingleValuePixel()
tr_list = []
ltr.min = 0
ltr.max = 0
ltr.percentTransparent = 50
tr_list.append(ltr)
active_layer.renderer().rasterTransparency().setTransparentSingleValuePixelList(tr_list)
active_layer.triggerRepaint()
print 'Finish'
|
Set transparency of QGIS Raster Layer
Run in QGIS Python Console.from qgis.core import QgsRasterTransparency
print 'Start'
active_layer = qgis.utils.iface.mapCanvas().currentLayer()
raster_transparency = active_layer.renderer().rasterTransparency()
ltr = QgsRasterTransparency.TransparentSingleValuePixel()
tr_list = []
ltr.min = 0
ltr.max = 0
ltr.percentTransparent = 50
tr_list.append(ltr)
active_layer.renderer().rasterTransparency().setTransparentSingleValuePixelList(tr_list)
active_layer.triggerRepaint()
print 'Finish'
|
<commit_before><commit_msg>Set transparency of QGIS Raster Layer
Run in QGIS Python Console.<commit_after>from qgis.core import QgsRasterTransparency
print 'Start'
active_layer = qgis.utils.iface.mapCanvas().currentLayer()
raster_transparency = active_layer.renderer().rasterTransparency()
ltr = QgsRasterTransparency.TransparentSingleValuePixel()
tr_list = []
ltr.min = 0
ltr.max = 0
ltr.percentTransparent = 50
tr_list.append(ltr)
active_layer.renderer().rasterTransparency().setTransparentSingleValuePixelList(tr_list)
active_layer.triggerRepaint()
print 'Finish'
|
|
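The console snippet above hard-codes a single zero-value rule; the same API calls can be wrapped so several (min, max, percent) rules are applied in one pass. A sketch for the QGIS Python console, using only the PyQGIS calls already shown in the record:

# Sketch: parameterised version of the transparency snippet above.
from qgis.core import QgsRasterTransparency
import qgis.utils

def set_single_value_transparency(layer, rules):
    tr_list = []
    for vmin, vmax, percent in rules:
        ltr = QgsRasterTransparency.TransparentSingleValuePixel()
        ltr.min = vmin
        ltr.max = vmax
        ltr.percentTransparent = percent
        tr_list.append(ltr)
    layer.renderer().rasterTransparency().setTransparentSingleValuePixelList(tr_list)
    layer.triggerRepaint()

set_single_value_transparency(
    qgis.utils.iface.mapCanvas().currentLayer(), [(0, 0, 50)])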
c14944b08df56337c74994bddc22fd9b675e1417
|
py/convert-a-number-to-hexadecimal.py
|
py/convert-a-number-to-hexadecimal.py
|
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
ans = []
for i in xrange(8):
ans.append('0123456789abcdef'[num & 0xf])
num >>= 4
if num == 0:
break
return ''.join(ans[::-1])
|
Add py solution for 405. Convert a Number to Hexadecimal
|
Add py solution for 405. Convert a Number to Hexadecimal
405. Convert a Number to Hexadecimal: https://leetcode.com/problems/convert-a-number-to-hexadecimal/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 405. Convert a Number to Hexadecimal
405. Convert a Number to Hexadecimal: https://leetcode.com/problems/convert-a-number-to-hexadecimal/
|
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
ans = []
for i in xrange(8):
ans.append('0123456789abcdef'[num & 0xf])
num >>= 4
if num == 0:
break
return ''.join(ans[::-1])
|
<commit_before><commit_msg>Add py solution for 405. Convert a Number to Hexadecimal
405. Convert a Number to Hexadecimal: https://leetcode.com/problems/convert-a-number-to-hexadecimal/<commit_after>
|
class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
ans = []
for i in xrange(8):
ans.append('0123456789abcdef'[num & 0xf])
num >>= 4
if num == 0:
break
return ''.join(ans[::-1])
|
Add py solution for 405. Convert a Number to Hexadecimal
405. Convert a Number to Hexadecimal: https://leetcode.com/problems/convert-a-number-to-hexadecimal/class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
ans = []
for i in xrange(8):
ans.append('0123456789abcdef'[num & 0xf])
num >>= 4
if num == 0:
break
return ''.join(ans[::-1])
|
<commit_before><commit_msg>Add py solution for 405. Convert a Number to Hexadecimal
405. Convert a Number to Hexadecimal: https://leetcode.com/problems/convert-a-number-to-hexadecimal/<commit_after>class Solution(object):
def toHex(self, num):
"""
:type num: int
:rtype: str
"""
ans = []
for i in xrange(8):
ans.append('0123456789abcdef'[num & 0xf])
num >>= 4
if num == 0:
break
return ''.join(ans[::-1])
|
|
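The nibble-masking loop above also handles negative inputs: Python's `&` operates on the arbitrary-precision two's-complement view of the integer, and the 8-iteration cap truncates to 32 bits. A Python 3 spelling (`range` instead of `xrange`) with spot checks:

# Python 3 spelling of the same nibble-masking approach.
def to_hex(num):
    digits = []
    for _ in range(8):  # at most 8 nibbles for a 32-bit value
        digits.append('0123456789abcdef'[num & 0xf])
        num >>= 4
        if num == 0:
            break
    return ''.join(reversed(digits))

assert to_hex(26) == '1a'
assert to_hex(-1) == 'ffffffff'  # arithmetic shift keeps num < 0; the cap stops it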
5234ef22796e03d983b5681fdd288f28f61e520d
|
scripts/create_ticket_category.py
|
scripts/create_ticket_category.py
|
#!/usr/bin/env python
"""Create a ticket category for a party.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.ticketing import category_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.argument('party', callback=validate_party)
@click.argument('title')
def execute(party, title):
category_service.create_category(party.id, title)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to create a ticket category
|
Add script to create a ticket category
|
Python
|
bsd-3-clause
|
homeworkprod/byceps,m-ober/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps,homeworkprod/byceps
|
Add script to create a ticket category
|
#!/usr/bin/env python
"""Create a ticket category for a party.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.ticketing import category_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.argument('party', callback=validate_party)
@click.argument('title')
def execute(party, title):
category_service.create_category(party.id, title)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to create a ticket category<commit_after>
|
#!/usr/bin/env python
"""Create a ticket category for a party.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.ticketing import category_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.argument('party', callback=validate_party)
@click.argument('title')
def execute(party, title):
category_service.create_category(party.id, title)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to create a ticket category#!/usr/bin/env python
"""Create a ticket category for a party.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.ticketing import category_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.argument('party', callback=validate_party)
@click.argument('title')
def execute(party, title):
category_service.create_category(party.id, title)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to create a ticket category<commit_after>#!/usr/bin/env python
"""Create a ticket category for a party.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.ticketing import category_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.util import app_context
from bootstrap.validators import validate_party
@click.command()
@click.argument('party', callback=validate_party)
@click.argument('title')
def execute(party, title):
category_service.create_category(party.id, title)
click.secho('Done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
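A note on the `callback=validate_party` argument above: click callbacks take `(ctx, param, value)`, convert or validate the raw value, and either return the converted object or raise BadParameter. A hedged sketch of the shape such a validator has (the real one lives in bootstrap.validators and is not shown in this record; find_party is a stand-in):

# Hypothetical click validator; find_party is a stand-in lookup.
import click

def find_party(party_id):
    return None  # stand-in; the real lookup would query the party service

def validate_party(ctx, param, value):
    party = find_party(value)
    if party is None:
        raise click.BadParameter('unknown party: {}'.format(value))
    return party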
b518f05e2a51013e0540ecce240540e557099a03
|
olympiad/diagram.py
|
olympiad/diagram.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
# Read a four-by-four grid of zeros and ones and print, for run lengths
# two to four, how many runs of zeros and ones occur in the rows and columns.
if __name__ == "__main__":
adjacents = [[0] * 4, [0] * 4]
rows = list(map(lambda x: list(map(lambda x: int(x), input())), range(0, 4)))
for row in rows + list(zip(*rows[::-1])):
for key, group in itertools.groupby(row):
adjacents[key][len(list(group)) - 1] += 1
print("\n".join(["{0} {1}".format(adjacents[0][index], adjacents[1][index]) for index in range(1, 4)]))
|
Add solution for problem A4
|
Add solution for problem A4
|
Python
|
apache-2.0
|
fabianm/olympiad,fabianm/olympiad,fabianm/olympiad
|
Add solution for problem A4
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
# Read a four-by-four grid of zeros and ones and print, for run lengths
# two to four, how many runs of zeros and ones occur in the rows and columns.
if __name__ == "__main__":
adjacents = [[0] * 4, [0] * 4]
rows = list(map(lambda x: list(map(lambda x: int(x), input())), range(0, 4)))
for row in rows + list(zip(*rows[::-1])):
for key, group in itertools.groupby(row):
adjacents[key][len(list(group)) - 1] += 1
print("\n".join(["{0} {1}".format(adjacents[0][index], adjacents[1][index]) for index in range(1, 4)]))
|
<commit_before><commit_msg>Add solution for problem A4<commit_after>
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
# Read a four-by-four grid of zeros and ones and print, for run lengths
# two to four, how many runs of zeros and ones occur in the rows and columns.
if __name__ == "__main__":
adjacents = [[0] * 4, [0] * 4]
rows = list(map(lambda x: list(map(lambda x: int(x), input())), range(0, 4)))
for row in rows + list(zip(*rows[::-1])):
for key, group in itertools.groupby(row):
adjacents[key][len(list(group)) - 1] += 1
print("\n".join(["{0} {1}".format(adjacents[0][index], adjacents[1][index]) for index in range(1, 4)]))
|
Add solution for problem A4#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
# Read a four-by-four grid of zeros and ones and print, for run lengths
# two to four, how many runs of zeros and ones occur in the rows and columns.
if __name__ == "__main__":
adjacents = [[0] * 4, [0] * 4]
rows = list(map(lambda x: list(map(lambda x: int(x), input())), range(0, 4)))
for row in rows + list(zip(*rows[::-1])):
for key, group in itertools.groupby(row):
adjacents[key][len(list(group)) - 1] += 1
print("\n".join(["{0} {1}".format(adjacents[0][index], adjacents[1][index]) for index in range(1, 4)]))
|
<commit_before><commit_msg>Add solution for problem A4<commit_after>#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2014 Fabian M.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
# Read a four-by-four grid of zeros and ones and print, for run lengths
# two to four, how many runs of zeros and ones occur in the rows and columns.
if __name__ == "__main__":
adjacents = [[0] * 4, [0] * 4]
rows = list(map(lambda x: list(map(lambda x: int(x), input())), range(0, 4)))
for row in rows + list(zip(*rows[::-1])):
for key, group in itertools.groupby(row):
adjacents[key][len(list(group)) - 1] += 1
print("\n".join(["{0} {1}".format(adjacents[0][index], adjacents[1][index]) for index in range(1, 4)]))
|
|
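The core of the solution above is run-length counting with itertools.groupby, applied first to the rows and then to the columns produced by `zip(*rows[::-1])` (a 90-degree rotation, which preserves run lengths). The counting step in isolation:

# Isolated demo of the run-length counting used above.
import itertools

row = [1, 1, 0, 1, 1, 1]
runs = [(key, len(list(group))) for key, group in itertools.groupby(row)]
print(runs)  # [(1, 2), (0, 1), (1, 3)] -- a 2-run of ones, a lone zero, a 3-run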
cce9ae43b007e59f310fbf063df24a41508b3066
|
scripts/parglare_qtree.py
|
scripts/parglare_qtree.py
|
#!/usr/bin/env python3
# Produce LaTeX qtree output from the parglare parse trees.
from parglare import Grammar, GLRParser, NodeNonTerm
INPUT = '1 + 2 * 3 + 4'
grammar = r'''
E: E '+' E
| E '*' E
| '(' E ')'
| number;
terminals
number: /\d+/;
'''
g = Grammar.from_string(grammar)
parser = GLRParser(g, build_tree=True)
result = parser.parse(INPUT)
def tree_str(node, depth=0):
indent = ' ' * depth
if isinstance(node, NodeNonTerm):
s = '\n{}[.{} {}\n{}]'.format(indent,
node.production.symbol,
''.join([tree_str(n, depth+1)
for n in node.children]),
indent)
else:
s = '\n{}[.{} ]'.format(indent, node.value)
return s
with open('qtree_out.txt', 'w') as f:
f.write('\\begin{{tabular}}{{{}}}\n'.format('c' * len(result)))
trees = '&\n'.join(['\\Tree {}'.format(tree_str(tree)) for tree in result])
f.write(trees)
|
Add script for generating LaTeX qtree descriptions
|
Add script for generating LaTeX qtree descriptions
|
Python
|
mit
|
igordejanovic/parglare,igordejanovic/parglare
|
Add script for generating LaTeX qtree descriptions
|
#!/usr/bin/env python3
# Produce LaTeX qtree output from the parglare parse trees.
from parglare import Grammar, GLRParser, NodeNonTerm
INPUT = '1 + 2 * 3 + 4'
grammar = r'''
E: E '+' E
| E '*' E
| '(' E ')'
| number;
terminals
number: /\d+/;
'''
g = Grammar.from_string(grammar)
parser = GLRParser(g, build_tree=True)
result = parser.parse(INPUT)
def tree_str(node, depth=0):
indent = ' ' * depth
if isinstance(node, NodeNonTerm):
s = '\n{}[.{} {}\n{}]'.format(indent,
node.production.symbol,
''.join([tree_str(n, depth+1)
for n in node.children]),
indent)
else:
s = '\n{}[.{} ]'.format(indent, node.value)
return s
with open('qtree_out.txt', 'w') as f:
f.write('\\begin{{tabular}}{{{}}}\n'.format('c' * len(result)))
trees = '&\n'.join(['\\Tree {}'.format(tree_str(tree)) for tree in result])
f.write(trees)
|
<commit_before><commit_msg>Add script for generating LaTeX qtree descriptions<commit_after>
|
#!/usr/bin/env python3
# Produce LaTeX qtree output from the parglare parse trees.
from parglare import Grammar, GLRParser, NodeNonTerm
INPUT = '1 + 2 * 3 + 4'
grammar = r'''
E: E '+' E
| E '*' E
| '(' E ')'
| number;
terminals
number: /\d+/;
'''
g = Grammar.from_string(grammar)
parser = GLRParser(g, build_tree=True)
result = parser.parse(INPUT)
def tree_str(node, depth=0):
indent = ' ' * depth
if isinstance(node, NodeNonTerm):
s = '\n{}[.{} {}\n{}]'.format(indent,
node.production.symbol,
''.join([tree_str(n, depth+1)
for n in node.children]),
indent)
else:
s = '\n{}[.{} ]'.format(indent, node.value)
return s
with open('qtree_out.txt', 'w') as f:
f.write('\\begin{{tabular}}{{{}}}\n'.format('c' * len(result)))
trees = '&\n'.join(['\\Tree {}'.format(tree_str(tree)) for tree in result])
f.write(trees)
|
Add script for generating LaTeX qtree descriptions#!/usr/bin/env python3
# Produce LaTeX qtree output from the parglare parse trees.
from parglare import Grammar, GLRParser, NodeNonTerm
INPUT = '1 + 2 * 3 + 4'
grammar = r'''
E: E '+' E
| E '*' E
| '(' E ')'
| number;
terminals
number: /\d+/;
'''
g = Grammar.from_string(grammar)
parser = GLRParser(g, build_tree=True)
result = parser.parse(INPUT)
def tree_str(node, depth=0):
indent = ' ' * depth
if isinstance(node, NodeNonTerm):
s = '\n{}[.{} {}\n{}]'.format(indent,
node.production.symbol,
''.join([tree_str(n, depth+1)
for n in node.children]),
indent)
else:
s = '\n{}[.{} ]'.format(indent, node.value)
return s
with open('qtree_out.txt', 'w') as f:
f.write('\\begin{{tabular}}{{{}}}\n'.format('c' * len(result)))
trees = '&\n'.join(['\\Tree {}'.format(tree_str(tree)) for tree in result])
f.write(trees)
|
<commit_before><commit_msg>Add script for generating LaTeX qtree descriptions<commit_after>#!/usr/bin/env python3
# Produce LaTeX qtree output from the parglare parse trees.
from parglare import Grammar, GLRParser, NodeNonTerm
INPUT = '1 + 2 * 3 + 4'
grammar = r'''
E: E '+' E
| E '*' E
| '(' E ')'
| number;
terminals
number: /\d+/;
'''
g = Grammar.from_string(grammar)
parser = GLRParser(g, build_tree=True)
result = parser.parse(INPUT)
def tree_str(node, depth=0):
indent = ' ' * depth
if isinstance(node, NodeNonTerm):
s = '\n{}[.{} {}\n{}]'.format(indent,
node.production.symbol,
''.join([tree_str(n, depth+1)
for n in node.children]),
indent)
else:
s = '\n{}[.{} ]'.format(indent, node.value)
return s
with open('qtree_out.txt', 'w') as f:
f.write('\\begin{{tabular}}{{{}}}\n'.format('c' * len(result)))
trees = '&\n'.join(['\\Tree {}'.format(tree_str(tree)) for tree in result])
f.write(trees)
|
|
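An escaping pitfall relevant to the tabular write call above: in a regular Python string, '\b' is the backspace character, so LaTeX commands must use doubled backslashes or raw strings. A quick demonstration:

# Why LaTeX macros need escaping in regular Python strings.
print(len('\begin'))   # 5 -- '\b' collapsed into one control character
print(len('\\begin'))  # 6 -- literal backslash followed by 'begin'
print(len(r'\begin'))  # 6 -- raw string, equivalent to the line above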
211eefa9e607e16ad04e3617c2de1156697417e2
|
tests/test_apps.py
|
tests/test_apps.py
|
from unittest import TestCase
from clean_fields.apps import CleanFieldsConfig
class CleanFieldsConfigTestCase(TestCase):
def test_name_class_attr(self):
self.assertEqual(CleanFieldsConfig.name, 'clean_fields')
|
Add unit tests for AppConfig
|
Add unit tests for AppConfig
Oh, that sweet, sweet 100% coverage. Aw yiss.
|
Python
|
mit
|
lamarmeigs/django-clean-fields
|
Add unit tests for AppConfig
Oh, that sweet, sweet 100% coverage. Aw yiss.
|
from unittest import TestCase
from clean_fields.apps import CleanFieldsConfig
class CleanFieldsConfigTestCase(TestCase):
def test_name_class_attr(self):
self.assertEqual(CleanFieldsConfig.name, 'clean_fields')
|
<commit_before><commit_msg>Add unit tests for AppConfig
Oh, that sweet, sweet 100% coverage. Aw yiss.<commit_after>
|
from unittest import TestCase
from clean_fields.apps import CleanFieldsConfig
class CleanFieldsConfigTestCase(TestCase):
def test_name_class_attr(self):
self.assertEqual(CleanFieldsConfig.name, 'clean_fields')
|
Add unit tests for AppConfig
Oh, that sweet, sweet 100% coverage. Aw yiss.from unittest import TestCase
from clean_fields.apps import CleanFieldsConfig
class CleanFieldsConfigTestCase(TestCase):
def test_name_class_attr(self):
self.assertEqual(CleanFieldsConfig.name, 'clean_fields')
|
<commit_before><commit_msg>Add unit tests for AppConfig
Oh, that sweet, sweet 100% coverage. Aw yiss.<commit_after>from unittest import TestCase
from clean_fields.apps import CleanFieldsConfig
class CleanFieldsConfigTestCase(TestCase):
def test_name_class_attr(self):
self.assertEqual(CleanFieldsConfig.name, 'clean_fields')
|
|
99f6f8ff2a1fd95ff2ea4b0205edace9b0b08df3
|
paw/tests/test_choose_algo.py
|
paw/tests/test_choose_algo.py
|
import wlgen
import paw
from .base import paw_test
class choose_algo_test(paw_test):
def test_choose_gen_wordlist(self):
self.paw = paw.Paw(algo=0)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist_iter.__code__.co_code
)
def test_choose_gen_wordlist_iter(self):
self.paw = paw.Paw(algo=1)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist.__code__.co_code
)
def test_choose_gen_words(self):
self.paw = paw.Paw(algo=2)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_words.__code__.co_code
)
|
Add tests for algorithm choices
|
Add tests for algorithm choices
The addition of these tests increases coverage to 100%.
|
Python
|
mit
|
tehw0lf/paw
|
Add tests for algorithm choices
The addition of these tests increases coverage to 100%.
|
import wlgen
import paw
from .base import paw_test
class choose_algo_test(paw_test):
def test_choose_gen_wordlist(self):
self.paw = paw.Paw(algo=0)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist_iter.__code__.co_code
)
def test_choose_gen_wordlist_iter(self):
self.paw = paw.Paw(algo=1)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist.__code__.co_code
)
def test_choose_gen_words(self):
self.paw = paw.Paw(algo=2)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_words.__code__.co_code
)
|
<commit_before><commit_msg>Add tests for algorithm choices
The addition of these tests increases coverage to 100%.<commit_after>
|
import wlgen
import paw
from .base import paw_test
class choose_algo_test(paw_test):
def test_choose_gen_wordlist(self):
self.paw = paw.Paw(algo=0)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist_iter.__code__.co_code
)
def test_choose_gen_wordlist_iter(self):
self.paw = paw.Paw(algo=1)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist.__code__.co_code
)
def test_choose_gen_words(self):
self.paw = paw.Paw(algo=2)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_words.__code__.co_code
)
|
Add tests for algorithm choices
The addition of these tests increases coverage to 100%.import wlgen
import paw
from .base import paw_test
class choose_algo_test(paw_test):
def test_choose_gen_wordlist(self):
self.paw = paw.Paw(algo=0)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist_iter.__code__.co_code
)
def test_choose_gen_wordlist_iter(self):
self.paw = paw.Paw(algo=1)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist.__code__.co_code
)
def test_choose_gen_words(self):
self.paw = paw.Paw(algo=2)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_words.__code__.co_code
)
|
<commit_before><commit_msg>Add tests for algorithm choices
The addition of these tests increases coverage to 100%.<commit_after>import wlgen
import paw
from .base import paw_test
class choose_algo_test(paw_test):
def test_choose_gen_wordlist(self):
self.paw = paw.Paw(algo=0)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist_iter.__code__.co_code
)
def test_choose_gen_wordlist_iter(self):
self.paw = paw.Paw(algo=1)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_wordlist.__code__.co_code
)
def test_choose_gen_words(self):
self.paw = paw.Paw(algo=2)
self.assertTrue(
self.paw.gen_wordlist.__code__.co_code
== wlgen.gen_words.__code__.co_code
)
|
|
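The assertions above compare `__code__.co_code`, i.e. raw bytecode, which matches functions compiled from identical source even when they are distinct objects, but it is sensitive to the Python version and to differences in constants. When one attribute is simply bound to an existing function, a plain identity check is stricter and simpler. Side by side:

# Bytecode equality vs. identity, as used in the tests above.
def f(x):
    return x + 1

g = f  # rebinding: same object

def h(x):
    return x + 1  # same source, different object

assert g is f
assert h is not f
assert h.__code__.co_code == f.__code__.co_code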
786494c6da00fae1d10b6d9190b4a8418d693576
|
methods/todd-ann.py
|
methods/todd-ann.py
|
# -*- coding: utf-8 -*-
"""
ANN based on Todd's design
"""
from pybrain.structure.connections.connection import Connection
from pybrain.datasets.sequential import SequentialDataSet
from numpy import multiply
class WeightedPartialIdentityConnection(Connection):
"""Connection which connects the i'th element from the first module's
output buffer to the i'th element of the second module's input buffer,
multiplying the output by a weight value except for nodes greater than a
given index."""
def __init__(self, weight, maxIndex, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
assert self.indim == self.outdim, \
"Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim)
self.weight = weight
self.maxIndex = maxIndex
def _forwardImplementation(self, inbuf, outbuf):
# elementwise mask: weight the inputs, zero everything past maxIndex
outbuf += multiply([i*self.weight for i in inbuf], [1]*self.maxIndex + [0]*(self.indim - self.maxIndex))
def _backwardImplementation(self, outerr, inerr, inbuf):
inerr += multiply([i*self.weight for i in outerr], [1]*self.maxIndex + [0]*(self.outdim - self.maxIndex))
pitchCount = 12
planCount = 0
def sampleSize():
return pitchCount + planCount + 1
def outputSize():
return pitchCount + 1
def makeNoteSample(pitch, newNote, plan):
sample = [0] * sampleSize()
for i in range(pitchCount):
if i == pitch:
sample[i] = 1
else:
sample[i] = 0
sample[pitchCount] = newNote
for i in range(planCount):
if i == plan or plan is None:
# If there is no plan, use all the available plans so far
sample[i+pitchCount+1] = 1
else:
sample[i+pitchCount+1] = 0
return sample
class Melody():
def __init__(self,plan):
self.pitches = []
self.newNotes = []
self.plan = plan
def addSamples(self, dataSet):
for s in range(len(self.pitches)):
dataSet.addSample(makeNoteSample(self.pitches[s], self.newNotes[s], self.plan))
def addNote(self, pitch, newNote):
self.pitches.append(pitch)
self.newNotes.append(newNote)
def makeMelodyDataSet(melodies):
seqDataSet = SequentialDataSet(sampleSize(), outputSize())
for m in melodies:
seqDataSet.newSequence()
m.addSamples(seqDataSet)
return seqDataSet
|
Add methods for Todd ANN
|
Add methods for Todd ANN
|
Python
|
mit
|
Melamoto/ML-Melody-Co-composition
|
Add methods for Todd ANN
|
# -*- coding: utf-8 -*-
"""
ANN based on Todd's design
"""
from pybrain.structure.connections.connection import Connection
from pybrain.datasets.sequential import SequentialDataSet
class WeightedPartialIdentityConnection(Connection):
"""Connection which connects the i'th element from the first module's
output buffer to the i'th element of the second module's input buffer,
multiplying the output by a weight value except for nodes greater than a
given index."""
def __init__(self, weight, maxIndex, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
assert self.indim == self.outdim, \
"Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim)
self.weight = weight
self.maxIndex = maxIndex
    def _forwardImplementation(self, inbuf, outbuf):
        # Weighted identity on the first maxIndex nodes only; nodes at or
        # beyond maxIndex are left unconnected. (The original dot() call
        # collapsed the masked product to a scalar, so slice element-wise.)
        outbuf[:self.maxIndex] += self.weight * inbuf[:self.maxIndex]
    def _backwardImplementation(self, outerr, inerr, inbuf):
        inerr[:self.maxIndex] += self.weight * outerr[:self.maxIndex]
pitchCount = 12
planCount = 0
def sampleSize():
return pitchCount + planCount + 1
def outputSize():
return pitchCount + 1
def makeNoteSample(pitch, newNote, plan):
sample = [0] * sampleSize()
for i in range(pitchCount):
if i == pitch:
sample[i] = 1
else:
sample[i] = 0
sample[pitchCount] = newNote
for i in range(planCount):
        if i == plan or plan is None:
# If there is no plan, use all the available plans so far
sample[i+pitchCount+1] = 1
else:
sample[i+pitchCount+1] = 0
return sample
class Melody():
def __init__(self,plan):
self.pitches = []
self.newNotes = []
self.plan = plan
    def addSamples(self, dataSet):
        # pybrain's SequentialDataSet.addSample needs (input, target); the
        # target here is assumed to be the next note (Todd-style next-step
        # prediction), so the final note yields no training pair.
        for s in range(len(self.pitches) - 1):
            inp = makeNoteSample(self.pitches[s], self.newNotes[s], self.plan)
            target = makeNoteSample(self.pitches[s + 1], self.newNotes[s + 1],
                                    self.plan)[:outputSize()]
            dataSet.addSample(inp, target)
def addNote(self, pitch, newNote):
self.pitches.append(pitch)
self.newNotes.append(newNote)
def makeMelodyDataSet(melodies):
seqDataSet = SequentialDataSet(sampleSize(), outputSize())
for m in melodies:
seqDataSet.newSequence()
        m.addSamples(seqDataSet)
    return seqDataSet
|
<commit_before><commit_msg>Add methods for Todd ANN<commit_after>
|
# -*- coding: utf-8 -*-
"""
ANN based on Todd's design
"""
from pybrain.structure.connections.connection import Connection
from pybrain.datasets.sequential import SequentialDataSet
class WeightedPartialIdentityConnection(Connection):
"""Connection which connects the i'th element from the first module's
output buffer to the i'th element of the second module's input buffer,
multiplying the output by a weight value except for nodes greater than a
given index."""
def __init__(self, weight, maxIndex, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
assert self.indim == self.outdim, \
"Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim)
self.weight = weight
self.maxIndex = maxIndex
    def _forwardImplementation(self, inbuf, outbuf):
        # Weighted identity on the first maxIndex nodes only; nodes at or
        # beyond maxIndex are left unconnected. (The original dot() call
        # collapsed the masked product to a scalar, so slice element-wise.)
        outbuf[:self.maxIndex] += self.weight * inbuf[:self.maxIndex]
    def _backwardImplementation(self, outerr, inerr, inbuf):
        inerr[:self.maxIndex] += self.weight * outerr[:self.maxIndex]
pitchCount = 12
planCount = 0
def sampleSize():
return pitchCount + planCount + 1
def outputSize():
return pitchCount + 1
def makeNoteSample(pitch, newNote, plan):
sample = [0] * sampleSize()
for i in range(pitchCount):
if i == pitch:
sample[i] = 1
else:
sample[i] = 0
sample[pitchCount] = newNote
for i in range(planCount):
        if i == plan or plan is None:
# If there is no plan, use all the available plans so far
sample[i+pitchCount+1] = 1
else:
sample[i+pitchCount+1] = 0
return sample
class Melody():
def __init__(self,plan):
self.pitches = []
self.newNotes = []
self.plan = plan
    def addSamples(self, dataSet):
        # pybrain's SequentialDataSet.addSample needs (input, target); the
        # target here is assumed to be the next note (Todd-style next-step
        # prediction), so the final note yields no training pair.
        for s in range(len(self.pitches) - 1):
            inp = makeNoteSample(self.pitches[s], self.newNotes[s], self.plan)
            target = makeNoteSample(self.pitches[s + 1], self.newNotes[s + 1],
                                    self.plan)[:outputSize()]
            dataSet.addSample(inp, target)
def addNote(self, pitch, newNote):
self.pitches.append(pitch)
self.newNotes.append(newNote)
def makeMelodyDataSet(melodies):
seqDataSet = SequentialDataSet(sampleSize(), outputSize())
for m in melodies:
seqDataSet.newSequence()
        m.addSamples(seqDataSet)
    return seqDataSet
|
Add methods for Todd ANN# -*- coding: utf-8 -*-
"""
ANN based on Todd's design
"""
from pybrain.structure.connections.connection import Connection
from pybrain.datasets.sequential import SequentialDataSet
class WeightedPartialIdentityConnection(Connection):
"""Connection which connects the i'th element from the first module's
output buffer to the i'th element of the second module's input buffer,
multiplying the output by a weight value except for nodes greater than a
given index."""
def __init__(self, weight, maxIndex, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
assert self.indim == self.outdim, \
"Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim)
self.weight = weight
self.maxIndex = maxIndex
    def _forwardImplementation(self, inbuf, outbuf):
        # Weighted identity on the first maxIndex nodes only; nodes at or
        # beyond maxIndex are left unconnected. (The original dot() call
        # collapsed the masked product to a scalar, so slice element-wise.)
        outbuf[:self.maxIndex] += self.weight * inbuf[:self.maxIndex]
    def _backwardImplementation(self, outerr, inerr, inbuf):
        inerr[:self.maxIndex] += self.weight * outerr[:self.maxIndex]
pitchCount = 12
planCount = 0
def sampleSize():
return pitchCount + planCount + 1
def outputSize():
return pitchCount + 1
def makeNoteSample(pitch, newNote, plan):
sample = [0] * sampleSize()
for i in range(pitchCount):
if i == pitch:
sample[i] = 1
else:
sample[i] = 0
sample[pitchCount] = newNote
for i in range(planCount):
        if i == plan or plan is None:
# If there is no plan, use all the available plans so far
sample[i+pitchCount+1] = 1
else:
sample[i+pitchCount+1] = 0
return sample
class Melody():
def __init__(self,plan):
self.pitches = []
self.newNotes = []
self.plan = plan
    def addSamples(self, dataSet):
        # pybrain's SequentialDataSet.addSample needs (input, target); the
        # target here is assumed to be the next note (Todd-style next-step
        # prediction), so the final note yields no training pair.
        for s in range(len(self.pitches) - 1):
            inp = makeNoteSample(self.pitches[s], self.newNotes[s], self.plan)
            target = makeNoteSample(self.pitches[s + 1], self.newNotes[s + 1],
                                    self.plan)[:outputSize()]
            dataSet.addSample(inp, target)
def addNote(self, pitch, newNote):
self.pitches.append(pitch)
self.newNotes.append(newNote)
def makeMelodyDataSet(melodies):
seqDataSet = SequentialDataSet(sampleSize(), outputSize())
for m in melodies:
seqDataSet.newSequence()
        m.addSamples(seqDataSet)
    return seqDataSet
|
<commit_before><commit_msg>Add methods for Todd ANN<commit_after># -*- coding: utf-8 -*-
"""
ANN based on Todd's design
"""
from pybrain.structure.connections.connection import Connection
from pybrain.datasets.sequential import SequentialDataSet
class WeightedPartialIdentityConnection(Connection):
"""Connection which connects the i'th element from the first module's
output buffer to the i'th element of the second module's input buffer,
multiplying the output by a weight value except for nodes greater than a
given index."""
def __init__(self, weight, maxIndex, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
assert self.indim == self.outdim, \
"Indim (%i) does not equal outdim (%i)" % (
self.indim, self.outdim)
self.weight = weight
self.maxIndex = maxIndex
    def _forwardImplementation(self, inbuf, outbuf):
        # Weighted identity on the first maxIndex nodes only; nodes at or
        # beyond maxIndex are left unconnected. (The original dot() call
        # collapsed the masked product to a scalar, so slice element-wise.)
        outbuf[:self.maxIndex] += self.weight * inbuf[:self.maxIndex]
    def _backwardImplementation(self, outerr, inerr, inbuf):
        inerr[:self.maxIndex] += self.weight * outerr[:self.maxIndex]
pitchCount = 12
planCount = 0
def sampleSize():
return pitchCount + planCount + 1
def outputSize():
return pitchCount + 1
def makeNoteSample(pitch, newNote, plan):
sample = [0] * sampleSize()
for i in range(pitchCount):
if i == pitch:
sample[i] = 1
else:
sample[i] = 0
sample[pitchCount] = newNote
for i in range(planCount):
        if i == plan or plan is None:
# If there is no plan, use all the available plans so far
sample[i+pitchCount+1] = 1
else:
sample[i+pitchCount+1] = 0
return sample
class Melody():
def __init__(self,plan):
self.pitches = []
self.newNotes = []
self.plan = plan
    def addSamples(self, dataSet):
        # pybrain's SequentialDataSet.addSample needs (input, target); the
        # target here is assumed to be the next note (Todd-style next-step
        # prediction), so the final note yields no training pair.
        for s in range(len(self.pitches) - 1):
            inp = makeNoteSample(self.pitches[s], self.newNotes[s], self.plan)
            target = makeNoteSample(self.pitches[s + 1], self.newNotes[s + 1],
                                    self.plan)[:outputSize()]
            dataSet.addSample(inp, target)
def addNote(self, pitch, newNote):
self.pitches.append(pitch)
self.newNotes.append(newNote)
def makeMelodyDataSet(melodies):
seqDataSet = SequentialDataSet(sampleSize(), outputSize())
for m in melodies:
seqDataSet.newSequence()
        m.addSamples(seqDataSet)
    return seqDataSet
|
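To close the record, a hedged sketch of wiring the connection above into a pybrain recurrent network in the spirit of Todd's design; the layer sizes reuse sampleSize()/outputSize() from the file, while the hidden width (8), feedback weight (0.8), and maxIndex (pitchCount) are assumptions:
from pybrain.structure import RecurrentNetwork, LinearLayer, SigmoidLayer, FullConnection
net = RecurrentNetwork()
inp = LinearLayer(sampleSize(), name='in')   # 13 units while planCount == 0
hid = SigmoidLayer(8, name='hidden')         # assumed hidden width
out = SigmoidLayer(outputSize(), name='out')
net.addInputModule(inp)
net.addModule(hid)
net.addOutputModule(out)
net.addConnection(FullConnection(inp, hid))
net.addConnection(FullConnection(hid, out))
# Decayed feedback of the pitch units only, leaving the newNote bit out of
# the loop; indim == outdim holds since both layers have 13 units here.
net.addRecurrentConnection(WeightedPartialIdentityConnection(0.8, pitchCount, out, inp))
net.sortModules()
print(net.activate(makeNoteSample(0, 1, None)))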