| commit (string, 40 chars) | old_file (string, 4–118 chars) | new_file (string, 4–118 chars) | old_contents (string, 0–2.94k chars) | new_contents (string, 1–4.43k chars) | subject (string, 15–444 chars) | message (string, 16–3.45k chars) | lang (string, 1 class) | license (string, 13 classes) | repos (string, 5–43.2k chars) | prompt (string, 17–4.58k chars) | response (string, 1–4.43k chars) | prompt_tagged (string, 58–4.62k chars) | response_tagged (string, 1–4.43k chars) | text (string, 132–7.29k chars) | text_tagged (string, 173–7.33k chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7f74911cf02c624afbb85e63dedde68cda2f53f6
|
modules/bilingual_generator/bilingual-preprocess.py
|
modules/bilingual_generator/bilingual-preprocess.py
|
import itertools
import codecs
d=[]
for entry in a.findall("ar"):
foo = [x.text.split(";") for x in entry if x.text]
if len(foo) == 2:
english,hindi = foo
english = [e.strip() for e in english ]
hindi = [h.strip() for h in hindi]
english = [e for e in english if not " " in e]
hindi = [h for h in hindi if not " " in h]
for blah in itertools.product(english , hindi):
d.append(blah)
o = codecs.open('biling', 'w', encoding='utf-8')
for tup in d:
o.write("%s %s\n" %(tup[0], tup[1]))
o.close()
|
Add script for preprocess of Universal bilingual dictionary database
|
Add script for preprocess of Universal bilingual dictionary database
|
Python
|
mit
|
KshitijKarthick/tvecs,KshitijKarthick/tvecs,KshitijKarthick/tvecs
|
Add script for preprocess of Universal bilingual dictionary database
|
import itertools
import codecs
d=[]
for entry in a.findall("ar"):
foo = [x.text.split(";") for x in entry if x.text]
if len(foo) == 2:
english,hindi = foo
english = [e.strip() for e in english ]
hindi = [h.strip() for h in hindi]
english = [e for e in english if not " " in e]
hindi = [h for h in hindi if not " " in h]
for blah in itertools.product(english , hindi):
d.append(blah)
o = codecs.open('biling', 'w', encoding='utf-8')
for tup in d:
o.write("%s %s\n" %(tup[0], tup[1]))
o.close()
|
<commit_before><commit_msg>Add script for preprocess of Universal bilingual dictionary database<commit_after>
|
import itertools
import codecs
d=[]
for entry in a.findall("ar"):
foo = [x.text.split(";") for x in entry if x.text]
if len(foo) == 2:
english,hindi = foo
english = [e.strip() for e in english ]
hindi = [h.strip() for h in hindi]
english = [e for e in english if not " " in e]
hindi = [h for h in hindi if not " " in h]
for blah in itertools.product(english , hindi):
d.append(blah)
o = codecs.open('biling', 'w', encoding='utf-8')
for tup in d:
o.write("%s %s\n" %(tup[0], tup[1]))
o.close()
|
Add script for preprocess of Universal bilingual dictionary database
import itertools
import codecs
d=[]
for entry in a.findall("ar"):
foo = [x.text.split(";") for x in entry if x.text]
if len(foo) == 2:
english,hindi = foo
english = [e.strip() for e in english ]
hindi = [h.strip() for h in hindi]
english = [e for e in english if not " " in e]
hindi = [h for h in hindi if not " " in h]
for blah in itertools.product(english , hindi):
d.append(blah)
o = codecs.open('biling', 'w', encoding='utf-8')
for tup in d:
o.write("%s %s\n" %(tup[0], tup[1]))
o.close()
|
<commit_before><commit_msg>Add script for preprocess of Universal bilingual dictionary database<commit_after>import itertools
import codecs
d=[]
for entry in a.findall("ar"):
foo = [x.text.split(";") for x in entry if x.text]
if len(foo) == 2:
english,hindi = foo
english = [e.strip() for e in english ]
hindi = [h.strip() for h in hindi]
english = [e for e in english if not " " in e]
hindi = [h for h in hindi if not " " in h]
for blah in itertools.product(english , hindi):
d.append(blah)
o = codecs.open('biling', 'w', encoding='utf-8')
for tup in d:
o.write("%s %s\n" %(tup[0], tup[1]))
o.close()
|
|
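As committed, the preprocessing script never defines `a`, so it raises a NameError; `a` is evidently the root of a parsed XML dictionary whose `<ar>` entries carry an English and a Hindi sense list. Below is a minimal runnable sketch under that assumption; the filename `dictionary.xml` and the XDXF-style structure are hypothetical, not confirmed by the commit.

```python
# A runnable sketch of the committed script. The commit never defines `a`;
# it is assumed here to be the root of a parsed XML dictionary whose <ar>
# entries hold two text nodes: ';'-separated English and Hindi senses.
# The input filename is hypothetical.
import itertools
import xml.etree.ElementTree as ET

root = ET.parse('dictionary.xml').getroot()

pairs = []
for entry in root.findall('ar'):
    senses = [node.text.split(';') for node in entry if node.text]
    if len(senses) == 2:
        english, hindi = senses
        # Trim whitespace and keep only single-word senses, as the original does.
        english = [e.strip() for e in english if ' ' not in e.strip()]
        hindi = [h.strip() for h in hindi if ' ' not in h.strip()]
        pairs.extend(itertools.product(english, hindi))

with open('biling', 'w', encoding='utf-8') as out:
    for en, hi in pairs:
        out.write('%s %s\n' % (en, hi))
```

Filtering out multi-word senses keeps the output to one word pair per line, matching the original write format.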
a652707d889b84762f68ac381aa4e89801279e90
|
src/legendary_potato/composition.py
|
src/legendary_potato/composition.py
|
"Functions to create kernels from already existing kernels."
import numpy as np
def normalize(kernel, *args, **kwargs):
"""Return the normalized version.
This corresponds to the new kernel
.. math::
K(x_1, x_2) = \frac{kernel(x_1, x_2)}{\sqrt{kernel(x_1, x_1) kernel(x_2, x_2)}}
This is equivalent to normalizing the feature map:
.. math::
\Phi(x) = \frac{\phi(x)}{\|\phi(x)\|}
"""
return lambda x1, x2: (
kernel(x1, x2, *args, **kwargs)
/ np.sqrt(kernel(x1, x1, *args, **kwargs)
* kernel(x2, x2, *args, **kwargs))
)
|
Add kernel construction based on kernels
|
Add kernel construction based on kernels
|
Python
|
mit
|
manu3618/legendary-potato
|
Add kernel construction based on kernels
|
"Functions to create kernels from already existing kernels."
import numpy as np
def normalize(kernel, *args, **kwargs):
"""Return the normalized version.
This corresponds to the new kernel
.. math::
K(x_1, x_2) = \frac{kernel(x_1, x_2)}{\sqrt{kernel(x_1, x_1) kernel(x_2, x_2)}}
This is equivalent to normalizing the feature map:
.. math::
\Phi(x) = \frac{\phi(x)}{\|\phi(x)\|}
"""
return lambda x1, x2: (
kernel(x1, x2, *args, **kwargs)
/ np.sqrt(kernel(x1, x1, *args, **kwargs)
* kernel(x2, x2, *args, **kwargs))
)
|
<commit_before><commit_msg>Add kernel construction based on kernels<commit_after>
|
"Functions to create kernels from already existing kernels."
import numpy as np
def normalize(kernel, *args, **kwargs):
"""Return the normalized version.
This corresponds to the new kernel
.. math::
K(x_1, x_2) = \frac{kernel(x_1, x_2)}{\sqrt{kernel(x_1, x_1) kernel(x_2, x_2)}}
This is equivalent to normalizing the feature map:
.. math::
\Phi(x) = \frac{\phi(x)}{\|\phi(x)\|}
"""
return lambda x1, x2: (
kernel(x1, x2, *args, **kwargs)
/ np.sqrt(kernel(x1, x1, *args, **kwargs)
* kernel(x2, x2, *args, **kwargs))
)
|
Add kernel construction based on kernels
"Functions to create kernels from already existing kernels."
import numpy as np
def normalize(kernel, *args, **kwargs):
"""Return the normalized version.
This corresponds to the new kernel
.. math::
K(x_1, x_2) = \frac{kernel(x_1, x_2)}{\sqrt{kernel(x_1, x_1) kernel(x_2, x_2)}}
This is equivalent to normalizing the feature map:
.. math::
\Phi(x) = \frac{\phi(x)}{\|\phi(x)\|}
"""
return lambda x1, x2: (
kernel(x1, x2, *args, **kwargs)
/ np.sqrt(kernel(x1, x1, *args, **kwargs)
* kernel(x2, x2, *args, **kwargs))
)
|
<commit_before><commit_msg>Add kernel construction based on kernels<commit_after>"Functions to create kernels from already existing kernels."
import numpy as np
def normalize(kernel, *args, **kwargs):
"""Return the normalized version.
This corresponds to the new kernel
.. math::
K(x_1, x_2) = \frac{kernel(x_1, x_2)}{\sqrt{kernel(x_1, x_1) kernel(x_2, x_2)}}
This is equivalent to normalizing the feature map:
.. math::
\Phi(x) = \frac{\phi(x)}{\|\phi(x)\|}
"""
return lambda x1, x2: (
kernel(x1, x2, *args, **kwargs)
/ np.sqrt(kernel(x1, x1, *args, **kwargs)
* kernel(x2, x2, *args, **kwargs))
)
|
|
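The docstring above spells out cosine normalization: the new kernel divides by the geometric mean of the two self-similarities, so every point has unit norm in feature space. (When `\frac`/`\sqrt` markup is kept in a docstring, it is normally written as a raw string, `r"""..."""`, so Python does not treat `\f` as an escape.) A usage sketch follows, assuming `normalize` from the module above is in scope; `poly_kernel` is an illustrative kernel, not part of the module.

```python
import numpy as np

def poly_kernel(x1, x2, degree=2):
    """Illustrative polynomial kernel on 1-D numpy vectors."""
    return (np.dot(x1, x2) + 1.0) ** degree

# normalize() returns a new kernel closed over the old one and its arguments.
cosine_poly = normalize(poly_kernel, degree=3)

x = np.array([1.0, 2.0])
y = np.array([0.5, -1.0])
print(cosine_poly(x, x))  # 1.0 by construction: K(x, x) / sqrt(K(x, x)^2)
print(cosine_poly(x, y))  # lies in [-1, 1] for a positive-definite kernel
```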
59d8a8c1218a9641ae559b0cd710c05d17e26409
|
graph_over_time.py
|
graph_over_time.py
|
#!/usr/bin/python2
import json
import subprocess
import time
import calendar
import dateutil.parser
import datetime
runs = json.load(open("list.json","r"))
num_master = 0
psnr = []
psnrhvs = []
ssim = []
fastssim = []
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
for run in runs:
if u'master' in run['info']:
filename = 'runs/'+run['run_id']+'/'+run['info']['task']+'/total.out'
try:
distortion = subprocess.check_output(['/home/thomas/daala/tools/matlab/distortion.m',filename,'0.1'])
date_obj = dateutil.parser.parse(run['date']).replace(tzinfo=None)
date_js = unix_time(date_obj) * 1000
psnr.append([date_js,distortion.split('\n')[0]])
psnrhvs.append([date_js,distortion.split('\n')[1]])
ssim.append([date_js,distortion.split('\n')[2]])
fastssim.append([date_js,distortion.split('\n')[3]])
except subprocess.CalledProcessError:
continue
output = [psnr, psnrhvs, ssim, fastssim]
json.dump(output,open('time_series.json','w'))
|
Add tool to generate timeline data
|
Add tool to generate timeline data
|
Python
|
mit
|
mdinger/awcy,tdaede/awcy,tdaede/awcy,tdaede/awcy,tdaede/awcy,mdinger/awcy,mdinger/awcy,tdaede/awcy,mdinger/awcy,mdinger/awcy,tdaede/awcy
|
Add tool to generate timeline data
|
#!/usr/bin/python2
import json
import subprocess
import time
import calendar
import dateutil.parser
import datetime
runs = json.load(open("list.json","r"))
num_master = 0
psnr = []
psnrhvs = []
ssim = []
fastssim = []
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
for run in runs:
if u'master' in run['info']:
filename = 'runs/'+run['run_id']+'/'+run['info']['task']+'/total.out'
try:
distortion = subprocess.check_output(['/home/thomas/daala/tools/matlab/distortion.m',filename,'0.1'])
date_obj = dateutil.parser.parse(run['date']).replace(tzinfo=None)
date_js = unix_time(date_obj) * 1000
psnr.append([date_js,distortion.split('\n')[0]])
psnrhvs.append([date_js,distortion.split('\n')[1]])
ssim.append([date_js,distortion.split('\n')[2]])
fastssim.append([date_js,distortion.split('\n')[3]])
except subprocess.CalledProcessError:
continue
output = [psnr, psnrhvs, ssim, fastssim]
json.dump(output,open('time_series.json','w'))
|
<commit_before><commit_msg>Add tool to generate timeline data<commit_after>
|
#!/usr/bin/python2
import json
import subprocess
import time
import calendar
import dateutil.parser
import datetime
runs = json.load(open("list.json","r"))
num_master = 0
psnr = []
psnrhvs = []
ssim = []
fastssim = []
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
for run in runs:
if u'master' in run['info']:
filename = 'runs/'+run['run_id']+'/'+run['info']['task']+'/total.out'
try:
distortion = subprocess.check_output(['/home/thomas/daala/tools/matlab/distortion.m',filename,'0.1'])
date_obj = dateutil.parser.parse(run['date']).replace(tzinfo=None)
date_js = unix_time(date_obj) * 1000
psnr.append([date_js,distortion.split('\n')[0]])
psnrhvs.append([date_js,distortion.split('\n')[1]])
ssim.append([date_js,distortion.split('\n')[2]])
fastssim.append([date_js,distortion.split('\n')[3]])
except subprocess.CalledProcessError:
continue
output = [psnr, psnrhvs, ssim, fastssim]
json.dump(output,open('time_series.json','w'))
|
Add tool to generate timeline data
#!/usr/bin/python2
import json
import subprocess
import time
import calendar
import dateutil.parser
import datetime
runs = json.load(open("list.json","r"))
num_master = 0
psnr = []
psnrhvs = []
ssim = []
fastssim = []
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
for run in runs:
if u'master' in run['info']:
filename = 'runs/'+run['run_id']+'/'+run['info']['task']+'/total.out'
try:
distortion = subprocess.check_output(['/home/thomas/daala/tools/matlab/distortion.m',filename,'0.1'])
date_obj = dateutil.parser.parse(run['date']).replace(tzinfo=None)
date_js = unix_time(date_obj) * 1000
psnr.append([date_js,distortion.split('\n')[0]])
psnrhvs.append([date_js,distortion.split('\n')[1]])
ssim.append([date_js,distortion.split('\n')[2]])
fastssim.append([date_js,distortion.split('\n')[3]])
except subprocess.CalledProcessError:
continue
output = [psnr, psnrhvs, ssim, fastssim]
json.dump(output,open('time_series.json','w'))
|
<commit_before><commit_msg>Add tool to generate timeline data<commit_after>#!/usr/bin/python2
import json
import subprocess
import time
import calendar
import dateutil.parser
import datetime
runs = json.load(open("list.json","r"))
num_master = 0
psnr = []
psnrhvs = []
ssim = []
fastssim = []
def unix_time(dt):
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
for run in runs:
if u'master' in run['info']:
filename = 'runs/'+run['run_id']+'/'+run['info']['task']+'/total.out'
try:
distortion = subprocess.check_output(['/home/thomas/daala/tools/matlab/distortion.m',filename,'0.1'])
date_obj = dateutil.parser.parse(run['date']).replace(tzinfo=None)
date_js = unix_time(date_obj) * 1000
psnr.append([date_js,distortion.split('\n')[0]])
psnrhvs.append([date_js,distortion.split('\n')[1]])
ssim.append([date_js,distortion.split('\n')[2]])
fastssim.append([date_js,distortion.split('\n')[3]])
except subprocess.CalledProcessError:
continue
output = [psnr, psnrhvs, ssim, fastssim]
json.dump(output,open('time_series.json','w'))
|
|
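The heavy lifting above is shelling out to an external distortion script; the date handling is self-contained and worth isolating. A small sketch of the same epoch-milliseconds conversion, with an illustrative timestamp:

```python
import datetime
import dateutil.parser

def unix_time_ms(dt):
    """Naive UTC datetime -> JavaScript-style milliseconds since the epoch."""
    epoch = datetime.datetime.utcfromtimestamp(0)
    return (dt - epoch).total_seconds() * 1000

# replace(tzinfo=None) drops the offset so subtraction against the naive
# epoch works; the parsed string must therefore already be in UTC.
date_obj = dateutil.parser.parse('2015-06-01T12:00:00Z').replace(tzinfo=None)
print(unix_time_ms(date_obj))  # 1433160000000.0
```

Milliseconds are what JavaScript charting libraries expect on a time axis, which is presumably why the script multiplies by 1000 before dumping `time_series.json`.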
2981f89a1ffffbd3dae78543fd1e8e257e6a8a3f
|
copy_ftp.py
|
copy_ftp.py
|
#C:\Users\smst\AppData\Local\Radio Server Player 2>
#for /d %f in (profiles\*) do \code\rsab\nowplaying\copy_ftp.py setup.ini "%f\setup.ini"
start_time = __import__('time').time()
def selective_merge(target_ini, merge_from_ini, merge_items):
try:
set
except NameError:
from sets import Set as set
import shutil
import tempfile
import time
fm = open(merge_from_ini, 'r')
lines_m = fm.readlines()
fm.close()
merge_values = {}
merge_items = set(merge_items)
seen_sections = set()
seen_in_this_section = set()
section = None
for line in lines_m:
line = line.strip()
if not line:
continue
if line.startswith('['):
s = line[1:].split(']')[0]
if s not in seen_sections:
section = s
seen_sections.add(section)
seen_in_this_section.clear()
continue
if '=' not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if k not in seen_in_this_section:
seen_in_this_section.add(k)
if (section, k) in merge_items and (section, k) not in merge_values:
merge_values[(section, k)] = v
ft = open(target_ini, 'r')
lines_t = ft.readlines()
ft.close()
out_lines = []
section = None
for line in lines_t:
write_line = line
line = line.strip()
if line.startswith('['):
section = line[1:].split(']')[0]
elif '=' in line:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if (section, k) in merge_values:
write_line = '%s=%s\n' % (k, merge_values[(section, k)])
out_lines.append(write_line)
backup_ext = time.strftime('.%Y%m%d-%H%M%S', time.localtime(start_time))
shutil.move(target_ini, target_ini + backup_ext)
fw = open(target_ini, 'w')
for line in out_lines:
fw.write(line)
fw.close()
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print 'Usage: %s SOURCE_INI TARGET_INI' % (sys.argv[0],)
sys.exit(0)
merge_from_ini = sys.argv[1]
target_ini = sys.argv[2]
print '%s -> %s' % (merge_from_ini, target_ini)
selective_merge(
target_ini,
merge_from_ini,
[
('player', 'ftpxtra'),
('ftp', 'enable'),
('ftp', 'server'),
('ftp', 'user'),
('ftp', 'pass'),
('ftp', 'path'),
],
)
|
Copy the RSP2 FTP settings from one INI file into another
|
Copy the RSP2 FTP settings from one INI file into another
|
Python
|
mit
|
radio-st-austell-bay/helpers
|
Copy the RSP2 FTP settings from one INI file into another
|
#C:\Users\smst\AppData\Local\Radio Server Player 2>
#for /d %f in (profiles\*) do \code\rsab\nowplaying\copy_ftp.py setup.ini "%f\setup.ini"
start_time = __import__('time').time()
def selective_merge(target_ini, merge_from_ini, merge_items):
try:
set
except NameError:
from sets import Set as set
import shutil
import tempfile
import time
fm = open(merge_from_ini, 'r')
lines_m = fm.readlines()
fm.close()
merge_values = {}
merge_items = set(merge_items)
seen_sections = set()
seen_in_this_section = set()
section = None
for line in lines_m:
line = line.strip()
if not line:
continue
if line.startswith('['):
s = line[1:].split(']')[0]
if s not in seen_sections:
section = s
seen_sections.add(section)
seen_in_this_section.clear()
continue
if '=' not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if k not in seen_in_this_section:
seen_in_this_section.add(k)
if (section, k) in merge_items and (section, k) not in merge_values:
merge_values[(section, k)] = v
ft = open(target_ini, 'r')
lines_t = ft.readlines()
ft.close()
out_lines = []
section = None
for line in lines_t:
write_line = line
line = line.strip()
if line.startswith('['):
section = line[1:].split(']')[0]
elif '=' in line:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if (section, k) in merge_values:
write_line = '%s=%s\n' % (k, merge_values[(section, k)])
out_lines.append(write_line)
backup_ext = time.strftime('.%Y%m%d-%H%M%S', time.localtime(start_time))
shutil.move(target_ini, target_ini + backup_ext)
fw = open(target_ini, 'w')
for line in out_lines:
fw.write(line)
fw.close()
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print 'Usage: %s SOURCE_INI TARGET_INI' % (sys.argv[0],)
sys.exit(0)
merge_from_ini = sys.argv[1]
target_ini = sys.argv[2]
print '%s -> %s' % (merge_from_ini, target_ini)
selective_merge(
target_ini,
merge_from_ini,
[
('player', 'ftpxtra'),
('ftp', 'enable'),
('ftp', 'server'),
('ftp', 'user'),
('ftp', 'pass'),
('ftp', 'path'),
],
)
|
<commit_before><commit_msg>Copy the RSP2 FTP settings from one INI file into another<commit_after>
|
#C:\Users\smst\AppData\Local\Radio Server Player 2>
#for /d %f in (profiles\*) do \code\rsab\nowplaying\copy_ftp.py setup.ini "%f\setup.ini"
start_time = __import__('time').time()
def selective_merge(target_ini, merge_from_ini, merge_items):
try:
set
except NameError:
from sets import Set as set
import shutil
import tempfile
import time
fm = open(merge_from_ini, 'r')
lines_m = fm.readlines()
fm.close()
merge_values = {}
merge_items = set(merge_items)
seen_sections = set()
seen_in_this_section = set()
section = None
for line in lines_m:
line = line.strip()
if not line:
continue
if line.startswith('['):
s = line[1:].split(']')[0]
if s not in seen_sections:
section = s
seen_sections.add(section)
seen_in_this_section.clear()
continue
if '=' not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if k not in seen_in_this_section:
seen_in_this_section.add(k)
if (section, k) in merge_items and (section, k) not in merge_values:
merge_values[(section, k)] = v
ft = open(target_ini, 'r')
lines_t = ft.readlines()
ft.close()
out_lines = []
section = None
for line in lines_t:
write_line = line
line = line.strip()
if line.startswith('['):
section = line[1:].split(']')[0]
elif '=' in line:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if (section, k) in merge_values:
write_line = '%s=%s\n' % (k, merge_values[(section, k)])
out_lines.append(write_line)
backup_ext = time.strftime('.%Y%m%d-%H%M%S', time.localtime(start_time))
shutil.move(target_ini, target_ini + backup_ext)
fw = open(target_ini, 'w')
for line in out_lines:
fw.write(line)
fw.close()
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print 'Usage: %s SOURCE_INI TARGET_INI' % (sys.argv[0],)
sys.exit(0)
merge_from_ini = sys.argv[1]
target_ini = sys.argv[2]
print '%s -> %s' % (merge_from_ini, target_ini)
selective_merge(
target_ini,
merge_from_ini,
[
('player', 'ftpxtra'),
('ftp', 'enable'),
('ftp', 'server'),
('ftp', 'user'),
('ftp', 'pass'),
('ftp', 'path'),
],
)
|
Copy the RSP2 FTP settings from one INI file into another
#C:\Users\smst\AppData\Local\Radio Server Player 2>
#for /d %f in (profiles\*) do \code\rsab\nowplaying\copy_ftp.py setup.ini "%f\setup.ini"
start_time = __import__('time').time()
def selective_merge(target_ini, merge_from_ini, merge_items):
try:
set
except NameError:
from sets import Set as set
import shutil
import tempfile
import time
fm = open(merge_from_ini, 'r')
lines_m = fm.readlines()
fm.close()
merge_values = {}
merge_items = set(merge_items)
seen_sections = set()
seen_in_this_section = set()
section = None
for line in lines_m:
line = line.strip()
if not line:
continue
if line.startswith('['):
s = line[1:].split(']')[0]
if s not in seen_sections:
section = s
seen_sections.add(section)
seen_in_this_section.clear()
continue
if '=' not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if k not in seen_in_this_section:
seen_in_this_section.add(k)
if (section, k) in merge_items and (section, k) not in merge_values:
merge_values[(section, k)] = v
ft = open(target_ini, 'r')
lines_t = ft.readlines()
ft.close()
out_lines = []
section = None
for line in lines_t:
write_line = line
line = line.strip()
if line.startswith('['):
section = line[1:].split(']')[0]
elif '=' in line:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if (section, k) in merge_values:
write_line = '%s=%s\n' % (k, merge_values[(section, k)])
out_lines.append(write_line)
backup_ext = time.strftime('.%Y%m%d-%H%M%S', time.localtime(start_time))
shutil.move(target_ini, target_ini + backup_ext)
fw = open(target_ini, 'w')
for line in out_lines:
fw.write(line)
fw.close()
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print 'Usage: %s SOURCE_INI TARGET_INI' % (sys.argv[0],)
sys.exit(0)
merge_from_ini = sys.argv[1]
target_ini = sys.argv[2]
print '%s -> %s' % (merge_from_ini, target_ini)
selective_merge(
target_ini,
merge_from_ini,
[
('player', 'ftpxtra'),
('ftp', 'enable'),
('ftp', 'server'),
('ftp', 'user'),
('ftp', 'pass'),
('ftp', 'path'),
],
)
|
<commit_before><commit_msg>Copy the RSP2 FTP settings from one INI file into another<commit_after>#C:\Users\smst\AppData\Local\Radio Server Player 2>
#for /d %f in (profiles\*) do \code\rsab\nowplaying\copy_ftp.py setup.ini "%f\setup.ini"
start_time = __import__('time').time()
def selective_merge(target_ini, merge_from_ini, merge_items):
try:
set
except NameError:
from sets import Set as set
import shutil
import tempfile
import time
fm = open(merge_from_ini, 'r')
lines_m = fm.readlines()
fm.close()
merge_values = {}
merge_items = set(merge_items)
seen_sections = set()
seen_in_this_section = set()
section = None
for line in lines_m:
line = line.strip()
if not line:
continue
if line.startswith('['):
s = line[1:].split(']')[0]
if s not in seen_sections:
section = s
seen_sections.add(section)
seen_in_this_section.clear()
continue
if '=' not in line:
continue
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if k not in seen_in_this_section:
seen_in_this_section.add(k)
if (section, k) in merge_items and (section, k) not in merge_values:
merge_values[(section, k)] = v
ft = open(target_ini, 'r')
lines_t = ft.readlines()
ft.close()
out_lines = []
section = None
for line in lines_t:
write_line = line
line = line.strip()
if line.startswith('['):
section = line[1:].split(']')[0]
elif '=' in line:
k, v = line.split('=', 1)
k = k.strip()
v = v.strip()
if (section, k) in merge_values:
write_line = '%s=%s\n' % (k, merge_values[(section, k)])
out_lines.append(write_line)
backup_ext = time.strftime('.%Y%m%d-%H%M%S', time.localtime(start_time))
shutil.move(target_ini, target_ini + backup_ext)
fw = open(target_ini, 'w')
for line in out_lines:
fw.write(line)
fw.close()
if __name__ == '__main__':
import sys
if len(sys.argv) < 3:
print 'Usage: %s SOURCE_INI TARGET_INI' % (sys.argv[0],)
sys.exit(0)
merge_from_ini = sys.argv[1]
target_ini = sys.argv[2]
print '%s -> %s' % (merge_from_ini, target_ini)
selective_merge(
target_ini,
merge_from_ini,
[
('player', 'ftpxtra'),
('ftp', 'enable'),
('ftp', 'server'),
('ftp', 'user'),
('ftp', 'pass'),
('ftp', 'path'),
],
)
|
|
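The script parses the INI files by hand rather than through a ConfigParser, which lets it rewrite the target line by line and so preserve layout, comments, and duplicate keys. A rough Python 3 equivalent of the core move for a single key, acceptable only if normalizing the target file is fine; the filenames and profile path are illustrative.

```python
import configparser
import shutil
import time

def merge_one(target_ini, source_ini, section, key):
    """Copy one [section] key from source into target, backing target up first."""
    src = configparser.RawConfigParser()
    src.read(source_ini)
    dst = configparser.RawConfigParser()
    dst.read(target_ini)
    if not dst.has_section(section):
        dst.add_section(section)
    dst.set(section, key, src.get(section, key))
    # Timestamped backup, mirroring the original's shutil.move().
    shutil.move(target_ini, target_ini + time.strftime('.%Y%m%d-%H%M%S'))
    with open(target_ini, 'w') as fh:
        dst.write(fh)

merge_one('profiles/studio/setup.ini', 'setup.ini', 'ftp', 'server')  # hypothetical paths
```

Note the trade-off: `RawConfigParser.write()` reorders and reformats the file, which is exactly what the original's manual line rewriting avoids.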
1c04af7fa83c8bbe841ecb0ab449b31780b7571f
|
de-boor/b-splines-with-de-Boor-recurrence.py
|
de-boor/b-splines-with-de-Boor-recurrence.py
|
import numpy as np
def extend_knots_vector(order, a, b, internal_knots, multiplicities=None, closed=False):
"""
This function produces an extended knots vector for BSpline functions given a simple one.
Parameters
==========
a:
Examples
========
Simple extension for an open curve
>>> extend_knots_vector(4, 3, 6, [4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
The same as before, only specify arguments' names for better readability
>>> extend_knots_vector(order=4, a=3, b=6, internal_knots=[4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
"""
if multiplicities is None:
multiplicities = np.repeat(1, len(internal_knots))
def open_case():
extended_vector = np.empty(order + sum(multiplicities) + order)
extended_vector[0:order] = np.repeat(a, order)
index = order
for (internal_knot, multiplicity) in zip(internal_knots, multiplicities):
extended_vector[index:index+multiplicity] = np.repeat(internal_knot, multiplicity)
index += multiplicity
extended_vector[-order:] = np.repeat(b, order)
return extended_vector
def closed_case(): ...
return closed_case() if closed else open_case()
|
Add a new Python implementation about de Boor recurrence to draw BSplines curves, this commit contains the initial work on a module which contains knot partition extension utilities.
|
Add a new Python implementation about de Boor recurrence to draw
BSplines curves, this commit contains the initial work on a module
which contains knot partition extension utilities.
|
Python
|
mit
|
massimo-nocentini/cagd,massimo-nocentini/cagd,massimo-nocentini/cagd
|
Add a new Python implementation about de Boor recurrence to draw
BSplines curves, this commit contains the initial work on a module
which contains knot partition extension utilities.
|
import numpy as np
def extend_knots_vector(order, a, b, internal_knots, multiplicities=None, closed=False):
"""
This function produces an extended knots vector for BSpline functions given a simple one.
Parameters
==========
a:
Examples
========
Simple extension for an open curve
>>> extend_knots_vector(4, 3, 6, [4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
The same as before, only specify arguments' names for better readability
>>> extend_knots_vector(order=4, a=3, b=6, internal_knots=[4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
"""
if multiplicities is None:
multiplicities = np.repeat(1, len(internal_knots))
def open_case():
extended_vector = np.empty(order + sum(multiplicities) + order)
extended_vector[0:order] = np.repeat(a, order)
index = order
for (internal_knot, multiplicity) in zip(internal_knots, multiplicities):
extended_vector[index:index+multiplicity] = np.repeat(internal_knot, multiplicity)
index += multiplicity
extended_vector[-order:] = np.repeat(b, order)
return extended_vector
def closed_case(): ...
return closed_case() if closed else open_case()
|
<commit_before><commit_msg>Add a new Python implementation about de Boor recurrence to draw
BSplines curves, this commit contains the initial work on a module
which contains knot partition extension utilities.<commit_after>
|
import numpy as np
def extend_knots_vector(order, a, b, internal_knots, multiplicities=None, closed=False):
"""
This function produces an extended knots vector for BSpline functions given a simple one.
Parameters
==========
a:
Examples
========
Simple extension for an open curve
>>> extend_knots_vector(4, 3, 6, [4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
The same as before, only specify arguments' names for better readability
>>> extend_knots_vector(order=4, a=3, b=6, internal_knots=[4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
"""
if multiplicities is None:
multiplicities = np.repeat(1, len(internal_knots))
def open_case():
extended_vector = np.empty(order + sum(multiplicities) + order)
extended_vector[0:order] = np.repeat(a, order)
index = order
for (internal_knot, multiplicity) in zip(internal_knots, multiplicities):
extended_vector[index:index+multiplicity] = np.repeat(internal_knot, multiplicity)
index += multiplicity
extended_vector[-order:] = np.repeat(b, order)
return extended_vector
def closed_case(): ...
return closed_case() if closed else open_case()
|
Add a new Python implementation about de Boor recurrence to draw
BSplines curves, this commit contains the initial work on a module
which contains knot partition extension utilities.
import numpy as np
def extend_knots_vector(order, a, b, internal_knots, multiplicities=None, closed=False):
"""
This function produces an extended knots vector for BSpline functions given a simple one.
Parameters
==========
a:
Examples
========
Simple extension for an open curve
>>> extend_knots_vector(4, 3, 6, [4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
The same as before, only specify arguments' names for better readability
>>> extend_knots_vector(order=4, a=3, b=6, internal_knots=[4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
"""
if multiplicities is None:
multiplicities = np.repeat(1, len(internal_knots))
def open_case():
extended_vector = np.empty(order + sum(multiplicities) + order)
extended_vector[0:order] = np.repeat(a, order)
index = order
for (internal_knot, multiplicity) in zip(internal_knots, multiplicities):
extended_vector[index:index+multiplicity] = np.repeat(internal_knot, multiplicity)
index += multiplicity
extended_vector[-order:] = np.repeat(b, order)
return extended_vector
def closed_case(): ...
return closed_case() if closed else open_case()
|
<commit_before><commit_msg>Add a new Python implementation about de Boor recurrence to draw
BSplines curves, this commit contains the initial work on a module
which contains knot partition extension utilities.<commit_after>
import numpy as np
def extend_knots_vector(order, a, b, internal_knots, multiplicities=None, closed=False):
"""
This function produces an extended knots vector for BSpline functions given a simple one.
Parameters
==========
a:
Examples
========
Simple extension for an open curve
>>> extend_knots_vector(4, 3, 6, [4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
The same as before, only specify arguments' names for better readability
>>> extend_knots_vector(order=4, a=3, b=6, internal_knots=[4, 5])
array([ 3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
"""
if multiplicities is None:
multiplicities = np.repeat(1, len(internal_knots))
def open_case():
extended_vector = np.empty(order + sum(multiplicities) + order)
extended_vector[0:order] = np.repeat(a, order)
index = order
for (internal_knot, multiplicity) in zip(internal_knots, multiplicities):
extended_vector[index:index+multiplicity] = np.repeat(internal_knot, multiplicity)
index += multiplicity
extended_vector[-order:] = np.repeat(b, order)
return extended_vector
def closed_case(): ...
return closed_case() if closed else open_case()
|
|
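The open case pads the internal knots with `order` copies of each endpoint, producing the clamped knot vector that makes a B-spline curve interpolate its end control points. The same construction can be cross-checked against the doctest above with a single `np.concatenate`:

```python
# An equivalent one-liner for the open case, using numpy directly.
# Inputs mirror the doctest: order=4, a=3, b=6, internal_knots=[4, 5].
import numpy as np

def extend_open(order, a, b, internal_knots, multiplicities=None):
    if multiplicities is None:
        multiplicities = [1] * len(internal_knots)
    return np.concatenate([
        np.repeat(float(a), order),                        # clamp at the left end
        np.repeat(internal_knots, multiplicities).astype(float),
        np.repeat(float(b), order),                        # clamp at the right end
    ])

print(extend_open(4, 3, 6, [4, 5]))
# [3. 3. 3. 3. 4. 5. 6. 6. 6. 6.]
```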
02ba3da06a40e3841e54ee1c93d4dc345a69cca3
|
motmetrics/tests/test_utils.py
|
motmetrics/tests/test_utils.py
|
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Tests accumulation of events using utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pandas as pd
import motmetrics as mm
def test_annotations_xor_predictions_present():
"""Tests frames that contain only annotations or predictions."""
_ = None
anno_tracks = {
1: [0, 2, 4, 6, _, _, _],
2: [_, _, 0, 2, 4, _, _],
}
pred_tracks = {
1: [_, _, 3, 5, 7, 7, 7],
}
anno = _tracks_to_dataframe(anno_tracks)
pred = _tracks_to_dataframe(pred_tracks)
acc = mm.utils.compare_to_groundtruth(anno, pred, 'euc', distfields=['Position'], distth=2)
mh = mm.metrics.create()
metrics = mh.compute(acc, return_dataframe=False, metrics=[
'num_objects', 'num_predictions', 'num_unique_objects',
])
np.testing.assert_equal(metrics['num_objects'], 7)
np.testing.assert_equal(metrics['num_predictions'], 5)
np.testing.assert_equal(metrics['num_unique_objects'], 2)
def _tracks_to_dataframe(tracks):
rows = []
for track_id, track in tracks.items():
for frame_id, position in zip(itertools.count(1), track):
if position is None:
continue
rows.append({
'FrameId': frame_id,
'Id': track_id,
'Position': position,
})
return pd.DataFrame(rows).set_index(['FrameId', 'Id'])
|
Add test to catch bug in compare_to_groundtruth
|
Add test to catch bug in compare_to_groundtruth
|
Python
|
mit
|
cheind/py-motmetrics,cheind/py-motmetrics
|
Add test to catch bug in compare_to_groundtruth
|
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Tests accumulation of events using utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pandas as pd
import motmetrics as mm
def test_annotations_xor_predictions_present():
"""Tests frames that contain only annotations or predictions."""
_ = None
anno_tracks = {
1: [0, 2, 4, 6, _, _, _],
2: [_, _, 0, 2, 4, _, _],
}
pred_tracks = {
1: [_, _, 3, 5, 7, 7, 7],
}
anno = _tracks_to_dataframe(anno_tracks)
pred = _tracks_to_dataframe(pred_tracks)
acc = mm.utils.compare_to_groundtruth(anno, pred, 'euc', distfields=['Position'], distth=2)
mh = mm.metrics.create()
metrics = mh.compute(acc, return_dataframe=False, metrics=[
'num_objects', 'num_predictions', 'num_unique_objects',
])
np.testing.assert_equal(metrics['num_objects'], 7)
np.testing.assert_equal(metrics['num_predictions'], 5)
np.testing.assert_equal(metrics['num_unique_objects'], 2)
def _tracks_to_dataframe(tracks):
rows = []
for track_id, track in tracks.items():
for frame_id, position in zip(itertools.count(1), track):
if position is None:
continue
rows.append({
'FrameId': frame_id,
'Id': track_id,
'Position': position,
})
return pd.DataFrame(rows).set_index(['FrameId', 'Id'])
|
<commit_before><commit_msg>Add test to catch bug in compare_to_groundtruth<commit_after>
|
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Tests accumulation of events using utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pandas as pd
import motmetrics as mm
def test_annotations_xor_predictions_present():
"""Tests frames that contain only annotations or predictions."""
_ = None
anno_tracks = {
1: [0, 2, 4, 6, _, _, _],
2: [_, _, 0, 2, 4, _, _],
}
pred_tracks = {
1: [_, _, 3, 5, 7, 7, 7],
}
anno = _tracks_to_dataframe(anno_tracks)
pred = _tracks_to_dataframe(pred_tracks)
acc = mm.utils.compare_to_groundtruth(anno, pred, 'euc', distfields=['Position'], distth=2)
mh = mm.metrics.create()
metrics = mh.compute(acc, return_dataframe=False, metrics=[
'num_objects', 'num_predictions', 'num_unique_objects',
])
np.testing.assert_equal(metrics['num_objects'], 7)
np.testing.assert_equal(metrics['num_predictions'], 5)
np.testing.assert_equal(metrics['num_unique_objects'], 2)
def _tracks_to_dataframe(tracks):
rows = []
for track_id, track in tracks.items():
for frame_id, position in zip(itertools.count(1), track):
if position is None:
continue
rows.append({
'FrameId': frame_id,
'Id': track_id,
'Position': position,
})
return pd.DataFrame(rows).set_index(['FrameId', 'Id'])
|
Add test to catch bug in compare_to_groundtruth
# py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Tests accumulation of events using utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pandas as pd
import motmetrics as mm
def test_annotations_xor_predictions_present():
"""Tests frames that contain only annotations or predictions."""
_ = None
anno_tracks = {
1: [0, 2, 4, 6, _, _, _],
2: [_, _, 0, 2, 4, _, _],
}
pred_tracks = {
1: [_, _, 3, 5, 7, 7, 7],
}
anno = _tracks_to_dataframe(anno_tracks)
pred = _tracks_to_dataframe(pred_tracks)
acc = mm.utils.compare_to_groundtruth(anno, pred, 'euc', distfields=['Position'], distth=2)
mh = mm.metrics.create()
metrics = mh.compute(acc, return_dataframe=False, metrics=[
'num_objects', 'num_predictions', 'num_unique_objects',
])
np.testing.assert_equal(metrics['num_objects'], 7)
np.testing.assert_equal(metrics['num_predictions'], 5)
np.testing.assert_equal(metrics['num_unique_objects'], 2)
def _tracks_to_dataframe(tracks):
rows = []
for track_id, track in tracks.items():
for frame_id, position in zip(itertools.count(1), track):
if position is None:
continue
rows.append({
'FrameId': frame_id,
'Id': track_id,
'Position': position,
})
return pd.DataFrame(rows).set_index(['FrameId', 'Id'])
|
<commit_before><commit_msg>Add test to catch bug in compare_to_groundtruth<commit_after># py-motmetrics - Metrics for multiple object tracker (MOT) benchmarking.
# https://github.com/cheind/py-motmetrics/
#
# MIT License
# Copyright (c) 2017-2020 Christoph Heindl, Jack Valmadre and others.
# See LICENSE file for terms.
"""Tests accumulation of events using utility functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import pandas as pd
import motmetrics as mm
def test_annotations_xor_predictions_present():
"""Tests frames that contain only annotations or predictions."""
_ = None
anno_tracks = {
1: [0, 2, 4, 6, _, _, _],
2: [_, _, 0, 2, 4, _, _],
}
pred_tracks = {
1: [_, _, 3, 5, 7, 7, 7],
}
anno = _tracks_to_dataframe(anno_tracks)
pred = _tracks_to_dataframe(pred_tracks)
acc = mm.utils.compare_to_groundtruth(anno, pred, 'euc', distfields=['Position'], distth=2)
mh = mm.metrics.create()
metrics = mh.compute(acc, return_dataframe=False, metrics=[
'num_objects', 'num_predictions', 'num_unique_objects',
])
np.testing.assert_equal(metrics['num_objects'], 7)
np.testing.assert_equal(metrics['num_predictions'], 5)
np.testing.assert_equal(metrics['num_unique_objects'], 2)
def _tracks_to_dataframe(tracks):
rows = []
for track_id, track in tracks.items():
for frame_id, position in zip(itertools.count(1), track):
if position is None:
continue
rows.append({
'FrameId': frame_id,
'Id': track_id,
'Position': position,
})
return pd.DataFrame(rows).set_index(['FrameId', 'Id'])
|
|
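The `_tracks_to_dataframe` helper is the piece that turns the compact track dictionaries into the `(FrameId, Id)`-indexed DataFrames that `compare_to_groundtruth` consumes. A standalone illustration on a one-track input, with no motmetrics dependency:

```python
# Track dicts map Id -> per-frame positions (None = absent in that frame)
# and become a (FrameId, Id)-indexed DataFrame, one row per observation.
import itertools
import pandas as pd

tracks = {1: [0, None, 4]}
rows = [
    {'FrameId': f, 'Id': tid, 'Position': pos}
    for tid, track in tracks.items()
    for f, pos in zip(itertools.count(1), track)
    if pos is not None
]
print(pd.DataFrame(rows).set_index(['FrameId', 'Id']))
#             Position
# FrameId Id
# 1       1          0
# 3       1          4
```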
62933a44942088e512ec3aa8024f97ed48375519
|
djset/backends.py
|
djset/backends.py
|
import os
import codecs
import keyring.util.platform
from keyring.py27compat import configparser
from keyring.backends.file import BaseKeyring
from keyring.core import load_keyring
from keyring.util.escape import escape as escape_for_ini
from keyring.util import properties
class UnencryptedKeyring(BaseKeyring):
"""
UnencryptedKeyring is for storing settings which aren't sensitive.
For convenience we use the same interface as a regular keyring.
"""
filename = 'keyring_public.cfg'
@properties.NonDataProperty
def file_path(self):
"""
The path to the file where passwords are stored. This property
may be overridden by the subclass or at the instance level.
"""
return os.path.join(keyring.util.platform.data_root(), self.filename)
def encrypt(self, password):
"""Directly return the password itself.
"""
return password
def decrypt(self, password_encrypted):
"""Directly return encrypted password.
"""
return password_encrypted
def get_password(self, service, username):
"""Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path, encoding='utf-8')
# fetch the password
try:
password = config.get(service, username)
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
def set_password(self, service, username, password):
"""Write the password in the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# ensure the file exists
self._ensure_file_path()
# load the keyring from the disk
config = configparser.RawConfigParser()
config.read(self.file_path)
# update the keyring with the password
if not config.has_section(service):
config.add_section(service)
config.set(service, username, password)
# save the keyring back to the file
config_file = codecs.open(self.file_path, 'w', 'utf-8')
try:
config.write(config_file)
finally:
config_file.close()
def supported(self):
"""Applicable for all platforms, but do not recommend.
"""
return 0
config_keyring = UnencryptedKeyring()
|
Add an unencrypted file based backend.
|
Add an unencrypted file based backend.
|
Python
|
mit
|
bretth/djset,bretth/djset
|
Add an unencrypted file based backend.
|
import os
import codecs
import keyring.util.platform
from keyring.py27compat import configparser
from keyring.backends.file import BaseKeyring
from keyring.core import load_keyring
from keyring.util.escape import escape as escape_for_ini
from keyring.util import properties
class UnencryptedKeyring(BaseKeyring):
"""
UnencryptedKeyring is for storing settings which aren't sensitive.
For convenience we use the same interface as a regular keyring.
"""
filename = 'keyring_public.cfg'
@properties.NonDataProperty
def file_path(self):
"""
The path to the file where passwords are stored. This property
may be overridden by the subclass or at the instance level.
"""
return os.path.join(keyring.util.platform.data_root(), self.filename)
def encrypt(self, password):
"""Directly return the password itself.
"""
return password
def decrypt(self, password_encrypted):
"""Directly return encrypted password.
"""
return password_encrypted
def get_password(self, service, username):
"""Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path, encoding='utf-8')
# fetch the password
try:
password = config.get(service, username)
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
def set_password(self, service, username, password):
"""Write the password in the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# ensure the file exists
self._ensure_file_path()
# load the keyring from the disk
config = configparser.RawConfigParser()
config.read(self.file_path)
# update the keyring with the password
if not config.has_section(service):
config.add_section(service)
config.set(service, username, password)
# save the keyring back to the file
config_file = codecs.open(self.file_path, 'w', 'utf-8')
try:
config.write(config_file)
finally:
config_file.close()
def supported(self):
"""Applicable for all platforms, but do not recommend.
"""
return 0
config_keyring = UnencryptedKeyring()
|
<commit_before><commit_msg>Add an unencrypted file based backend.<commit_after>
|
import os
import codecs
import keyring.util.platform
from keyring.py27compat import configparser
from keyring.backends.file import BaseKeyring
from keyring.core import load_keyring
from keyring.util.escape import escape as escape_for_ini
from keyring.util import properties
class UnencryptedKeyring(BaseKeyring):
"""
UnencryptedKeyring is for storing settings which aren't sensitive.
For convenience we use the same interface as a regular keyring.
"""
filename = 'keyring_public.cfg'
@properties.NonDataProperty
def file_path(self):
"""
The path to the file where passwords are stored. This property
may be overridden by the subclass or at the instance level.
"""
return os.path.join(keyring.util.platform.data_root(), self.filename)
def encrypt(self, password):
"""Directly return the password itself.
"""
return password
def decrypt(self, password_encrypted):
"""Directly return encrypted password.
"""
return password_encrypted
def get_password(self, service, username):
"""Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path, encoding='utf-8')
# fetch the password
try:
password = config.get(service, username)
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
def set_password(self, service, username, password):
"""Write the password in the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# ensure the file exists
self._ensure_file_path()
# load the keyring from the disk
config = configparser.RawConfigParser()
config.read(self.file_path)
# update the keyring with the password
if not config.has_section(service):
config.add_section(service)
config.set(service, username, password)
# save the keyring back to the file
config_file = codecs.open(self.file_path, 'w', 'utf-8')
try:
config.write(config_file)
finally:
config_file.close()
def supported(self):
"""Applicable for all platforms, but do not recommend.
"""
return 0
config_keyring = UnencryptedKeyring()
|
Add an unencrypted file based backend.
import os
import codecs
import keyring.util.platform
from keyring.py27compat import configparser
from keyring.backends.file import BaseKeyring
from keyring.core import load_keyring
from keyring.util.escape import escape as escape_for_ini
from keyring.util import properties
class UnencryptedKeyring(BaseKeyring):
"""
UnencryptedKeyring is for storing settings which aren't sensitive.
For convenience we use the same interface as a regular keyring.
"""
filename = 'keyring_public.cfg'
@properties.NonDataProperty
def file_path(self):
"""
The path to the file where passwords are stored. This property
may be overridden by the subclass or at the instance level.
"""
return os.path.join(keyring.util.platform.data_root(), self.filename)
def encrypt(self, password):
"""Directly return the password itself.
"""
return password
def decrypt(self, password_encrypted):
"""Directly return encrypted password.
"""
return password_encrypted
def get_password(self, service, username):
"""Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path, encoding='utf-8')
# fetch the password
try:
password = config.get(service, username)
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
def set_password(self, service, username, password):
"""Write the password in the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# ensure the file exists
self._ensure_file_path()
# load the keyring from the disk
config = configparser.RawConfigParser()
config.read(self.file_path)
# update the keyring with the password
if not config.has_section(service):
config.add_section(service)
config.set(service, username, password)
# save the keyring back to the file
config_file = codecs.open(self.file_path, 'w', 'utf-8')
try:
config.write(config_file)
finally:
config_file.close()
def supported(self):
"""Applicable for all platforms, but do not recommend.
"""
return 0
config_keyring = UnencryptedKeyring()
|
<commit_before><commit_msg>Add an unencrypted file based backend.<commit_after>import os
import codecs
import keyring.util.platform
from keyring.py27compat import configparser
from keyring.backends.file import BaseKeyring
from keyring.core import load_keyring
from keyring.util.escape import escape as escape_for_ini
from keyring.util import properties
class UnencryptedKeyring(BaseKeyring):
"""
UnencryptedKeyring is for storing settings which aren't sensitive.
For convenience we use the same interface as a regular keyring.
"""
filename = 'keyring_public.cfg'
@properties.NonDataProperty
def file_path(self):
"""
The path to the file where passwords are stored. This property
may be overridden by the subclass or at the instance level.
"""
return os.path.join(keyring.util.platform.data_root(), self.filename)
def encrypt(self, password):
"""Directly return the password itself.
"""
return password
def decrypt(self, password_encrypted):
"""Directly return encrypted password.
"""
return password_encrypted
def get_password(self, service, username):
"""Read the password from the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path, encoding='utf-8')
# fetch the password
try:
password = config.get(service, username)
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password
def set_password(self, service, username, password):
"""Write the password in the file.
"""
service = escape_for_ini(service)
username = escape_for_ini(username)
# ensure the file exists
self._ensure_file_path()
# load the keyring from the disk
config = configparser.RawConfigParser()
config.read(self.file_path)
# update the keyring with the password
if not config.has_section(service):
config.add_section(service)
config.set(service, username, password)
# save the keyring back to the file
config_file = codecs.open(self.file_path, 'w', 'utf-8')
try:
config.write(config_file)
finally:
config_file.close()
def supported(self):
"""Applicable for all platforms, but do not recommend.
"""
return 0
config_keyring = UnencryptedKeyring()
|
|
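Stripped of the keyring plumbing (`escape_for_ini`, `data_root()`, `_ensure_file_path`), the backend's get/set pair reduces to plain `RawConfigParser` storage. A self-contained sketch of that round trip; the file name and the `djset`/`DEBUG` keys are illustrative:

```python
import configparser
import os

PATH = 'keyring_public.cfg'  # illustrative; the backend derives this from data_root()

def set_value(service, username, value):
    config = configparser.RawConfigParser()
    config.read(PATH)
    if not config.has_section(service):
        config.add_section(service)
    config.set(service, username, value)
    with open(PATH, 'w', encoding='utf-8') as fh:
        config.write(fh)

def get_value(service, username):
    config = configparser.RawConfigParser()
    if os.path.exists(PATH):
        config.read(PATH, encoding='utf-8')
    try:
        return config.get(service, username)
    except (configparser.NoOptionError, configparser.NoSectionError):
        return None

set_value('djset', 'DEBUG', 'True')
print(get_value('djset', 'DEBUG'))  # 'True'
```

In the keyring API of that era, returning 0 from `supported()` meant "works, but not preferred", so encrypted system backends still won by default.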
74e46d577bab048a473862b070d8abfb1db00ea1
|
workup/migrations/0005_auto_20160826_0620.py
|
workup/migrations/0005_auto_20160826_0620.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import workup.validators
class Migration(migrations.Migration):
dependencies = [
('workup', '0004_auto_20160328_1425'),
]
operations = [
migrations.AlterField(
model_name='historicalworkup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='historicalworkup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='historicalworkup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='historicalworkup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
migrations.AlterField(
model_name='workup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='workup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='workup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='workup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='workup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='workup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
]
|
Add migration for updates to workup (smallintegerfield -> charfield to fix vitals thing).
|
Add migration for updates to workup (smallintegerfield -> charfield to fix vitals thing).
|
Python
|
mit
|
SaturdayNeighborhoodHealthClinic/clintools,SaturdayNeighborhoodHealthClinic/clintools,SaturdayNeighborhoodHealthClinic/clintools
|
Add migration for updates to workup (smallintegerfield -> charfield to fix vitals thing).
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import workup.validators
class Migration(migrations.Migration):
dependencies = [
('workup', '0004_auto_20160328_1425'),
]
operations = [
migrations.AlterField(
model_name='historicalworkup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='historicalworkup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='historicalworkup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='historicalworkup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
migrations.AlterField(
model_name='workup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='workup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='workup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='workup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='workup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='workup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
]
|
<commit_before><commit_msg>Add migration for updates to workup (smallintegerfield -> charfield to fix vitals thing).<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import workup.validators
class Migration(migrations.Migration):
dependencies = [
('workup', '0004_auto_20160328_1425'),
]
operations = [
migrations.AlterField(
model_name='historicalworkup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='historicalworkup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='historicalworkup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='historicalworkup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
migrations.AlterField(
model_name='workup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='workup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='workup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='workup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='workup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='workup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
]
|
Add migration for updates to workup (smallintegerfield -> charfield to fix vitals thing).
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import workup.validators
class Migration(migrations.Migration):
dependencies = [
('workup', '0004_auto_20160328_1425'),
]
operations = [
migrations.AlterField(
model_name='historicalworkup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='historicalworkup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='historicalworkup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='historicalworkup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
migrations.AlterField(
model_name='workup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='workup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='workup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='workup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='workup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='workup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
]
|
<commit_before><commit_msg>Add migration for updates to workup (smallintegerfield -> charfield to fix vitals thing).<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import workup.validators
class Migration(migrations.Migration):
dependencies = [
('workup', '0004_auto_20160328_1425'),
]
operations = [
migrations.AlterField(
model_name='historicalworkup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='historicalworkup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='historicalworkup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='historicalworkup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='historicalworkup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
migrations.AlterField(
model_name='workup',
name='bp',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_bp]),
),
migrations.AlterField(
model_name='workup',
name='height',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_height]),
),
migrations.AlterField(
model_name='workup',
name='hr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_hr]),
),
migrations.AlterField(
model_name='workup',
name='rr',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_rr]),
),
migrations.AlterField(
model_name='workup',
name='t',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_t]),
),
migrations.AlterField(
model_name='workup',
name='weight',
field=models.CharField(blank=True, max_length=12, null=True, validators=[workup.validators.validate_weight]),
),
]
|
|
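Editor's note: the migration above delegates all validation to functions in workup.validators, which this record does not include. Below is a minimal sketch of what one such validator could look like, assuming a "systolic/diastolic" string format for blood pressure; the real rules may differ.
import re
from django.core.exceptions import ValidationError
def validate_bp(value):
    # Hypothetical rule: accept strings like '120/80'; the actual
    # workup.validators.validate_bp is not shown in this record.
    if not re.match(r'^\d{2,3}/\d{2,3}$', value):
        raise ValidationError('%s is not a valid blood pressure reading.' % value)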
51c589491257b870ce70bac66c358fac77b463d3
|
scripts/trectransform.py
|
scripts/trectransform.py
|
"""
Transform janky trec topics into lucene4ir topics.
"""
import argparse
import sys
def transform(data: str) -> str:
"""
:param data:
:return:
"""
topics = ''
num = None
for n, line in enumerate(data.split('\n')):
if '<num>' in line:
num = line.split()[-1]
elif '<title>' in line and num is not None:
title = line.replace('<title>', '').strip()
topics += '{} {}\n'.format(num, title)
num = None
elif '<title>' in line and num is None:
print('<title> tag appeared before a <num> tag on line {}'.format(n))
sys.exit(1)
return topics
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Transform TREC topics->lucene4ir topics.')
argparser.add_argument('-i', '--input', help='Input file', type=argparse.FileType('r'),
default=sys.stdin)
argparser.add_argument('-o', '--output', help='Output file', type=argparse.FileType('w'),
default=sys.stdout)
args = argparser.parse_args()
args.output.write(transform(args.input.read()))
|
Add script for transforming trec topic files to lucene4ir topic files.
|
Add script for transforming trec topic files to lucene4ir topic files.
|
Python
|
apache-2.0
|
leifos/lucene4ir,leifos/lucene4ir,lucene4ir/lucene4ir,lucene4ir/lucene4ir,leifos/lucene4ir,lucene4ir/lucene4ir
|
Add script for transforming trec topic files to lucene4ir topic files.
|
"""
Transform janky trec topics into lucene4ir topics.
"""
import argparse
import sys
def transform(data: str) -> str:
"""
:param data:
:return:
"""
topics = ''
num = None
for n, line in enumerate(data.split('\n')):
if '<num>' in line:
num = line.split()[-1]
elif '<title>' in line and num is not None:
title = line.replace('<title>', '').strip()
topics += '{} {}\n'.format(num, title)
num = None
elif '<title>' in line and num is None:
print('<title> tag appeared before a <num> tag on line {}'.format(n))
sys.exit(1)
return topics
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Transform TREC topics->lucene4ir topics.')
argparser.add_argument('-i', '--input', help='Input file', type=argparse.FileType('r'),
default=sys.stdin)
argparser.add_argument('-o', '--output', help='Output file', type=argparse.FileType('w'),
default=sys.stdout)
args = argparser.parse_args()
args.output.write(transform(args.input.read()))
|
<commit_before><commit_msg>Add script for transforming trec topic files to lucene4ir topic files.<commit_after>
|
"""
Transform janky trec topics into lucene4ir topics.
"""
import argparse
import sys
def transform(data: str) -> str:
"""
:param data:
:return:
"""
topics = ''
num = None
for n, line in enumerate(data.split('\n')):
if '<num>' in line:
num = line.split()[-1]
elif '<title>' in line and num is not None:
title = line.replace('<title>', '').strip()
topics += '{} {}\n'.format(num, title)
num = None
elif '<title>' in line and num is None:
print('<title> tag appeared before a <num> tag on line {}'.format(n))
sys.exit(1)
return topics
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Transform TREC topics->lucene4ir topics.')
argparser.add_argument('-i', '--input', help='Input file', type=argparse.FileType('r'),
default=sys.stdin)
argparser.add_argument('-o', '--output', help='Output file', type=argparse.FileType('w'),
default=sys.stdout)
args = argparser.parse_args()
args.output.write(transform(args.input.read()))
|
Add script for transforming trec topic files to lucene4ir topic files."""
Transform janky trec topics into lucene4ir topics.
"""
import argparse
import sys
def transform(data: str) -> str:
"""
:param data:
:return:
"""
topics = ''
num = None
for n, line in enumerate(data.split('\n')):
if '<num>' in line:
num = line.split()[-1]
elif '<title>' in line and num is not None:
title = line.replace('<title>', '').strip()
topics += '{} {}\n'.format(num, title)
num = None
elif '<title>' in line and num is None:
print('<title> tag appeared before a <num> tag on line {}'.format(n))
sys.exit(1)
return topics
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Transform TREC topics->lucene4ir topics.')
argparser.add_argument('-i', '--input', help='Input file', type=argparse.FileType('r'),
default=sys.stdin)
argparser.add_argument('-o', '--output', help='Output file', type=argparse.FileType('w'),
default=sys.stdout)
args = argparser.parse_args()
args.output.write(transform(args.input.read()))
|
<commit_before><commit_msg>Add script for transforming trec topic files to lucene4ir topic files.<commit_after>"""
Transform janky trec topics into lucene4ir topics.
"""
import argparse
import sys
def transform(data: str) -> str:
"""
:param data:
:return:
"""
topics = ''
num = None
for n, line in enumerate(data.split('\n')):
if '<num>' in line:
num = line.split()[-1]
elif '<title>' in line and num is not None:
title = line.replace('<title>', '').strip()
topics += '{} {}\n'.format(num, title)
num = None
elif '<title>' in line and num is None:
print('<title> tag appeared before a <num> tag on line {}'.format(n))
sys.exit(1)
return topics
if __name__ == '__main__':
argparser = argparse.ArgumentParser(description='Transform TREC topics->lucene4ir topics.')
argparser.add_argument('-i', '--input', help='Input file', type=argparse.FileType('r'),
default=sys.stdin)
argparser.add_argument('-o', '--output', help='Output file', type=argparse.FileType('w'),
default=sys.stdout)
args = argparser.parse_args()
args.output.write(transform(args.input.read()))
|
|
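A hedged usage sketch for the script above: the TREC fragment below is invented, and the import assumes trectransform.py is on the Python path.
from trectransform import transform
sample = (
    "<top>\n"
    "<num> Number: 301\n"
    "<title> International Organized Crime\n"
    "</top>\n"
)
# transform() keeps the last whitespace-separated token of the <num> line,
# so this prints: 301 International Organized Crime
print(transform(sample))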
37ee7ede62cee88a925b2f1ed99a94b445ce6a88
|
lowfat/management/commands/fixfundingsource.py
|
lowfat/management/commands/fixfundingsource.py
|
import pandas as pd
from django.core.management.base import BaseCommand
from lowfat.models import Fund, Expense
class Command(BaseCommand):
help = "Fix funding source"
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='funds.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
funds = Fund.objects.filter(
claimant__forenames=line["forname_s"],
claimant__surname=line["surname"],
name=line["event_title"]
)
for fund in funds:
fund.funds_from_default = line["new_funding_source_subcategory"]
print("Changing {}...".format(fund))
fund.save()
print("Changed {}...".format(fund))
for expense in Expense.objects.filter(fund=fund):
expense.funds_from = line["new_funding_source_subcategory"]
print("Changing {}...".format(expense))
expense.save()
print("Changed {}...".format(expense))
except BaseException as exception:
print("Error: {}\n\t{}".format(exception, line))
|
Add script to fix funding source
|
Add script to fix funding source
|
Python
|
bsd-3-clause
|
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
|
Add script to fix funding source
|
import pandas as pd
from django.core.management.base import BaseCommand
from lowfat.models import Fund, Expense
class Command(BaseCommand):
help = "Fix funding source"
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='funds.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
funds = Fund.objects.filter(
claimant__forenames=line["forname_s"],
claimant__surname=line["surname"],
name=line["event_title"]
)
for fund in funds:
fund.funds_from_default = line["new_funding_source_subcategory"]
print("Changing {}...".format(fund))
fund.save()
print("Changed {}...".format(fund))
for expense in Expense.objects.filter(fund=fund):
expense.funds_from = line["new_funding_source_subcategory"]
print("Changing {}...".format(expense))
expense.save()
print("Changed {}...".format(expense))
except BaseException as exception:
print("Error: {}\n\t{}".format(exception, line))
|
<commit_before><commit_msg>Add script to fix funding source<commit_after>
|
import pandas as pd
from django.core.management.base import BaseCommand
from lowfat.models import Fund, Expense
class Command(BaseCommand):
help = "Fix funding source"
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='funds.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
funds = Fund.objects.filter(
claimant__forenames=line["forname_s"],
claimant__surname=line["surname"],
name=line["event_title"]
)
for fund in funds:
fund.funds_from_default = line["new_funding_source_subcategory"]
print("Changing {}...".format(fund))
fund.save()
print("Changed {}...".format(fund))
for expense in Expense.objects.filter(fund=fund):
expense.funds_from = line["new_funding_source_subcategory"]
print("Changing {}...".format(expense))
expense.save()
print("Changed {}...".format(expense))
except BaseException as exception:
print("Error: {}\n\t{}".format(exception, line))
|
Add script to fix funding sourceimport pandas as pd
from django.core.management.base import BaseCommand
from lowfat.models import Fund, Expense
class Command(BaseCommand):
help = "Fix funding source"
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='funds.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
funds = Fund.objects.filter(
claimant__forenames=line["forname_s"],
claimant__surname=line["surname"],
name=line["event_title"]
)
for fund in funds:
fund.funds_from_default = line["new_funding_source_subcategory"]
print("Changing {}...".format(fund))
fund.save()
print("Changed {}...".format(fund))
for expense in Expense.objects.filter(fund=fund):
expense.funds_from = line["new_funding_source_subcategory"]
print("Changing {}...".format(expense))
expense.save()
print("Changed {}...".format(expense))
except BaseException as exception:
print("Error: {}\n\t{}".format(exception, line))
|
<commit_before><commit_msg>Add script to fix funding source<commit_after>import pandas as pd
from django.core.management.base import BaseCommand
from lowfat.models import Fund, Expense
class Command(BaseCommand):
help = "Fix funding source"
def add_arguments(self, parser):
parser.add_argument('csv', nargs='?', default='funds.csv')
# pylint: disable=too-many-branches,too-many-locals
def handle(self, *args, **options):
data = pd.read_csv(options['csv'])
for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable
try:
funds = Fund.objects.filter(
claimant__forenames=line["forname_s"],
claimant__surname=line["surname"],
name=line["event_title"]
)
for fund in funds:
fund.funds_from_default = line["new_funding_source_subcategory"]
print("Changing {}...".format(fund))
fund.save()
print("Changed {}...".format(fund))
for expense in Expense.objects.filter(fund=fund):
expense.funds_from = line["new_funding_source_subcategory"]
print("Changing {}...".format(expense))
expense.save()
print("Changed {}...".format(expense))
except BaseException as exception:
print("Error: {}\n\t{}".format(exception, line))
|
|
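A hedged sketch of the CSV the management command above expects: the column names come from the code, but the row values are invented.
import pandas as pd
pd.DataFrame([{
    "forname_s": "Ada",
    "surname": "Lovelace",
    "event_title": "Collaborations Workshop",
    "new_funding_source_subcategory": "Fellowship 2017",
}]).to_csv("funds.csv", index=False)
# then, hypothetically: python manage.py fixfundingsource funds.csv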
fe2f6e4b326ac0c441f561b8c9185cdad2c738fc
|
streamer/scripts/id_munging_test.py
|
streamer/scripts/id_munging_test.py
|
#!/usr/bin/env python
import json
import sys
count = 0
mismatches = 0
# Validate incoming twitter object id, id_str match and are expected types
# stdin is JSONL tweet objects (one fully-formed tweet per line of text)
try:
for line in sys.stdin:
try:
t = json.loads(line.strip())
except Exception as ex:
print("parsing error %s", ex)
continue
id = t["id"]
id_str = t["id_str"]
assert isinstance(id, int)
assert isinstance(id_str, str)
count += 1
if str(id) != id_str:
mismatches += 1
print(f"({mismatches}/{count}/{mismatches/count*100.0}) {id} != {id_str}")
except KeyboardInterrupt:
print()
finally:
error_percent = mismatches / count * 100.0 if count else 0.0
print(f"count={count}, mismatches={mismatches}, percent_mismatch={error_percent}")
|
Add script to validate incoming tweet IDs
|
Add script to validate incoming tweet IDs
|
Python
|
mit
|
inactivist/twitter-streamer,inactivist/twitter-streamer
|
Add script to validate incoming tweet IDs
|
#!/usr/bin/env python
import json
import sys
count = 0
mismatches = 0
# Validate incoming twitter object id, id_str match and are expected types
# stdin is JSONL tweet objects (one fully-formed tweet per line of text)
try:
for line in sys.stdin:
try:
t = json.loads(line.strip())
except Exception as ex:
print("parsing error %s", ex)
continue
id = t["id"]
id_str = t["id_str"]
assert isinstance(id, int)
assert isinstance(id_str, str)
count += 1
if str(id) != id_str:
mismatches += 1
print(f"({mismatches}/{count}/{mismatches/count*100.0}) {id} != {id_str}")
except KeyboardInterrupt:
print()
finally:
error_percent = mismatches / count * 100.0 if count else 0.0
print(f"count={count}, mismatches={mismatches}, percent_mismatch={error_percent}")
|
<commit_before><commit_msg>Add script to validate incoming tweet IDs<commit_after>
|
#!/usr/bin/env python
import json
import sys
count = 0
mismatches = 0
# Validate incoming twitter object id, id_str match and are expected types
# stdin is JSONL tweet objects (one fully-formed tweet per line of text)
try:
for line in sys.stdin:
try:
t = json.loads(line.strip())
except Exception as ex:
print("parsing error %s", ex)
continue
id = t["id"]
id_str = t["id_str"]
assert isinstance(id, int)
assert isinstance(id_str, str)
count += 1
if str(id) != id_str:
mismatches += 1
print(f"({mismatches}/{count}/{mismatches/count*100.0}) {id} != {id_str}")
except KeyboardInterrupt:
print()
finally:
error_percent = mismatches / count * 100.0 if count else 0.0
print(f"count={count}, mismatches={mismatches}, percent_mismatch={error_percent}")
|
Add script to validate incoming tweet IDs#!/usr/bin/env python
import json
import sys
count = 0
mismatches = 0
# Validate incoming twitter object id, id_str match and are expected types
# stdin is JSONL tweet objects (one fully-formed tweet per line of text)
try:
for line in sys.stdin:
try:
t = json.loads(line.strip())
except Exception as ex:
print("parsing error %s", ex)
continue
id = t["id"]
id_str = t["id_str"]
assert isinstance(id, int)
assert isinstance(id_str, str)
count += 1
if str(id) != id_str:
mismatches += 1
print(f"({mismatches}/{count}/{mismatches/count*100.0}) {id} != {id_str}")
except KeyboardInterrupt:
print()
finally:
error_percent = mismatches / count * 100.0 if count else 0.0
print(f"count={count}, mismatches={mismatches}, percent_mismatch={error_percent}")
|
<commit_before><commit_msg>Add script to validate incoming tweet IDs<commit_after>#!/usr/bin/env python
import json
import sys
count = 0
mismatches = 0
# Validate incoming twitter object id, id_str match and are expected types
# stdin is JSONL tweet objects (one fully-formed tweet per line of text)
try:
for line in sys.stdin:
try:
t = json.loads(line.strip())
except Exception as ex:
print("parsing error %s", ex)
continue
id = t["id"]
id_str = t["id_str"]
assert isinstance(id, int)
assert isinstance(id_str, str)
count += 1
if str(id) != id_str:
mismatches += 1
print(f"({mismatches}/{count}/{mismatches/count*100.0}) {id} != {id_str}")
except KeyboardInterrupt:
print()
finally:
error_percent = mismatches / count * 100.0 if count else 0.0
print(f"count={count}, mismatches={mismatches}, percent_mismatch={error_percent}")
|
|
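A hedged way to exercise the script above: write one well-formed JSONL tweet to its stdin and check the summary line. The script path matches this record; the tweet values are invented.
import json
import subprocess
import sys
line = json.dumps({"id": 1354143047324299264,
                   "id_str": "1354143047324299264"})
proc = subprocess.run(
    [sys.executable, "streamer/scripts/id_munging_test.py"],
    input=line + "\n", capture_output=True, text=True,
)
print(proc.stdout)  # expect: count=1, mismatches=0, percent_mismatch=0.0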
9d79b33abc80d10d525dccae7d2c5419888d96c0
|
tests/test_config.py
|
tests/test_config.py
|
import unittest
class TestConfigMerge(unittest.TestCase):
def _call_fut(self, destcfg, srccfg):
from tilequeue.config import merge_cfg
return merge_cfg(destcfg, srccfg)
def test_both_empty(self):
self.assertEqual({}, self._call_fut({}, {}))
def test_complementary_scalar(self):
src = dict(foo='bar')
dest = dict(quux='morx')
self.assertEqual(dict(foo='bar', quux='morx'),
self._call_fut(dest, src))
def test_nested_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(quux=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz'),
quux=dict(morx='fleem')),
self._call_fut(dest, src))
def test_merge_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz', morx='fleem')),
self._call_fut(dest, src))
def test_merge_override(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(bar='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz')),
self._call_fut(dest, src))
|
Add tests for config dict merging
|
Add tests for config dict merging
|
Python
|
mit
|
tilezen/tilequeue,mapzen/tilequeue
|
Add tests for config dict merging
|
import unittest
class TestConfigMerge(unittest.TestCase):
def _call_fut(self, destcfg, srccfg):
from tilequeue.config import merge_cfg
return merge_cfg(destcfg, srccfg)
def test_both_empty(self):
self.assertEqual({}, self._call_fut({}, {}))
def test_complementary_scalar(self):
src = dict(foo='bar')
dest = dict(quux='morx')
self.assertEqual(dict(foo='bar', quux='morx'),
self._call_fut(dest, src))
def test_nested_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(quux=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz'),
quux=dict(morx='fleem')),
self._call_fut(dest, src))
def test_merge_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz', morx='fleem')),
self._call_fut(dest, src))
def test_merge_override(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(bar='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz')),
self._call_fut(dest, src))
|
<commit_before><commit_msg>Add tests for config dict merging<commit_after>
|
import unittest
class TestConfigMerge(unittest.TestCase):
def _call_fut(self, destcfg, srccfg):
from tilequeue.config import merge_cfg
return merge_cfg(destcfg, srccfg)
def test_both_empty(self):
self.assertEqual({}, self._call_fut({}, {}))
def test_complementary_scalar(self):
src = dict(foo='bar')
dest = dict(quux='morx')
self.assertEqual(dict(foo='bar', quux='morx'),
self._call_fut(dest, src))
def test_nested_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(quux=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz'),
quux=dict(morx='fleem')),
self._call_fut(dest, src))
def test_merge_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz', morx='fleem')),
self._call_fut(dest, src))
def test_merge_override(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(bar='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz')),
self._call_fut(dest, src))
|
Add tests for config dict mergingimport unittest
class TestConfigMerge(unittest.TestCase):
def _call_fut(self, destcfg, srccfg):
from tilequeue.config import merge_cfg
return merge_cfg(destcfg, srccfg)
def test_both_empty(self):
self.assertEqual({}, self._call_fut({}, {}))
def test_complementary_scalar(self):
src = dict(foo='bar')
dest = dict(quux='morx')
self.assertEqual(dict(foo='bar', quux='morx'),
self._call_fut(dest, src))
def test_nested_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(quux=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz'),
quux=dict(morx='fleem')),
self._call_fut(dest, src))
def test_merge_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz', morx='fleem')),
self._call_fut(dest, src))
def test_merge_override(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(bar='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz')),
self._call_fut(dest, src))
|
<commit_before><commit_msg>Add tests for config dict merging<commit_after>import unittest
class TestConfigMerge(unittest.TestCase):
def _call_fut(self, destcfg, srccfg):
from tilequeue.config import merge_cfg
return merge_cfg(destcfg, srccfg)
def test_both_empty(self):
self.assertEqual({}, self._call_fut({}, {}))
def test_complementary_scalar(self):
src = dict(foo='bar')
dest = dict(quux='morx')
self.assertEqual(dict(foo='bar', quux='morx'),
self._call_fut(dest, src))
def test_nested_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(quux=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz'),
quux=dict(morx='fleem')),
self._call_fut(dest, src))
def test_merge_complementary(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(morx='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz', morx='fleem')),
self._call_fut(dest, src))
def test_merge_override(self):
src = dict(foo=dict(bar='baz'))
dest = dict(foo=dict(bar='fleem'))
self.assertEqual(
dict(foo=dict(bar='baz')),
self._call_fut(dest, src))
|
|
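tilequeue.config.merge_cfg itself is not part of this record; the recursive sketch below is one implementation that would pass all four tests above (nested dicts merged key by key, src winning on conflicts).
def merge_cfg(destcfg, srccfg):
    for key, val in srccfg.items():
        if isinstance(val, dict) and isinstance(destcfg.get(key), dict):
            merge_cfg(destcfg[key], val)   # descend into nested sections
        else:
            destcfg[key] = val             # scalar or type mismatch: src wins
    return destcfg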
734775524e14a8ae3997933afba64a4ac6a3cd47
|
tests/basics/class_super_closure.py
|
tests/basics/class_super_closure.py
|
# test that no-arg super() works when self is closed over
class A:
def __init__(self):
self.val = 4
def foo(self):
# we access a member of self to check that self is correct
return list(range(self.val))
class B(A):
def foo(self):
# self is closed over because it's referenced in the list comprehension
# and then super() must detect this and load from the closure cell
return [self.bar(i) for i in super().foo()]
def bar(self, x):
return 2 * x
print(A().foo())
print(B().foo())
|
Add test for super() when self is closed over.
|
tests/basics: Add test for super() when self is closed over.
|
Python
|
mit
|
toolmacher/micropython,HenrikSolver/micropython,henriknelson/micropython,oopy/micropython,HenrikSolver/micropython,adafruit/micropython,tralamazza/micropython,alex-robbins/micropython,cwyark/micropython,toolmacher/micropython,tralamazza/micropython,deshipu/micropython,torwag/micropython,swegener/micropython,lowRISC/micropython,TDAbboud/micropython,oopy/micropython,pozetroninc/micropython,tobbad/micropython,SHA2017-badge/micropython-esp32,pramasoul/micropython,selste/micropython,blazewicz/micropython,MrSurly/micropython,chrisdearman/micropython,PappaPeppar/micropython,cwyark/micropython,chrisdearman/micropython,selste/micropython,dmazzella/micropython,alex-robbins/micropython,bvernoux/micropython,Peetz0r/micropython-esp32,lowRISC/micropython,adafruit/circuitpython,micropython/micropython-esp32,HenrikSolver/micropython,deshipu/micropython,AriZuu/micropython,trezor/micropython,toolmacher/micropython,PappaPeppar/micropython,MrSurly/micropython,trezor/micropython,pramasoul/micropython,infinnovation/micropython,hiway/micropython,Timmenem/micropython,MrSurly/micropython-esp32,trezor/micropython,bvernoux/micropython,MrSurly/micropython-esp32,bvernoux/micropython,ryannathans/micropython,pramasoul/micropython,blazewicz/micropython,TDAbboud/micropython,oopy/micropython,adafruit/circuitpython,chrisdearman/micropython,hiway/micropython,MrSurly/micropython-esp32,micropython/micropython-esp32,dmazzella/micropython,tobbad/micropython,adafruit/micropython,AriZuu/micropython,adafruit/micropython,AriZuu/micropython,chrisdearman/micropython,deshipu/micropython,selste/micropython,puuu/micropython,MrSurly/micropython-esp32,selste/micropython,lowRISC/micropython,toolmacher/micropython,micropython/micropython-esp32,kerneltask/micropython,alex-robbins/micropython,trezor/micropython,torwag/micropython,infinnovation/micropython,pfalcon/micropython,infinnovation/micropython,puuu/micropython,dmazzella/micropython,henriknelson/micropython,hiway/micropython,oopy/micropython,pramasoul/micropython,adafruit/circuitpython,pfalcon/micropython,cwyark/micropython,Timmenem/micropython,henriknelson/micropython,tobbad/micropython,MrSurly/micropython-esp32,kerneltask/micropython,PappaPeppar/micropython,TDAbboud/micropython,pozetroninc/micropython,puuu/micropython,lowRISC/micropython,kerneltask/micropython,Peetz0r/micropython-esp32,adafruit/micropython,ryannathans/micropython,pfalcon/micropython,infinnovation/micropython,SHA2017-badge/micropython-esp32,puuu/micropython,pozetroninc/micropython,infinnovation/micropython,deshipu/micropython,cwyark/micropython,deshipu/micropython,chrisdearman/micropython,blazewicz/micropython,bvernoux/micropython,Timmenem/micropython,Timmenem/micropython,blazewicz/micropython,AriZuu/micropython,MrSurly/micropython,HenrikSolver/micropython,henriknelson/micropython,henriknelson/micropython,swegener/micropython,alex-robbins/micropython,ryannathans/micropython,pfalcon/micropython,oopy/micropython,alex-robbins/micropython,selste/micropython,torwag/micropython,Peetz0r/micropython-esp32,PappaPeppar/micropython,pfalcon/micropython,PappaPeppar/micropython,SHA2017-badge/micropython-esp32,hiway/micropython,swegener/micropython,SHA2017-badge/micropython-esp32,TDAbboud/micropython,MrSurly/micropython,kerneltask/micropython,torwag/micropython,adafruit/circuitpython,dmazzella/micropython,ryannathans/micropython,tralamazza/micropython,bvernoux/micropython,swegener/micropython,ryannathans/micropython,SHA2017-badge/micropython-esp32,tobbad/micropython,HenrikSolver/micropython,tralamazza/micropython,pramasoul/micropython,
cwyark/micropython,adafruit/micropython,blazewicz/micropython,pozetroninc/micropython,Peetz0r/micropython-esp32,Timmenem/micropython,lowRISC/micropython,micropython/micropython-esp32,Peetz0r/micropython-esp32,tobbad/micropython,swegener/micropython,puuu/micropython,kerneltask/micropython,trezor/micropython,micropython/micropython-esp32,adafruit/circuitpython,hiway/micropython,adafruit/circuitpython,pozetroninc/micropython,toolmacher/micropython,torwag/micropython,AriZuu/micropython,MrSurly/micropython,TDAbboud/micropython
|
tests/basics: Add test for super() when self is closed over.
|
# test that no-arg super() works when self is closed over
class A:
def __init__(self):
self.val = 4
def foo(self):
# we access a member of self to check that self is correct
return list(range(self.val))
class B(A):
def foo(self):
# self is closed over because it's referenced in the list comprehension
# and then super() must detect this and load from the closure cell
return [self.bar(i) for i in super().foo()]
def bar(self, x):
return 2 * x
print(A().foo())
print(B().foo())
|
<commit_before><commit_msg>tests/basics: Add test for super() when self is closed over.<commit_after>
|
# test that no-arg super() works when self is closed over
class A:
def __init__(self):
self.val = 4
def foo(self):
# we access a member of self to check that self is correct
return list(range(self.val))
class B(A):
def foo(self):
# self is closed over because it's referenced in the list comprehension
# and then super() must detect this and load from the closure cell
return [self.bar(i) for i in super().foo()]
def bar(self, x):
return 2 * x
print(A().foo())
print(B().foo())
|
tests/basics: Add test for super() when self is closed over.# test that no-arg super() works when self is closed over
class A:
def __init__(self):
self.val = 4
def foo(self):
# we access a member of self to check that self is correct
return list(range(self.val))
class B(A):
def foo(self):
# self is closed over because it's referenced in the list comprehension
# and then super() must detect this and load from the closure cell
return [self.bar(i) for i in super().foo()]
def bar(self, x):
return 2 * x
print(A().foo())
print(B().foo())
|
<commit_before><commit_msg>tests/basics: Add test for super() when self is closed over.<commit_after># test that no-arg super() works when self is closed over
class A:
def __init__(self):
self.val = 4
def foo(self):
# we access a member of self to check that self is correct
return list(range(self.val))
class B(A):
def foo(self):
# self is closed over because it's referenced in the list comprehension
# and then super() must detect this and load from the closure cell
return [self.bar(i) for i in super().foo()]
def bar(self, x):
return 2 * x
print(A().foo())
print(B().foo())
|
|
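For reference, the expected output of the test above, worked through by hand: self.val is 4, so A().foo() is list(range(4)), and B.foo doubles each element via bar() inside the comprehension that closes over self.
expected_a = list(range(4))               # A().foo()
expected_b = [2 * x for x in expected_a]  # B().foo()
print(expected_a)  # [0, 1, 2, 3]
print(expected_b)  # [0, 2, 4, 6]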
ed491383b183d9001872591554d218603983ae35
|
tests/test_reader.py
|
tests/test_reader.py
|
from catsup.reader import get_reader, markdown_reader, txt_reader
def test_reader_chooser():
assert get_reader("md") == markdown_reader
assert get_reader("markdown") == markdown_reader
assert get_reader("txt") == txt_reader
|
Add tests for ``get_reader`` func
|
Add tests for ``get_reader`` func
|
Python
|
mit
|
whtsky/catsup-docs-zh,whtsky/Catsup,whtsky/Catsup,whtsky/catsup-docs-zh
|
Add tests for ``get_reader`` func
|
from catsup.reader import get_reader, markdown_reader, txt_reader
def test_reader_chooser():
assert get_reader("md") == markdown_reader
assert get_reader("markdown") == markdown_reader
assert get_reader("txt") == txt_reader
|
<commit_before><commit_msg>Add tests for ``get_reader`` func<commit_after>
|
from catsup.reader import get_reader, markdown_reader, txt_reader
def test_reader_chooser():
assert get_reader("md") == markdown_reader
assert get_reader("markdown") == markdown_reader
assert get_reader("txt") == txt_reader
|
Add tests for ``get_reader`` funcfrom catsup.reader import get_reader, markdown_reader, txt_reader
def test_reader_chooser():
assert get_reader("md") == markdown_reader
assert get_reader("markdown") == markdown_reader
assert get_reader("txt") == txt_reader
|
<commit_before><commit_msg>Add tests for ``get_reader`` func<commit_after>from catsup.reader import get_reader, markdown_reader, txt_reader
def test_reader_chooser():
assert get_reader("md") == markdown_reader
assert get_reader("markdown") == markdown_reader
assert get_reader("txt") == txt_reader
|
|
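catsup's real get_reader is not shown in this record; below is one dispatch-table shape that would satisfy the three assertions, with placeholder readers standing in for catsup's own.
def markdown_reader(source):  # placeholder, not catsup's implementation
    raise NotImplementedError
def txt_reader(source):  # placeholder, not catsup's implementation
    raise NotImplementedError
_READERS = {"md": markdown_reader, "markdown": markdown_reader, "txt": txt_reader}
def get_reader(ext):
    return _READERS[ext]
assert get_reader("md") is get_reader("markdown") is markdown_reader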
5c308a46ae6863473db9cfe5f159e6fd70fe8691
|
profundity.py
|
profundity.py
|
#!/usr/bin/env python
# What an incredibly stupid application.
# I realize that it's released under the MIT License.
# But, seriously. Who cares?
import datetime
import json
import logging
import os
import sys
import BaseHTTPServer as http
def port_from_env():
return int(os.getenv('PROFOUND_PORT', '8080'))
def name_from_env():
return os.getenv('PROFOUND_NAME', 'Profundity (Xtream!)')
def configure_logger():
logging.basicConfig(format='%(asctime)s.%(msecs)03d #%(process)d - %(levelname)s %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level='INFO')
return logging.getLogger()
def run(port, name, logger):
logger.info('xtream profundity server starting.')
server = http.HTTPServer(('', port), Profundity.named_handler(name))
logging.info('serving "%s" on port %d', name, port)
try:
server.serve_forever()
except KeyboardInterrupt:
logger.info('Received Ctrl-C; exiting.')
logger.info('Done.')
class Profundity(http.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('X-Service-Name', self.NAME)
self.end_headers()
response = json.dumps({
'service': self.NAME,
'time': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'client': ':'.join(str(f) for f in self.client_address),
'request-path': self.path,
})
logging.getLogger().info('Responding: %s', response)
self.wfile.write(response)
@classmethod
def named_handler(cls, name):
class ProfundityHandler(cls):
NAME = name
return ProfundityHandler
def show_usage():
print >> sys.stderr, "Usage: %s\n\nEnvironment variables:" % __file__, \
"\n PROFOUND_PORT - port to listen on (8080)", \
"\n PROFOUND_NAME - name to use for identifying this excellent 'service' ('Profundity (Xtream!)')", \
"\n"
if __name__ == '__main__':
if len(sys.argv) > 1:
if '-h' in sys.argv[1:] or '--help' in sys.argv[1:]:
show_usage()
exit()
else:
show_usage()
exit(1)
run(port_from_env(), name_from_env(), configure_logger())
|
Add the most excellent web server ever conceived.
|
Add the most excellent web server ever conceived.
Use PROFOUND_PORT and PROFOUND_NAME to control the service port
and "name", respectively. It has sane defaults for each.
|
Python
|
mit
|
ethanrowe/docker-xtream-profundity,ethanrowe/docker-xtream-profundity
|
Add the most excellent web server ever conceived.
Use PROFOUND_PORT and PROFOUND_NAME to control the service port
and "name", respectively. It has sane defaults for each.
|
#!/usr/bin/env python
# What an incredibly stupid application.
# I realize that it's released under the MIT License.
# But, seriously. Who cares?
import datetime
import json
import logging
import os
import sys
import BaseHTTPServer as http
def port_from_env():
return int(os.getenv('PROFOUND_PORT', '8080'))
def name_from_env():
return os.getenv('PROFOUND_NAME', 'Profundity (Xtream!)')
def configure_logger():
logging.basicConfig(format='%(asctime)s.%(msecs)03d #%(process)d - %(levelname)s %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level='INFO')
return logging.getLogger()
def run(port, name, logger):
logger.info('xtream profundity server starting.')
server = http.HTTPServer(('', port), Profundity.named_handler(name))
logging.info('serving "%s" on port %d', name, port)
try:
server.serve_forever()
except KeyboardInterrupt:
logger.info('Received Ctrl-C; exiting.')
logger.info('Done.')
class Profundity(http.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('X-Service-Name', self.NAME)
self.end_headers()
response = json.dumps({
'service': self.NAME,
'time': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'client': ':'.join(str(f) for f in self.client_address),
'request-path': self.path,
})
logging.getLogger().info('Responding: %s', response)
self.wfile.write(response)
@classmethod
def named_handler(cls, name):
class ProfundityHandler(cls):
NAME = name
return ProfundityHandler
def show_usage():
print >> sys.stderr, "Usage: %s\n\nEnvironment variables:" % __file__, \
"\n PROFOUND_PORT - port to listen on (8080)", \
"\n PROFOUND_NAME - name to use for identifying this excellent 'service' ('Profundity (Xtream!)')", \
"\n"
if __name__ == '__main__':
if len(sys.argv) > 1:
if '-h' in sys.argv[1:] or '--help' in sys.argv[1:]:
show_usage()
exit()
else:
show_usage()
exit(1)
run(port_from_env(), name_from_env(), configure_logger())
|
<commit_before><commit_msg>Add the most excellent web server ever conceived.
Use PROFOUND_PORT and PROFOUND_NAME to control the service port
and "name", respectively. It has sane defaults for each.<commit_after>
|
#!/usr/bin/env python
# What an incredibly stupid application.
# I realize that it's released under the MIT License.
# But, seriously. Who cares?
import datetime
import json
import logging
import os
import sys
import BaseHTTPServer as http
def port_from_env():
return int(os.getenv('PROFOUND_PORT', '8080'))
def name_from_env():
return os.getenv('PROFOUND_NAME', 'Profundity (Xtream!)')
def configure_logger():
logging.basicConfig(format='%(asctime)s.%(msecs)03d #%(process)d - %(levelname)s %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level='INFO')
return logging.getLogger()
def run(port, name, logger):
logger.info('xtream profundity server starting.')
server = http.HTTPServer(('', port), Profundity.named_handler(name))
logging.info('serving "%s" on port %d', name, port)
try:
server.serve_forever()
except KeyboardInterrupt:
logger.info('Received Ctrl-C; exiting.')
logger.info('Done.')
class Profundity(http.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('X-Service-Name', self.NAME)
self.end_headers()
response = json.dumps({
'service': self.NAME,
'time': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'client': ':'.join(str(f) for f in self.client_address),
'request-path': self.path,
})
logging.getLogger().info('Responding: %s', response)
self.wfile.write(response)
@classmethod
def named_handler(cls, name):
class ProfundityHandler(cls):
NAME = name
return ProfundityHandler
def show_usage():
print >> sys.stderr, "Usage: %s\n\nEnvironment variables:" % __file__, \
"\n PROFOUND_PORT - port to listen on (8080)", \
"\n PROFOUND_NAME - name to use for identifying this excellent 'service' ('Profundity (Xtream!)')", \
"\n"
if __name__ == '__main__':
if len(sys.argv) > 1:
if '-h' in sys.argv[1:] or '--help' in sys.argv[1:]:
show_usage()
exit()
else:
show_usage()
exit(1)
run(port_from_env(), name_from_env(), configure_logger())
|
Add the most excellent web server ever conceived.
Use PROFOUND_PORT and PROFOUND_NAME to control the service port
and "name", respectively. It has sane defaults for each.#!/usr/bin/env python
# What an incredibly stupid application.
# I realize that it's released under the MIT License.
# But, seriously. Who cares?
import datetime
import json
import logging
import os
import sys
import BaseHTTPServer as http
def port_from_env():
return int(os.getenv('PROFOUND_PORT', '8080'))
def name_from_env():
return os.getenv('PROFOUND_NAME', 'Profundity (Xtream!)')
def configure_logger():
logging.basicConfig(format='%(asctime)s.%(msecs)03d #%(process)d - %(levelname)s %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level='INFO')
return logging.getLogger()
def run(port, name, logger):
logger.info('xtream profundity server starting.')
server = http.HTTPServer(('', port), Profundity.named_handler(name))
logging.info('serving "%s" on port %d', name, port)
try:
server.serve_forever()
except KeyboardInterrupt:
logger.info('Received Ctrl-C; exiting.')
logger.info('Done.')
class Profundity(http.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('X-Service-Name', self.NAME)
self.end_headers()
response = json.dumps({
'service': self.NAME,
'time': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'client': ':'.join(str(f) for f in self.client_address),
'request-path': self.path,
})
logging.getLogger().info('Responding: %s', response)
self.wfile.write(response)
@classmethod
def named_handler(cls, name):
class ProfundityHandler(cls):
NAME = name
return ProfundityHandler
def show_usage():
print >> sys.stderr, "Usage: %s\n\nEnvironment variables:" % __file__, \
"\n PROFOUND_PORT - port to listen on (8080)", \
"\n PROFOUND_NAME - name to use for identifying this excellent 'service' ('Profundity (Xtream!)')", \
"\n"
if __name__ == '__main__':
if len(sys.argv) > 1:
if '-h' in sys.argv[1:] or '--help' in sys.argv[1:]:
show_usage()
exit()
else:
show_usage()
exit(1)
run(port_from_env(), name_from_env(), configure_logger())
|
<commit_before><commit_msg>Add the most excellent web server ever conceived.
Use PROFOUND_PORT and PROFOUND_NAME to control the service port
and "name", respectively. It has sane defaults for each.<commit_after>#!/usr/bin/env python
# What an incredibly stupid application.
# I realize that it's released under the MIT License.
# But, seriously. Who cares?
import datetime
import json
import logging
import os
import sys
import BaseHTTPServer as http
def port_from_env():
return int(os.getenv('PROFOUND_PORT', '8080'))
def name_from_env():
return os.getenv('PROFOUND_NAME', 'Profundity (Xtream!)')
def configure_logger():
logging.basicConfig(format='%(asctime)s.%(msecs)03d #%(process)d - %(levelname)s %(name)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level='INFO')
return logging.getLogger()
def run(port, name, logger):
logger.info('xtream profundity server starting.')
server = http.HTTPServer(('', port), Profundity.named_handler(name))
logging.info('serving "%s" on port %d', name, port)
try:
server.serve_forever()
except KeyboardInterrupt:
logger.info('Received Ctrl-C; exiting.')
logger.info('Done.')
class Profundity(http.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('X-Service-Name', self.NAME)
self.end_headers()
response = json.dumps({
'service': self.NAME,
'time': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'),
'client': ':'.join(str(f) for f in self.client_address),
'request-path': self.path,
})
logging.getLogger().info('Responding: %s', response)
self.wfile.write(response)
@classmethod
def named_handler(cls, name):
class ProfundityHandler(cls):
NAME = name
return ProfundityHandler
def show_usage():
print >> sys.stderr, "Usage: %s\n\nEnvironment variables:" % __file__, \
"\n PROFOUND_PORT - port to listen on (8080)", \
"\n PROFOUND_NAME - name to use for identifying this excellent 'service' ('Profundity (Xtream!)')", \
"\n"
if __name__ == '__main__':
if len(sys.argv) > 1:
if '-h' in sys.argv[1:] or '--help' in sys.argv[1:]:
show_usage()
exit()
else:
show_usage()
exit(1)
run(port_from_env(), name_from_env(), configure_logger())
|
|
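A hedged smoke test for the server above, written against the same Python 2 it targets; the port assumes the PROFOUND_PORT default of 8080.
# run first, in another shell: PROFOUND_NAME=demo python profundity.py
import json
import urllib2
body = urllib2.urlopen('http://localhost:8080/some/path').read()
print(json.loads(body)['request-path'])  # /some/path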
5420d368c064953842023ccc07b531b071ec3514
|
src/tests/test_login_page.py
|
src/tests/test_login_page.py
|
from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector("li.user.user-dropdown.dropdown")
|
from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
from src.lib.constants import selector
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector(selector.LoginPage.BUTTON_LOGIN)
|
Modify login page test to use selectors defined in the module constants.
|
Modify login page test to use selectors defined in the module constants.
|
Python
|
apache-2.0
|
edofic/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,VinnieJohns/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,selahssea/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core
|
from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector("li.user.user-dropdown.dropdown")
Modify login page test to use selectors defined in the module constants.
|
from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
from src.lib.constants import selector
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector(selector.LoginPage.BUTTON_LOGIN)
|
<commit_before>from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector("li.user.user-dropdown.dropdown")
<commit_msg>Modify login page test to use selectors defined in the module constants.<commit_after>
|
from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
from src.lib.constants import selector
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector(selector.LoginPage.BUTTON_LOGIN)
|
from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector("li.user.user-dropdown.dropdown")
Modify login page test to use selectors defined in the module constants.from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
from src.lib.constants import selector
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector(selector.LoginPage.BUTTON_LOGIN)
|
<commit_before>from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector("li.user.user-dropdown.dropdown")
<commit_msg>Modify login page test to use selectors defined in the module constants.<commit_after>from src.lib.page.login import LoginPage
from src.lib.base import BaseTest
from src.lib.constants import selector
class TestLoginPage(BaseTest):
def test_login_as_admin(self):
login_page = LoginPage(self.driver)
login_page.login()
self.driver.find_element_by_css_selector(selector.LoginPage.BUTTON_LOGIN)
|
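src.lib.constants.selector is not included in this diff; below is a minimal sketch of the constant the updated test relies on, reusing the CSS string the old test hard-coded. Whether the real module defines it this way is an assumption.
class LoginPage(object):
    # hypothetical layout; only BUTTON_LOGIN is known from the test
    BUTTON_LOGIN = "li.user.user-dropdown.dropdown"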
10caef4f58f7176ccce3ba65d9d068dc393e7905
|
playhouse/mysql_ext.py
|
playhouse/mysql_ext.py
|
try:
import mysql.connector as mysql_connector
except ImportError:
mysql_connector = None
from peewee import ImproperlyConfigured
from peewee import MySQLDatabase
class MySQLConnectorDatabase(MySQLDatabase):
def _connect(self):
if mysql_connector is None:
raise ImproperlyConfigured('MySQL connector not installed!')
return mysql_connector.connect(db=self.database, **self.connect_params)
def cursor(self, commit=None):
if self.is_closed():
self.connect()
return self._state.conn.cursor(buffered=True)
|
Add support for MySQL-Connector python driver.
|
Add support for MySQL-Connector python driver.
|
Python
|
mit
|
coleifer/peewee,coleifer/peewee,coleifer/peewee
|
Add support for MySQL-Connector python driver.
|
try:
import mysql.connector as mysql_connector
except ImportError:
mysql_connector = None
from peewee import ImproperlyConfigured
from peewee import MySQLDatabase
class MySQLConnectorDatabase(MySQLDatabase):
def _connect(self):
if mysql_connector is None:
raise ImproperlyConfigured('MySQL connector not installed!')
return mysql_connector.connect(db=self.database, **self.connect_params)
def cursor(self, commit=None):
if self.is_closed():
self.connect()
return self._state.conn.cursor(buffered=True)
|
<commit_before><commit_msg>Add support for MySQL-Connector python driver.<commit_after>
|
try:
import mysql.connector as mysql_connector
except ImportError:
mysql_connector = None
from peewee import ImproperlyConfigured
from peewee import MySQLDatabase
class MySQLConnectorDatabase(MySQLDatabase):
def _connect(self):
if mysql_connector is None:
raise ImproperlyConfigured('MySQL connector not installed!')
return mysql_connector.connect(db=self.database, **self.connect_params)
def cursor(self, commit=None):
if self.is_closed():
self.connect()
return self._state.conn.cursor(buffered=True)
|
Add support for MySQL-Connector python driver.try:
import mysql.connector as mysql_connector
except ImportError:
mysql_connector = None
from peewee import ImproperlyConfigured
from peewee import MySQLDatabase
class MySQLConnectorDatabase(MySQLDatabase):
def _connect(self):
if mysql_connector is None:
raise ImproperlyConfigured('MySQL connector not installed!')
return mysql_connector.connect(db=self.database, **self.connect_params)
def cursor(self, commit=None):
if self.is_closed():
self.connect()
return self._state.conn.cursor(buffered=True)
|
<commit_before><commit_msg>Add support for MySQL-Connector python driver.<commit_after>try:
import mysql.connector as mysql_connector
except ImportError:
mysql_connector = None
from peewee import ImproperlyConfigured
from peewee import MySQLDatabase
class MySQLConnectorDatabase(MySQLDatabase):
def _connect(self):
if mysql_connector is None:
raise ImproperlyConfigured('MySQL connector not installed!')
return mysql_connector.connect(db=self.database, **self.connect_params)
def cursor(self, commit=None):
if self.is_closed():
self.connect()
return self._state.conn.cursor(buffered=True)
|
|
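A hedged usage sketch for the new backend: the database name and credentials are invented, and extra keyword arguments flow through connect_params into mysql.connector.connect().
from playhouse.mysql_ext import MySQLConnectorDatabase
db = MySQLConnectorDatabase('my_app', host='localhost',
                            user='root', password='secret')
db.connect()
cursor = db.cursor()        # buffered cursor, per the override above
cursor.execute('SELECT VERSION()')
print(cursor.fetchone())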
6c0a2c487671693c7efa61aca37b177eb2d18031
|
session1_Decorators/decorators.py
|
session1_Decorators/decorators.py
|
"""
Decorators
"""
# 1 Decorators##################################################################
# Have a look at the warnings examples in warnings.py. How would you
# go about writing a more general deprecation warning if you have
# multiple deprecated functions? Wouldn't it be nice to 'decorate' a function
# as 'deprecated' instead of explicitly raising a warning each time?
# One solution would be a wrapper function, which you apply to your
# deprecated functions, eg
def wrapper(old_function):
print 'do something before'
result = old_function()
print 'doing something after'
return result
def deprecated(old_function):
print 'A'
def wrapper():
print 'deprecated'
res = old_function()
return res
print 'C'
return wrapper
@deprecated
def myfunction():
print 'Myfunction'
print 'Calling wrapper explicitly'
wrapper(myfunction)
print 'Calling myfunction'
myfunction()
print myfunction
# -1 Predefined Decorators #####################################################
# Decorators to be used with methods in classes:
# @staticmethod, @classmethod, @abc.abstractmethod, @context.contextmanager
# Example code for @classmethod and @abc.abstractmethod
import abc
class BasePizza(object):
__metaclass__ = abc.ABCMeta
default_ingredients = ['cheese']
@classmethod
@abc.abstractmethod
def get_ingredients(cls):
"""Returns the ingredient list."""
return cls.default_ingredients
class DietPizza(BasePizza):
def get_ingredients(self):
return ['egg'] + super(DietPizza, self).get_ingredients()
|
Add solutions and demo scripts
|
Add solutions and demo scripts
|
Python
|
mit
|
INM-6/Python-Module-of-the-Week,INM-6/Python-Module-of-the-Week,INM-6/Python-Module-of-the-Week,INM-6/Python-Module-of-the-Week
|
Add solutions and demo scripts
|
"""
Decorators
"""
# 1 Decorators##################################################################
# Have a look at the warnings examples in warnings.py. How would you
# go about writing a more general deprecation warning if you have
# multiple deprecated functions? Wouldn't it be nice to 'decorate' a function
# as 'deprecated' instead of explicitly raising a warning each time?
# One solution would be a wrapper function, which you apply to your
# deprecated functions, eg
def wrapper(old_function):
print 'do something before'
result = old_function()
print 'doing something after'
return result
def deprecated(old_function):
print 'A'
def wrapper():
print 'deprecated'
res = old_function()
return res
print 'C'
return wrapper
@deprecated
def myfunction():
print 'Myfunction'
print 'Calling wrapper explicitly'
wrapper(myfunction)
print 'Calling myfunction'
myfunction()
print myfunction
# -1 Predefined Decorators #####################################################
# Decorators to be used with methods in classes:
# @staticmethod, @classmethod, @abc.abstractmethod, @context.contextmanager
# Example code for @classmethod and @abc.abstractmethod
import abc
class BasePizza(object):
__metaclass__ = abc.ABCMeta
default_ingredients = ['cheese']
@classmethod
@abc.abstractmethod
def get_ingredients(cls):
"""Returns the ingredient list."""
return cls.default_ingredients
class DietPizza(BasePizza):
def get_ingredients(self):
return ['egg'] + super(DietPizza, self).get_ingredients()
|
<commit_before><commit_msg>Add solutions and demo scripts<commit_after>
|
"""
Decorators
"""
# 1 Decorators##################################################################
# Have a look at the warnings examples in warnings.py. How would you
# go about writing a more general deprecation warning if you have
# multiple deprecated functions? Wouldn't it be nice to 'decorate' a function
# as 'deprecated' instead of explicitly raising a warning each time?
# One solution would be a wrapper function, which you apply to your
# deprecated functions, eg
def wrapper(old_function):
print 'do something before'
result = old_function()
print 'doing something after'
return result
def deprecated(old_function):
print 'A'
def wrapper():
print 'deprecated'
res = old_function()
return res
print 'C'
return wrapper
@deprecated
def myfunction():
print 'Myfunction'
print 'Calling wrapper explicitly'
wrapper(myfunction)
print 'Calling myfunction'
myfunction()
print myfunction
# -1 Predefined Decorators #####################################################
# Decorators to be used with methods in classes:
# @staticmethod, @classmethod, @abc.abstractmethod, @context.contextmanager
# Example code for @classmethod and @abc.abstractmethod
import abc
class BasePizza(object):
__metaclass__ = abc.ABCMeta
default_ingredients = ['cheese']
@classmethod
@abc.abstractmethod
def get_ingredients(cls):
"""Returns the ingredient list."""
return cls.default_ingredients
class DietPizza(BasePizza):
def get_ingredients(self):
return ['egg'] + super(DietPizza, self).get_ingredients()
|
Add solutions and demo scripts"""
Decorators
"""
# 1 Decorators##################################################################
# Have a look at the warnings examples in warnings.py. How would you
# go about writing a more general deprecation warning if you have
# multiple deprecated functions? Wouldn't it be nice to 'decorate' a function
# as 'deprecated' instead of explicitly raising a warning each time?
# One solution would be a wrapper function, which you apply to your
# deprecated functions, eg
def wrapper(old_function):
print 'do something before'
result = old_function()
print 'doing something after'
return result
def deprecated(old_function):
print 'A'
def wrapper():
print 'deprecated'
res = old_function()
return res
print 'C'
return wrapper
@deprecated
def myfunction():
print 'Myfunction'
print 'Calling wrapper explicitly'
wrapper(myfunction)
print 'Calling myfunction'
myfunction()
print myfunction
# -1 Predefined Decorators #####################################################
# Decorators to be used with methods in classes:
# @staticmethod, @classmethod, @abc.abstractmethod, @context.contextmanager
# Example code for @classmethod and @abc.abstractmethod
import abc
class BasePizza(object):
__metaclass__ = abc.ABCMeta
default_ingredients = ['cheese']
@classmethod
@abc.abstractmethod
def get_ingredients(cls):
"""Returns the ingredient list."""
return cls.default_ingredients
class DietPizza(BasePizza):
def get_ingredients(self):
return ['egg'] + super(DietPizza, self).get_ingredients()
|
<commit_before><commit_msg>Add solutions and demo scripts<commit_after>"""
Decorators
"""
# 1 Decorators##################################################################
# Have a look at the warnings examples in warnings.py. How would you
# go about writing a more general deprecation warning if you have
# multiple deprecated functions? Wouldn't it be nice to 'decorate' a function
# as 'deprecated' instead of explicitly raising a warning each time?
# One solution would be a wrapper function, which you apply to your
# deprecated functions, eg
def wrapper(old_function):
print 'do something before'
result = old_function()
print 'doing something after'
return result
def deprecated(old_function):
print 'A'
def wrapper():
print 'deprecated'
res = old_function()
return res
print 'C'
return wrapper
@deprecated
def myfunction():
print 'Myfunction'
print 'Calling wrapper explicitly'
wrapper(myfunction)
print 'Calling myfunction'
myfunction()
print myfunction
# -1 Predefined Decorators #####################################################
# Decorators to be used with methods in classes:
# @staticmethod, @classmethod, @abc.abstractmethod, @context.contextmanager
# Example code for @classmethod and @abc.abstractmethod
import abc
class BasePizza(object):
__metaclass__ = abc.ABCMeta
default_ingredients = ['cheese']
@classmethod
@abc.abstractmethod
def get_ingredients(cls):
"""Returns the ingredient list."""
return cls.default_ingredients
class DietPizza(BasePizza):
def get_ingredients(self):
return ['egg'] + super(DietPizza, self).get_ingredients()
|
|
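Editor's note: the exercise in the record above asks for a general-purpose deprecation decorator. A minimal sketch is shown below, written in Python 3 (the snippets above are Python 2); it combines functools.wraps with the warnings module, and every name here is illustrative rather than taken from the repository.
import functools
import warnings

def deprecated(func):
    """Mark a function as deprecated and warn on every call."""
    @functools.wraps(func)  # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        # stacklevel=2 points the warning at the caller, not at this wrapper.
        warnings.warn("%s is deprecated" % func.__name__,
                      DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    return wrapper

@deprecated
def myfunction():
    return 'Myfunction'

myfunction()  # emits a DeprecationWarning, then runs normally
Note that DeprecationWarning is hidden by default in some Python versions; run with -W default (or adjust the warnings filter) to see it.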
b32f3db018612285d228a9757d9a644292ec278e
|
tests/scoring_engine/engine/checks/test_mysql.py
|
tests/scoring_engine/engine/checks/test_mysql.py
|
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
|
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
|
Fix pep8 in mysql test
|
Fix pep8 in mysql test
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com>
|
Python
|
mit
|
pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine,pwnbus/scoring_engine
|
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
Fix pep8 in mysql test
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com>
|
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
|
<commit_before>from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
<commit_msg>Fix pep8 in mysql test
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com><commit_after>
|
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
|
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
Fix pep8 in mysql test
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com>from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
|
<commit_before>from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
<commit_msg>Fix pep8 in mysql test
Signed-off-by: Brandon Myers <9cda508be11a1ae7ceef912b85c196946f0ec5f3@mozilla.com><commit_after>from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestMYSQLCheck(CheckTest):
check_name = 'MYSQLCheck'
properties = {
'database': 'wordpressdb',
'command': 'show tables'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
|
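Editor's note: the fixture above pins the exact shell command the check must produce. A hypothetical sketch of how such a command might be assembled from the properties and accounts dictionaries follows; the real MYSQLCheck implementation in the scoring engine may differ, and a production check should escape its inputs.
def build_mysql_cmd(host, user, password, properties):
    # Mirror the fixture's expected string; no shell quoting for brevity.
    return "mysql -h {host} -u {user} -p{password} {db} -e '{cmd}'".format(
        host=host, user=user, password=password,
        db=properties['database'], cmd=properties['command'])

expected = "mysql -h 127.0.0.1 -u pwnbus -ppwnbuspass wordpressdb -e 'show tables'"
assert build_mysql_cmd('127.0.0.1', 'pwnbus', 'pwnbuspass',
                       {'database': 'wordpressdb',
                        'command': 'show tables'}) == expected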
ca847aea1de45971327b0008c17e07c3d9d5281c
|
tests/unit/core/migrations_tests.py
|
tests/unit/core/migrations_tests.py
|
# -*- coding: utf-8 -*-
import os
from django.test import TestCase
import oscar.apps
class TestMigrations(TestCase):
def check_for_auth_model(self, filepath):
with open(filepath) as f:
s = f.read()
return 'auth.User' in s or 'auth.user' in s
def test_dont_contain_hardcoded_user_model(self):
root_path = os.path.dirname(oscar.apps.__file__)
matches = []
for dir, __, migrations in os.walk(root_path):
if dir.endswith('migrations'):
paths = [os.path.join(dir, migration) for migration in migrations
if migration.endswith('.py')]
matches += filter(self.check_for_auth_model, paths)
if matches:
pretty_matches = '\n'.join(
[match.replace(root_path, '') for match in matches])
self.fail('References to hardcoded User model found in the '
'following migration(s):\n' + pretty_matches)
|
Add test for hardcoded user model in migrations
|
Add test for hardcoded user model in migrations
New migrations need to be manually updated to correctly deal with custom
user models. This test fails if this isn't done.
|
Python
|
bsd-3-clause
|
jlmadurga/django-oscar,MatthewWilkes/django-oscar,faratro/django-oscar,pasqualguerrero/django-oscar,kapt/django-oscar,monikasulik/django-oscar,nfletton/django-oscar,eddiep1101/django-oscar,sasha0/django-oscar,okfish/django-oscar,jmt4/django-oscar,manevant/django-oscar,ka7eh/django-oscar,django-oscar/django-oscar,marcoantoniooliveira/labweb,jinnykoo/wuyisj,adamend/django-oscar,ahmetdaglarbas/e-commerce,ka7eh/django-oscar,Jannes123/django-oscar,sasha0/django-oscar,nfletton/django-oscar,michaelkuty/django-oscar,saadatqadri/django-oscar,adamend/django-oscar,spartonia/django-oscar,jinnykoo/christmas,DrOctogon/unwash_ecom,WadeYuChen/django-oscar,mexeniz/django-oscar,WadeYuChen/django-oscar,jlmadurga/django-oscar,binarydud/django-oscar,manevant/django-oscar,ahmetdaglarbas/e-commerce,binarydud/django-oscar,rocopartners/django-oscar,michaelkuty/django-oscar,jinnykoo/wuyisj,sonofatailor/django-oscar,sasha0/django-oscar,ka7eh/django-oscar,jmt4/django-oscar,rocopartners/django-oscar,pasqualguerrero/django-oscar,bschuon/django-oscar,amirrpp/django-oscar,dongguangming/django-oscar,django-oscar/django-oscar,vovanbo/django-oscar,QLGu/django-oscar,DrOctogon/unwash_ecom,kapari/django-oscar,solarissmoke/django-oscar,WadeYuChen/django-oscar,binarydud/django-oscar,WillisXChen/django-oscar,sonofatailor/django-oscar,josesanch/django-oscar,jinnykoo/wuyisj.com,nickpack/django-oscar,solarissmoke/django-oscar,rocopartners/django-oscar,josesanch/django-oscar,Jannes123/django-oscar,taedori81/django-oscar,vovanbo/django-oscar,WillisXChen/django-oscar,jlmadurga/django-oscar,mexeniz/django-oscar,thechampanurag/django-oscar,Jannes123/django-oscar,marcoantoniooliveira/labweb,mexeniz/django-oscar,john-parton/django-oscar,django-oscar/django-oscar,ademuk/django-oscar,anentropic/django-oscar,saadatqadri/django-oscar,ademuk/django-oscar,QLGu/django-oscar,Bogh/django-oscar,taedori81/django-oscar,sonofatailor/django-oscar,amirrpp/django-oscar,nfletton/django-oscar,lijoantony/django-oscar,kapari/django-oscar,MatthewWilkes/django-oscar,john-parton/django-oscar,jmt4/django-oscar,bnprk/django-oscar,WillisXChen/django-oscar,binarydud/django-oscar,anentropic/django-oscar,QLGu/django-oscar,adamend/django-oscar,eddiep1101/django-oscar,itbabu/django-oscar,machtfit/django-oscar,jinnykoo/christmas,spartonia/django-oscar,itbabu/django-oscar,okfish/django-oscar,john-parton/django-oscar,manevant/django-oscar,WadeYuChen/django-oscar,faratro/django-oscar,vovanbo/django-oscar,sonofatailor/django-oscar,machtfit/django-oscar,eddiep1101/django-oscar,jlmadurga/django-oscar,saadatqadri/django-oscar,eddiep1101/django-oscar,kapt/django-oscar,django-oscar/django-oscar,dongguangming/django-oscar,ka7eh/django-oscar,nickpack/django-oscar,pdonadeo/django-oscar,WillisXChen/django-oscar,spartonia/django-oscar,taedori81/django-oscar,pdonadeo/django-oscar,jinnykoo/wuyisj,amirrpp/django-oscar,itbabu/django-oscar,dongguangming/django-oscar,pasqualguerrero/django-oscar,ademuk/django-oscar,faratro/django-oscar,ahmetdaglarbas/e-commerce,bnprk/django-oscar,WillisXChen/django-oscar,jinnykoo/wuyisj.com,pdonadeo/django-oscar,saadatqadri/django-oscar,bschuon/django-oscar,pasqualguerrero/django-oscar,michaelkuty/django-oscar,bnprk/django-oscar,MatthewWilkes/django-oscar,monikasulik/django-oscar,ademuk/django-oscar,marcoantoniooliveira/labweb,nfletton/django-oscar,bschuon/django-oscar,jmt4/django-oscar,lijoantony/django-oscar,kapari/django-oscar,nickpack/django-oscar,pdonadeo/django-oscar,michaelkuty/django-oscar,nickpack/django-oscar,jinnykoo/wuyisj.com,manevant/django-osc
ar,mexeniz/django-oscar,adamend/django-oscar,lijoantony/django-oscar,josesanch/django-oscar,kapari/django-oscar,spartonia/django-oscar,jinnykoo/wuyisj.com,machtfit/django-oscar,solarissmoke/django-oscar,itbabu/django-oscar,bnprk/django-oscar,kapt/django-oscar,MatthewWilkes/django-oscar,monikasulik/django-oscar,marcoantoniooliveira/labweb,Bogh/django-oscar,lijoantony/django-oscar,DrOctogon/unwash_ecom,solarissmoke/django-oscar,taedori81/django-oscar,okfish/django-oscar,bschuon/django-oscar,Bogh/django-oscar,monikasulik/django-oscar,dongguangming/django-oscar,jinnykoo/christmas,thechampanurag/django-oscar,thechampanurag/django-oscar,sasha0/django-oscar,okfish/django-oscar,jinnykoo/wuyisj,john-parton/django-oscar,faratro/django-oscar,Jannes123/django-oscar,anentropic/django-oscar,QLGu/django-oscar,thechampanurag/django-oscar,ahmetdaglarbas/e-commerce,vovanbo/django-oscar,anentropic/django-oscar,amirrpp/django-oscar,WillisXChen/django-oscar,rocopartners/django-oscar,Bogh/django-oscar
|
Add test for hardcoded user model in migrations
New migrations need to be manually updated to correctly deal with custom
user models. This test fails if this isn't done.
|
# -*- coding: utf-8 -*-
import os
from django.test import TestCase
import oscar.apps
class TestMigrations(TestCase):
def check_for_auth_model(self, filepath):
with open(filepath) as f:
s = f.read()
return 'auth.User' in s or 'auth.user' in s
def test_dont_contain_hardcoded_user_model(self):
root_path = os.path.dirname(oscar.apps.__file__)
matches = []
for dir, __, migrations in os.walk(root_path):
if dir.endswith('migrations'):
paths = [os.path.join(dir, migration) for migration in migrations
if migration.endswith('.py')]
matches += filter(self.check_for_auth_model, paths)
if matches:
pretty_matches = '\n'.join(
[match.replace(root_path, '') for match in matches])
self.fail('References to hardcoded User model found in the '
'following migration(s):\n' + pretty_matches)
|
<commit_before><commit_msg>Add test for hardcoded user model in migrations
New migrations need to be manually updated to correctly deal with custom
user models. This test fails if this isn't done.<commit_after>
|
# -*- coding: utf-8 -*-
import os
from django.test import TestCase
import oscar.apps
class TestMigrations(TestCase):
def check_for_auth_model(self, filepath):
with open(filepath) as f:
s = f.read()
return 'auth.User' in s or 'auth.user' in s
def test_dont_contain_hardcoded_user_model(self):
root_path = os.path.dirname(oscar.apps.__file__)
matches = []
for dir, __, migrations in os.walk(root_path):
if dir.endswith('migrations'):
paths = [os.path.join(dir, migration) for migration in migrations
if migration.endswith('.py')]
matches += filter(self.check_for_auth_model, paths)
if matches:
pretty_matches = '\n'.join(
[match.replace(root_path, '') for match in matches])
self.fail('References to hardcoded User model found in the '
'following migration(s):\n' + pretty_matches)
|
Add test for hardcoded user model in migrations
New migrations need to be manually updated to correctly deal with custom
user models. This test fails if this isn't done.# -*- coding: utf-8 -*-
import os
from django.test import TestCase
import oscar.apps
class TestMigrations(TestCase):
def check_for_auth_model(self, filepath):
with open(filepath) as f:
s = f.read()
return 'auth.User' in s or 'auth.user' in s
def test_dont_contain_hardcoded_user_model(self):
root_path = os.path.dirname(oscar.apps.__file__)
matches = []
for dir, __, migrations in os.walk(root_path):
if dir.endswith('migrations'):
paths = [os.path.join(dir, migration) for migration in migrations
if migration.endswith('.py')]
matches += filter(self.check_for_auth_model, paths)
if matches:
pretty_matches = '\n'.join(
[match.replace(root_path, '') for match in matches])
self.fail('References to hardcoded User model found in the '
'following migration(s):\n' + pretty_matches)
|
<commit_before><commit_msg>Add test for hardcoded user model in migrations
New migrations need to be manually updated to correctly deal with custom
user models. This test fails if this isn't done.<commit_after># -*- coding: utf-8 -*-
import os
from django.test import TestCase
import oscar.apps
class TestMigrations(TestCase):
def check_for_auth_model(self, filepath):
with open(filepath) as f:
s = f.read()
return 'auth.User' in s or 'auth.user' in s
def test_dont_contain_hardcoded_user_model(self):
root_path = os.path.dirname(oscar.apps.__file__)
matches = []
for dir, __, migrations in os.walk(root_path):
if dir.endswith('migrations'):
paths = [os.path.join(dir, migration) for migration in migrations
if migration.endswith('.py')]
matches += filter(self.check_for_auth_model, paths)
if matches:
pretty_matches = '\n'.join(
[match.replace(root_path, '') for match in matches])
self.fail('References to hardcoded User model found in the '
'following migration(s):\n' + pretty_matches)
|
|
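Editor's note: the test above fails whenever a migration hardcodes auth.User. The standard remedy, sketched below under the assumption of a Django project with a swappable user model, is to reference settings.AUTH_USER_MODEL; the model name is hypothetical.
from django.conf import settings
from django.db import models

class Review(models.Model):  # illustrative model, not from the oscar codebase
    # Resolves to whatever user model the project configures, so
    # generated migrations never embed a hardcoded 'auth.User'.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.CASCADE)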
5caa1952c1e413922e313dd51903904fe8e23bef
|
calc_gain_read.py
|
calc_gain_read.py
|
from triage_fits_files import ImageFileCollection
from ccd_characterization import ccd_gain, ccd_read_noise
from numpy import array
from astropysics import ccd
def as_images(tbl, src_dir):
from os import path
img = []
for tb in tbl:
img.append(ccd.FitsImage(path.join(src_dir, tb['file'])).data)
return img
def calc_gain_read(src_dir):
"""Calculate gain and read noise from images in `src_dir`
Uses biases and any R band flats that are present.
"""
img_col = ImageFileCollection(location=src_dir,
keywords=['imagetyp', 'filter'],
info_file=None)
img_tbl = img_col.summary_info
bias_tbl = img_tbl.where(img_tbl['imagetyp']=='BIAS')
biases = as_images(bias_tbl, src_dir)
r_flat_tbl = img_tbl.where((img_tbl['imagetyp']=='FLAT') &
(img_tbl['filter']=='R'))
r_flats = as_images(r_flat_tbl, src_dir)
n_files = len(biases)
n_pairs = int(n_files/2)
gain = []
read_noise = []
for i in range(0,n_files,2):
gain.append(ccd_gain(biases[i:i+2], r_flats[i:i+2]))
read_noise.append(ccd_read_noise(biases[i:i+2],gain=gain[-1]))
return (array(gain),array(read_noise))
|
Add function to calculate gain/read noise given a directory.
|
Add function to calculate gain/read noise given a directory.
|
Python
|
bsd-3-clause
|
mwcraig/msumastro
|
Add function to calculate gain/read noise given a directory.
|
from triage_fits_files import ImageFileCollection
from ccd_characterization import ccd_gain, ccd_read_noise
from numpy import array
from astropysics import ccd
def as_images(tbl, src_dir):
from os import path
img = []
for tb in tbl:
img.append(ccd.FitsImage(path.join(src_dir, tb['file'])).data)
return img
def calc_gain_read(src_dir):
"""Calculate gain and read noise from images in `src_dir`
Uses biases and any R band flats that are present.
"""
img_col = ImageFileCollection(location=src_dir,
keywords=['imagetyp', 'filter'],
info_file=None)
img_tbl = img_col.summary_info
bias_tbl = img_tbl.where(img_tbl['imagetyp']=='BIAS')
biases = as_images(bias_tbl, src_dir)
r_flat_tbl = img_tbl.where((img_tbl['imagetyp']=='FLAT') &
(img_tbl['filter']=='R'))
r_flats = as_images(r_flat_tbl, src_dir)
n_files = len(biases)
n_pairs = int(n_files/2)
gain = []
read_noise = []
for i in range(0,n_files,2):
gain.append(ccd_gain(biases[i:i+2], r_flats[i:i+2]))
read_noise.append(ccd_read_noise(biases[i:i+2],gain=gain[-1]))
return (array(gain),array(read_noise))
|
<commit_before><commit_msg>Add function to calculate gain/read noise given a directory.<commit_after>
|
from triage_fits_files import ImageFileCollection
from ccd_characterization import ccd_gain, ccd_read_noise
from numpy import array
from astropysics import ccd
def as_images(tbl, src_dir):
from os import path
img = []
for tb in tbl:
img.append(ccd.FitsImage(path.join(src_dir, tb['file'])).data)
return img
def calc_gain_read(src_dir):
"""Calculate gain and read noise from images in `src_dir`
Uses biases and any R band flats that are present.
"""
img_col = ImageFileCollection(location=src_dir,
keywords=['imagetyp', 'filter'],
info_file=None)
img_tbl = img_col.summary_info
bias_tbl = img_tbl.where(img_tbl['imagetyp']=='BIAS')
biases = as_images(bias_tbl, src_dir)
r_flat_tbl = img_tbl.where((img_tbl['imagetyp']=='FLAT') &
(img_tbl['filter']=='R'))
r_flats = as_images(r_flat_tbl, src_dir)
n_files = len(biases)
n_pairs = int(n_files/2)
gain = []
read_noise = []
for i in range(0,n_files,2):
gain.append(ccd_gain(biases[i:i+2], r_flats[i:i+2]))
read_noise.append(ccd_read_noise(biases[i:i+2],gain=gain[-1]))
return (array(gain),array(read_noise))
|
Add function to calculate gain/read noise given a directory.from triage_fits_files import ImageFileCollection
from ccd_characterization import ccd_gain, ccd_read_noise
from numpy import array
from astropysics import ccd
def as_images(tbl, src_dir):
from os import path
img = []
for tb in tbl:
img.append(ccd.FitsImage(path.join(src_dir, tb['file'])).data)
return img
def calc_gain_read(src_dir):
"""Calculate gain and read noise from images in `src_dir`
Uses biases and any R band flats that are present.
"""
img_col = ImageFileCollection(location=src_dir,
keywords=['imagetyp', 'filter'],
info_file=None)
img_tbl = img_col.summary_info
bias_tbl = img_tbl.where(img_tbl['imagetyp']=='BIAS')
biases = as_images(bias_tbl, src_dir)
r_flat_tbl = img_tbl.where((img_tbl['imagetyp']=='FLAT') &
(img_tbl['filter']=='R'))
r_flats = as_images(r_flat_tbl, src_dir)
n_files = len(biases)
n_pairs = int(n_files/2)
gain = []
read_noise = []
for i in range(0,n_files,2):
gain.append(ccd_gain(biases[i:i+2], r_flats[i:i+2]))
read_noise.append(ccd_read_noise(biases[i:i+2],gain=gain[-1]))
return (array(gain),array(read_noise))
|
<commit_before><commit_msg>Add function to calculate gain/read noise given a directory.<commit_after>from triage_fits_files import ImageFileCollection
from ccd_characterization import ccd_gain, ccd_read_noise
from numpy import array
from astropysics import ccd
def as_images(tbl, src_dir):
from os import path
img = []
for tb in tbl:
img.append(ccd.FitsImage(path.join(src_dir, tb['file'])).data)
return img
def calc_gain_read(src_dir):
"""Calculate gain and read noise from images in `src_dir`
Uses biases and any R band flats that are present.
"""
img_col = ImageFileCollection(location=src_dir,
keywords=['imagetyp', 'filter'],
info_file=None)
img_tbl = img_col.summary_info
bias_tbl = img_tbl.where(img_tbl['imagetyp']=='BIAS')
biases = as_images(bias_tbl, src_dir)
r_flat_tbl = img_tbl.where((img_tbl['imagetyp']=='FLAT') &
(img_tbl['filter']=='R'))
r_flats = as_images(r_flat_tbl, src_dir)
n_files = len(biases)
n_pairs = int(n_files/2)
gain = []
read_noise = []
for i in range(0,n_files,2):
gain.append(ccd_gain(biases[i:i+2], r_flats[i:i+2]))
read_noise.append(ccd_read_noise(biases[i:i+2],gain=gain[-1]))
return (array(gain),array(read_noise))
|
|
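Editor's note: a minimal usage sketch for the function above, assuming a directory containing bias frames and R-band flats; the path is hypothetical, and the gain/read-noise units depend on the ccd_characterization helpers.
from calc_gain_read import calc_gain_read

# Each array holds one estimate per pair of frames found in the directory.
gain, read_noise = calc_gain_read('/data/night1')  # hypothetical path
print('gain estimates:', gain)
print('read noise estimates:', read_noise)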
346a6b5cc5426ce38195dd5ce4507894710ee8a7
|
fix-gpt-ubuntu.py
|
fix-gpt-ubuntu.py
|
#!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
"""
WARNING: This script will remove all partitions on the resource disk and create
a new one using the entire disk space.
"""
if __name__ == '__main__':
    print 'Unmount resource disk...'
subprocess.call(['umount', '/dev/sdb1'])
print 'Remove old partitions...'
subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
subprocess.call(['parted', '/dev/sdb', 'rm', '2'])
print 'Create new partition using the entire resource disk...'
subprocess.call(['parted', '/dev/sdb','mkpart', 'primary', '0%', '100%'])
subprocess.call(['mkfs.ext4', '/dev/sdb1'])
subprocess.call(['mount', '/dev/sdb1', '/mnt'])
print 'Resource disk(/dev/sdb1) is mounted at /mnt'
|
Add script to fix gpt mounting issue with ubuntu.
|
Add script to fix gpt mounting issue with ubuntu.
|
Python
|
apache-2.0
|
fieryorc/WALinuxAgent,lizzha/WALinuxAgent,jerickso/WALinuxAgent,SuperScottz/WALinuxAgent,yuezh/WALinuxAgent,thomas1206/WALinuxAgent,AbelHu/WALinuxAgent,thomas1206/WALinuxAgent,karataliu/WALinuxAgent,karataliu/WALinuxAgent,AbelHu/WALinuxAgent,ryanmiao/WALinuxAgent,ryanmiao/WALinuxAgent,SuperScottz/WALinuxAgent,yuezh/WALinuxAgent,jerickso/WALinuxAgent,imikushin/WALinuxAgent,lizzha/WALinuxAgent,imikushin/WALinuxAgent,fieryorc/WALinuxAgent
|
Add script to fix gpt mounting issue with ubuntu.
|
#!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
"""
WARNING: This script will remove all partitions on the resource disk and create
a new one using the entire disk space.
"""
if __name__ == '__main__':
    print 'Unmount resource disk...'
subprocess.call(['umount', '/dev/sdb1'])
print 'Remove old partitions...'
subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
subprocess.call(['parted', '/dev/sdb', 'rm', '2'])
print 'Create new partition using the entire resource disk...'
subprocess.call(['parted', '/dev/sdb','mkpart', 'primary', '0%', '100%'])
subprocess.call(['mkfs.ext4', '/dev/sdb1'])
subprocess.call(['mount', '/dev/sdb1', '/mnt'])
print 'Resource disk(/dev/sdb1) is mounted at /mnt'
|
<commit_before><commit_msg>Add script to fix gpt mounting issue with ubuntu.<commit_after>
|
#!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
"""
WARNING: This script will remove all partitions on the resource disk and create
a new one using the entire disk space.
"""
if __name__ == '__main__':
    print 'Unmount resource disk...'
subprocess.call(['umount', '/dev/sdb1'])
print 'Remove old partitions...'
subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
subprocess.call(['parted', '/dev/sdb', 'rm', '2'])
print 'Create new partition using the entire resource disk...'
subprocess.call(['parted', '/dev/sdb','mkpart', 'primary', '0%', '100%'])
subprocess.call(['mkfs.ext4', '/dev/sdb1'])
subprocess.call(['mount', '/dev/sdb1', '/mnt'])
print 'Resource disk(/dev/sdb1) is mounted at /mnt'
|
Add script to fix gpt mounting issue with ubuntu.#!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
"""
WARNING: This script will remove all partitions on the resource disk and create
a new one using the entire disk space.
"""
if __name__ == '__main__':
    print 'Unmount resource disk...'
subprocess.call(['umount', '/dev/sdb1'])
print 'Remove old partitions...'
subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
subprocess.call(['parted', '/dev/sdb', 'rm', '2'])
print 'Create new partition using the entire resource disk...'
subprocess.call(['parted', '/dev/sdb','mkpart', 'primary', '0%', '100%'])
subprocess.call(['mkfs.ext4', '/dev/sdb1'])
subprocess.call(['mount', '/dev/sdb1', '/mnt'])
print 'Resource disk(/dev/sdb1) is mounted at /mnt'
|
<commit_before><commit_msg>Add script to fix gpt mounting issue with ubuntu.<commit_after>#!/usr/bin/env python
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Implements parts of RFC 2131, 1541, 1497 and
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
#
import subprocess
"""
WARNING: This script will remove all partitions on the resource disk and create
a new one using the entire disk space.
"""
if __name__ == '__main__':
    print 'Unmount resource disk...'
subprocess.call(['umount', '/dev/sdb1'])
print 'Remove old partitions...'
subprocess.call(['parted', '/dev/sdb', 'rm', '1'])
subprocess.call(['parted', '/dev/sdb', 'rm', '2'])
print 'Create new partition using the entire resource disk...'
subprocess.call(['parted', '/dev/sdb','mkpart', 'primary', '0%', '100%'])
subprocess.call(['mkfs.ext4', '/dev/sdb1'])
subprocess.call(['mount', '/dev/sdb1', '/mnt'])
print 'Resource disk(/dev/sdb1) is mounted at /mnt'
|
|
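Editor's note: the script above ignores the return codes of the commands it runs, so a failed umount or parted call is silently skipped. A hedged variant using subprocess.check_call, which raises CalledProcessError and aborts on the first failure, is sketched below in the same Python 2 style; the commands are destructive and shown for illustration only.
import subprocess

def run(cmd):
    # check_call raises CalledProcessError if the command exits non-zero.
    print 'Running: %s' % ' '.join(cmd)
    subprocess.check_call(cmd)

run(['umount', '/dev/sdb1'])
run(['parted', '/dev/sdb', 'rm', '1'])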
aec7f2bee97d79777e418b4752b5e9ce4d9f2a2c
|
dit/math/tests/test_fraction.py
|
dit/math/tests/test_fraction.py
|
from __future__ import division
from nose.tools import *
from fractions import Fraction
from ..fraction import *
def test_fraction():
"""Smoke tests to convert float to fraction."""
numerators = range(10)
denominator = 10
xvals = [x / denominator for x in numerators]
af = lambda x: approximate_fraction(x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
# Negative values
af = lambda x: approximate_fraction(-x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(-x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
def test_fraction_zero():
"""Convert float to fraction when closer to 0."""
x = .1
y = approximate_fraction(x, .2)
y_ = Fraction(0, 1)
assert_equal(y, y_)
y = approximate_fraction(-x, .2)
assert_equal(y, -y_)
def test_fraction_emptyinterval():
assert_raises(ValueError, approximate_fraction, 0.1, 0)
|
Add some unit tests for approximate fractions.
|
Add some unit tests for approximate fractions.
|
Python
|
bsd-3-clause
|
dit/dit,Autoplectic/dit,Autoplectic/dit,chebee7i/dit,dit/dit,chebee7i/dit,dit/dit,Autoplectic/dit,Autoplectic/dit,chebee7i/dit,Autoplectic/dit,dit/dit,dit/dit,chebee7i/dit
|
Add some unit tests for approximate fractions.
|
from __future__ import division
from nose.tools import *
from fractions import Fraction
from ..fraction import *
def test_fraction():
"""Smoke tests to convert float to fraction."""
numerators = range(10)
denominator = 10
xvals = [x / denominator for x in numerators]
af = lambda x: approximate_fraction(x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
# Negative values
af = lambda x: approximate_fraction(-x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(-x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
def test_fraction_zero():
"""Convert float to fraction when closer to 0."""
x = .1
y = approximate_fraction(x, .2)
y_ = Fraction(0, 1)
assert_equal(y, y_)
y = approximate_fraction(-x, .2)
assert_equal(y, -y_)
def test_fraction_emptyinterval():
assert_raises(ValueError, approximate_fraction, 0.1, 0)
|
<commit_before><commit_msg>Add some unit tests for approximate fractions.<commit_after>
|
from __future__ import division
from nose.tools import *
from fractions import Fraction
from ..fraction import *
def test_fraction():
"""Smoke tests to convert float to fraction."""
numerators = range(10)
denominator = 10
xvals = [x / denominator for x in numerators]
af = lambda x: approximate_fraction(x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
# Negative values
af = lambda x: approximate_fraction(-x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(-x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
def test_fraction_zero():
"""Convert float to fraction when closer to 0."""
x = .1
y = approximate_fraction(x, .2)
y_ = Fraction(0, 1)
assert_equal(y, y_)
y = approximate_fraction(-x, .2)
assert_equal(y, -y_)
def test_fraction_emptyinterval():
assert_raises(ValueError, approximate_fraction, 0.1, 0)
|
Add some unit tests for approximate fractions.from __future__ import division
from nose.tools import *
from fractions import Fraction
from ..fraction import *
def test_fraction():
"""Smoke tests to convert float to fraction."""
numerators = range(10)
denominator = 10
xvals = [x / denominator for x in numerators]
af = lambda x: approximate_fraction(x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
# Negative values
af = lambda x: approximate_fraction(-x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(-x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
def test_fraction_zero():
"""Convert float to fraction when closer to 0."""
x = .1
y = approximate_fraction(x, .2)
y_ = Fraction(0, 1)
assert_equal(y, y_)
y = approximate_fraction(-x, .2)
assert_equal(y, -y_)
def test_fraction_emptyinterval():
assert_raises(ValueError, approximate_fraction, 0.1, 0)
|
<commit_before><commit_msg>Add some unit tests for approximate fractions.<commit_after>from __future__ import division
from nose.tools import *
from fractions import Fraction
from ..fraction import *
def test_fraction():
"""Smoke tests to convert float to fraction."""
numerators = range(10)
denominator = 10
xvals = [x / denominator for x in numerators]
af = lambda x: approximate_fraction(x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
# Negative values
af = lambda x: approximate_fraction(-x, .01)
yvals = map(af, xvals)
yvals_ = map(lambda x: Fraction(-x, denominator), numerators)
for y, y_ in zip(yvals, yvals_):
assert_equal(y, y_)
def test_fraction_zero():
"""Convert float to fraction when closer to 0."""
x = .1
y = approximate_fraction(x, .2)
y_ = Fraction(0, 1)
assert_equal(y, y_)
y = approximate_fraction(-x, .2)
assert_equal(y, -y_)
def test_fraction_emptyinterval():
assert_raises(ValueError, approximate_fraction, 0.1, 0)
|
|
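Editor's note: for comparison with approximate_fraction above, the standard library's Fraction.limit_denominator solves a related problem. It bounds the denominator rather than the absolute error, so it is not a drop-in replacement; a small sketch:
from fractions import Fraction

x = 0.3
# Closest fraction to x whose denominator is at most 100.
approx = Fraction(x).limit_denominator(100)
print(approx)         # 3/10
print(float(approx))  # 0.3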
807ceb2de5dd0f9d84d17252b0929c1c7136919d
|
utils/generate_plenary_agenda_md.py
|
utils/generate_plenary_agenda_md.py
|
#!/usr/bin/env python3.8
import sys
import yaml
def print_votes(votes):
if votes:
for v in votes:
print("""* [#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num}): {description}"""
.format(num=v['number'], description=v['description']))
def main():
agenda_year = sys.argv[1]
agenda_month = sys.argv[2]
filename = """../_data/meetings/{year}/{month}/agenda.yml""".format(year=agenda_year, month=agenda_month)
with open(filename, 'r') as f:
agenda = yaml.load(f, Loader=yaml.FullLoader)
print("# Votes")
print("## Procedure Votes")
print_votes(agenda['procedure-votes'])
print("")
print("## Errata Votes")
print_votes(agenda['errata-votes'])
print("")
print("## No No Votes")
print_votes(agenda['no-no-votes'])
print("")
print("## First Votes")
print_votes(agenda['first-votes'])
print("")
print("## Second Votes")
print_votes(agenda['second-votes'])
print("")
print("# Plenaries")
print("| Type | Issue | Title | Presenter |")
print("| ---- | ----- | ----- | --------- |")
for p in agenda['plenaries']:
try:
number="""[#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num})""".format(num=p['issue_number'])
except:
number=''
print("""| {type} | {num} | {title} | {presenter} |"""
.format(type=p['type'], num=number, title=p['title'],
presenter=p['presenter']))
if __name__ == '__main__':
main()
|
Add script to convert agenda to markdown
|
Add script to convert agenda to markdown
|
Python
|
mit
|
mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io,mpi-forum/mpi-forum.github.io
|
Add script to convert agenda to markdown
|
#!/usr/bin/env python3.8
import sys
import yaml
def print_votes(votes):
if votes:
for v in votes:
print("""* [#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num}): {description}"""
.format(num=v['number'], description=v['description']))
def main():
agenda_year = sys.argv[1]
agenda_month = sys.argv[2]
filename = """../_data/meetings/{year}/{month}/agenda.yml""".format(year=agenda_year, month=agenda_month)
with open(filename, 'r') as f:
agenda = yaml.load(f, Loader=yaml.FullLoader)
print("# Votes")
print("## Procedure Votes")
print_votes(agenda['procedure-votes'])
print("")
print("## Errata Votes")
print_votes(agenda['errata-votes'])
print("")
print("## No No Votes")
print_votes(agenda['no-no-votes'])
print("")
print("## First Votes")
print_votes(agenda['first-votes'])
print("")
print("## Second Votes")
print_votes(agenda['second-votes'])
print("")
print("# Plenaries")
print("| Type | Issue | Title | Presenter |")
print("| ---- | ----- | ----- | --------- |")
for p in agenda['plenaries']:
try:
number="""[#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num})""".format(num=p['issue_number'])
except:
number=''
print("""| {type} | {num} | {title} | {presenter} |"""
.format(type=p['type'], num=number, title=p['title'],
presenter=p['presenter']))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to convert agenda to markdown<commit_after>
|
#!/usr/bin/env python3.8
import sys
import yaml
def print_votes(votes):
if votes:
for v in votes:
print("""* [#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num}): {description}"""
.format(num=v['number'], description=v['description']))
def main():
agenda_year = sys.argv[1]
agenda_month = sys.argv[2]
filename = """../_data/meetings/{year}/{month}/agenda.yml""".format(year=agenda_year, month=agenda_month)
with open(filename, 'r') as f:
agenda = yaml.load(f, Loader=yaml.FullLoader)
print("# Votes")
print("## Procedure Votes")
print_votes(agenda['procedure-votes'])
print("")
print("## Errata Votes")
print_votes(agenda['errata-votes'])
print("")
print("## No No Votes")
print_votes(agenda['no-no-votes'])
print("")
print("## First Votes")
print_votes(agenda['first-votes'])
print("")
print("## Second Votes")
print_votes(agenda['second-votes'])
print("")
print("# Plenaries")
print("| Type | Issue | Title | Presenter |")
print("| ---- | ----- | ----- | --------- |")
for p in agenda['plenaries']:
try:
number="""[#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num})""".format(num=p['issue_number'])
except:
number=''
print("""| {type} | {num} | {title} | {presenter} |"""
.format(type=p['type'], num=number, title=p['title'],
presenter=p['presenter']))
if __name__ == '__main__':
main()
|
Add script to convert agenda to markdown#!/usr/bin/env python3.8
import sys
import yaml
def print_votes(votes):
if votes:
for v in votes:
print("""* [#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num}): {description}"""
.format(num=v['number'], description=v['description']))
def main():
agenda_year = sys.argv[1]
agenda_month = sys.argv[2]
filename = """../_data/meetings/{year}/{month}/agenda.yml""".format(year=agenda_year, month=agenda_month)
with open(filename, 'r') as f:
agenda = yaml.load(f, Loader=yaml.FullLoader)
print("# Votes")
print("## Procedure Votes")
print_votes(agenda['procedure-votes'])
print("")
print("## Errata Votes")
print_votes(agenda['errata-votes'])
print("")
print("## No No Votes")
print_votes(agenda['no-no-votes'])
print("")
print("## First Votes")
print_votes(agenda['first-votes'])
print("")
print("## Second Votes")
print_votes(agenda['second-votes'])
print("")
print("# Plenaries")
print("| Type | Issue | Title | Presenter |")
print("| ---- | ----- | ----- | --------- |")
for p in agenda['plenaries']:
try:
number="""[#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num})""".format(num=p['issue_number'])
except:
number=''
print("""| {type} | {num} | {title} | {presenter} |"""
.format(type=p['type'], num=number, title=p['title'],
presenter=p['presenter']))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to convert agenda to markdown<commit_after>#!/usr/bin/env python3.8
import sys
import yaml
def print_votes(votes):
if votes:
for v in votes:
print("""* [#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num}): {description}"""
.format(num=v['number'], description=v['description']))
def main():
agenda_year = sys.argv[1]
agenda_month = sys.argv[2]
filename = """../_data/meetings/{year}/{month}/agenda.yml""".format(year=agenda_year, month=agenda_month)
with open(filename, 'r') as f:
agenda = yaml.load(f, Loader=yaml.FullLoader)
print("# Votes")
print("## Procedure Votes")
print_votes(agenda['procedure-votes'])
print("")
print("## Errata Votes")
print_votes(agenda['errata-votes'])
print("")
print("## No No Votes")
print_votes(agenda['no-no-votes'])
print("")
print("## First Votes")
print_votes(agenda['first-votes'])
print("")
print("## Second Votes")
print_votes(agenda['second-votes'])
print("")
print("# Plenaries")
print("| Type | Issue | Title | Presenter |")
print("| ---- | ----- | ----- | --------- |")
for p in agenda['plenaries']:
try:
number="""[#{num}](https://github.com/mpi-forum/mpi-issues/issues/{num})""".format(num=p['issue_number'])
except:
number=''
print("""| {type} | {num} | {title} | {presenter} |"""
.format(type=p['type'], num=number, title=p['title'],
presenter=p['presenter']))
if __name__ == '__main__':
main()
|
|
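Editor's note: the script above uses yaml.load with FullLoader. For plain data files such as these agendas, yaml.safe_load is a common, slightly stricter alternative; a minimal sketch with a hypothetical path:
import yaml

with open('../_data/meetings/2020/06/agenda.yml') as f:  # illustrative path
    agenda = yaml.safe_load(f)  # refuses arbitrary Python object construction
print(len(agenda['plenaries']), 'plenaries on the agenda')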
541d4080821692ed879bfee47eb0ce1a8b278dac
|
Python/reverse-words-in-a-string-iii.py
|
Python/reverse-words-in-a-string-iii.py
|
# Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
|
# Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
class Solution2(object):
def reverseWords(self, s):
reversed_words = [word[::-1] for word in s.split(' ')]
return ' '.join(reversed_words)
|
Add alternative solution for 'Reverse words in string III'
|
Add alternative solution for 'Reverse words in string III'
|
Python
|
mit
|
kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015
|
# Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
Add alternative solution for 'Reverse words in string III'
|
# Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
class Solution2(object):
def reverseWords(self, s):
reversed_words = [word[::-1] for word in s.split(' ')]
return ' '.join(reversed_words)
|
<commit_before># Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
<commit_msg>Add alternative solution for 'Reverse words in string III'<commit_after>
|
# Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
class Solution2(object):
def reverseWords(self, s):
reversed_words = [word[::-1] for word in s.split(' ')]
return ' '.join(reversed_words)
|
# Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
Add alternative solution for 'Reverse words in string III'# Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
class Solution2(object):
def reverseWords(self, s):
reversed_words = [word[::-1] for word in s.split(' ')]
return ' '.join(reversed_words)
|
<commit_before># Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
<commit_msg>Add alternative solution for 'Reverse words in string III'<commit_after># Time: O(n)
# Space: O(1)
# Given a string, you need to reverse the order of characters in each word within a sentence
# while still preserving whitespace and initial word order.
#
# Example 1:
# Input: "Let's take LeetCode contest"
# Output: "s'teL ekat edoCteeL tsetnoc"
# Note: In the string, each word is separated by single space and
# there will not be any extra space in the string.
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
def reverse(s, begin, end):
for i in xrange((end - begin) // 2):
s[begin + i], s[end - 1 - i] = s[end - 1 - i], s[begin + i]
s, i = list(s), 0
for j in xrange(len(s) + 1):
if j == len(s) or s[j] == ' ':
reverse(s, i, j)
i = j + 1
return "".join(s)
class Solution2(object):
def reverseWords(self, s):
reversed_words = [word[::-1] for word in s.split(' ')]
return ' '.join(reversed_words)
|
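Editor's note: a quick sanity check that the in-place and split-based solutions above agree, assuming both classes are in scope (Python 2, matching the xrange usage):
s = "Let's take LeetCode contest"
expected = "s'teL ekat edoCteeL tsetnoc"
assert Solution().reverseWords(s) == expected
assert Solution2().reverseWords(s) == expected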
cee058cf81fc0baba625942d90a5fd3f67fb9bec
|
util/generate_loading_gif.py
|
util/generate_loading_gif.py
|
#!/usr/bin/env python3
""" Generate a "loading" or "waiting" animated gif. """
import math
import PIL.Image
import PIL.ImageDraw
SIZE = 16
TOTAL_DOTS = 8
VISUAL_DOTS = 4 # how many dots are visible in each frame.
DIAMETER = SIZE / 8.0
SECONDS = 1.25 # how long it takes to do a complete cycle.
OUTPUT = "loading.gif"
def draw_dot(draw, i, color):
""" Draw a dot around a circle with a color. """
# Positions around the big circle.
unit_x = 1 + math.sin(i/TOTAL_DOTS * 2*math.pi)
unit_y = 1 - math.cos(i/TOTAL_DOTS * 2*math.pi)
# Scale to the desired size and circle diameter.
x = round(((SIZE - DIAMETER)/2 - 1) * unit_x)
y = round(((SIZE - DIAMETER)/2 - 1) * unit_y)
# Center the pixels for the library. (Doesn't appear to have
# any effect at 16x16, but might possibly matter if we add
# some kind of filter.)
x += 0.5
y += 0.5
# These are drawn as rectangles in a 16x16 image, but if we
# increase the size then they're circles.
draw.ellipse((x, y, x + DIAMETER, y + DIAMETER), fill=color, width=1)
def draw_frame(framenum):
""" Draw a frame of the animation. """
# Create new image and drawing surface.
image = PIL.Image.new('LA', (SIZE, SIZE), (1, 255))
draw = PIL.ImageDraw.Draw(image)
# Draw the dots.
for i in range(VISUAL_DOTS):
pos = ((framenum - i) % TOTAL_DOTS)
# The Qt background is (239,239,239) so this fades from 0
# to 240 (but stops at 180).
gray = round(240/4*i)
draw_dot(draw, pos, (gray, 0))
return image
def main(filename):
""" Generate the animation and save it to the filename. """
# Generate frames.
images = []
for framenum in range(TOTAL_DOTS):
image = draw_frame(framenum)
images.append(image)
# Write gif.
images[0].save(filename, save_all=True, append_images=images[1:],
duration=SECONDS / TOTAL_DOTS * 1000,
loop=0,
transparency=1,
disposal=2)
if __name__ == "__main__":
main(OUTPUT)
|
Add script to generate the "loading" animation
|
Add script to generate the "loading" animation
|
Python
|
bsd-2-clause
|
Tarsnap/tarsnap-gui,Tarsnap/tarsnap-gui,Tarsnap/tarsnap-gui,Tarsnap/tarsnap-gui,Tarsnap/tarsnap-gui
|
Add script to generate the "loading" animation
|
#!/usr/bin/env python3
""" Generate a "loading" or "waiting" animated gif. """
import math
import PIL.Image
import PIL.ImageDraw
SIZE = 16
TOTAL_DOTS = 8
VISUAL_DOTS = 4 # how many dots are visible in each frame.
DIAMETER = SIZE / 8.0
SECONDS = 1.25 # how long it takes to do a complete cycle.
OUTPUT = "loading.gif"
def draw_dot(draw, i, color):
""" Draw a dot around a circle with a color. """
# Positions around the big circle.
unit_x = 1 + math.sin(i/TOTAL_DOTS * 2*math.pi)
unit_y = 1 - math.cos(i/TOTAL_DOTS * 2*math.pi)
# Scale to the desired size and circle diameter.
x = round(((SIZE - DIAMETER)/2 - 1) * unit_x)
y = round(((SIZE - DIAMETER)/2 - 1) * unit_y)
# Center the pixels for the library. (Doesn't appear to have
# any effect at 16x16, but might possibly matter if we add
# some kind of filter.)
x += 0.5
y += 0.5
# These are drawn as rectangles in a 16x16 image, but if we
# increase the size then they're circles.
draw.ellipse((x, y, x + DIAMETER, y + DIAMETER), fill=color, width=1)
def draw_frame(framenum):
""" Draw a frame of the animation. """
# Create new image and drawing surface.
image = PIL.Image.new('LA', (SIZE, SIZE), (1, 255))
draw = PIL.ImageDraw.Draw(image)
# Draw the dots.
for i in range(VISUAL_DOTS):
pos = ((framenum - i) % TOTAL_DOTS)
# The Qt background is (239,239,239) so this fades from 0
# to 240 (but stops at 180).
gray = round(240/4*i)
draw_dot(draw, pos, (gray, 0))
return image
def main(filename):
""" Generate the animation and save it to the filename. """
# Generate frames.
images = []
for framenum in range(TOTAL_DOTS):
image = draw_frame(framenum)
images.append(image)
# Write gif.
images[0].save(filename, save_all=True, append_images=images[1:],
duration=SECONDS / TOTAL_DOTS * 1000,
loop=0,
transparency=1,
disposal=2)
if __name__ == "__main__":
main(OUTPUT)
|
<commit_before><commit_msg>Add script to generate the "loading" animation<commit_after>
|
#!/usr/bin/env python3
""" Generate a "loading" or "waiting" animated gif. """
import math
import PIL.Image
import PIL.ImageDraw
SIZE = 16
TOTAL_DOTS = 8
VISUAL_DOTS = 4 # how many dots are visible in each frame.
DIAMETER = SIZE / 8.0
SECONDS = 1.25 # how long it takes to do a complete cycle.
OUTPUT = "loading.gif"
def draw_dot(draw, i, color):
""" Draw a dot around a circle with a color. """
# Positions around the big circle.
unit_x = 1 + math.sin(i/TOTAL_DOTS * 2*math.pi)
unit_y = 1 - math.cos(i/TOTAL_DOTS * 2*math.pi)
# Scale to the desired size and circle diameter.
x = round(((SIZE - DIAMETER)/2 - 1) * unit_x)
y = round(((SIZE - DIAMETER)/2 - 1) * unit_y)
# Center the pixels for the library. (Doesn't appear to have
# any effect at 16x16, but might possibly matter if we add
# some kind of filter.)
x += 0.5
y += 0.5
# These are drawn as rectangles in a 16x16 image, but if we
# increase the size then they're circles.
draw.ellipse((x, y, x + DIAMETER, y + DIAMETER), fill=color, width=1)
def draw_frame(framenum):
""" Draw a frame of the animation. """
# Create new image and drawing surface.
image = PIL.Image.new('LA', (SIZE, SIZE), (1, 255))
draw = PIL.ImageDraw.Draw(image)
# Draw the dots.
for i in range(VISUAL_DOTS):
pos = ((framenum - i) % TOTAL_DOTS)
# The Qt background is (239,239,239) so this fades from 0
# to 240 (but stops at 180).
gray = round(240/4*i)
draw_dot(draw, pos, (gray, 0))
return image
def main(filename):
""" Generate the animation and save it to the filename. """
# Generate frames.
images = []
for framenum in range(TOTAL_DOTS):
image = draw_frame(framenum)
images.append(image)
# Write gif.
images[0].save(filename, save_all=True, append_images=images[1:],
duration=SECONDS / TOTAL_DOTS * 1000,
loop=0,
transparency=1,
disposal=2)
if __name__ == "__main__":
main(OUTPUT)
|
Add script to generate the "loading" animation#!/usr/bin/env python3
""" Generate a "loading" or "waiting" animated gif. """
import math
import PIL.Image
import PIL.ImageDraw
SIZE = 16
TOTAL_DOTS = 8
VISUAL_DOTS = 4 # how many dots are visible in each frame.
DIAMETER = SIZE / 8.0
SECONDS = 1.25 # how long it takes to do a complete cycle.
OUTPUT = "loading.gif"
def draw_dot(draw, i, color):
""" Draw a dot around a circle with a color. """
# Positions around the big circle.
unit_x = 1 + math.sin(i/TOTAL_DOTS * 2*math.pi)
unit_y = 1 - math.cos(i/TOTAL_DOTS * 2*math.pi)
# Scale to the desired size and circle diameter.
x = round(((SIZE - DIAMETER)/2 - 1) * unit_x)
y = round(((SIZE - DIAMETER)/2 - 1) * unit_y)
# Center the pixels for the library. (Doesn't appear to have
# any effect at 16x16, but might possibly matter if we add
# some kind of filter.)
x += 0.5
y += 0.5
# These are drawn as rectangles in a 16x16 image, but if we
# increase the size then they're circles.
draw.ellipse((x, y, x + DIAMETER, y + DIAMETER), fill=color, width=1)
def draw_frame(framenum):
""" Draw a frame of the animation. """
# Create new image and drawing surface.
image = PIL.Image.new('LA', (SIZE, SIZE), (1, 255))
draw = PIL.ImageDraw.Draw(image)
# Draw the dots.
for i in range(VISUAL_DOTS):
pos = ((framenum - i) % TOTAL_DOTS)
# The Qt background is (239,239,239) so this fades from 0
# to 240 (but stops at 180).
gray = round(240/4*i)
draw_dot(draw, pos, (gray, 0))
return image
def main(filename):
""" Generate the animation and save it to the filename. """
# Generate frames.
images = []
for framenum in range(TOTAL_DOTS):
image = draw_frame(framenum)
images.append(image)
# Write gif.
images[0].save(filename, save_all=True, append_images=images[1:],
duration=SECONDS / TOTAL_DOTS * 1000,
loop=0,
transparency=1,
disposal=2)
if __name__ == "__main__":
main(OUTPUT)
|
<commit_before><commit_msg>Add script to generate the "loading" animation<commit_after>#!/usr/bin/env python3
""" Generate a "loading" or "waiting" animated gif. """
import math
import PIL.Image
import PIL.ImageDraw
SIZE = 16
TOTAL_DOTS = 8
VISUAL_DOTS = 4 # how many dots are visible in each frame.
DIAMETER = SIZE / 8.0
SECONDS = 1.25 # how long it takes to do a complete cycle.
OUTPUT = "loading.gif"
def draw_dot(draw, i, color):
""" Draw a dot around a circle with a color. """
# Positions around the big circle.
unit_x = 1 + math.sin(i/TOTAL_DOTS * 2*math.pi)
unit_y = 1 - math.cos(i/TOTAL_DOTS * 2*math.pi)
# Scale to the desired size and circle diameter.
x = round(((SIZE - DIAMETER)/2 - 1) * unit_x)
y = round(((SIZE - DIAMETER)/2 - 1) * unit_y)
# Center the pixels for the library. (Doesn't appear to have
# any effect at 16x16, but might possibly matter if we add
# some kind of filter.)
x += 0.5
y += 0.5
# These are drawn as rectangles in a 16x16 image, but if we
# increase the size then they're circles.
draw.ellipse((x, y, x + DIAMETER, y + DIAMETER), fill=color, width=1)
def draw_frame(framenum):
""" Draw a frame of the animation. """
# Create new image and drawing surface.
image = PIL.Image.new('LA', (SIZE, SIZE), (1, 255))
draw = PIL.ImageDraw.Draw(image)
# Draw the dots.
for i in range(VISUAL_DOTS):
pos = ((framenum - i) % TOTAL_DOTS)
# The Qt background is (239,239,239) so this fades from 0
# to 240 (but stops at 180).
gray = round(240/4*i)
draw_dot(draw, pos, (gray, 0))
return image
def main(filename):
""" Generate the animation and save it to the filename. """
# Generate frames.
images = []
for framenum in range(TOTAL_DOTS):
image = draw_frame(framenum)
images.append(image)
# Write gif.
images[0].save(filename, save_all=True, append_images=images[1:],
duration=SECONDS / TOTAL_DOTS * 1000,
loop=0,
transparency=1,
disposal=2)
if __name__ == "__main__":
main(OUTPUT)
|
|
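A quick sanity check on the record above: reopening the result with Pillow confirms the frame count and per-frame delay. This is a minimal sketch, assuming the script has already written loading.gif (the OUTPUT constant) to the working directory.
# Hedged check of the generated gif; assumes Pillow and an existing loading.gif.
import PIL.Image
with PIL.Image.open("loading.gif") as gif:
    print("animated:", gif.is_animated)   # expect True
    print("frames:", gif.n_frames)        # expect 8, i.e. TOTAL_DOTS
    print("delay (ms):", gif.info.get("duration"))  # SECONDS/TOTAL_DOTS requested, ~156 ms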
fe3a40caf31aa1eaff4e1b6ce7dba3e01a27eec7
|
rgb_color_k_nearest.py
|
rgb_color_k_nearest.py
|
#!/usr/bin/env python
# rgb_color_k_nearest.py
# by wilsonmar@gmail.com, ayush.original@gmail.com, paarth.n@gmail.com
# This is not the complete/correct approach. This is just the framework possibly of using ML
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
df = pd.read_csv('rgb_combined_v01.csv') # load into dataframe
df.drop(['_Hex','_Name','_grey','_X11','_SVG'], 1, inplace=True) #axis=1 denotes that we are referring to a column, not a row
#Here I've dropped all columns except those which give RGB values
#TEST: Data Loaded
print (df)
X= np.array(df.drop(['_Title'],1))# represents features
Y= np.array(df['_Title'])# represents labels
#partition in training and testing sets
X_train, X_test , Y_train, Y_test = cross_validation.train_test_split(X,Y,test_size=0.01)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_test, Y_test)
accuracy = clf.score(X_test, Y_test)
print(accuracy)
example_rgb = np.array([222,184,135])
prediction = clf.predict(example_rgb)
print(prediction)
|
Add basic ML framework - does not work though
|
Add basic ML framework - does not work though
|
Python
|
mit
|
jetbloom/rgb2colorname,jetbloom/rgb2colorname
|
Add basic ML framework - does not work though
|
#!/usr/bin/env python
# rgb_color_k_nearest.py
# by wilsonmar@gmail.com, ayush.original@gmail.com, paarth.n@gmail.com
# This is not the complete/correct approach. This is just the framework possibly of using ML
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
df = pd.read_csv('rgb_combined_v01.csv') # load into dataframe
df.drop(['_Hex','_Name','_grey','_X11','_SVG'], 1, inplace=True) #axis=1 denotes that we are referring to a column, not a row
#Here I've dropped all columns except those which give RGB values
#TEST: Data Loaded
print (df)
X= np.array(df.drop(['_Title'],1))# represents features
Y= np.array(df['_Title'])# represents labels
#partition in training and testing sets
X_train, X_test , Y_train, Y_test = cross_validation.train_test_split(X,Y,test_size=0.01)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_test, Y_test)
accuracy = clf.score(X_test, Y_test)
print(accuracy)
example_rgb = np.array([222,184,135])
prediction = clf.predict(example_rgb)
print(prediction)
|
<commit_before><commit_msg>Add basic ML framework - does not work though<commit_after>
|
#!/usr/bin/env python
# rgb_color_k_nearest.py
# by wilsonmar@gmail.com, ayush.original@gmail.com, paarth.n@gmail.com
# This is not the complete/correct approach. This is just the framework possibly of using ML
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
df = pd.read_csv('rgb_combined_v01.csv') # load into dataframe
df.drop(['_Hex','_Name','_grey','_X11','_SVG'], 1, inplace=True) #axis=1 denotes that we are referring to a column, not a row
#Here I've dropped all columns except those which give RGB values
#TEST: Data Loaded
print (df)
X= np.array(df.drop(['_Title'],1))# represents features
Y= np.array(df['_Title'])# represents labels
#partition in training and testing sets
X_train, X_test , Y_train, Y_test = cross_validation.train_test_split(X,Y,test_size=0.01)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_test, Y_test)
accuracy = clf.score(X_test, Y_test)
print(accuracy)
example_rgb = np.array([222,184,135])
prediction = clf.predict(example_rgb)
print(prediction)
|
Add basic ML framework - does not work though#!/usr/bin/env python
# rgb_color_k_nearest.py
# by wilsonmar@gmail.com, ayush.original@gmail.com, paarth.n@gmail.com
# This is not the complete/correct approach. This is just the framework possibly of using ML
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
df = pd.read_csv('rgb_combined_v01.csv') # load into dataframe
df.drop(['_Hex','_Name','_grey','_X11','_SVG'], 1, inplace=True) #axis=1 denotes that we are referring to a column, not a row
#Here I've dropped all columns except those which give RGB values
#TEST: Data Loaded
print (df)
X= np.array(df.drop(['_Title'],1))# represents features
Y= np.array(df['_Title'])# represents labels
#partition in training and testing sets
X_train, X_test , Y_train, Y_test = cross_validation.train_test_split(X,Y,test_size=0.01)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_test, Y_test)
accuracy = clf.score(X_test, Y_test)
print(accuracy)
example_rgb = np.array([222,184,135])
prediction = clf.predict(example_rgb)
print(prediction)
|
<commit_before><commit_msg>Add basic ML framework - does not work though<commit_after>#!/usr/bin/env python
# rgb_color_k_nearest.py
# by wilsonmar@gmail.com, ayush.original@gmail.com, paarth.n@gmail.com
# This is not the complete/correct approach. This is just the framework possibly of using ML
import numpy as np
from sklearn import preprocessing, cross_validation, neighbors
import pandas as pd
df = pd.read_csv('rgb_combined_v01.csv') # load into dataframe
df.drop(['_Hex','_Name','_grey','_X11','_SVG'], 1, inplace=True) #axis=1 denotes that we are referring to a column, not a row
#Here I've dropped all columns except those which give RGB values
#TEST: Data Loaded
print (df)
X= np.array(df.drop(['_Title'],1))# represents features
Y= np.array(df['_Title'])# represents labels
#partition in training and testing sets
X_train, X_test , Y_train, Y_test = cross_validation.train_test_split(X,Y,test_size=0.01)
clf = neighbors.KNeighborsClassifier()
clf.fit(X_test, Y_test)
accuracy = clf.score(X_test, Y_test)
print(accuracy)
example_rgb = np.array([222,184,135])
prediction = clf.predict(example_rgb)
print(prediction)
|
|
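As the record's own message admits, the snippet above is not a working pipeline: the classifier is fitted and scored on the same held-out split, and predict() receives a 1-D array where scikit-learn expects shape (n_samples, n_features). A hedged sketch of the corrected flow, using invented toy RGB rows instead of the project's CSV (note that sklearn.cross_validation was later replaced by sklearn.model_selection):
# Corrected sketch with hypothetical toy data, not the project's CSV.
import numpy as np
from sklearn.model_selection import train_test_split  # successor of cross_validation
from sklearn.neighbors import KNeighborsClassifier
X = np.array([[222, 184, 135], [250, 10, 5], [255, 0, 0],
              [10, 20, 230], [0, 0, 255], [240, 5, 10]])
y = np.array(["burlywood", "red", "red", "blue", "blue", "red"])
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)
clf = KNeighborsClassifier(n_neighbors=1).fit(X_train, y_train)  # fit on train, not test
print("accuracy:", clf.score(X_test, y_test))
print("prediction:", clf.predict(np.array([[222, 184, 135]])))  # predict() takes a 2-D array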
18b93a1c998c720554554b5797bcd1a5a38e4e77
|
util/hgfilesize.py
|
util/hgfilesize.py
|
from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
Add a hook to limit the size of any individual file
|
hooks: Add a hook to limit the size of any individual file
|
Python
|
bsd-3-clause
|
andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin
|
hooks: Add a hook to limit the size of any individual file
|
from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
<commit_before><commit_msg>hooks: Add a hook to limit the size of any individual file<commit_after>
|
from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
hooks: Add a hook to limit the size of any individual filefrom mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
<commit_before><commit_msg>hooks: Add a hook to limit the size of any individual file<commit_after>from mercurial import context
from mercurial.i18n import _
'''
[extensions]
hgfilesize=~/m5/incoming/util/hgfilesize.py
[hooks]
pretxncommit = python:hgfilesize.limit_file_size
pretxnchangegroup = python:hgfilesize.limit_file_size
[limit_file_size]
maximum_file_size = 200000
'''
def limit_file_size(ui, repo, node=None, **kwargs):
'''forbid files over a given size'''
# default limit is 1 MB
limit = int(ui.config('limit_file_size', 'maximum_file_size', 1024*1024))
existing_tip = context.changectx(repo, node).rev()
new_tip = context.changectx(repo, 'tip').rev()
for rev in xrange(existing_tip, new_tip + 1):
ctx = context.changectx(repo, rev)
for f in ctx.files():
fctx = ctx.filectx(f)
if fctx.size() > limit:
ui.write(_('file %s of %s is too large: %d > %d\n') % \
(f, ctx, fctx.size(), limit))
return True # This is invalid
return False # Things are OK.
|
|
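The size check at the heart of the hook is plain Python and easy to exercise outside Mercurial; the names and byte counts below are made up for illustration.
# Toy illustration of the limit logic; file names and sizes are hypothetical.
LIMIT = 1024 * 1024  # the same 1 MB default the hook falls back to
sizes = {"README": 4096, "dataset.bin": 5000000}  # name -> size in bytes
for name, size in sizes.items():
    if size > LIMIT:
        print("file %s is too large: %d > %d" % (name, size, LIMIT))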
fc60ac22d7497e8350351a5c82a86588c6a224fe
|
setup_base.py
|
setup_base.py
|
from distutils.core import setup
long_description="""HTML parser designed to follow the WHATWG HTML5
specification. The parser is designed to handle all flavours of HTML and
parses invalid documents using well-defined error handling rules compatible
with the behaviour of major desktop web browsers.
Output is to a tree structure; the current release supports output to
a custom tree similar to DOM and to ElementTree.
"""
classifiers=[
'Development Status :: %(status)s',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
setup(name='html5lib',
version='%(version)s',
url='http://code.google.com/p/html5lib/',
license="MIT License",
description='HTML parser based on the WHAT-WG Web Applications 1.0'
'("HTML5") specifcation',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='jg307@cam.ac.uk',
packages=['html5lib', 'html5lib.treebuilders'],
package_dir = {'html5lib': 'src'}
)
|
Add base setup file for generating packages
|
Add base setup file for generating packages
--HG--
extra : convert_revision : svn%3Aacbfec75-9323-0410-a652-858a13e371e0/trunk%40527
|
Python
|
mit
|
gsnedders/html5lib-python,alex/html5lib-python,mindw/html5lib-python,mindw/html5lib-python,ordbogen/html5lib-python,ordbogen/html5lib-python,dstufft/html5lib-python,html5lib/html5lib-python,mgilson/html5lib-python,alex/html5lib-python,alex/html5lib-python,mgilson/html5lib-python,html5lib/html5lib-python,mgilson/html5lib-python,dstufft/html5lib-python,dstufft/html5lib-python,gsnedders/html5lib-python,mindw/html5lib-python,ordbogen/html5lib-python,html5lib/html5lib-python
|
Add base setup file for generating packages
--HG--
extra : convert_revision : svn%3Aacbfec75-9323-0410-a652-858a13e371e0/trunk%40527
|
from distutils.core import setup
long_description="""HTML parser designed to follow the WHATWG HTML5
specification. The parser is designed to handle all flavours of HTML and
parses invalid documents using well-defined error handling rules compatible
with the behaviour of major desktop web browsers.
Output is to a tree structure; the current release supports output to
a custom tree similar to DOM and to ElementTree.
"""
classifiers=[
'Development Status :: %(status)s',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
setup(name='html5lib',
version='%(version)s',
url='http://code.google.com/p/html5lib/',
license="MIT License",
description='HTML parser based on the WHAT-WG Web Applications 1.0'
'("HTML5") specifcation',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='jg307@cam.ac.uk',
packages=['html5lib', 'html5lib.treebuilders'],
package_dir = {'html5lib': 'src'}
)
|
<commit_before><commit_msg>Add base setup file for generating packages
--HG--
extra : convert_revision : svn%3Aacbfec75-9323-0410-a652-858a13e371e0/trunk%40527<commit_after>
|
from distutils.core import setup
long_description="""HTML parser designed to follow the WHATWG HTML5
specification. The parser is designed to handle all flavours of HTML and
parses invalid documents using well-defined error handling rules compatible
with the behaviour of major desktop web browsers.
Output is to a tree structure; the current release supports output to
a custom tree similar to DOM and to ElementTree.
"""
classifiers=[
'Development Status :: %(status)s',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
setup(name='html5lib',
version='%(version)s',
url='http://code.google.com/p/html5lib/',
license="MIT License",
description='HTML parser based on the WHAT-WG Web Applications 1.0'
'("HTML5") specifcation',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='jg307@cam.ac.uk',
packages=['html5lib', 'html5lib.treebuilders'],
package_dir = {'html5lib': 'src'}
)
|
Add base setup file for generating packages
--HG--
extra : convert_revision : svn%3Aacbfec75-9323-0410-a652-858a13e371e0/trunk%40527from distutils.core import setup
long_description="""HTML parser designed to follow the WHATWG HTML5
specification. The parser is designed to handle all flavours of HTML and
parses invalid documents using well-defined error handling rules compatible
with the behaviour of major desktop web browsers.
Output is to a tree structure; the current release supports output to
a custom tree similar to DOM and to ElementTree.
"""
classifiers=[
'Development Status :: %(status)s',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
setup(name='html5lib',
version='%(version)s',
url='http://code.google.com/p/html5lib/',
license="MIT License",
description='HTML parser based on the WHAT-WG Web Applications 1.0'
'("HTML5") specifcation',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='jg307@cam.ac.uk',
packages=['html5lib', 'html5lib.treebuilders'],
package_dir = {'html5lib': 'src'}
)
|
<commit_before><commit_msg>Add base setup file for generating packages
--HG--
extra : convert_revision : svn%3Aacbfec75-9323-0410-a652-858a13e371e0/trunk%40527<commit_after>from distutils.core import setup
long_description="""HTML parser designed to follow the WHATWG HTML5
specification. The parser is designed to handle all flavours of HTML and
parses invalid documents using well-defined error handling rules compatible
with the behaviour of major desktop web browsers.
Output is to a tree structure; the current release supports output to
a custom tree similar to DOM and to ElementTree.
"""
classifiers=[
'Development Status :: %(status)s',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Text Processing :: Markup :: HTML'
]
setup(name='html5lib',
version='%(version)s',
url='http://code.google.com/p/html5lib/',
license="MIT License",
description='HTML parser based on the WHAT-WG Web Applications 1.0'
'("HTML5") specifcation',
long_description=long_description,
classifiers=classifiers,
maintainer='James Graham',
maintainer_email='jg307@cam.ac.uk',
packages=['html5lib', 'html5lib.treebuilders'],
package_dir = {'html5lib': 'src'}
)
|
|
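The %(status)s and %(version)s placeholders suggest the file is meant to be rendered with %-style substitution before a release; a hedged sketch of that step (the concrete status and version values here are invented, and the real packaging script is not shown in this record):
# Hypothetical rendering of the template above into a concrete setup.py.
template = open("setup_base.py").read()
rendered = template % {"status": "3 - Alpha", "version": "0.9"}
with open("setup.py", "w") as f:
    f.write(rendered)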
1635bf028cf2a1a971f082370c94c446801ac444
|
ObjectTracking/DisplayVideoStream.py
|
ObjectTracking/DisplayVideoStream.py
|
import cv2
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
while rval:
cv2.imshow("preview", frame)
rval, frame = vc.read()
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
|
Add python script to display webcam video stream
|
Add python script to display webcam video stream
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
Add python script to display webcam video stream
|
import cv2
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
while rval:
cv2.imshow("preview", frame)
rval, frame = vc.read()
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
|
<commit_before><commit_msg>Add python script to display webcam video stream<commit_after>
|
import cv2
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
while rval:
cv2.imshow("preview", frame)
rval, frame = vc.read()
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
|
Add python script to display webcam video streamimport cv2
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
while rval:
cv2.imshow("preview", frame)
rval, frame = vc.read()
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
|
<commit_before><commit_msg>Add python script to display webcam video stream<commit_after>import cv2
cv2.namedWindow("preview")
vc = cv2.VideoCapture(0)
if vc.isOpened(): # try to get the first frame
rval, frame = vc.read()
else:
rval = False
while rval:
cv2.imshow("preview", frame)
rval, frame = vc.read()
key = cv2.waitKey(20)
if key == 27: # exit on ESC
break
|
|
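One thing the preview loop above never does is release the camera or close the window on exit; a variant with explicit cleanup, using only standard OpenCV calls:
# Same preview loop with cleanup on exit.
import cv2
vc = cv2.VideoCapture(0)
try:
    while vc.isOpened():
        rval, frame = vc.read()
        if not rval:
            break
        cv2.imshow("preview", frame)
        if cv2.waitKey(20) == 27:  # exit on ESC
            break
finally:
    vc.release()             # free the camera device
    cv2.destroyAllWindows()  # close the preview window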
4e6705acffe2ecc42bb934863629eb597375e8dc
|
data_provider.py
|
data_provider.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 00:15:50 2017
@author: sakurai
"""
import numpy as np
from sklearn.preprocessing import LabelEncoder
from datasets import get_cars196_streams
class DataProvider(object):
def __init__(self, stream, batch_size):
self._stream = stream
if hasattr(stream, 'data_stream'):
data_stream = stream.data_stream
else:
data_stream = stream
self.num_examples = data_stream.dataset.num_examples
labels = [stream.get_data([i])[1][0] for i in range(self.num_examples)]
labels = np.ravel(labels)
self._label_encoder = LabelEncoder().fit(labels)
self._labels = np.array(labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size <= self.num_classes, (
"batch_size must not be greather than the number of classes"
" (i.e. batch_size <= {})".format(self.num_classes))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return self._stream.get_data(indexes)
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
if __name__ == '__main__':
train, _ = get_cars196_streams(load_in_memory=True)
provider = DataProvider(train, 25)
anchor_indexes, positive_indexes = provider._generate_indexes()
print anchor_indexes
print positive_indexes
a_classes = provider._labels[anchor_indexes]
p_classes = provider._labels[positive_indexes]
print np.all(a_classes == p_classes)
x, c = provider.next()
|
Implement a class to feed minibatches for N-pair-mc training
|
Implement a class to feed minibatches for N-pair-mc training
|
Python
|
mit
|
ronekko/deep_metric_learning
|
Implement a class to feed minibatches for N-pair-mc training
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 00:15:50 2017
@author: sakurai
"""
import numpy as np
from sklearn.preprocessing import LabelEncoder
from datasets import get_cars196_streams
class DataProvider(object):
def __init__(self, stream, batch_size):
self._stream = stream
if hasattr(stream, 'data_stream'):
data_stream = stream.data_stream
else:
data_stream = stream
self.num_examples = data_stream.dataset.num_examples
labels = [stream.get_data([i])[1][0] for i in range(self.num_examples)]
labels = np.ravel(labels)
self._label_encoder = LabelEncoder().fit(labels)
self._labels = np.array(labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size <= self.num_classes, (
"batch_size must not be greather than the number of classes"
" (i.e. batch_size <= {})".format(self.num_classes))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return self._stream.get_data(indexes)
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
if __name__ == '__main__':
train, _ = get_cars196_streams(load_in_memory=True)
provider = DataProvider(train, 25)
anchor_indexes, positive_indexes = provider._generate_indexes()
print anchor_indexes
print positive_indexes
a_classes = provider._labels[anchor_indexes]
p_classes = provider._labels[positive_indexes]
print np.all(a_classes == p_classes)
x, c = provider.next()
|
<commit_before><commit_msg>Implement a class to feed minibatches for N-pair-mc training<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 00:15:50 2017
@author: sakurai
"""
import numpy as np
from sklearn.preprocessing import LabelEncoder
from datasets import get_cars196_streams
class DataProvider(object):
def __init__(self, stream, batch_size):
self._stream = stream
if hasattr(stream, 'data_stream'):
data_stream = stream.data_stream
else:
data_stream = stream
self.num_examples = data_stream.dataset.num_examples
labels = [stream.get_data([i])[1][0] for i in range(self.num_examples)]
labels = np.ravel(labels)
self._label_encoder = LabelEncoder().fit(labels)
self._labels = np.array(labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size <= self.num_classes, (
"batch_size must not be greather than the number of classes"
" (i.e. batch_size <= {})".format(self.num_classes))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return self._stream.get_data(indexes)
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
if __name__ == '__main__':
train, _ = get_cars196_streams(load_in_memory=True)
provider = DataProvider(train, 25)
anchor_indexes, positive_indexes = provider._generate_indexes()
print anchor_indexes
print positive_indexes
a_classes = provider._labels[anchor_indexes]
p_classes = provider._labels[positive_indexes]
print np.all(a_classes == p_classes)
x, c = provider.next()
|
Implement a class to feed minibatches for N-pair-mc training# -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 00:15:50 2017
@author: sakurai
"""
import numpy as np
from sklearn.preprocessing import LabelEncoder
from datasets import get_cars196_streams
class DataProvider(object):
def __init__(self, stream, batch_size):
self._stream = stream
if hasattr(stream, 'data_stream'):
data_stream = stream.data_stream
else:
data_stream = stream
self.num_examples = data_stream.dataset.num_examples
labels = [stream.get_data([i])[1][0] for i in range(self.num_examples)]
labels = np.ravel(labels)
self._label_encoder = LabelEncoder().fit(labels)
self._labels = np.array(labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size <= self.num_classes, (
"batch_size must not be greather than the number of classes"
" (i.e. batch_size <= {})".format(self.num_classes))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return self._stream.get_data(indexes)
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
if __name__ == '__main__':
train, _ = get_cars196_streams(load_in_memory=True)
provider = DataProvider(train, 25)
anchor_indexes, positive_indexes = provider._generate_indexes()
print anchor_indexes
print positive_indexes
a_classes = provider._labels[anchor_indexes]
p_classes = provider._labels[positive_indexes]
print np.all(a_classes == p_classes)
x, c = provider.next()
|
<commit_before><commit_msg>Implement a class to feed minibatches for N-pair-mc training<commit_after># -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 00:15:50 2017
@author: sakurai
"""
import numpy as np
from sklearn.preprocessing import LabelEncoder
from datasets import get_cars196_streams
class DataProvider(object):
def __init__(self, stream, batch_size):
self._stream = stream
if hasattr(stream, 'data_stream'):
data_stream = stream.data_stream
else:
data_stream = stream
self.num_examples = data_stream.dataset.num_examples
labels = [stream.get_data([i])[1][0] for i in range(self.num_examples)]
labels = np.ravel(labels)
self._label_encoder = LabelEncoder().fit(labels)
self._labels = np.array(labels)
self._classes = self._label_encoder.classes_
self.num_classes = len(self._classes)
assert batch_size <= self.num_classes, (
"batch_size must not be greather than the number of classes"
" (i.e. batch_size <= {})".format(self.num_classes))
self.batch_size = batch_size
self._class_to_indexes = []
for c in self._classes:
self._class_to_indexes.append(
np.argwhere(self._labels == c).ravel())
def next(self):
anchor_indexes, positive_indexes = self._generate_indexes()
indexes = anchor_indexes + positive_indexes
return self._stream.get_data(indexes)
def _generate_indexes(self):
random_classes = np.random.choice(
self.num_classes, self.batch_size, False)
anchor_indexes = []
positive_indexes = []
for c in random_classes:
a, p = np.random.choice(self._class_to_indexes[c], 2, False)
anchor_indexes.append(a)
positive_indexes.append(p)
return anchor_indexes, positive_indexes
if __name__ == '__main__':
train, _ = get_cars196_streams(load_in_memory=True)
provider = DataProvider(train, 25)
anchor_indexes, positive_indexes = provider._generate_indexes()
print anchor_indexes
print positive_indexes
a_classes = provider._labels[anchor_indexes]
p_classes = provider._labels[positive_indexes]
print np.all(a_classes == p_classes)
x, c = provider.next()
|
|
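The sampling scheme in _generate_indexes is the core of the provider: draw batch_size distinct classes, then one (anchor, positive) index pair per class. The idea in isolation, with a made-up label array:
# Standalone illustration of N-pair index sampling; labels are hypothetical.
import numpy as np
labels = np.array([0, 0, 0, 1, 1, 2, 2, 2, 3, 3])
batch_size = 3
rng = np.random.default_rng(0)
classes = rng.choice(np.unique(labels), batch_size, replace=False)
anchors, positives = [], []
for c in classes:
    a, p = rng.choice(np.argwhere(labels == c).ravel(), 2, replace=False)
    anchors.append(a)
    positives.append(p)
print(anchors, positives)  # same class at each position, distinct examples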
822cb468c033c81d8107d865715d024177b38fcd
|
CodeFights/doodledPassword.py
|
CodeFights/doodledPassword.py
|
#!/usr/local/bin/python
# Code Fights Doodled Password Problem
from collections import deque
def doodledPassword(digits):
n = len(digits)
res = [deque(digits) for _ in range(n)]
deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
return [list(d) for d in res]
def main():
tests = [
[
[1, 2, 3, 4, 5],
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 1],
[3, 4, 5, 1, 2],
[4, 5, 1, 2, 3],
[5, 1, 2, 3, 4]
]
],
[[5], [[5]]],
[
[2, 2, 2, 2],
[
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2]
]
],
[
[9, 8, 7, 5, 4],
[
[9, 8, 7, 5, 4],
[8, 7, 5, 4, 9],
[7, 5, 4, 9, 8],
[5, 4, 9, 8, 7],
[4, 9, 8, 7, 5]
]
],
[
[1, 5, 1, 5, 1, 4],
[
[1, 5, 1, 5, 1, 4],
[5, 1, 5, 1, 4, 1],
[1, 5, 1, 4, 1, 5],
[5, 1, 4, 1, 5, 1],
[1, 4, 1, 5, 1, 5],
[4, 1, 5, 1, 5, 1]
]
]
]
for t in tests:
res = doodledPassword(t[0])
if t[1] == res:
print("PASSED: doodledPassword({}) returned {}"
.format(t[0], res))
else:
print("FAILED: doodledPassword({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights doodled password problem
|
Solve Code Fights doodled password problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights doodled password problem
|
#!/usr/local/bin/python
# Code Fights Doodled Password Problem
from collections import deque
def doodledPassword(digits):
n = len(digits)
res = [deque(digits) for _ in range(n)]
deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
return [list(d) for d in res]
def main():
tests = [
[
[1, 2, 3, 4, 5],
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 1],
[3, 4, 5, 1, 2],
[4, 5, 1, 2, 3],
[5, 1, 2, 3, 4]
]
],
[[5], [[5]]],
[
[2, 2, 2, 2],
[
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2]
]
],
[
[9, 8, 7, 5, 4],
[
[9, 8, 7, 5, 4],
[8, 7, 5, 4, 9],
[7, 5, 4, 9, 8],
[5, 4, 9, 8, 7],
[4, 9, 8, 7, 5]
]
],
[
[1, 5, 1, 5, 1, 4],
[
[1, 5, 1, 5, 1, 4],
[5, 1, 5, 1, 4, 1],
[1, 5, 1, 4, 1, 5],
[5, 1, 4, 1, 5, 1],
[1, 4, 1, 5, 1, 5],
[4, 1, 5, 1, 5, 1]
]
]
]
for t in tests:
res = doodledPassword(t[0])
if t[1] == res:
print("PASSED: doodledPassword({}) returned {}"
.format(t[0], res))
else:
print("FAILED: doodledPassword({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights doodled password problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Doodled Password Problem
from collections import deque
def doodledPassword(digits):
n = len(digits)
res = [deque(digits) for _ in range(n)]
deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
return [list(d) for d in res]
def main():
tests = [
[
[1, 2, 3, 4, 5],
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 1],
[3, 4, 5, 1, 2],
[4, 5, 1, 2, 3],
[5, 1, 2, 3, 4]
]
],
[[5], [[5]]],
[
[2, 2, 2, 2],
[
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2]
]
],
[
[9, 8, 7, 5, 4],
[
[9, 8, 7, 5, 4],
[8, 7, 5, 4, 9],
[7, 5, 4, 9, 8],
[5, 4, 9, 8, 7],
[4, 9, 8, 7, 5]
]
],
[
[1, 5, 1, 5, 1, 4],
[
[1, 5, 1, 5, 1, 4],
[5, 1, 5, 1, 4, 1],
[1, 5, 1, 4, 1, 5],
[5, 1, 4, 1, 5, 1],
[1, 4, 1, 5, 1, 5],
[4, 1, 5, 1, 5, 1]
]
]
]
for t in tests:
res = doodledPassword(t[0])
if t[1] == res:
print("PASSED: doodledPassword({}) returned {}"
.format(t[0], res))
else:
print("FAILED: doodledPassword({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
Solve Code Fights doodled password problem#!/usr/local/bin/python
# Code Fights Doodled Password Problem
from collections import deque
def doodledPassword(digits):
n = len(digits)
res = [deque(digits) for _ in range(n)]
deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
return [list(d) for d in res]
def main():
tests = [
[
[1, 2, 3, 4, 5],
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 1],
[3, 4, 5, 1, 2],
[4, 5, 1, 2, 3],
[5, 1, 2, 3, 4]
]
],
[[5], [[5]]],
[
[2, 2, 2, 2],
[
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2]
]
],
[
[9, 8, 7, 5, 4],
[
[9, 8, 7, 5, 4],
[8, 7, 5, 4, 9],
[7, 5, 4, 9, 8],
[5, 4, 9, 8, 7],
[4, 9, 8, 7, 5]
]
],
[
[1, 5, 1, 5, 1, 4],
[
[1, 5, 1, 5, 1, 4],
[5, 1, 5, 1, 4, 1],
[1, 5, 1, 4, 1, 5],
[5, 1, 4, 1, 5, 1],
[1, 4, 1, 5, 1, 5],
[4, 1, 5, 1, 5, 1]
]
]
]
for t in tests:
res = doodledPassword(t[0])
if t[1] == res:
print("PASSED: doodledPassword({}) returned {}"
.format(t[0], res))
else:
print("FAILED: doodledPassword({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights doodled password problem<commit_after>#!/usr/local/bin/python
# Code Fights Doodled Password Problem
from collections import deque
def doodledPassword(digits):
n = len(digits)
res = [deque(digits) for _ in range(n)]
deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
return [list(d) for d in res]
def main():
tests = [
[
[1, 2, 3, 4, 5],
[
[1, 2, 3, 4, 5],
[2, 3, 4, 5, 1],
[3, 4, 5, 1, 2],
[4, 5, 1, 2, 3],
[5, 1, 2, 3, 4]
]
],
[[5], [[5]]],
[
[2, 2, 2, 2],
[
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2],
[2, 2, 2, 2]
]
],
[
[9, 8, 7, 5, 4],
[
[9, 8, 7, 5, 4],
[8, 7, 5, 4, 9],
[7, 5, 4, 9, 8],
[5, 4, 9, 8, 7],
[4, 9, 8, 7, 5]
]
],
[
[1, 5, 1, 5, 1, 4],
[
[1, 5, 1, 5, 1, 4],
[5, 1, 5, 1, 4, 1],
[1, 5, 1, 4, 1, 5],
[5, 1, 4, 1, 5, 1],
[1, 4, 1, 5, 1, 5],
[4, 1, 5, 1, 5, 1]
]
]
]
for t in tests:
res = doodledPassword(t[0])
if t[1] == res:
print("PASSED: doodledPassword({}) returned {}"
.format(t[0], res))
else:
print("FAILED: doodledPassword({}) returned {}, answer: {}"
.format(t[0], res, t[1]))
if __name__ == '__main__':
main()
|
|
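The whole solution rests on deque.rotate: rotating a copy of the digits by -i yields the i-th cyclic left shift. A standalone check against the first test case:
# deque.rotate(-i) produces the i-th left rotation of the sequence.
from collections import deque
digits = [1, 2, 3, 4, 5]
rows = []
for i in range(len(digits)):
    d = deque(digits)
    d.rotate(-i)  # negative argument rotates left
    rows.append(list(d))
print(rows)  # [[1,2,3,4,5], [2,3,4,5,1], [3,4,5,1,2], [4,5,1,2,3], [5,1,2,3,4]]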
14118d824b3d9b25b4c6cebd0f94d81ea64b4add
|
test/test_cache/test_cache_categories.py
|
test/test_cache/test_cache_categories.py
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test
from pybossa.cache import categories as cached_categories
from factories import CategoryFactory, ProjectFactory
class TestCategoriesCache(Test):
def test_get_all_returns_all_categories(self):
categories = [CategoryFactory.create()]
assert cached_categories.get_all() == categories
def test_get_used_returns_only_categories_with_projects(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
unused_category = CategoryFactory.create()
used_categories = cached_categories.get_used()
assert used_categories[0]['id'] == used_category.id, used_categories
def test_get_used_returns_required_fields(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
fields = ('id', 'name', 'short_name', 'description')
used_categories = cached_categories.get_used()
for field in fields:
assert field in used_categories[0].keys()
assert len(fields) == len(used_categories[0].keys())
|
Add tests for categories in cache
|
Add tests for categories in cache
|
Python
|
agpl-3.0
|
PyBossa/pybossa,Scifabric/pybossa,geotagx/pybossa,PyBossa/pybossa,Scifabric/pybossa,geotagx/pybossa
|
Add tests for categories in cache
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test
from pybossa.cache import categories as cached_categories
from factories import CategoryFactory, ProjectFactory
class TestCategoriesCache(Test):
def test_get_all_returns_all_categories(self):
categories = [CategoryFactory.create()]
assert cached_categories.get_all() == categories
def test_get_used_returns_only_categories_with_projects(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
unused_category = CategoryFactory.create()
used_categories = cached_categories.get_used()
assert used_categories[0]['id'] == used_category.id, used_categories
def test_get_used_returns_required_fields(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
fields = ('id', 'name', 'short_name', 'description')
used_categories = cached_categories.get_used()
for field in fields:
assert field in used_categories[0].keys()
assert len(fields) == len(used_categories[0].keys())
|
<commit_before><commit_msg>Add tests for categories in cache<commit_after>
|
# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test
from pybossa.cache import categories as cached_categories
from factories import CategoryFactory, ProjectFactory
class TestCategoriesCache(Test):
def test_get_all_returns_all_categories(self):
categories = [CategoryFactory.create()]
assert cached_categories.get_all() == categories
def test_get_used_returns_only_categories_with_projects(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
unused_category = CategoryFactory.create()
used_categories = cached_categories.get_used()
assert used_categories[0]['id'] == used_category.id, used_categories
def test_get_used_returns_required_fields(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
fields = ('id', 'name', 'short_name', 'description')
used_categories = cached_categories.get_used()
for field in fields:
assert field in used_categories[0].keys()
assert len(fields) == len(used_categories[0].keys())
|
Add tests for categories in cache# -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test
from pybossa.cache import categories as cached_categories
from factories import CategoryFactory, ProjectFactory
class TestCategoriesCache(Test):
def test_get_all_returns_all_categories(self):
categories = [CategoryFactory.create()]
assert cached_categories.get_all() == categories
def test_get_used_returns_only_categories_with_projects(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
unused_category = CategoryFactory.create()
used_categories = cached_categories.get_used()
assert used_categories[0]['id'] == used_category.id, used_categories
def test_get_used_returns_required_fields(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
fields = ('id', 'name', 'short_name', 'description')
used_categories = cached_categories.get_used()
for field in fields:
assert field in used_categories[0].keys()
assert len(fields) == len(used_categories[0].keys())
|
<commit_before><commit_msg>Add tests for categories in cache<commit_after># -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2015 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
from default import Test
from pybossa.cache import categories as cached_categories
from factories import CategoryFactory, ProjectFactory
class TestCategoriesCache(Test):
def test_get_all_returns_all_categories(self):
categories = [CategoryFactory.create()]
assert cached_categories.get_all() == categories
def test_get_used_returns_only_categories_with_projects(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
unused_category = CategoryFactory.create()
used_categories = cached_categories.get_used()
assert used_categories[0]['id'] == used_category.id, used_categories
def test_get_used_returns_required_fields(self):
used_category = CategoryFactory.create()
ProjectFactory.create(category=used_category)
fields = ('id', 'name', 'short_name', 'description')
used_categories = cached_categories.get_used()
for field in fields:
assert field in used_categories[0].keys()
assert len(fields) == len(used_categories[0].keys())
|
|
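The membership loop plus the length comparison in the last test amounts to set equality over the returned keys; a hedged restatement with an invented row dict:
# Equivalent field check written as set equality; the row is hypothetical.
row = {"id": 1, "name": "Science", "short_name": "sci", "description": "..."}
fields = ("id", "name", "short_name", "description")
assert set(row.keys()) == set(fields)  # same members, same count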
73d2cf253bbc11d7878db55823240824646f6079
|
tests/check_locale_format_consistency.py
|
tests/check_locale_format_consistency.py
|
import re
import json
import glob
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
this_locale = json.loads(open(locale_folder + locale_file).read())
for key, string in reference.items():
if key in this_locale:
subkeys_in_ref = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", string))
subkeys_in_this_locale = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", this_locale[key]))
if any(key not in subkeys_in_ref for key in subkeys_in_this_locale):
print("\n")
print("==========================")
print("Format inconsistency for string %s in %s:" % (key, locale_file))
print("%s -> %s " % ("en.json", string))
print("%s -> %s " % (locale_file, this_locale[key]))
|
Add draft of linter script to check locale format consistency
|
Add draft of linter script to check locale format consistency
|
Python
|
agpl-3.0
|
YunoHost/yunohost,YunoHost/moulinette-yunohost,YunoHost/moulinette-yunohost,YunoHost/yunohost,YunoHost/yunohost,YunoHost/moulinette-yunohost,YunoHost/moulinette-yunohost,YunoHost/yunohost,YunoHost/moulinette-yunohost
|
Add draft of linter script to check locale format consistency
|
import re
import json
import glob
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
this_locale = json.loads(open(locale_folder + locale_file).read())
for key, string in reference.items():
if key in this_locale:
subkeys_in_ref = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", string))
subkeys_in_this_locale = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", this_locale[key]))
if any(key not in subkeys_in_ref for key in subkeys_in_this_locale):
print("\n")
print("==========================")
print("Format inconsistency for string %s in %s:" % (key, locale_file))
print("%s -> %s " % ("en.json", string))
print("%s -> %s " % (locale_file, this_locale[key]))
|
<commit_before><commit_msg>Add draft of linter script to check locale format consistency<commit_after>
|
import re
import json
import glob
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
this_locale = json.loads(open(locale_folder + locale_file).read())
for key, string in reference.items():
if key in this_locale:
subkeys_in_ref = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", string))
subkeys_in_this_locale = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", this_locale[key]))
if any(key not in subkeys_in_ref for key in subkeys_in_this_locale):
print("\n")
print("==========================")
print("Format inconsistency for string %s in %s:" % (key, locale_file))
print("%s -> %s " % ("en.json", string))
print("%s -> %s " % (locale_file, this_locale[key]))
|
Add draft of linter script to check locale format consistencyimport re
import json
import glob
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
this_locale = json.loads(open(locale_folder + locale_file).read())
for key, string in reference.items():
if key in this_locale:
subkeys_in_ref = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", string))
subkeys_in_this_locale = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", this_locale[key]))
if any(key not in subkeys_in_ref for key in subkeys_in_this_locale):
print("\n")
print("==========================")
print("Format inconsistency for string %s in %s:" % (key, locale_file))
print("%s -> %s " % ("en.json", string))
print("%s -> %s " % (locale_file, this_locale[key]))
|
<commit_before><commit_msg>Add draft of linter script to check locale format consistency<commit_after>import re
import json
import glob
locale_folder = "../locales/"
locale_files = glob.glob(locale_folder + "*.json")
locale_files = [filename.split("/")[-1] for filename in locale_files]
locale_files.remove("en.json")
reference = json.loads(open(locale_folder + "en.json").read())
for locale_file in locale_files:
this_locale = json.loads(open(locale_folder + locale_file).read())
for key, string in reference.items():
if key in this_locale:
subkeys_in_ref = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", string))
subkeys_in_this_locale = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", this_locale[key]))
if any(key not in subkeys_in_ref for key in subkeys_in_this_locale):
print("\n")
print("==========================")
print("Format inconsistency for string %s in %s:" % (key, locale_file))
print("%s -> %s " % ("en.json", string))
print("%s -> %s " % (locale_file, this_locale[key]))
|
|
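The regex {(\w+)(:\w)?} is what extracts placeholder names, keeping the name and discarding any one-letter format suffix. A quick standalone demonstration on an invented string:
# What the linter's regex captures; the sample string is made up.
import re
s = "Installed {app} for {user} in {number:d} seconds"
keys = set(k[0] for k in re.findall(r"{(\w+)(:\w)?}", s))
print(keys)  # {'app', 'user', 'number'}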
81468159a3fec44ba3d764d1fbf6545bc604468b
|
accounts/tests/test_registration_view.py
|
accounts/tests/test_registration_view.py
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import PendingUser
class RegisterViewTestCase(TestCase):
def test_view_loads(self):
response = self.client.get(reverse('accounts:register'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register.html')
def test_register_view_form_valid(self):
''' The first and last name should be defined.
The given email address should be a valid email. '''
# Try with correct information
self.client.post(
reverse('accounts:register'),
{'first_name': 'Potato',
'last_name': 'Chips',
'email': 'potato.chips@lustucru.org',
'agreement': 'true'},
)
newUser = PendingUser.objects.filter(first_name='Potato')
self.assertNotEqual(list(newUser), [])
# Try with an incorrect email
self.client.post(
reverse('accounts:register'),
{'first_name': 'Mister',
'last_name': 'Tomaato',
'email': 'randomfalseemail',
'agreement': 'true'},
)
newUser2 = PendingUser.objects.filter(first_name='Mister')
self.assertEqual(list(newUser2), [])
# Try with no 'agreement' boolean
self.client.post(
reverse('accounts:register'),
{'first_name': 'Little',
'last_name': 'Cupcake',
'email': 'little.cupcake@haribo.org'}
)
newUser3 = PendingUser.objects.filter(first_name='Little')
self.assertEqual(list(newUser3), [])
|
Add tests for registration view
|
Add tests for registration view
|
Python
|
mit
|
Atilla106/members.atilla.org,Atilla106/members.atilla.org,Atilla106/members.atilla.org,Atilla106/members.atilla.org,Atilla106/members.atilla.org
|
Add tests for registration view
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import PendingUser
class RegisterViewTestCase(TestCase):
def test_view_loads(self):
response = self.client.get(reverse('accounts:register'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register.html')
def test_register_view_form_valid(self):
''' The first and last name should be defined.
The given email address should be a valid email. '''
        # Try with correct information
self.client.post(
reverse('accounts:register'),
{'first_name': 'Potato',
'last_name': 'Chips',
'email': 'potato.chips@lustucru.org',
'agreement': 'true'},
)
newUser = PendingUser.objects.filter(first_name='Potato')
self.assertNotEqual(list(newUser), [])
# Try with an incorrect email
self.client.post(
reverse('accounts:register'),
{'first_name': 'Mister',
'last_name': 'Tomaato',
'email': 'randomfalseemail',
'agreement': 'true'},
)
newUser2 = PendingUser.objects.filter(first_name='Mister')
self.assertEqual(list(newUser2), [])
# Try with no 'agreement' boolean
self.client.post(
reverse('accounts:register'),
{'first_name': 'Little',
'last_name': 'Cupcake',
'email': 'little.cupcake@haribo.org'}
)
newUser3 = PendingUser.objects.filter(first_name='Little')
self.assertEqual(list(newUser3), [])
|
<commit_before><commit_msg>Add tests for registration view<commit_after>
|
from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import PendingUser
class RegisterViewTestCase(TestCase):
def test_view_loads(self):
response = self.client.get(reverse('accounts:register'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register.html')
def test_register_view_form_valid(self):
''' The first and last name should be defined.
The given email address should be a valid email. '''
        # Try with correct information
self.client.post(
reverse('accounts:register'),
{'first_name': 'Potato',
'last_name': 'Chips',
'email': 'potato.chips@lustucru.org',
'agreement': 'true'},
)
newUser = PendingUser.objects.filter(first_name='Potato')
self.assertNotEqual(list(newUser), [])
# Try with an incorrect email
self.client.post(
reverse('accounts:register'),
{'first_name': 'Mister',
'last_name': 'Tomaato',
'email': 'randomfalseemail',
'agreement': 'true'},
)
newUser2 = PendingUser.objects.filter(first_name='Mister')
self.assertEqual(list(newUser2), [])
# Try with no 'agreement' boolean
self.client.post(
reverse('accounts:register'),
{'first_name': 'Little',
'last_name': 'Cupcake',
'email': 'little.cupcake@haribo.org'}
)
newUser3 = PendingUser.objects.filter(first_name='Little')
self.assertEqual(list(newUser3), [])
|
Add tests for registration viewfrom django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import PendingUser
class RegisterViewTestCase(TestCase):
def test_view_loads(self):
response = self.client.get(reverse('accounts:register'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register.html')
def test_register_view_form_valid(self):
''' The first and last name should be defined.
The given email address should be a valid email. '''
        # Try with correct information
self.client.post(
reverse('accounts:register'),
{'first_name': 'Potato',
'last_name': 'Chips',
'email': 'potato.chips@lustucru.org',
'agreement': 'true'},
)
newUser = PendingUser.objects.filter(first_name='Potato')
self.assertNotEqual(list(newUser), [])
# Try with an incorrect email
self.client.post(
reverse('accounts:register'),
{'first_name': 'Mister',
'last_name': 'Tomaato',
'email': 'randomfalseemail',
'agreement': 'true'},
)
newUser2 = PendingUser.objects.filter(first_name='Mister')
self.assertEqual(list(newUser2), [])
# Try with no 'agreement' boolean
self.client.post(
reverse('accounts:register'),
{'first_name': 'Little',
'last_name': 'Cupcake',
'email': 'little.cupcake@haribo.org'}
)
newUser3 = PendingUser.objects.filter(first_name='Little')
self.assertEqual(list(newUser3), [])
|
<commit_before><commit_msg>Add tests for registration view<commit_after>from django.test import TestCase
from django.core.urlresolvers import reverse
from ..models import PendingUser
class RegisterViewTestCase(TestCase):
def test_view_loads(self):
response = self.client.get(reverse('accounts:register'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register.html')
def test_register_view_form_valid(self):
''' The first and last name should be defined.
The given email address should be a valid email. '''
        # Try with correct information
self.client.post(
reverse('accounts:register'),
{'first_name': 'Potato',
'last_name': 'Chips',
'email': 'potato.chips@lustucru.org',
'agreement': 'true'},
)
newUser = PendingUser.objects.filter(first_name='Potato')
self.assertNotEqual(list(newUser), [])
# Try with an incorrect email
self.client.post(
reverse('accounts:register'),
{'first_name': 'Mister',
'last_name': 'Tomaato',
'email': 'randomfalseemail',
'agreement': 'true'},
)
newUser2 = PendingUser.objects.filter(first_name='Mister')
self.assertEqual(list(newUser2), [])
# Try with no 'agreement' boolean
self.client.post(
reverse('accounts:register'),
{'first_name': 'Little',
'last_name': 'Cupcake',
'email': 'little.cupcake@haribo.org'}
)
newUser3 = PendingUser.objects.filter(first_name='Little')
self.assertEqual(list(newUser3), [])
|
|
92759e9df89664ae515e51825982141750921ce3
|
src/sample_xblocks/basic/test/test_view_counter.py
|
src/sample_xblocks/basic/test/test_view_counter.py
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xblock.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
Use the correct location of view_counter in test
|
Use the correct location of view_counter in test
|
Python
|
apache-2.0
|
stvstnfrd/xblock-sdk,dcadams/xblock-sdk,edx/xblock-sdk,jamiefolsom/xblock-sdk,edx/xblock-sdk,stvstnfrd/xblock-sdk,nagyistoce/edx-xblock-sdk,lovehhf/xblock-sdk,edx-solutions/xblock-sdk,Pilou81715/hackathon_edX,Pilou81715/hackathon_edX,edx-solutions/xblock-sdk,lovehhf/xblock-sdk,lovehhf/xblock-sdk,nagyistoce/edx-xblock-sdk,dcadams/xblock-sdk,jamiefolsom/xblock-sdk,jamiefolsom/xblock-sdk,Lyla-Fischer/xblock-sdk,Lyla-Fischer/xblock-sdk,stvstnfrd/xblock-sdk,lovehhf/xblock-sdk,Lyla-Fischer/xblock-sdk,edx/xblock-sdk,Pilou81715/hackathon_edX,nagyistoce/edx-xblock-sdk,edx-solutions/xblock-sdk,Pilou81715/hackathon_edX,dcadams/xblock-sdk,nagyistoce/edx-xblock-sdk,edx-solutions/xblock-sdk,jamiefolsom/xblock-sdk
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xblock.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
Use the correct location of view_counter in test
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
<commit_before>""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xblock.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
<commit_msg>Use the correct location of view_counter in test<commit_after>
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xblock.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
Use the correct location of view_counter in test""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
<commit_before>""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xblock.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
<commit_msg>Use the correct location of view_counter in test<commit_after>""" Simple test for the view counter that verifies that it is updating properly """
from collections import namedtuple
from mock import Mock
from xblock.runtime import KvsFieldData, DictKeyValueStore
from sample_xblocks.basic.view_counter import ViewCounter
from xblock.test.tools import assert_in, assert_equals
TestUsage = namedtuple('TestUsage', 'id, def_id') # pylint: disable=C0103
def test_view_counter_state():
key_store = DictKeyValueStore()
db_model = KvsFieldData(key_store)
tester = ViewCounter(Mock(), db_model, Mock())
assert_equals(tester.views, 0)
# View the XBlock five times
for i in xrange(5):
generated_html = tester.student_view({})
# Make sure the html fragment we're expecting appears in the body_html
assert_in('<span class="views">{0}</span>'.format(i + 1), generated_html.body_html())
assert_equals(tester.views, i + 1)
|
ddc73ffe5f68d57afc8965078fcdc8f61aaf589d
|
tests/app/test_model.py
|
tests/app/test_model.py
|
from app import create_app, ApiClient
from .flask_api_client.test_api_client import TestApiClient
from .helpers import BaseApplicationTest
from app.model import User
from nose.tools import assert_equal
class TestModel(BaseApplicationTest):
def __init__(self):
self.app = create_app('test')
self.api_client = ApiClient(self.app)
def test_user_from_json(self):
result = User.from_json(TestApiClient.user())
assert_equal(result.id, 987)
assert_equal(result.email_address, 'email_address')
|
Move test to its own file
|
Move test to its own file
The `from_json` method being tested has moved from the api client to the model,
so I've moved the test to reflect the change.
|
Python
|
mit
|
alphagov/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,alphagov/digitalmarketplace-supplier-frontend,mtekel/digitalmarketplace-supplier-frontend
|
Move test to its own file
The `from_json` method being tested has moved from the api client to the model,
so I've moved the test to reflect the change.
|
from app import create_app, ApiClient
from .flask_api_client.test_api_client import TestApiClient
from .helpers import BaseApplicationTest
from app.model import User
from nose.tools import assert_equal
class TestModel(BaseApplicationTest):
def __init__(self):
self.app = create_app('test')
self.api_client = ApiClient(self.app)
def test_user_from_json(self):
result = User.from_json(TestApiClient.user())
assert_equal(result.id, 987)
assert_equal(result.email_address, 'email_address')
|
<commit_before><commit_msg>Move test to its own file
The `from_json` method being tested has moved from the api client to the model,
so I've moved the test to reflect the change.<commit_after>
|
from app import create_app, ApiClient
from .flask_api_client.test_api_client import TestApiClient
from .helpers import BaseApplicationTest
from app.model import User
from nose.tools import assert_equal
class TestModel(BaseApplicationTest):
def __init__(self):
self.app = create_app('test')
self.api_client = ApiClient(self.app)
def test_user_from_json(self):
result = User.from_json(TestApiClient.user())
assert_equal(result.id, 987)
assert_equal(result.email_address, 'email_address')
|
Move test to its own file
The `from_json` method being tested has moved from the api client to the model,
so I've moved the test to reflect the change.from app import create_app, ApiClient
from .flask_api_client.test_api_client import TestApiClient
from .helpers import BaseApplicationTest
from app.model import User
from nose.tools import assert_equal
class TestModel(BaseApplicationTest):
def __init__(self):
self.app = create_app('test')
self.api_client = ApiClient(self.app)
def test_user_from_json(self):
result = User.from_json(TestApiClient.user())
assert_equal(result.id, 987)
assert_equal(result.email_address, 'email_address')
|
<commit_before><commit_msg>Move test to its own file
The `from_json` method being tested has moved from the api client to the model,
so I've moved the test to reflect the change.<commit_after>from app import create_app, ApiClient
from .flask_api_client.test_api_client import TestApiClient
from .helpers import BaseApplicationTest
from app.model import User
from nose.tools import assert_equal
class TestModel(BaseApplicationTest):
def __init__(self):
self.app = create_app('test')
self.api_client = ApiClient(self.app)
def test_user_from_json(self):
result = User.from_json(TestApiClient.user())
assert_equal(result.id, 987)
assert_equal(result.email_address, 'email_address')
|
|
0504943ebedccbeea2d22a88510aa523afa9365c
|
tools/data/select_class_images_from_video_proto.py
|
tools/data/select_class_images_from_video_proto.py
|
#!/usr/bin/env python
import sys
import os.path as osp
import os
import argparse
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, frame_path_at, annots_at_frame
import shutil
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_proto')
parser.add_argument('annot_proto')
parser.add_argument('save_dir')
args = parser.parse_args()
vid_proto = proto_load(args.vid_proto)
annot_proto = proto_load(args.annot_proto)
if not osp.isdir(args.save_dir):
os.makedirs(args.save_dir)
for frame in vid_proto['frames']:
frame_id = frame['frame']
image_path = frame_path_at(vid_proto, frame_id)
annots = annots_at_frame(annot_proto, frame_id)
cls_idx = [annot['class_index'] for annot in annots]
uniq_cls = set(cls_idx)
for cls in uniq_cls:
save_dir = osp.join(args.save_dir,
"{:02d}".format(cls))
if not osp.isdir(save_dir):
os.makedirs(save_dir)
save_path = osp.join(save_dir,
'_'.join(image_path.split('/')[-2:]))
shutil.copyfile(image_path, save_path)
|
Add script to select class images from video protocols.
|
Add script to select class images from video protocols.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add script to select class images from video protocols.
|
#!/usr/bin/env python
import sys
import os.path as osp
import os
import argparse
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, frame_path_at, annots_at_frame
import shutil
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_proto')
parser.add_argument('annot_proto')
parser.add_argument('save_dir')
args = parser.parse_args()
vid_proto = proto_load(args.vid_proto)
annot_proto = proto_load(args.annot_proto)
if not osp.isdir(args.save_dir):
os.makedirs(args.save_dir)
for frame in vid_proto['frames']:
frame_id = frame['frame']
image_path = frame_path_at(vid_proto, frame_id)
annots = annots_at_frame(annot_proto, frame_id)
cls_idx = [annot['class_index'] for annot in annots]
uniq_cls = set(cls_idx)
for cls in uniq_cls:
save_dir = osp.join(args.save_dir,
"{:02d}".format(cls))
if not osp.isdir(save_dir):
os.makedirs(save_dir)
save_path = osp.join(save_dir,
'_'.join(image_path.split('/')[-2:]))
shutil.copyfile(image_path, save_path)
|
<commit_before><commit_msg>Add script to select class images from video protocols.<commit_after>
|
#!/usr/bin/env python
import sys
import os.path as osp
import os
import argparse
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, frame_path_at, annots_at_frame
import shutil
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_proto')
parser.add_argument('annot_proto')
parser.add_argument('save_dir')
args = parser.parse_args()
vid_proto = proto_load(args.vid_proto)
annot_proto = proto_load(args.annot_proto)
if not osp.isdir(args.save_dir):
os.makedirs(args.save_dir)
for frame in vid_proto['frames']:
frame_id = frame['frame']
image_path = frame_path_at(vid_proto, frame_id)
annots = annots_at_frame(annot_proto, frame_id)
cls_idx = [annot['class_index'] for annot in annots]
uniq_cls = set(cls_idx)
for cls in uniq_cls:
save_dir = osp.join(args.save_dir,
"{:02d}".format(cls))
if not osp.isdir(save_dir):
os.makedirs(save_dir)
save_path = osp.join(save_dir,
'_'.join(image_path.split('/')[-2:]))
shutil.copyfile(image_path, save_path)
|
Add script to select class images from video protocols.#!/usr/bin/env python
import sys
import os.path as osp
import os
import argparse
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, frame_path_at, annots_at_frame
import shutil
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_proto')
parser.add_argument('annot_proto')
parser.add_argument('save_dir')
args = parser.parse_args()
vid_proto = proto_load(args.vid_proto)
annot_proto = proto_load(args.annot_proto)
if not osp.isdir(args.save_dir):
os.makedirs(args.save_dir)
for frame in vid_proto['frames']:
frame_id = frame['frame']
image_path = frame_path_at(vid_proto, frame_id)
annots = annots_at_frame(annot_proto, frame_id)
cls_idx = [annot['class_index'] for annot in annots]
uniq_cls = set(cls_idx)
for cls in uniq_cls:
save_dir = osp.join(args.save_dir,
"{:02d}".format(cls))
if not osp.isdir(save_dir):
os.makedirs(save_dir)
save_path = osp.join(save_dir,
'_'.join(image_path.split('/')[-2:]))
shutil.copyfile(image_path, save_path)
|
<commit_before><commit_msg>Add script to select class images from video protocols.<commit_after>#!/usr/bin/env python
import sys
import os.path as osp
import os
import argparse
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, frame_path_at, annots_at_frame
import shutil
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('vid_proto')
parser.add_argument('annot_proto')
parser.add_argument('save_dir')
args = parser.parse_args()
vid_proto = proto_load(args.vid_proto)
annot_proto = proto_load(args.annot_proto)
if not osp.isdir(args.save_dir):
os.makedirs(args.save_dir)
for frame in vid_proto['frames']:
frame_id = frame['frame']
image_path = frame_path_at(vid_proto, frame_id)
annots = annots_at_frame(annot_proto, frame_id)
cls_idx = [annot['class_index'] for annot in annots]
uniq_cls = set(cls_idx)
for cls in uniq_cls:
save_dir = osp.join(args.save_dir,
"{:02d}".format(cls))
if not osp.isdir(save_dir):
os.makedirs(save_dir)
save_path = osp.join(save_dir,
'_'.join(image_path.split('/')[-2:]))
shutil.copyfile(image_path, save_path)
|
|
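For readers skimming the record above: the save filename is built by flattening the last two path components (video folder and frame file) into one name. A standalone sketch of that transformation with a made-up path:

import os.path as osp

image_path = '/data/frames/vid_0001/000042.JPEG'  # hypothetical frame_path_at() result
save_dir = '/out/07'
# join the last two components with '_' so frames from different videos
# cannot collide inside a single class folder
save_path = osp.join(save_dir, '_'.join(image_path.split('/')[-2:]))
print(save_path)  # /out/07/vid_0001_000042.JPEG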
98b3f4429cb9959f294f2fb6d6345ce18339a098
|
test/commands/test_object_mask.py
|
test/commands/test_object_mask.py
|
from unrealcv import client
import re
class Color(object):
''' A utility class to parse color value '''
regexp = re.compile('\(R=(.*),G=(.*),B=(.*),A=(.*)\)')
def __init__(self, color_str):
self.color_str = color_str
match = self.regexp.match(color_str)
(self.R, self.G, self.B, self.A) = [int(match.group(i)) for i in range(1,5)]
def __repr__(self):
return self.color_str
    def __eq__(self, other):
        equal = (self.R == other.R and self.G == other.G and self.B == other.B)
        return equal
    def __hash__(self):
        # hash on the RGB channels so equal colors collapse inside a set
        return hash((self.R, self.G, self.B))
def get_color_mapping(client, object_list):
''' Get the color mapping for specified objects '''
color_mapping = {}
for objname in object_list:
color_mapping[objname] = Color(client.request('vget /object/%s/color' % objname))
return color_mapping
def test_color_mapping():
    ''' Make sure the color for each object is unique '''
client.connect()
scene_objects = client.request('vget /objects').split(' ')
color_mapping = get_color_mapping(client, scene_objects)
colors = color_mapping.values()
assert len(colors) == len(set(colors))
print 'Number of objects %d, number of unique colors %d' % (len(colors), len(set(colors)))
print color_mapping
    # Make sure the colors are unique
if __name__ == '__main__':
test_color_mapping()
|
Add a test for object mask.
|
Add a test for object mask.
|
Python
|
mit
|
qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv
|
Add a test for object mask.
|
from unrealcv import client
import re
class Color(object):
''' A utility class to parse color value '''
regexp = re.compile('\(R=(.*),G=(.*),B=(.*),A=(.*)\)')
def __init__(self, color_str):
self.color_str = color_str
match = self.regexp.match(color_str)
(self.R, self.G, self.B, self.A) = [int(match.group(i)) for i in range(1,5)]
def __repr__(self):
return self.color_str
    def __eq__(self, other):
        equal = (self.R == other.R and self.G == other.G and self.B == other.B)
        return equal
    def __hash__(self):
        # hash on the RGB channels so equal colors collapse inside a set
        return hash((self.R, self.G, self.B))
def get_color_mapping(client, object_list):
''' Get the color mapping for specified objects '''
color_mapping = {}
for objname in object_list:
color_mapping[objname] = Color(client.request('vget /object/%s/color' % objname))
return color_mapping
def test_color_mapping():
    ''' Make sure the color for each object is unique '''
client.connect()
scene_objects = client.request('vget /objects').split(' ')
color_mapping = get_color_mapping(client, scene_objects)
colors = color_mapping.values()
assert len(colors) == len(set(colors))
print 'Number of objects %d, number of unique colors %d' % (len(colors), len(set(colors)))
print color_mapping
    # Make sure the colors are unique
if __name__ == '__main__':
test_color_mapping()
|
<commit_before><commit_msg>Add a test for object mask.<commit_after>
|
from unrealcv import client
import re
class Color(object):
''' A utility class to parse color value '''
regexp = re.compile('\(R=(.*),G=(.*),B=(.*),A=(.*)\)')
def __init__(self, color_str):
self.color_str = color_str
match = self.regexp.match(color_str)
(self.R, self.G, self.B, self.A) = [int(match.group(i)) for i in range(1,5)]
def __repr__(self):
return self.color_str
    def __eq__(self, other):
        equal = (self.R == other.R and self.G == other.G and self.B == other.B)
        return equal
    def __hash__(self):
        # hash on the RGB channels so equal colors collapse inside a set
        return hash((self.R, self.G, self.B))
def get_color_mapping(client, object_list):
''' Get the color mapping for specified objects '''
color_mapping = {}
for objname in object_list:
color_mapping[objname] = Color(client.request('vget /object/%s/color' % objname))
return color_mapping
def test_color_mapping():
    ''' Make sure the color for each object is unique '''
client.connect()
scene_objects = client.request('vget /objects').split(' ')
color_mapping = get_color_mapping(client, scene_objects)
colors = color_mapping.values()
assert len(colors) == len(set(colors))
print 'Number of objects %d, number of unique colors %d' % (len(colors), len(set(colors)))
print color_mapping
    # Make sure the colors are unique
if __name__ == '__main__':
test_color_mapping()
|
Add a test for object mask.from unrealcv import client
import re
class Color(object):
''' A utility class to parse color value '''
regexp = re.compile('\(R=(.*),G=(.*),B=(.*),A=(.*)\)')
def __init__(self, color_str):
self.color_str = color_str
match = self.regexp.match(color_str)
(self.R, self.G, self.B, self.A) = [int(match.group(i)) for i in range(1,5)]
def __repr__(self):
return self.color_str
    def __eq__(self, other):
        equal = (self.R == other.R and self.G == other.G and self.B == other.B)
        return equal
    def __hash__(self):
        # hash on the RGB channels so equal colors collapse inside a set
        return hash((self.R, self.G, self.B))
def get_color_mapping(client, object_list):
''' Get the color mapping for specified objects '''
color_mapping = {}
for objname in object_list:
color_mapping[objname] = Color(client.request('vget /object/%s/color' % objname))
return color_mapping
def test_color_mapping():
    ''' Make sure the color for each object is unique '''
client.connect()
scene_objects = client.request('vget /objects').split(' ')
color_mapping = get_color_mapping(client, scene_objects)
colors = color_mapping.values()
assert len(colors) == len(set(colors))
print 'Number of objects %d, number of unique colors %d' % (len(colors), len(set(colors)))
print color_mapping
    # Make sure the colors are unique
if __name__ == '__main__':
test_color_mapping()
|
<commit_before><commit_msg>Add a test for object mask.<commit_after>from unrealcv import client
import re
class Color(object):
''' A utility class to parse color value '''
regexp = re.compile('\(R=(.*),G=(.*),B=(.*),A=(.*)\)')
def __init__(self, color_str):
self.color_str = color_str
match = self.regexp.match(color_str)
(self.R, self.G, self.B, self.A) = [int(match.group(i)) for i in range(1,5)]
def __repr__(self):
return self.color_str
    def __eq__(self, other):
        equal = (self.R == other.R and self.G == other.G and self.B == other.B)
        return equal
    def __hash__(self):
        # hash on the RGB channels so equal colors collapse inside a set
        return hash((self.R, self.G, self.B))
def get_color_mapping(client, object_list):
''' Get the color mapping for specified objects '''
color_mapping = {}
for objname in object_list:
color_mapping[objname] = Color(client.request('vget /object/%s/color' % objname))
return color_mapping
def test_color_mapping():
    ''' Make sure the color for each object is unique '''
client.connect()
scene_objects = client.request('vget /objects').split(' ')
color_mapping = get_color_mapping(client, scene_objects)
colors = color_mapping.values()
assert len(colors) == len(set(colors))
print 'Number of objects %d, number of unique colors %d' % (len(colors), len(set(colors)))
print color_mapping
    # Make sure the colors are unique
if __name__ == '__main__':
test_color_mapping()
|
|
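A minimal, self-contained run of the color-string parsing from the test above (the reply string is made up, but follows the same FColor-style formatting):

import re

regexp = re.compile(r'\(R=(.*),G=(.*),B=(.*),A=(.*)\)')
match = regexp.match('(R=127,G=0,B=55,A=255)')  # hypothetical server reply
r, g, b, a = [int(match.group(i)) for i in range(1, 5)]
print(r, g, b, a)  # 127 0 55 255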
08718ce949e7f80b0cbe39c3eba4446133c6d72d
|
code/marv-api/marv_api/deprecation.py
|
code/marv-api/marv_api/deprecation.py
|
# Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
|
# Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import functools
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
def deprecated(version, msg=None, name=None):
"""Wrap function to trigger deprecated message upon call."""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kw):
_msg = (
f'{func.__module__}.{name or func.__name__} will be removed in {version}; '
f'{msg or "please let us know if this is an issue for you."}'
)
warnings.warn(_msg, FutureWarning, stacklevel=2)
return func(*args, **kw)
return wrapper
return deco
|
Add decorator to declare function deprecated
|
Add decorator to declare function deprecated
|
Python
|
agpl-3.0
|
ternaris/marv-robotics,ternaris/marv-robotics
|
# Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
Add decorator to declare function deprecated
|
# Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import functools
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
def deprecated(version, msg=None, name=None):
"""Wrap function to trigger deprecated message upon call."""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kw):
_msg = (
f'{func.__module__}.{name or func.__name__} will be removed in {version}; '
f'{msg or "please let us know if this is an issue for you."}'
)
warnings.warn(_msg, FutureWarning, stacklevel=2)
return func(*args, **kw)
return wrapper
return deco
|
<commit_before># Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
<commit_msg>Add decorator to declare function deprecated<commit_after>
|
# Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import functools
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
def deprecated(version, msg=None, name=None):
"""Wrap function to trigger deprecated message upon call."""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kw):
_msg = (
f'{func.__module__}.{name or func.__name__} will be removed in {version}; '
f'{msg or "please let us know if this is an issue for you."}'
)
warnings.warn(_msg, FutureWarning, stacklevel=2)
return func(*args, **kw)
return wrapper
return deco
|
# Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
Add decorator to declare function deprecated# Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import functools
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
def deprecated(version, msg=None, name=None):
"""Wrap function to trigger deprecated message upon call."""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kw):
_msg = (
f'{func.__module__}.{name or func.__name__} will be removed in {version}; '
f'{msg or "please let us know if this is an issue for you."}'
)
warnings.warn(_msg, FutureWarning, stacklevel=2)
return func(*args, **kw)
return wrapper
return deco
|
<commit_before># Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
<commit_msg>Add decorator to declare function deprecated<commit_after># Copyright 2020 Ternaris.
# SPDX-License-Identifier: AGPL-3.0-only
import functools
import warnings
from dataclasses import dataclass
from typing import Any
@dataclass
class Info:
module: str
version: str
obj: Any
msg: str = None
def make_getattr(module, dct):
assert all(x.module == module for x in dct.values())
def __getattr__(name):
info = dct.get(name)
if info is None:
raise AttributeError(f'module {module} has no attribute {name}')
msg = (
f'{module}.{name} will be removed in {info.version}; '
f'{info.msg or "please let us know if this is an issue for you."}'
)
warnings.warn(msg, FutureWarning, stacklevel=2)
return info.obj
return __getattr__
def deprecated(version, msg=None, name=None):
"""Wrap function to trigger deprecated message upon call."""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kw):
_msg = (
f'{func.__module__}.{name or func.__name__} will be removed in {version}; '
f'{msg or "please let us know if this is an issue for you."}'
)
warnings.warn(_msg, FutureWarning, stacklevel=2)
return func(*args, **kw)
return wrapper
return deco
|
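A short usage sketch of the deprecated() decorator added above; the function name and version string are invented, and the import assumes the marv_api.deprecation module path shown in the record:

import warnings

from marv_api.deprecation import deprecated

@deprecated('22.04', msg='use new_fetch() instead.')
def fetch(url):
    return url

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    fetch('x')
print(caught[0].category)  # <class 'FutureWarning'>
print(caught[0].message)   # __main__.fetch will be removed in 22.04; use new_fetch() instead.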
148cb8b58387f0e604ef93c8080be3ed533be488
|
tools/data/smooth_gt.py
|
tools/data/smooth_gt.py
|
#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, proto_dump
import argparse
import numpy as np
import scipy.ndimage
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('gt_file')
parser.add_argument('save_file')
parser.add_argument('--window', type=int, default=11)
args = parser.parse_args()
annot_proto = proto_load(args.gt_file)
for annot in annot_proto['annotations']:
boxes = np.asarray([box['bbox'] for box in annot['track']], dtype=np.float)
smoothed = scipy.ndimage.filters.gaussian_filter1d(boxes, args.window / 6.,
axis=0, mode='nearest')
for box, pred_bbox in zip(annot['track'], smoothed):
box['bbox'] = pred_bbox.tolist()
proto_dump(annot_proto, args.save_file)
|
Add a script to smooth gt tracks.
|
Add a script to smooth gt tracks.
|
Python
|
mit
|
myfavouritekk/TPN
|
Add a script to smooth gt tracks.
|
#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, proto_dump
import argparse
import numpy as np
import scipy.ndimage
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('gt_file')
parser.add_argument('save_file')
parser.add_argument('--window', type=int, default=11)
args = parser.parse_args()
annot_proto = proto_load(args.gt_file)
for annot in annot_proto['annotations']:
boxes = np.asarray([box['bbox'] for box in annot['track']], dtype=np.float)
smoothed = scipy.ndimage.filters.gaussian_filter1d(boxes, args.window / 6.,
axis=0, mode='nearest')
for box, pred_bbox in zip(annot['track'], smoothed):
box['bbox'] = pred_bbox.tolist()
proto_dump(annot_proto, args.save_file)
|
<commit_before><commit_msg>Add a script to smooth gt tracks.<commit_after>
|
#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, proto_dump
import argparse
import numpy as np
import scipy.ndimage
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('gt_file')
parser.add_argument('save_file')
parser.add_argument('--window', type=int, default=11)
args = parser.parse_args()
annot_proto = proto_load(args.gt_file)
for annot in annot_proto['annotations']:
boxes = np.asarray([box['bbox'] for box in annot['track']], dtype=np.float)
smoothed = scipy.ndimage.filters.gaussian_filter1d(boxes, args.window / 6.,
axis=0, mode='nearest')
for box, pred_bbox in zip(annot['track'], smoothed):
box['bbox'] = pred_bbox.tolist()
proto_dump(annot_proto, args.save_file)
|
Add a script to smooth gt tracks.#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, proto_dump
import argparse
import numpy as np
import scipy.ndimage
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('gt_file')
parser.add_argument('save_file')
parser.add_argument('--window', type=int, default=11)
args = parser.parse_args()
annot_proto = proto_load(args.gt_file)
for annot in annot_proto['annotations']:
boxes = np.asarray([box['bbox'] for box in annot['track']], dtype=np.float)
smoothed = scipy.ndimage.filters.gaussian_filter1d(boxes, args.window / 6.,
axis=0, mode='nearest')
for box, pred_bbox in zip(annot['track'], smoothed):
box['bbox'] = pred_bbox.tolist()
proto_dump(annot_proto, args.save_file)
|
<commit_before><commit_msg>Add a script to smooth gt tracks.<commit_after>#!/usr/bin/env python
import sys
import os.path as osp
this_dir = osp.dirname(__file__)
sys.path.insert(0, osp.join(this_dir, '../../external/'))
from vdetlib.utils.protocol import proto_load, proto_dump
import argparse
import numpy as np
import scipy.ndimage
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('gt_file')
parser.add_argument('save_file')
parser.add_argument('--window', type=int, default=11)
args = parser.parse_args()
annot_proto = proto_load(args.gt_file)
for annot in annot_proto['annotations']:
boxes = np.asarray([box['bbox'] for box in annot['track']], dtype=np.float)
smoothed = scipy.ndimage.filters.gaussian_filter1d(boxes, args.window / 6.,
axis=0, mode='nearest')
for box, pred_bbox in zip(annot['track'], smoothed):
box['bbox'] = pred_bbox.tolist()
proto_dump(annot_proto, args.save_file)
|
|
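To make the smoothing step concrete, here is a tiny self-contained run over fake box coordinates (values invented); sigma = window / 6 mirrors the script above, and modern SciPy exposes gaussian_filter1d directly on scipy.ndimage:

import numpy as np
import scipy.ndimage

# four fake frames of [x1, y1, x2, y2] boxes, one with a jittery jump
boxes = np.asarray([[10, 10, 50, 50],
                    [11, 10, 51, 50],
                    [30, 25, 70, 65],
                    [12, 11, 52, 51]], dtype=np.float64)
smoothed = scipy.ndimage.gaussian_filter1d(boxes, 11 / 6., axis=0, mode='nearest')
print(smoothed.round(1))  # the outlier row is pulled toward its neighbours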
98f4a1c0fc1562d49d86b4b8f6763f2ce259280d
|
eche/eche_types.py
|
eche/eche_types.py
|
class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
    @classmethod
    def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
Create Symbol, List, Boolean, Nil and Atom types.
|
Create Symbol, List, Boolean, Nil and Atom types.
|
Python
|
mit
|
skk/eche
|
Create Symbol, List, Boolean, Nil and Atom types.
|
class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
    @classmethod
    def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
<commit_before><commit_msg>Create Symbol, List, Boolean, Nil and Atom types.<commit_after>
|
class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
    @classmethod
    def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
Create Symbol, List, Boolean, Nil and Atom types.class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
    @classmethod
    def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
<commit_before><commit_msg>Create Symbol, List, Boolean, Nil and Atom types.<commit_after>class Symbol(str):
pass
# lists
class List(list):
def __add__(self, rhs):
return List(list.__add__(self, rhs))
def __getitem__(self, i):
if type(i) == slice:
return List(list.__getitem__(self, i))
elif i >= len(self):
return None
else:
return list.__getitem__(self, i)
def __getslice__(self, *a):
        return List(list.__getslice__(self, *a))
@classmethod
def is_list(cls, obj):
return isinstance(obj, cls)
class Boolean(object):
def __init__(self, val):
self.val = val
@classmethod
def is_true(cls, exp):
        return isinstance(exp, cls) and bool(exp.val)
    @classmethod
    def is_false(cls, exp):
        return isinstance(exp, cls) and not bool(exp.val)
class Nil(object):
def __init__(self, val):
self.val = val
@classmethod
def is_nil(cls, exp):
return isinstance(exp, cls)
class Atom(object):
def __init__(self, val):
self.val = val
@classmethod
def is_atom(cls, exp):
return isinstance(exp, Atom)
|
|
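A small sanity check of the container semantics defined above; the class is re-declared so the snippet runs standalone, and under Python 3 extended slicing routes through __getitem__ (the separate __getslice__ hook only exists on Python 2):

class List(list):
    def __getitem__(self, i):
        if type(i) == slice:
            return List(list.__getitem__(self, i))
        elif i >= len(self):
            return None
        return list.__getitem__(self, i)

xs = List([1, 2, 3])
print(type(xs[0:2]).__name__)  # List -- slices keep the wrapper type
print(xs[99])                  # None -- out-of-range reads return None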
b1079d59593a9b0e6a5c3f4ffa286ab472c174bf
|
rockethook.py
|
rockethook.py
|
#!/usr/bin/env python
"""Simple library for posting to Rocket.Chat via webhooks a.k.a. integrations."""
import json
import httplib
import urllib
class WebhookError(Exception):
def __init__(self, status, message):
self.status = status
self.message = 'Rocket.Chat server error, code {0}: {1}'.format(status, message)
super(WebhookError, self).__init__(self.message)
class Webhook(object):
def __init__(self, server_url, token):
if server_url.split('://')[0] == 'https':
self.https = True
else:
self.https = False
self.server_fqdn = server_url.split('://')[-1].replace('/', '')
self.token = token
def quick_post(self, text):
"""Method for posting simple text messages via single hook."""
self.post(Message(text))
def post(self, message):
"""Send your message to Rocket.Chat.
        message argument is expected to be a rockethook.Message object.
        If you want to just post a simple text message, please use the quick_post() method.
        Raises rockethook.WebhookError if the response code is not 200."""
        assert type(message) is Message, 'Error: message is not a rockethook.Message'
payload_dict = {}
if message.text:
payload_dict['text'] = message.text
if message.icon_url:
payload_dict['icon_url'] = message.icon_url
if message.attachments:
payload_dict['attachments'] = message.attachments
payload = 'payload=' + urllib.quote_plus(json.dumps(payload_dict))
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
if self.https:
conn = httplib.HTTPSConnection(self.server_fqdn)
else:
conn = httplib.HTTPConnection(self.server_fqdn)
conn.request('POST', '/hooks/' + self.token, payload, headers)
response = conn.getresponse()
if response.status != 200:
error = json.loads(response.read())
conn.close()
raise WebhookError(response.status, error['message'])
conn.close()
class Message(object):
def __init__(self, text='', icon_url=None):
self.text = text
self.icon_url = icon_url
self.attachments = []
def append_text(self, text_to_append):
"""Add new text to the message."""
if self.text:
self.text = self.text + '\n' + text_to_append
else:
self.text = text_to_append
def add_attachment(self, **kwargs):
"""Add an attachment to the message.
As of Rocket.Chat version 0.17, valid attachment arguments are the following:
* title
* title_link
* text
* image_url
* color
You can have multiple attachments in a single message.
"""
self.attachments.append(kwargs)
|
Move code from old repo.
|
Move code from old repo.
|
Python
|
mit
|
gevial/rockethook
|
Move code from old repo.
|
#!/usr/bin/env python
"""Simple library for posting to Rocket.Chat via webhooks a.k.a. integrations."""
import json
import httplib
import urllib
class WebhookError(Exception):
def __init__(self, status, message):
self.status = status
self.message = 'Rocket.Chat server error, code {0}: {1}'.format(status, message)
super(WebhookError, self).__init__(self.message)
class Webhook(object):
def __init__(self, server_url, token):
if server_url.split('://')[0] == 'https':
self.https = True
else:
self.https = False
self.server_fqdn = server_url.split('://')[-1].replace('/', '')
self.token = token
def quick_post(self, text):
"""Method for posting simple text messages via single hook."""
self.post(Message(text))
def post(self, message):
"""Send your message to Rocket.Chat.
        message argument is expected to be a rockethook.Message object.
        If you want to just post a simple text message, please use the quick_post() method.
        Raises rockethook.WebhookError if the response code is not 200."""
        assert type(message) is Message, 'Error: message is not a rockethook.Message'
payload_dict = {}
if message.text:
payload_dict['text'] = message.text
if message.icon_url:
payload_dict['icon_url'] = message.icon_url
if message.attachments:
payload_dict['attachments'] = message.attachments
payload = 'payload=' + urllib.quote_plus(json.dumps(payload_dict))
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
if self.https:
conn = httplib.HTTPSConnection(self.server_fqdn)
else:
conn = httplib.HTTPConnection(self.server_fqdn)
conn.request('POST', '/hooks/' + self.token, payload, headers)
response = conn.getresponse()
if response.status != 200:
error = json.loads(response.read())
conn.close()
raise WebhookError(response.status, error['message'])
conn.close()
class Message(object):
def __init__(self, text='', icon_url=None):
self.text = text
self.icon_url = icon_url
self.attachments = []
def append_text(self, text_to_append):
"""Add new text to the message."""
if self.text:
self.text = self.text + '\n' + text_to_append
else:
self.text = text_to_append
def add_attachment(self, **kwargs):
"""Add an attachment to the message.
As of Rocket.Chat version 0.17, valid attachment arguments are the following:
* title
* title_link
* text
* image_url
* color
You can have multiple attachments in a single message.
"""
self.attachments.append(kwargs)
|
<commit_before><commit_msg>Move code from old repo.<commit_after>
|
#!/usr/bin/env python
"""Simple library for posting to Rocket.Chat via webhooks a.k.a. integrations."""
import json
import httplib
import urllib
class WebhookError(Exception):
def __init__(self, status, message):
self.status = status
self.message = 'Rocket.Chat server error, code {0}: {1}'.format(status, message)
super(WebhookError, self).__init__(self.message)
class Webhook(object):
def __init__(self, server_url, token):
if server_url.split('://')[0] == 'https':
self.https = True
else:
self.https = False
self.server_fqdn = server_url.split('://')[-1].replace('/', '')
self.token = token
def quick_post(self, text):
"""Method for posting simple text messages via single hook."""
self.post(Message(text))
def post(self, message):
"""Send your message to Rocket.Chat.
        message argument is expected to be a rockethook.Message object.
        If you want to just post a simple text message, please use the quick_post() method.
        Raises rockethook.WebhookError if the response code is not 200."""
        assert type(message) is Message, 'Error: message is not a rockethook.Message'
payload_dict = {}
if message.text:
payload_dict['text'] = message.text
if message.icon_url:
payload_dict['icon_url'] = message.icon_url
if message.attachments:
payload_dict['attachments'] = message.attachments
payload = 'payload=' + urllib.quote_plus(json.dumps(payload_dict))
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
if self.https:
conn = httplib.HTTPSConnection(self.server_fqdn)
else:
conn = httplib.HTTPConnection(self.server_fqdn)
conn.request('POST', '/hooks/' + self.token, payload, headers)
response = conn.getresponse()
if response.status != 200:
error = json.loads(response.read())
conn.close()
raise WebhookError(response.status, error['message'])
conn.close()
class Message(object):
def __init__(self, text='', icon_url=None):
self.text = text
self.icon_url = icon_url
self.attachments = []
def append_text(self, text_to_append):
"""Add new text to the message."""
if self.text:
self.text = self.text + '\n' + text_to_append
else:
self.text = text_to_append
def add_attachment(self, **kwargs):
"""Add an attachment to the message.
As of Rocket.Chat version 0.17, valid attachment arguments are the following:
* title
* title_link
* text
* image_url
* color
You can have multiple attachments in a single message.
"""
self.attachments.append(kwargs)
|
Move code from old repo.#!/usr/bin/env python
"""Simple library for posting to Rocket.Chat via webhooks a.k.a. integrations."""
import json
import httplib
import urllib
class WebhookError(Exception):
def __init__(self, status, message):
self.status = status
self.message = 'Rocket.Chat server error, code {0}: {1}'.format(status, message)
super(WebhookError, self).__init__(self.message)
class Webhook(object):
def __init__(self, server_url, token):
if server_url.split('://')[0] == 'https':
self.https = True
else:
self.https = False
self.server_fqdn = server_url.split('://')[-1].replace('/', '')
self.token = token
def quick_post(self, text):
"""Method for posting simple text messages via single hook."""
self.post(Message(text))
def post(self, message):
"""Send your message to Rocket.Chat.
        The message argument is expected to be a pyrocketchat.Message object.
        To post a simple text message, use the quick_post() method instead.
        Raises pyrocketchat.WebhookError if the response code is not 200."""
assert type(message) is Message, 'Error: message is not a pyrocketchat.Message'
payload_dict = {}
if message.text:
payload_dict['text'] = message.text
if message.icon_url:
payload_dict['icon_url'] = message.icon_url
if message.attachments:
payload_dict['attachments'] = message.attachments
payload = 'payload=' + urllib.quote_plus(json.dumps(payload_dict))
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
if self.https:
conn = httplib.HTTPSConnection(self.server_fqdn)
else:
conn = httplib.HTTPConnection(self.server_fqdn)
conn.request('POST', '/hooks/' + self.token, payload, headers)
response = conn.getresponse()
if response.status != 200:
error = json.loads(response.read())
conn.close()
raise WebhookError(response.status, error['message'])
conn.close()
class Message(object):
def __init__(self, text='', icon_url=None):
self.text = text
self.icon_url = icon_url
self.attachments = []
def append_text(self, text_to_append):
"""Add new text to the message."""
if self.text:
self.text = self.text + '\n' + text_to_append
else:
self.text = text_to_append
def add_attachment(self, **kwargs):
"""Add an attachment to the message.
As of Rocket.Chat version 0.17, valid attachment arguments are the following:
* title
* title_link
* text
* image_url
* color
You can have multiple attachments in a single message.
"""
self.attachments.append(kwargs)
|
<commit_before><commit_msg>Move code from old repo.<commit_after>#!/usr/bin/env python
"""Simple library for posting to Rocket.Chat via webhooks a.k.a. integrations."""
import json
import httplib
import urllib
class WebhookError(Exception):
def __init__(self, status, message):
self.status = status
self.message = 'Rocket.Chat server error, code {0}: {1}'.format(status, message)
super(WebhookError, self).__init__(self.message)
class Webhook(object):
def __init__(self, server_url, token):
if server_url.split('://')[0] == 'https':
self.https = True
else:
self.https = False
self.server_fqdn = server_url.split('://')[-1].replace('/', '')
self.token = token
def quick_post(self, text):
"""Method for posting simple text messages via single hook."""
self.post(Message(text))
def post(self, message):
"""Send your message to Rocket.Chat.
        The message argument is expected to be a pyrocketchat.Message object.
        To post a simple text message, use the quick_post() method instead.
        Raises pyrocketchat.WebhookError if the response code is not 200."""
assert type(message) is Message, 'Error: message is not a pyrocketchat.Message'
payload_dict = {}
if message.text:
payload_dict['text'] = message.text
if message.icon_url:
payload_dict['icon_url'] = message.icon_url
if message.attachments:
payload_dict['attachments'] = message.attachments
payload = 'payload=' + urllib.quote_plus(json.dumps(payload_dict))
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
if self.https:
conn = httplib.HTTPSConnection(self.server_fqdn)
else:
conn = httplib.HTTPConnection(self.server_fqdn)
conn.request('POST', '/hooks/' + self.token, payload, headers)
response = conn.getresponse()
if response.status != 200:
error = json.loads(response.read())
conn.close()
raise WebhookError(response.status, error['message'])
conn.close()
class Message(object):
def __init__(self, text='', icon_url=None):
self.text = text
self.icon_url = icon_url
self.attachments = []
def append_text(self, text_to_append):
"""Add new text to the message."""
if self.text:
self.text = self.text + '\n' + text_to_append
else:
self.text = text_to_append
def add_attachment(self, **kwargs):
"""Add an attachment to the message.
As of Rocket.Chat version 0.17, valid attachment arguments are the following:
* title
* title_link
* text
* image_url
* color
You can have multiple attachments in a single message.
"""
self.attachments.append(kwargs)
|
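A minimal usage sketch for the client above; the server URL, token, and attachment values are placeholders, and the incoming-webhook integration must already exist on the Rocket.Chat side:

# Hypothetical URL and token -- substitute the values of your own integration.
hook = Webhook('https://chat.example.com', 'abc123token')
msg = Message('Deploy finished', icon_url='https://example.com/bot.png')
msg.append_text('All services healthy.')
msg.add_attachment(title='Build log', title_link='https://ci.example.com/42', color='#36a64f')
hook.post(msg)
hook.quick_post('one-line status update')  # shortcut for plain text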
|
af966370d7c13e17f7b0ad77cec8e0c53d6738f1
|
alembic/versions/3b85eb7c4d7_add_dark_theme_entry.py
|
alembic/versions/3b85eb7c4d7_add_dark_theme_entry.py
|
"""Add dark_theme entry
Revision ID: 3b85eb7c4d7
Revises: 3d4136d1ae1
Create Date: 2015-08-06 03:34:32.888608
"""
# revision identifiers, used by Alembic.
revision = '3b85eb7c4d7'
down_revision = '3d4136d1ae1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('user', sa.Column('dark_theme', sa.Boolean, nullable=False, server_default=sa.false()))
pass
def downgrade():
with op.batch_alter_table("user") as batch_op:
batch_op.drop_column('dark_theme')
pass
|
Add alembic migration script for dark theme
|
Add alembic migration script for dark theme
|
Python
|
mit
|
EIREXE/SpaceDock,EIREXE/SpaceDock,KerbalStuff/KerbalStuff,Kerbas-ad-astra/KerbalStuff,KerbalStuff/KerbalStuff,EIREXE/SpaceDock,KerbalStuff/KerbalStuff,Kerbas-ad-astra/KerbalStuff,EIREXE/SpaceDock,Kerbas-ad-astra/KerbalStuff
|
Add alembic migration script for dark theme
|
"""Add dark_theme entry
Revision ID: 3b85eb7c4d7
Revises: 3d4136d1ae1
Create Date: 2015-08-06 03:34:32.888608
"""
# revision identifiers, used by Alembic.
revision = '3b85eb7c4d7'
down_revision = '3d4136d1ae1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('user', sa.Column('dark_theme', sa.Boolean, nullable=False, server_default=sa.false()))
pass
def downgrade():
with op.batch_alter_table("user") as batch_op:
batch_op.drop_column('dark_theme')
pass
|
<commit_before><commit_msg>Add alembic migration script for dark theme<commit_after>
|
"""Add dark_theme entry
Revision ID: 3b85eb7c4d7
Revises: 3d4136d1ae1
Create Date: 2015-08-06 03:34:32.888608
"""
# revision identifiers, used by Alembic.
revision = '3b85eb7c4d7'
down_revision = '3d4136d1ae1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('user', sa.Column('dark_theme', sa.Boolean, nullable=False, server_default=sa.false()))
pass
def downgrade():
with op.batch_alter_table("user") as batch_op:
batch_op.drop_column('dark_theme')
pass
|
Add alembic migration script for dark theme"""Add dark_theme entry
Revision ID: 3b85eb7c4d7
Revises: 3d4136d1ae1
Create Date: 2015-08-06 03:34:32.888608
"""
# revision identifiers, used by Alembic.
revision = '3b85eb7c4d7'
down_revision = '3d4136d1ae1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('user', sa.Column('dark_theme', sa.Boolean, nullable=False, server_default=sa.false()))
pass
def downgrade():
with op.batch_alter_table("user") as batch_op:
batch_op.drop_column('dark_theme')
pass
|
<commit_before><commit_msg>Add alembic migration script for dark theme<commit_after>"""Add dark_theme entry
Revision ID: 3b85eb7c4d7
Revises: 3d4136d1ae1
Create Date: 2015-08-06 03:34:32.888608
"""
# revision identifiers, used by Alembic.
revision = '3b85eb7c4d7'
down_revision = '3d4136d1ae1'
from alembic import op
import sqlalchemy as sa
def upgrade():
    op.add_column('user', sa.Column('dark_theme', sa.Boolean, nullable=False, server_default=sa.false()))
pass
def downgrade():
with op.batch_alter_table("user") as batch_op:
batch_op.drop_column('dark_theme')
pass
|
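A rough sketch of applying this revision through Alembic's Python API; the config path is an assumption (a standard alembic.ini in the project root):

from alembic.config import Config
from alembic import command

cfg = Config('alembic.ini')  # hypothetical location of the project's Alembic config
command.upgrade(cfg, '3b85eb7c4d7')    # apply the dark_theme revision
command.downgrade(cfg, '3d4136d1ae1')  # roll back to the previous revision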
|
f647b1bc68a76d990d452d50bb2d67981db12c4c
|
importlib_resources/tests/test_abc.py
|
importlib_resources/tests/test_abc.py
|
import io
import zipfile
import unittest
from importlib_resources._compat import ZipPath, Path
from importlib_resources.abc import Traversable
class TraversableTests(unittest.TestCase):
def test_zip_path_traversable(self):
zf = zipfile.ZipFile(io.BytesIO(), 'w')
assert isinstance(ZipPath(zf), Traversable)
def test_pathlib_path_traversable(self):
assert isinstance(Path(), Traversable)
|
Add tests for Traversable on pathlib and zipfile Path objects.
|
Add tests for Traversable on pathlib and zipfile Path objects.
|
Python
|
apache-2.0
|
python/importlib_resources
|
Add tests for Traversable on pathlib and zipfile Path objects.
|
import io
import zipfile
import unittest
from importlib_resources._compat import ZipPath, Path
from importlib_resources.abc import Traversable
class TraversableTests(unittest.TestCase):
def test_zip_path_traversable(self):
zf = zipfile.ZipFile(io.BytesIO(), 'w')
assert isinstance(ZipPath(zf), Traversable)
def test_pathlib_path_traversable(self):
assert isinstance(Path(), Traversable)
|
<commit_before><commit_msg>Add tests for Traversable on pathlib and zipfile Path objects.<commit_after>
|
import io
import zipfile
import unittest
from importlib_resources._compat import ZipPath, Path
from importlib_resources.abc import Traversable
class TraversableTests(unittest.TestCase):
def test_zip_path_traversable(self):
zf = zipfile.ZipFile(io.BytesIO(), 'w')
assert isinstance(ZipPath(zf), Traversable)
def test_pathlib_path_traversable(self):
assert isinstance(Path(), Traversable)
|
Add tests for Traversable on pathlib and zipfile Path objects.import io
import zipfile
import unittest
from importlib_resources._compat import ZipPath, Path
from importlib_resources.abc import Traversable
class TraversableTests(unittest.TestCase):
def test_zip_path_traversable(self):
zf = zipfile.ZipFile(io.BytesIO(), 'w')
assert isinstance(ZipPath(zf), Traversable)
def test_pathlib_path_traversable(self):
assert isinstance(Path(), Traversable)
|
<commit_before><commit_msg>Add tests for Traversable on pathlib and zipfile Path objects.<commit_after>import io
import zipfile
import unittest
from importlib_resources._compat import ZipPath, Path
from importlib_resources.abc import Traversable
class TraversableTests(unittest.TestCase):
def test_zip_path_traversable(self):
zf = zipfile.ZipFile(io.BytesIO(), 'w')
assert isinstance(ZipPath(zf), Traversable)
def test_pathlib_path_traversable(self):
assert isinstance(Path(), Traversable)
|
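A rough sketch of what the Traversable protocol buys: the same traversal code works on a zip archive as on a real directory. The archive contents here are illustrative:

import io
import zipfile
from importlib_resources._compat import ZipPath

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('pkg/data.txt', 'hello')

root = ZipPath(zipfile.ZipFile(buf))  # reopen the finished archive for reading
for entry in root.joinpath('pkg').iterdir():
    print(entry.name, entry.read_text())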
|
ca8efa51b4a8269089671ec24ccbf5d5ed56dd71
|
astropy/table/tests/test_subclass.py
|
astropy/table/tests/test_subclass.py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
class MyRow(table.Row):
def __str__(self):
return str(self.data)
class MyColumn(table.Column):
def cool(self):
return 'Cool!'
class MyMaskedColumn(table.MaskedColumn):
def cool(self):
return 'MaskedCool!'
class MyTableColumns(table.TableColumns):
def cool(self):
return 'CoolTableColumns!'
class MyTable(table.Table):
_Row = MyRow
_Column = MyColumn
_MaskedColumn = MyMaskedColumn
_TableColumns = MyTableColumns
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'Cool!'
assert t.columns.cool() == 'CoolTableColumns!'
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'MaskedCool!'
|
Add initial tests of subclassing components
|
Add initial tests of subclassing components
|
Python
|
bsd-3-clause
|
kelle/astropy,MSeifert04/astropy,pllim/astropy,aleksandr-bakanov/astropy,larrybradley/astropy,tbabej/astropy,bsipocz/astropy,joergdietrich/astropy,joergdietrich/astropy,mhvk/astropy,stargaser/astropy,kelle/astropy,dhomeier/astropy,pllim/astropy,astropy/astropy,DougBurke/astropy,saimn/astropy,MSeifert04/astropy,larrybradley/astropy,saimn/astropy,kelle/astropy,AustereCuriosity/astropy,AustereCuriosity/astropy,funbaker/astropy,MSeifert04/astropy,larrybradley/astropy,StuartLittlefair/astropy,saimn/astropy,dhomeier/astropy,bsipocz/astropy,AustereCuriosity/astropy,mhvk/astropy,larrybradley/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,dhomeier/astropy,MSeifert04/astropy,lpsinger/astropy,AustereCuriosity/astropy,aleksandr-bakanov/astropy,lpsinger/astropy,saimn/astropy,StuartLittlefair/astropy,stargaser/astropy,AustereCuriosity/astropy,saimn/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,joergdietrich/astropy,lpsinger/astropy,mhvk/astropy,astropy/astropy,stargaser/astropy,dhomeier/astropy,DougBurke/astropy,funbaker/astropy,pllim/astropy,mhvk/astropy,bsipocz/astropy,lpsinger/astropy,kelle/astropy,pllim/astropy,DougBurke/astropy,larrybradley/astropy,tbabej/astropy,astropy/astropy,lpsinger/astropy,astropy/astropy,tbabej/astropy,bsipocz/astropy,funbaker/astropy,mhvk/astropy,dhomeier/astropy,astropy/astropy,funbaker/astropy,DougBurke/astropy,tbabej/astropy,kelle/astropy,stargaser/astropy,pllim/astropy,StuartLittlefair/astropy,joergdietrich/astropy,joergdietrich/astropy,tbabej/astropy
|
Add initial tests of subclassing components
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
class MyRow(table.Row):
def __str__(self):
return str(self.data)
class MyColumn(table.Column):
def cool(self):
return 'Cool!'
class MyMaskedColumn(table.MaskedColumn):
def cool(self):
return 'MaskedCool!'
class MyTableColumns(table.TableColumns):
def cool(self):
return 'CoolTableColumns!'
class MyTable(table.Table):
_Row = MyRow
_Column = MyColumn
_MaskedColumn = MyMaskedColumn
_TableColumns = MyTableColumns
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'Cool!'
assert t.columns.cool() == 'CoolTableColumns!'
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'MaskedCool!'
|
<commit_before><commit_msg>Add initial tests of subclassing components<commit_after>
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
class MyRow(table.Row):
def __str__(self):
return str(self.data)
class MyColumn(table.Column):
def cool(self):
return 'Cool!'
class MyMaskedColumn(table.MaskedColumn):
def cool(self):
return 'MaskedCool!'
class MyTableColumns(table.TableColumns):
def cool(self):
return 'CoolTableColumns!'
class MyTable(table.Table):
_Row = MyRow
_Column = MyColumn
_MaskedColumn = MyMaskedColumn
_TableColumns = MyTableColumns
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'Cool!'
assert t.columns.cool() == 'CoolTableColumns!'
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'MaskedCool!'
|
Add initial tests of subclassing components# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
class MyRow(table.Row):
def __str__(self):
return str(self.data)
class MyColumn(table.Column):
def cool(self):
return 'Cool!'
class MyMaskedColumn(table.MaskedColumn):
def cool(self):
return 'MaskedCool!'
class MyTableColumns(table.TableColumns):
def cool(self):
return 'CoolTableColumns!'
class MyTable(table.Table):
_Row = MyRow
_Column = MyColumn
_MaskedColumn = MyMaskedColumn
_TableColumns = MyTableColumns
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'Cool!'
assert t.columns.cool() == 'CoolTableColumns!'
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'MaskedCool!'
|
<commit_before><commit_msg>Add initial tests of subclassing components<commit_after># -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
class MyRow(table.Row):
def __str__(self):
return str(self.data)
class MyColumn(table.Column):
def cool(self):
return 'Cool!'
class MyMaskedColumn(table.MaskedColumn):
def cool(self):
return 'MaskedCool!'
class MyTableColumns(table.TableColumns):
def cool(self):
return 'CoolTableColumns!'
class MyTable(table.Table):
_Row = MyRow
_Column = MyColumn
_MaskedColumn = MyMaskedColumn
_TableColumns = MyTableColumns
def test_simple_subclass():
t = MyTable([[1, 2], [3, 4]])
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'Cool!'
assert t.columns.cool() == 'CoolTableColumns!'
t2 = MyTable(t)
row = t2[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
t3 = table.Table(t)
row = t3[0]
assert not isinstance(row, MyRow)
assert str(row) != '(1, 3)'
t = MyTable([[1, 2], [3, 4]], masked=True)
row = t[0]
assert isinstance(row, MyRow)
assert str(row) == '(1, 3)'
assert t['col0'].cool() == 'MaskedCool!'
|
|
fc616c89e0b647c5ada9f15bfdc508fd77e1b391
|
tests/task.py
|
tests/task.py
|
from mongoengine import ValidationError
from nose.tools import raises
from app.task import Priority, Task
def test_Set_Title_String():
task = Task()
task.title = "Hello"
assert task.title == "Hello"
@raises(ValidationError)
def test_Set_Title_Int():
task = Task()
task.title = 1
    # Should raise ValidationError, since the title field only accepts strings.
task.save()
def test_Set_Priority_High():
task = Task()
assert task.priority == Priority.LOW
task.priority = Priority.HIGH
assert task.priority == Priority.HIGH
|
Add some tests for the Task model
|
Add some tests for the Task model
|
Python
|
mit
|
Zillolo/lazy-todo
|
Add some tests for the Task model
|
from mongoengine import ValidationError
from nose.tools import raises
from app.task import Priority, Task
def test_Set_Title_String():
task = Task()
task.title = "Hello"
assert task.title == "Hello"
@raises(ValidationError)
def test_Set_Title_Int():
task = Task()
task.title = 1
    # Should raise ValidationError, since the title field only accepts strings.
task.save()
def test_Set_Priority_High():
task = Task()
assert task.priority == Priority.LOW
task.priority = Priority.HIGH
assert task.priority == Priority.HIGH
|
<commit_before><commit_msg>Add some tests for the Task model<commit_after>
|
from mongoengine import ValidationError
from nose.tools import raises
from app.task import Priority, Task
def test_Set_Title_String():
task = Task()
task.title = "Hello"
assert task.title == "Hello"
@raises(ValidationError)
def test_Set_Title_Int():
task = Task()
task.title = 1
    # Should raise ValidationError, since the title field only accepts strings.
task.save()
def test_Set_Priority_High():
task = Task()
assert task.priority == Priority.LOW
task.priority = Priority.HIGH
assert task.priority == Priority.HIGH
|
Add some tests for the Task modelfrom mongoengine import ValidationError
from nose.tools import raises
from app.task import Priority, Task
def test_Set_Title_String():
task = Task()
task.title = "Hello"
assert task.title == "Hello"
@raises(ValidationError)
def test_Set_Title_Int():
task = Task()
task.title = 1
    # Should raise ValidationError, since the title field only accepts strings.
task.save()
def test_Set_Priority_High():
task = Task()
assert task.priority == Priority.LOW
task.priority = Priority.HIGH
assert task.priority == Priority.HIGH
|
<commit_before><commit_msg>Add some tests for the Task model<commit_after>from mongoengine import ValidationError
from nose.tools import raises
from app.task import Priority, Task
def test_Set_Title_String():
task = Task()
task.title = "Hello"
assert task.title == "Hello"
@raises(ValidationError)
def test_Set_Title_Int():
task = Task()
task.title = 1
    # Should raise ValidationError, since the title field only accepts strings.
task.save()
def test_Set_Priority_High():
task = Task()
assert task.priority == Priority.LOW
task.priority = Priority.HIGH
assert task.priority == Priority.HIGH
|
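The app.task module under test is not shown here; for these assertions to hold it presumably looks roughly like the sketch below. The field types and Priority values are inferred from the tests, everything else is an assumption:

from enum import IntEnum
from mongoengine import Document, IntField, StringField

class Priority(IntEnum):  # hypothetical reconstruction
    LOW = 0
    HIGH = 1

class Task(Document):  # hypothetical reconstruction
    title = StringField()                      # saving a non-string value raises ValidationError
    priority = IntField(default=Priority.LOW)  # defaults to low priority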
|
314c40bbfa33f5ae71b35b50de5305dc20b8fd9d
|
src/rnaseq_lib/civic/__init__.py
|
src/rnaseq_lib/civic/__init__.py
|
import requests
import progressbar
import pandas as pd
bar = progressbar.ProgressBar()
def create_civic_drug_disease_dataframe():
payload = {'count': 999} # Only 295 genes at time of writing
request = requests.get('https://civic.genome.wustl.edu/api/genes/', params=payload)
assert request.status_code == 200, 'Request failed: {}'.format(request.status_code)
records = request.json()['records']
# Create records in the format: Cancer, Gene, Drugs, Variant-Name, Aliases, Description
info = []
for record in bar(records):
gene = record['name']
description = record['description']
aliases = ','.join(record['aliases'])
variants = record['variants']
for variant in variants:
var_name = variant['name']
var_id = variant['id']
var = requests.get('https://civic.genome.wustl.edu/api/variants/{}'.format(var_id)).json()
for evidence in var['evidence_items']:
disease = evidence['disease']['name']
drugs = ','.join([x['name'] for x in evidence['drugs']])
info.append((disease, drugs, gene, aliases, var_name, description))
# Create DataFrame
labels = ['Cancer', 'Drugs', 'Gene', 'Aliases', 'Variant-Name', 'Description']
return pd.DataFrame.from_records(info, columns=labels)
|
Add function to retrieve CIViC Drug / Disease information
|
Add function to retrieve CIViC Drug / Disease information
|
Python
|
mit
|
jvivian/rnaseq-lib,jvivian/rnaseq-lib
|
Add function to retrieve CIViC Drug / Disease information
|
import requests
import progressbar
import pandas as pd
bar = progressbar.ProgressBar()
def create_civic_drug_disease_dataframe():
payload = {'count': 999} # Only 295 genes at time of writing
request = requests.get('https://civic.genome.wustl.edu/api/genes/', params=payload)
assert request.status_code == 200, 'Request failed: {}'.format(request.status_code)
records = request.json()['records']
# Create records in the format: Cancer, Gene, Drugs, Variant-Name, Aliases, Description
info = []
for record in bar(records):
gene = record['name']
description = record['description']
aliases = ','.join(record['aliases'])
variants = record['variants']
for variant in variants:
var_name = variant['name']
var_id = variant['id']
var = requests.get('https://civic.genome.wustl.edu/api/variants/{}'.format(var_id)).json()
for evidence in var['evidence_items']:
disease = evidence['disease']['name']
drugs = ','.join([x['name'] for x in evidence['drugs']])
info.append((disease, drugs, gene, aliases, var_name, description))
# Create DataFrame
labels = ['Cancer', 'Drugs', 'Gene', 'Aliases', 'Variant-Name', 'Description']
return pd.DataFrame.from_records(info, columns=labels)
|
<commit_before><commit_msg>Add function to retrieve CIViC Drug / Disease information<commit_after>
|
import requests
import progressbar
import pandas as pd
bar = progressbar.ProgressBar()
def create_civic_drug_disease_dataframe():
payload = {'count': 999} # Only 295 genes at time of writing
request = requests.get('https://civic.genome.wustl.edu/api/genes/', params=payload)
assert request.status_code == 200, 'Request failed: {}'.format(request.status_code)
records = request.json()['records']
# Create records in the format: Cancer, Gene, Drugs, Variant-Name, Aliases, Description
info = []
for record in bar(records):
gene = record['name']
description = record['description']
aliases = ','.join(record['aliases'])
variants = record['variants']
for variant in variants:
var_name = variant['name']
var_id = variant['id']
var = requests.get('https://civic.genome.wustl.edu/api/variants/{}'.format(var_id)).json()
for evidence in var['evidence_items']:
disease = evidence['disease']['name']
drugs = ','.join([x['name'] for x in evidence['drugs']])
info.append((disease, drugs, gene, aliases, var_name, description))
# Create DataFrame
labels = ['Cancer', 'Drugs', 'Gene', 'Aliases', 'Variant-Name', 'Description']
return pd.DataFrame.from_records(info, columns=labels)
|
Add function to retrieve CIViC Drug / Disease informationimport requests
import progressbar
import pandas as pd
bar = progressbar.ProgressBar()
def create_civic_drug_disease_dataframe():
payload = {'count': 999} # Only 295 genes at time of writing
request = requests.get('https://civic.genome.wustl.edu/api/genes/', params=payload)
assert request.status_code == 200, 'Request failed: {}'.format(request.status_code)
records = request.json()['records']
# Create records in the format: Cancer, Gene, Drugs, Variant-Name, Aliases, Description
info = []
for record in bar(records):
gene = record['name']
description = record['description']
aliases = ','.join(record['aliases'])
variants = record['variants']
for variant in variants:
var_name = variant['name']
var_id = variant['id']
var = requests.get('https://civic.genome.wustl.edu/api/variants/{}'.format(var_id)).json()
for evidence in var['evidence_items']:
disease = evidence['disease']['name']
drugs = ','.join([x['name'] for x in evidence['drugs']])
info.append((disease, drugs, gene, aliases, var_name, description))
# Create DataFrame
labels = ['Cancer', 'Drugs', 'Gene', 'Aliases', 'Variant-Name', 'Description']
return pd.DataFrame.from_records(info, columns=labels)
|
<commit_before><commit_msg>Add function to retrieve CIViC Drug / Disease information<commit_after>import requests
import progressbar
import pandas as pd
bar = progressbar.ProgressBar()
def create_civic_drug_disease_dataframe():
payload = {'count': 999} # Only 295 genes at time of writing
request = requests.get('https://civic.genome.wustl.edu/api/genes/', params=payload)
assert request.status_code == 200, 'Request failed: {}'.format(request.status_code)
records = request.json()['records']
# Create records in the format: Cancer, Gene, Drugs, Variant-Name, Aliases, Description
info = []
for record in bar(records):
gene = record['name']
description = record['description']
aliases = ','.join(record['aliases'])
variants = record['variants']
for variant in variants:
var_name = variant['name']
var_id = variant['id']
var = requests.get('https://civic.genome.wustl.edu/api/variants/{}'.format(var_id)).json()
for evidence in var['evidence_items']:
disease = evidence['disease']['name']
drugs = ','.join([x['name'] for x in evidence['drugs']])
info.append((disease, drugs, gene, aliases, var_name, description))
# Create DataFrame
labels = ['Cancer', 'Drugs', 'Gene', 'Aliases', 'Variant-Name', 'Description']
return pd.DataFrame.from_records(info, columns=labels)
|
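Calling the helper is a one-liner; a quick sketch of inspecting the result (the gene symbol is just an example):

df = create_civic_drug_disease_dataframe()
print(df.shape)
print(df[df['Gene'] == 'BRAF'][['Cancer', 'Drugs', 'Variant-Name']].head())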
|
205ce3a2d480e0d05bbdfd9cd2c2f3bafc08a061
|
gorynych/task.py
|
gorynych/task.py
|
#!/usr/bin/python3
from enum import Enum
class State(Enum):
inactive = 1
active = 2
running = 3
paused = 4
aborted = 5
failed = 6
frozen = 7
class Task(object):
def __init__(self):
self.state = State.inactive
self.scarab = None
def activate(self):
self.state = State.active
def deactivate(self):
self.state = State.inactive
def pause(self):
self.state = State.paused
def resume(self):
self.state = State.active
def abort(self):
self.state = State.aborted
def freeze(self):
self.state = State.frozen
def defreeze(self):
self.state = State.inactive
|
Add dummy module of Task class
|
Add dummy module of Task class
|
Python
|
apache-2.0
|
vurmux/gorynych
|
Add dummy module of Task class
|
#!/usr/bin/python3
from enum import Enum
class State(Enum):
inactive = 1
active = 2
running = 3
paused = 4
aborted = 5
failed = 6
frozen = 7
class Task(object):
def __init__(self):
self.state = State.inactive
self.scarab = None
def activate(self):
self.state = State.active
def deactivate(self):
self.state = State.inactive
def pause(self):
self.state = State.paused
def resume(self):
self.state = State.active
def abort(self):
self.state = State.aborted
def freeze(self):
self.state = State.frozen
def defreeze(self):
self.state = State.inactive
|
<commit_before><commit_msg>Add dummy module of Task class<commit_after>
|
#!/usr/bin/python3
from enum import Enum
class State(Enum):
inactive = 1
active = 2
running = 3
paused = 4
aborted = 5
failed = 6
frozen = 7
class Task(object):
def __init__(self):
self.state = State.inactive
self.scarab = None
def activate(self):
self.state = State.active
def deactivate(self):
self.state = State.inactive
def pause(self):
self.state = State.paused
def resume(self):
self.state = State.active
def abort(self):
self.state = State.aborted
def freeze(self):
self.state = State.frozen
def defreeze(self):
self.state = State.inactive
|
Add dummy module of Task class#!/usr/bin/python3
from enum import Enum
class State(Enum):
inactive = 1
active = 2
running = 3
paused = 4
aborted = 5
failed = 6
frozen = 7
class Task(object):
def __init__(self):
self.state = State.inactive
self.scarab = None
def activate(self):
self.state = State.active
def deactivate(self):
self.state = State.inactive
def pause(self):
self.state = State.paused
def resume(self):
self.state = State.active
def abort(self):
self.state = State.aborted
def freeze(self):
self.state = State.frozen
def defreeze(self):
self.state = State.inactive
|
<commit_before><commit_msg>Add dummy module of Task class<commit_after>#!/usr/bin/python3
from enum import Enum
class State(Enum):
inactive = 1
active = 2
running = 3
paused = 4
aborted = 5
failed = 6
frozen = 7
class Task(object):
def __init__(self):
self.state = State.inactive
self.scarab = None
def activate(self):
self.state = State.active
def deactivate(self):
self.state = State.inactive
def pause(self):
self.state = State.paused
def resume(self):
self.state = State.active
def abort(self):
self.state = State.aborted
def freeze(self):
self.state = State.frozen
def defreeze(self):
self.state = State.inactive
|
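A quick illustration of the intended state transitions:

t = Task()
assert t.state is State.inactive
t.activate()
assert t.state is State.active
t.pause()
t.resume()
t.abort()
assert t.state is State.aborted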
|
3265a243e5bea63d49259d0e56b4270ca322e696
|
ckanext/requestdata/logic/actions.py
|
ckanext/requestdata/logic/actions.py
|
from ckan.plugins import toolkit
from ckan.logic import check_access
import ckan.lib.navl.dictization_functions as df
from ckanext.requestdata.logic import schema
from ckanext.requestdata.model import ckanextRequestdata
def request_create(context, data_dict):
'''Create new request data.
:param sender_name: The name of the sender who request data.
:type sender_name: string
:param organization: The sender's organization.
:type organization: string
:param email_address: The sender's email_address.
:type email_address: string
:param message_content: The content of the message.
:type message_content: string
:param package_name: The name of the package the data belongs to.
:type package_name: string
:param data_shared: Whether data is shared or not.
:type data_shared: boolean
:returns: the newly created request data
:rtype: dictionary
'''
check_access('requestdata_request_create', context, data_dict)
data, errors = df.validate(data_dict, schema.request_create_schema(),
context)
if errors:
raise toolkit.ValidationError(errors)
sender_name = data.get('sender_name')
organization = data.get('organization')
email_address = data.get('email_address')
message_content = data.get('message_content')
package_name = data.get('package_name')
data_shared = data.get('data_shared')
data = {
'sender_name': sender_name,
'organization': organization,
'email_address': email_address,
'message_content': message_content,
'package_name': package_name,
'data_shared': data_shared
}
requestdata = ckanextRequestdata(**data)
requestdata.save()
out = requestdata.as_dict()
return out
def request_show(context, data_dict):
pass
def request_list(context, data_dict):
    pass
def request_patch(context, data_dict):
    pass
def request_update(context, data_dict):
    pass
def request_delete(context, data_dict):
    pass
|
Add action for creating new request data
|
Add action for creating new request data
|
Python
|
agpl-3.0
|
ViderumGlobal/ckanext-requestdata,ViderumGlobal/ckanext-requestdata,ViderumGlobal/ckanext-requestdata,ViderumGlobal/ckanext-requestdata
|
Add action for creating new request data
|
from ckan.plugins import toolkit
from ckan.logic import check_access
import ckan.lib.navl.dictization_functions as df
from ckanext.requestdata.logic import schema
from ckanext.requestdata.model import ckanextRequestdata
def request_create(context, data_dict):
'''Create new request data.
:param sender_name: The name of the sender who request data.
:type sender_name: string
:param organization: The sender's organization.
:type organization: string
:param email_address: The sender's email_address.
:type email_address: string
:param message_content: The content of the message.
:type message_content: string
:param package_name: The name of the package the data belongs to.
:type package_name: string
:param data_shared: Whether data is shared or not.
:type data_shared: boolean
:returns: the newly created request data
:rtype: dictionary
'''
check_access('requestdata_request_create', context, data_dict)
data, errors = df.validate(data_dict, schema.request_create_schema(),
context)
if errors:
raise toolkit.ValidationError(errors)
sender_name = data.get('sender_name')
organization = data.get('organization')
email_address = data.get('email_address')
message_content = data.get('message_content')
package_name = data.get('package_name')
data_shared = data.get('data_shared')
data = {
'sender_name': sender_name,
'organization': organization,
'email_address': email_address,
'message_content': message_content,
'package_name': package_name,
'data_shared': data_shared
}
requestdata = ckanextRequestdata(**data)
requestdata.save()
out = requestdata.as_dict()
return out
def request_show(context, data_dict):
pass
def request_list(context, data_dict):
    pass
def request_patch(context, data_dict):
    pass
def request_update(context, data_dict):
    pass
def request_delete(context, data_dict):
    pass
|
<commit_before><commit_msg>Add action for creating new request data<commit_after>
|
from ckan.plugins import toolkit
from ckan.logic import check_access
import ckan.lib.navl.dictization_functions as df
from ckanext.requestdata.logic import schema
from ckanext.requestdata.model import ckanextRequestdata
def request_create(context, data_dict):
'''Create new request data.
:param sender_name: The name of the sender who request data.
:type sender_name: string
:param organization: The sender's organization.
:type organization: string
:param email_address: The sender's email_address.
:type email_address: string
:param message_content: The content of the message.
:type message_content: string
:param package_name: The name of the package the data belongs to.
:type package_name: string
:param data_shared: Whether data is shared or not.
:type data_shared: boolean
:returns: the newly created request data
:rtype: dictionary
'''
check_access('requestdata_request_create', context, data_dict)
data, errors = df.validate(data_dict, schema.request_create_schema(),
context)
if errors:
raise toolkit.ValidationError(errors)
sender_name = data.get('sender_name')
organization = data.get('organization')
email_address = data.get('email_address')
message_content = data.get('message_content')
package_name = data.get('package_name')
data_shared = data.get('data_shared')
data = {
'sender_name': sender_name,
'organization': organization,
'email_address': email_address,
'message_content': message_content,
'package_name': package_name,
'data_shared': data_shared
}
requestdata = ckanextRequestdata(**data)
requestdata.save()
out = requestdata.as_dict()
return out
def request_show(context, data_dict):
pass
def request_list(context, data_dict):
    pass
def request_patch(context, data_dict):
    pass
def request_update(context, data_dict):
    pass
def request_delete(context, data_dict):
    pass
|
Add action for creating new request datafrom ckan.plugins import toolkit
from ckan.logic import check_access
import ckan.lib.navl.dictization_functions as df
from ckanext.requestdata.logic import schema
from ckanext.requestdata.model import ckanextRequestdata
def request_create(context, data_dict):
'''Create new request data.
:param sender_name: The name of the sender who request data.
:type sender_name: string
:param organization: The sender's organization.
:type organization: string
:param email_address: The sender's email_address.
:type email_address: string
:param message_content: The content of the message.
:type message_content: string
:param package_name: The name of the package the data belongs to.
:type package_name: string
:param data_shared: Whether data is shared or not.
:type data_shared: boolean
:returns: the newly created request data
:rtype: dictionary
'''
check_access('requestdata_request_create', context, data_dict)
data, errors = df.validate(data_dict, schema.request_create_schema(),
context)
if errors:
raise toolkit.ValidationError(errors)
sender_name = data.get('sender_name')
organization = data.get('organization')
email_address = data.get('email_address')
message_content = data.get('message_content')
package_name = data.get('package_name')
data_shared = data.get('data_shared')
data = {
'sender_name': sender_name,
'organization': organization,
'email_address': email_address,
'message_content': message_content,
'package_name': package_name,
'data_shared': data_shared
}
requestdata = ckanextRequestdata(**data)
requestdata.save()
out = requestdata.as_dict()
return out
def request_show(context, data_dict):
pass
def request_list(context, data_dict):
    pass
def request_patch(context, data_dict):
    pass
def request_update(context, data_dict):
    pass
def request_delete(context, data_dict):
    pass
|
<commit_before><commit_msg>Add action for creating new request data<commit_after>from ckan.plugins import toolkit
from ckan.logic import check_access
import ckan.lib.navl.dictization_functions as df
from ckanext.requestdata.logic import schema
from ckanext.requestdata.model import ckanextRequestdata
def request_create(context, data_dict):
'''Create new request data.
:param sender_name: The name of the sender who request data.
:type sender_name: string
:param organization: The sender's organization.
:type organization: string
:param email_address: The sender's email_address.
:type email_address: string
:param message_content: The content of the message.
:type message_content: string
:param package_name: The name of the package the data belongs to.
:type package_name: string
:param data_shared: Whether data is shared or not.
:type data_shared: boolean
:returns: the newly created request data
:rtype: dictionary
'''
check_access('requestdata_request_create', context, data_dict)
data, errors = df.validate(data_dict, schema.request_create_schema(),
context)
if errors:
raise toolkit.ValidationError(errors)
sender_name = data.get('sender_name')
organization = data.get('organization')
email_address = data.get('email_address')
message_content = data.get('message_content')
package_name = data.get('package_name')
data_shared = data.get('data_shared')
data = {
'sender_name': sender_name,
'organization': organization,
'email_address': email_address,
'message_content': message_content,
'package_name': package_name,
'data_shared': data_shared
}
requestdata = ckanextRequestdata(**data)
requestdata.save()
out = requestdata.as_dict()
return out
def request_show(context, data_dict):
pass
def request_list(context, data_dict):
    pass
def request_patch(context, data_dict):
    pass
def request_update(context, data_dict):
    pass
def request_delete(context, data_dict):
    pass
|
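Assuming the action is registered under the name 'requestdata_request_create' in the plugin's get_actions() mapping (not shown in this commit), it would be invoked roughly like this; all field values are placeholders:

result = toolkit.get_action('requestdata_request_create')(
    {'user': 'admin'},
    {
        'sender_name': 'Jane Doe',
        'organization': 'Example Org',
        'email_address': '[email protected]',
        'message_content': 'Requesting access to the raw survey data.',
        'package_name': 'example-dataset',
        'data_shared': False,
    })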
|
eacf27ebc042799a586039c76aa9cdd29d03c03d
|
lily/deals/migrations/0008_auto_20150930_1720.py
|
lily/deals/migrations/0008_auto_20150930_1720.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count
def set_correct_status(apps, schema_editor):
"""
Some deals had incorrect status of new business.
This migration tries to fix that for two cases:
- When an account only has one deal.
- When an account has no deal with 'new business' status at all.
"""
deal_cls = apps.get_model('deals', 'Deal')
# All accounts with one deal -> set deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).filter(ac_count=1).values_list('account_id', flat=True)
deal_cls.objects.filter(account_id__in=account_id_list).update(new_business=True)
# All accounts with multiple deals, but none 'new business' -> set first deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).exclude(ac_count=1).values_list('account_id', flat=True)
for account_id in account_id_list:
# Fetch list with deals and check if any of them is new business.
if not deal_cls.objects.filter(account_id=account_id, new_business=True).exists():
# deal with status new business doesn't exist, so set first deal to 'new business'.
deal = deal_cls.objects.filter(account_id=account_id).order_by('created').first()
deal.new_business = True
deal.save()
def backwards_noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('deals', '0007_auto_20150902_1543'),
]
operations = [
migrations.RunPython(set_correct_status, backwards_noop),
]
|
Migrate certain deals to set status new business
|
Migrate certain deals to set status new business
|
Python
|
agpl-3.0
|
HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily,HelloLily/hellolily
|
Migrate certain deals to set status new business
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count
def set_correct_status(apps, schema_editor):
"""
Some deals had incorrect status of new business.
This migration tries to fix that for two cases:
- When an account only has one deal.
- When an account has no deal with 'new business' status at all.
"""
deal_cls = apps.get_model('deals', 'Deal')
# All accounts with one deal -> set deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).filter(ac_count=1).values_list('account_id', flat=True)
deal_cls.objects.filter(account_id__in=account_id_list).update(new_business=True)
# All accounts with multiple deals, but none 'new business' -> set first deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).exclude(ac_count=1).values_list('account_id', flat=True)
for account_id in account_id_list:
# Fetch list with deals and check if any of them is new business.
if not deal_cls.objects.filter(account_id=account_id, new_business=True).exists():
# deal with status new business doesn't exist, so set first deal to 'new business'.
deal = deal_cls.objects.filter(account_id=account_id).order_by('created').first()
deal.new_business = True
deal.save()
def backwards_noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('deals', '0007_auto_20150902_1543'),
]
operations = [
migrations.RunPython(set_correct_status, backwards_noop),
]
|
<commit_before><commit_msg>Migrate certain deals to set status new business<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count
def set_correct_status(apps, schema_editor):
"""
Some deals had incorrect status of new business.
This migration tries to fix that for two cases:
- When an account only has one deal.
- When an account has no deal with 'new business' status at all.
"""
deal_cls = apps.get_model('deals', 'Deal')
# All accounts with one deal -> set deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).filter(ac_count=1).values_list('account_id', flat=True)
deal_cls.objects.filter(account_id__in=account_id_list).update(new_business=True)
# All accounts with multiple deals, but none 'new business' -> set first deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).exclude(ac_count=1).values_list('account_id', flat=True)
for account_id in account_id_list:
# Fetch list with deals and check if any of them is new business.
if not deal_cls.objects.filter(account_id=account_id, new_business=True).exists():
# deal with status new business doesn't exist, so set first deal to 'new business'.
deal = deal_cls.objects.filter(account_id=account_id).order_by('created').first()
deal.new_business = True
deal.save()
def backwards_noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('deals', '0007_auto_20150902_1543'),
]
operations = [
migrations.RunPython(set_correct_status, backwards_noop),
]
|
Migrate certain deals to set status new business# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count
def set_correct_status(apps, schema_editor):
"""
Some deals had incorrect status of new business.
This migration tries to fix that for two cases:
- When an account only has one deal.
- When an account has no deal with 'new business' status at all.
"""
deal_cls = apps.get_model('deals', 'Deal')
# All accounts with one deal -> set deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).filter(ac_count=1).values_list('account_id', flat=True)
deal_cls.objects.filter(account_id__in=account_id_list).update(new_business=True)
# All accounts with multiple deals, but none 'new business' -> set first deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).exclude(ac_count=1).values_list('account_id', flat=True)
for account_id in account_id_list:
# Fetch list with deals and check if any of them is new business.
if not deal_cls.objects.filter(account_id=account_id, new_business=True).exists():
# deal with status new business doesn't exist, so set first deal to 'new business'.
deal = deal_cls.objects.filter(account_id=account_id).order_by('created').first()
deal.new_business = True
deal.save()
def backwards_noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('deals', '0007_auto_20150902_1543'),
]
operations = [
migrations.RunPython(set_correct_status, backwards_noop),
]
|
<commit_before><commit_msg>Migrate certain deals to set status new business<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.db.models import Count
def set_correct_status(apps, schema_editor):
"""
Some deals had incorrect status of new business.
This migration tries to fix that for two cases:
- When an account only has one deal.
- When an account has no deal with 'new business' status at all.
"""
deal_cls = apps.get_model('deals', 'Deal')
# All accounts with one deal -> set deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).filter(ac_count=1).values_list('account_id', flat=True)
deal_cls.objects.filter(account_id__in=account_id_list).update(new_business=True)
# All accounts with multiple deals, but none 'new business' -> set first deal to 'new business'.
account_id_list = deal_cls.objects.values('account').annotate(ac_count=Count('account')).exclude(ac_count=1).values_list('account_id', flat=True)
for account_id in account_id_list:
# Fetch list with deals and check if any of them is new business.
if not deal_cls.objects.filter(account_id=account_id, new_business=True).exists():
# deal with status new business doesn't exist, so set first deal to 'new business'.
deal = deal_cls.objects.filter(account_id=account_id).order_by('created').first()
deal.new_business = True
deal.save()
def backwards_noop(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('deals', '0007_auto_20150902_1543'),
]
operations = [
migrations.RunPython(set_correct_status, backwards_noop),
]
|
|
ab1bcd10d7abf226b4f85970eec6e27512a431f2
|
CodeFights/codefighterAttribute.py
|
CodeFights/codefighterAttribute.py
|
#!/usr/local/bin/python
# Code Fights Codefighter Attribute Problem
class CodeFighter(object):
def __init__(self, username, _id, xp, name):
self.username = username
self._id = _id
self.xp = xp
self.name = name
def __getattr__(self, attribute, default=None):
if default is None:
default = "{} attribute is not defined".format(attribute)
if attribute not in self.__dir__():
return default
else:
            return object.__getattribute__(self, attribute)  # avoid re-entering __getattr__
def codefighterAttribute(attribute):
codefighter = CodeFighter("annymaster", "1234567", "1500", "anny")
return getattr(codefighter, attribute)
def main():
tests = [
["_id", "1234567"],
["age", "age attribute is not defined"],
["name", "anny"],
["country", "country attribute is not defined"],
["I", "I attribute is not defined"]
]
for t in tests:
res = codefighterAttribute(t[0])
ans = t[1]
if ans == res:
print("PASSED: codefighterAttribute({}) returned {}"
.format(t[0], res))
else:
print("FAILED: codefighterAttribute({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights codefighter attribute problem
|
Solve Code Fights codefighter attribute problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights codefighter attribute problem
|
#!/usr/local/bin/python
# Code Fights Codefighter Attribute Problem
class CodeFighter(object):
def __init__(self, username, _id, xp, name):
self.username = username
self._id = _id
self.xp = xp
self.name = name
def __getattr__(self, attribute, default=None):
if default is None:
default = "{} attribute is not defined".format(attribute)
if attribute not in self.__dir__():
return default
else:
            return object.__getattribute__(self, attribute)  # avoid re-entering __getattr__
def codefighterAttribute(attribute):
codefighter = CodeFighter("annymaster", "1234567", "1500", "anny")
return getattr(codefighter, attribute)
def main():
tests = [
["_id", "1234567"],
["age", "age attribute is not defined"],
["name", "anny"],
["country", "country attribute is not defined"],
["I", "I attribute is not defined"]
]
for t in tests:
res = codefighterAttribute(t[0])
ans = t[1]
if ans == res:
print("PASSED: codefighterAttribute({}) returned {}"
.format(t[0], res))
else:
print("FAILED: codefighterAttribute({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights codefighter attribute problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Codefighter Attribute Problem
class CodeFighter(object):
def __init__(self, username, _id, xp, name):
self.username = username
self._id = _id
self.xp = xp
self.name = name
def __getattr__(self, attribute, default=None):
if default is None:
default = "{} attribute is not defined".format(attribute)
if attribute not in self.__dir__():
return default
else:
            return object.__getattribute__(self, attribute)  # avoid re-entering __getattr__
def codefighterAttribute(attribute):
codefighter = CodeFighter("annymaster", "1234567", "1500", "anny")
return getattr(codefighter, attribute)
def main():
tests = [
["_id", "1234567"],
["age", "age attribute is not defined"],
["name", "anny"],
["country", "country attribute is not defined"],
["I", "I attribute is not defined"]
]
for t in tests:
res = codefighterAttribute(t[0])
ans = t[1]
if ans == res:
print("PASSED: codefighterAttribute({}) returned {}"
.format(t[0], res))
else:
print("FAILED: codefighterAttribute({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights codefighter attribute problem#!/usr/local/bin/python
# Code Fights Codefighter Attribute Problem
class CodeFighter(object):
def __init__(self, username, _id, xp, name):
self.username = username
self._id = _id
self.xp = xp
self.name = name
def __getattr__(self, attribute, default=None):
if default is None:
default = "{} attribute is not defined".format(attribute)
if attribute not in self.__dir__():
return default
else:
            return object.__getattribute__(self, attribute)  # avoid re-entering __getattr__
def codefighterAttribute(attribute):
codefighter = CodeFighter("annymaster", "1234567", "1500", "anny")
return getattr(codefighter, attribute)
def main():
tests = [
["_id", "1234567"],
["age", "age attribute is not defined"],
["name", "anny"],
["country", "country attribute is not defined"],
["I", "I attribute is not defined"]
]
for t in tests:
res = codefighterAttribute(t[0])
ans = t[1]
if ans == res:
print("PASSED: codefighterAttribute({}) returned {}"
.format(t[0], res))
else:
print("FAILED: codefighterAttribute({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights codefighter attribute problem<commit_after>#!/usr/local/bin/python
# Code Fights Codefighter Attribute Problem
class CodeFighter(object):
def __init__(self, username, _id, xp, name):
self.username = username
self._id = _id
self.xp = xp
self.name = name
def __getattr__(self, attribute, default=None):
if default is None:
default = "{} attribute is not defined".format(attribute)
if attribute not in self.__dir__():
return default
else:
            return object.__getattribute__(self, attribute)  # avoid re-entering __getattr__
def codefighterAttribute(attribute):
codefighter = CodeFighter("annymaster", "1234567", "1500", "anny")
return getattr(codefighter, attribute)
def main():
tests = [
["_id", "1234567"],
["age", "age attribute is not defined"],
["name", "anny"],
["country", "country attribute is not defined"],
["I", "I attribute is not defined"]
]
for t in tests:
res = codefighterAttribute(t[0])
ans = t[1]
if ans == res:
print("PASSED: codefighterAttribute({}) returned {}"
.format(t[0], res))
else:
print("FAILED: codefighterAttribute({}) returned {}, answer: {}"
.format(t[0], res, ans))
if __name__ == '__main__':
main()
|
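One subtlety in the class above: __getattr__ only fires after normal attribute lookup has already failed, so the else branch is effectively a safety net. Going through object.__getattribute__ (as in the corrected branch) avoids re-entering __getattr__, which a bare self.attribute would do by looking up the literal name 'attribute'. A tiny demonstration:

fighter = CodeFighter('annymaster', '1234567', '1500', 'anny')
print(fighter.xp)       # found normally; __getattr__ is never called
print(fighter.country)  # lookup fails, __getattr__ returns the default message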
|
f0146cfdf639df8905f7240ca8ef939fbd328ece
|
pentagon/migration/migrations/migration_2_5_0.py
|
pentagon/migration/migrations/migration_2_5_0.py
|
from pentagon import migration
from pentagon.migration import *
from pentagon.component import core, inventory
from pentagon.helpers import merge_dict
import re
class Migration(migration.Migration):
_starting_version = '2.5.0'
_ending_version = '2.6.0'
def run(self):
for item in self.inventory:
inventory_path = "inventory/{}".format(item)
logging.debug(
'Processing Inventory Item: {}'
.format(inventory_path)
)
# Update version of VPC TF module
aws_vpc_file = "{}/terraform/aws_vpc.tf".format(inventory_path)
if os.path.exists(aws_vpc_file):
aws_vpc_file_content = self.get_file_content(aws_vpc_file)
aws_vpc_file_content = re.sub(r'terraform-vpc.git\?ref=v\d+\.\d+.\d+', 'terraform-vpc.git?ref=v3.0.0', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_secret_key\s+=.+', '', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_access_key\s+=.+', '', aws_vpc_file_content)
self.overwrite_file(aws_vpc_file, aws_vpc_file_content)
logging.info('Terraform VPC module updated to 3.0.0 in {}'.format(item))
# Remove TF AWS provider variables from secrets. No longer referenced directly in VPC module.
secret_file = "{}/config/private/secrets.yml".format(inventory_path)
if os.path.exists(secret_file):
secrets_file_content = self.get_file_content(secret_file)
original_secrets_content = secrets_file_content
secrets_file_content = re.sub(r'# Terraform.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_secret_key:.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_access_key:.*\n\n?', '', secrets_file_content)
self.overwrite_file(secret_file, secrets_file_content)
if original_secrets_content != secrets_file_content:
logging.warn("####### IMPORTANT: Secrets file has been updated #######")
logging.warn(" Update changed secrets file in 1Password: {}".format(secret_file))
logging.warn(" Terraform AWS provider variables removed in VPC module update and no longer needed in secrets.")
|
Add migration for VPC module update
|
Add migration for VPC module update
|
Python
|
apache-2.0
|
reactiveops/pentagon,reactiveops/pentagon,reactiveops/pentagon
|
Add migration for VPC module update
|
from pentagon import migration
from pentagon.migration import *
from pentagon.component import core, inventory
from pentagon.helpers import merge_dict
import re
class Migration(migration.Migration):
_starting_version = '2.5.0'
_ending_version = '2.6.0'
def run(self):
for item in self.inventory:
inventory_path = "inventory/{}".format(item)
logging.debug(
'Processing Inventory Item: {}'
.format(inventory_path)
)
# Update version of VPC TF module
aws_vpc_file = "{}/terraform/aws_vpc.tf".format(inventory_path)
if os.path.exists(aws_vpc_file):
aws_vpc_file_content = self.get_file_content(aws_vpc_file)
aws_vpc_file_content = re.sub(r'terraform-vpc.git\?ref=v\d+\.\d+.\d+', 'terraform-vpc.git?ref=v3.0.0', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_secret_key\s+=.+', '', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_access_key\s+=.+', '', aws_vpc_file_content)
self.overwrite_file(aws_vpc_file, aws_vpc_file_content)
logging.info('Terraform VPC module updated to 3.0.0 in {}'.format(item))
# Remove TF AWS provider variables from secrets. No longer referenced directly in VPC module.
secret_file = "{}/config/private/secrets.yml".format(inventory_path)
if os.path.exists(secret_file):
secrets_file_content = self.get_file_content(secret_file)
original_secrets_content = secrets_file_content
secrets_file_content = re.sub(r'# Terraform.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_secret_key:.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_access_key:.*\n\n?', '', secrets_file_content)
self.overwrite_file(secret_file, secrets_file_content)
if original_secrets_content != secrets_file_content:
logging.warn("####### IMPORTANT: Secrets file has been updated #######")
logging.warn(" Update changed secrets file in 1Password: {}".format(secret_file))
logging.warn(" Terraform AWS provider variables removed in VPC module update and no longer needed in secrets.")
|
<commit_before><commit_msg>Add migration for VPC module update<commit_after>
|
from pentagon import migration
from pentagon.migration import *
from pentagon.component import core, inventory
from pentagon.helpers import merge_dict
import re
class Migration(migration.Migration):
_starting_version = '2.5.0'
_ending_version = '2.6.0'
def run(self):
for item in self.inventory:
inventory_path = "inventory/{}".format(item)
logging.debug(
'Processing Inventory Item: {}'
.format(inventory_path)
)
# Update version of VPC TF module
aws_vpc_file = "{}/terraform/aws_vpc.tf".format(inventory_path)
if os.path.exists(aws_vpc_file):
aws_vpc_file_content = self.get_file_content(aws_vpc_file)
aws_vpc_file_content = re.sub(r'terraform-vpc.git\?ref=v\d+\.\d+.\d+', 'terraform-vpc.git?ref=v3.0.0', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_secret_key\s+=.+', '', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_access_key\s+=.+', '', aws_vpc_file_content)
self.overwrite_file(aws_vpc_file, aws_vpc_file_content)
logging.info('Terraform VPC module updated to 3.0.0 in {}'.format(item))
# Remove TF AWS provider variables from secrets. No longer referenced directly in VPC module.
secret_file = "{}/config/private/secrets.yml".format(inventory_path)
if os.path.exists(secret_file):
secrets_file_content = self.get_file_content(secret_file)
original_secrets_content = secrets_file_content
secrets_file_content = re.sub(r'# Terraform.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_secret_key:.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_access_key:.*\n\n?', '', secrets_file_content)
self.overwrite_file(secret_file, secrets_file_content)
if original_secrets_content != secrets_file_content:
logging.warn("####### IMPORTANT: Secrets file has been updated #######")
logging.warn(" Update changed secrets file in 1Password: {}".format(secret_file))
logging.warn(" Terraform AWS provider variables removed in VPC module update and no longer needed in secrets.")
|
Add migration for VPC module updatefrom pentagon import migration
from pentagon.migration import *
from pentagon.component import core, inventory
from pentagon.helpers import merge_dict
import re
class Migration(migration.Migration):
_starting_version = '2.5.0'
_ending_version = '2.6.0'
def run(self):
for item in self.inventory:
inventory_path = "inventory/{}".format(item)
logging.debug(
'Processing Inventory Item: {}'
.format(inventory_path)
)
# Update version of VPC TF module
aws_vpc_file = "{}/terraform/aws_vpc.tf".format(inventory_path)
if os.path.exists(aws_vpc_file):
aws_vpc_file_content = self.get_file_content(aws_vpc_file)
aws_vpc_file_content = re.sub(r'terraform-vpc.git\?ref=v\d+\.\d+.\d+', 'terraform-vpc.git?ref=v3.0.0', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_secret_key\s+=.+', '', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_access_key\s+=.+', '', aws_vpc_file_content)
self.overwrite_file(aws_vpc_file, aws_vpc_file_content)
logging.info('Terraform VPC module updated to 3.0.0 in {}'.format(item))
# Remove TF AWS provider variables from secrets. No longer referenced directly in VPC module.
secret_file = "{}/config/private/secrets.yml".format(inventory_path)
if os.path.exists(secret_file):
secrets_file_content = self.get_file_content(secret_file)
original_secrets_content = secrets_file_content
secrets_file_content = re.sub(r'# Terraform.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_secret_key:.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_access_key:.*\n\n?', '', secrets_file_content)
self.overwrite_file(secret_file, secrets_file_content)
if original_secrets_content != secrets_file_content:
logging.warn("####### IMPORTANT: Secrets file has been updated #######")
logging.warn(" Update changed secrets file in 1Password: {}".format(secret_file))
logging.warn(" Terraform AWS provider variables removed in VPC module update and no longer needed in secrets.")
|
<commit_before><commit_msg>Add migration for VPC module update<commit_after>from pentagon import migration
from pentagon.migration import *
from pentagon.component import core, inventory
from pentagon.helpers import merge_dict
import re
class Migration(migration.Migration):
_starting_version = '2.5.0'
_ending_version = '2.6.0'
def run(self):
for item in self.inventory:
inventory_path = "inventory/{}".format(item)
logging.debug(
'Processing Inventory Item: {}'
.format(inventory_path)
)
# Update version of VPC TF module
aws_vpc_file = "{}/terraform/aws_vpc.tf".format(inventory_path)
if os.path.exists(aws_vpc_file):
aws_vpc_file_content = self.get_file_content(aws_vpc_file)
aws_vpc_file_content = re.sub(r'terraform-vpc.git\?ref=v\d+\.\d+.\d+', 'terraform-vpc.git?ref=v3.0.0', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_secret_key\s+=.+', '', aws_vpc_file_content)
aws_vpc_file_content = re.sub(r'\n\s*aws_access_key\s+=.+', '', aws_vpc_file_content)
self.overwrite_file(aws_vpc_file, aws_vpc_file_content)
logging.info('Terraform VPC module updated to 3.0.0 in {}'.format(item))
# Remove TF AWS provider variables from secrets. No longer referenced directly in VPC module.
secret_file = "{}/config/private/secrets.yml".format(inventory_path)
if os.path.exists(secret_file):
secrets_file_content = self.get_file_content(secret_file)
original_secrets_content = secrets_file_content
secrets_file_content = re.sub(r'# Terraform.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_secret_key:.*\n', '', secrets_file_content)
secrets_file_content = re.sub(r'TF_VAR_aws_access_key:.*\n\n?', '', secrets_file_content)
self.overwrite_file(secret_file, secrets_file_content)
if original_secrets_content != secrets_file_content:
logging.warn("####### IMPORTANT: Secrets file has been updated #######")
logging.warn(" Update changed secrets file in 1Password: {}".format(secret_file))
logging.warn(" Terraform AWS provider variables removed in VPC module update and no longer needed in secrets.")
|
|
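Aside (not part of the record above): the migration's core move is a re.sub that rewrites a pinned git ref in Terraform source text. A standalone sketch with illustrative file content (the example URL is made up; the version dots are escaped here, which the record's own pattern leaves bare):

# Sketch: bump a pinned git ref with re.sub, the same pattern the
# migration above applies to the terraform-vpc module source line.
import re

tf = 'source = "git::https://example.com/terraform-vpc.git?ref=v2.1.4"'
bumped = re.sub(r'terraform-vpc\.git\?ref=v\d+\.\d+\.\d+',
                'terraform-vpc.git?ref=v3.0.0', tf)
print(bumped)  # ref is now v3.0.0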
fa475825a9d31aab6f5f4cb8851531ec5207315c
|
scripts/kanimaji_batch.py
|
scripts/kanimaji_batch.py
|
#!/usr/bin/env python2
import os
import progressbar
svgs = filter(lambda f: f.endswith(".svg"), os.listdir("."))
bar = progressbar.ProgressBar()
for svg in bar(svgs):
os.system("./kanimaji/kanimaji.py %s > /dev/null" % svg)
|
Add batch script for kanji animations
|
Add batch script for kanji animations
|
Python
|
apache-2.0
|
Tenchi2xh/rem-v2,Tenchi2xh/rem-v2,Tenchi2xh/rem-v2
|
Add batch script for kanji animations
|
#!/usr/bin/env python2
import os
import progressbar
svgs = filter(lambda f: f.endswith(".svg"), os.listdir("."))
bar = progressbar.ProgressBar()
for svg in bar(svgs):
os.system("./kanimaji/kanimaji.py %s > /dev/null" % svg)
|
<commit_before><commit_msg>Add batch script for kanji animations<commit_after>
|
#!/usr/bin/env python2
import os
import progressbar
svgs = filter(lambda f: f.endswith(".svg"), os.listdir("."))
bar = progressbar.ProgressBar()
for svg in bar(svgs):
os.system("./kanimaji/kanimaji.py %s > /dev/null" % svg)
|
Add batch script for kanji animations#!/usr/bin/env python2
import os
import progressbar
svgs = filter(lambda f: f.endswith(".svg"), os.listdir("."))
bar = progressbar.ProgressBar()
for svg in bar(svgs):
os.system("./kanimaji/kanimaji.py %s > /dev/null" % svg)
|
<commit_before><commit_msg>Add batch script for kanji animations<commit_after>#!/usr/bin/env python2
import os
import progressbar
svgs = filter(lambda f: f.endswith(".svg"), os.listdir("."))
bar = progressbar.ProgressBar()
for svg in bar(svgs):
os.system("./kanimaji/kanimaji.py %s > /dev/null" % svg)
|
|
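Aside (not part of the record above): the batch script discards each os.system exit status. A subprocess-based sketch of the same loop, assuming the same ./kanimaji/kanimaji.py layout, that surfaces per-file failures:

# Sketch: the same per-file loop, but failures are reported instead of
# being silently dropped.
import os
import subprocess

for svg in sorted(f for f in os.listdir(".") if f.endswith(".svg")):
    with open(os.devnull, "w") as devnull:
        rc = subprocess.call(["./kanimaji/kanimaji.py", svg], stdout=devnull)
    if rc != 0:
        print("failed: %s (exit %d)" % (svg, rc))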
b4fa0194d995fd2ae20a11d8f27a6273e1f01aa9
|
sci_lib.py
|
sci_lib.py
|
#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.
|
Python
|
mit
|
ssalesky/Science-Library
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.
|
#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
<commit_before><commit_msg>Add function to read 3d direct access Fortran binary files into NumPy arrays.<commit_after>
|
#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
Add function to read 3d direct access Fortran binary files into NumPy arrays.#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
<commit_before><commit_msg>Add function to read 3d direct access Fortran binary files into NumPy arrays.<commit_after>#!/usr/bin/python
#Author: Scott T. Salesky
#Created: 12.6.2014
#Purpose: Collection of functions, routines to use
#Python for scientific work
#----------------------------------------------
|
|
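Aside (not part of the record above): the stored contents show only the file header, so the function the commit message describes is not in the dump. Below is an assumed sketch of one common way to read a 3d direct-access Fortran binary into NumPy; the function name, signature, and defaults are hypothetical:

# Sketch (assumed implementation, not the author's code): direct-access
# Fortran files carry no record markers, so a flat read plus a
# column-major reshape recovers the 3d field.
import numpy as np

def read_fortran_da(filename, nx, ny, nz, dtype=np.float64):
    data = np.fromfile(filename, dtype=dtype, count=nx * ny * nz)
    return data.reshape((nx, ny, nz), order='F')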
700fa0144c5276d8e31c01a243340f6cbac07e8f
|
sentry/client/handlers.py
|
sentry/client/handlers.py
|
import logging
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
get_client().create_from_record(record)
|
import logging
import sys
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
# Avoid typical config issues by overriding loggers behavior
if record.name == 'sentry.errors':
print >> sys.stderr, record.message
return
get_client().create_from_record(record)
|
Add a safety net for recursive logging
|
Add a safety net for recursive logging
|
Python
|
bsd-3-clause
|
ewdurbin/sentry,hongliang5623/sentry,Photonomie/raven-python,ewdurbin/raven-python,daevaorn/sentry,NickPresta/sentry,dcramer/sentry-old,looker/sentry,NickPresta/sentry,chayapan/django-sentry,Kryz/sentry,BuildingLink/sentry,gencer/sentry,boneyao/sentry,danriti/raven-python,imankulov/sentry,nikolas/raven-python,mvaled/sentry,gencer/sentry,inspirehep/raven-python,arthurlogilab/raven-python,daevaorn/sentry,recht/raven-python,icereval/raven-python,drcapulet/sentry,ewdurbin/sentry,akheron/raven-python,boneyao/sentry,JamesMura/sentry,zenefits/sentry,johansteffner/raven-python,ewdurbin/sentry,gg7/sentry,SilentCircle/sentry,dbravender/raven-python,alex/raven,alex/sentry,zenefits/sentry,someonehan/raven-python,JTCunning/sentry,JTCunning/sentry,jbarbuto/raven-python,beniwohli/apm-agent-python,vperron/sentry,someonehan/raven-python,korealerts1/sentry,arthurlogilab/raven-python,Kronuz/django-sentry,mvaled/sentry,beni55/sentry,llonchj/sentry,mitsuhiko/raven,beni55/sentry,tarkatronic/opbeat_python,BuildingLink/sentry,WoLpH/django-sentry,jbarbuto/raven-python,Goldmund-Wyldebeast-Wunderliebe/raven-python,nikolas/raven-python,gencer/sentry,recht/raven-python,alexm92/sentry,jbarbuto/raven-python,wong2/sentry,pauloschilling/sentry,lepture/raven-python,felixbuenemann/sentry,nicholasserra/sentry,icereval/raven-python,percipient/raven-python,jmagnusson/raven-python,Kronuz/django-sentry,Natim/sentry,icereval/raven-python,SilentCircle/sentry,songyi199111/sentry,argonemyth/sentry,beeftornado/sentry,camilonova/sentry,NickPresta/sentry,smarkets/raven-python,akalipetis/raven-python,recht/raven-python,chayapan/django-sentry,inspirehep/raven-python,patrys/opbeat_python,SilentCircle/sentry,zenefits/sentry,ronaldevers/raven-python,beniwohli/apm-agent-python,kevinlondon/sentry,jean/sentry,Goldmund-Wyldebeast-Wunderliebe/raven-python,mvaled/sentry,BayanGroup/sentry,gencer/sentry,jmp0xf/raven-python,kevinastone/sentry,openlabs/raven,daevaorn/sentry,patrys/opbeat_python,wong2/sentry,JamesMura/sentry,WoLpH/django-sentry,Photonomie/raven-python,nikolas/raven-python,Kryz/sentry,looker/sentry,songyi199111/sentry,ifduyue/sentry,NickPresta/sentry,1tush/sentry,mitsuhiko/sentry,fotinakis/sentry,tarkatronic/opbeat_python,dirtycoder/opbeat_python,mvaled/sentry,vperron/sentry,korealerts1/sentry,dirtycoder/opbeat_python,jean/sentry,nikolas/raven-python,alex/sentry,jmagnusson/raven-python,percipient/raven-python,argonemyth/sentry,hongliang5623/sentry,JackDanger/sentry,kevinlondon/sentry,pauloschilling/sentry,fuziontech/sentry,tbarbugli/sentry_fork,rdio/sentry,smarkets/raven-python,looker/sentry,BuildingLink/sentry,camilonova/sentry,JTCunning/sentry,ifduyue/sentry,someonehan/raven-python,jean/sentry,Kryz/sentry,hzy/raven-python,arthurlogilab/raven-python,JamesMura/sentry,felixbuenemann/sentry,ronaldevers/raven-python,fuziontech/sentry,ifduyue/sentry,llonchj/sentry,SilentCircle/sentry,1tush/sentry,kevinastone/sentry,looker/sentry,zenefits/sentry,pauloschilling/sentry,alex/sentry,jbarbuto/raven-python,looker/sentry,collective/mr.poe,gg7/sentry,felixbuenemann/sentry,1tush/sentry,jean/sentry,ronaldevers/raven-python,Natim/sentry,BuildingLink/sentry,daikeren/opbeat_python,Natim/sentry,Goldmund-Wyldebeast-Wunderliebe/raven-python,wujuguang/sentry,wujuguang/sentry,akalipetis/raven-python,JackDanger/sentry,BayanGroup/sentry,arthurlogilab/raven-python,boneyao/sentry,BayanGroup/sentry,mitsuhiko/raven,tbarbugli/sentry_fork,dcramer/sentry-old,Goldmund-Wyldebeast-Wunderliebe/raven-python,daikeren/opbeat_python,ticosax/opbeat_python,beeftornado/sentry,jm
p0xf/raven-python,dbravender/raven-python,jokey2k/sentry,drcapulet/sentry,WoLpH/django-sentry,ticosax/opbeat_python,fotinakis/sentry,primepix/django-sentry,dcramer/sentry-old,BuildingLink/sentry,ewdurbin/raven-python,mvaled/sentry,ngonzalvez/sentry,llonchj/sentry,beni55/sentry,smarkets/raven-python,TedaLIEz/sentry,lepture/raven-python,hongliang5623/sentry,primepix/django-sentry,hzy/raven-python,akheron/raven-python,lopter/raven-python-old,Kronuz/django-sentry,nicholasserra/sentry,daevaorn/sentry,TedaLIEz/sentry,fotinakis/sentry,rdio/sentry,hzy/raven-python,vperron/sentry,imankulov/sentry,ifduyue/sentry,mvaled/sentry,korealerts1/sentry,ngonzalvez/sentry,tbarbugli/sentry_fork,rdio/sentry,getsentry/raven-python,chayapan/django-sentry,rdio/sentry,johansteffner/raven-python,getsentry/raven-python,jean/sentry,TedaLIEz/sentry,patrys/opbeat_python,ngonzalvez/sentry,camilonova/sentry,JackDanger/sentry,smarkets/raven-python,akheron/raven-python,kevinastone/sentry,Photonomie/raven-python,JamesMura/sentry,wujuguang/sentry,ticosax/opbeat_python,inspirehep/raven-python,beeftornado/sentry,argonemyth/sentry,beniwohli/apm-agent-python,tarkatronic/opbeat_python,ewdurbin/raven-python,fuziontech/sentry,zenefits/sentry,songyi199111/sentry,jokey2k/sentry,daikeren/opbeat_python,gencer/sentry,patrys/opbeat_python,dirtycoder/opbeat_python,icereval/raven-python,getsentry/raven-python,jmp0xf/raven-python,danriti/raven-python,nicholasserra/sentry,inspirehep/raven-python,wong2/sentry,gg7/sentry,imankulov/sentry,percipient/raven-python,johansteffner/raven-python,ifduyue/sentry,beniwohli/apm-agent-python,danriti/raven-python,alexm92/sentry,fotinakis/sentry,lepture/raven-python,akalipetis/raven-python,primepix/django-sentry,jmagnusson/raven-python,jokey2k/sentry,alexm92/sentry,JamesMura/sentry,drcapulet/sentry,kevinlondon/sentry,dbravender/raven-python,mitsuhiko/sentry
|
import logging
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
get_client().create_from_record(record)Add a safety net for recursive logging
|
import logging
import sys
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
# Avoid typical config issues by overriding loggers behavior
if record.name == 'sentry.errors':
print >> sys.stderr, record.message
return
get_client().create_from_record(record)
|
<commit_before>import logging
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
get_client().create_from_record(record)<commit_msg>Add a safety net for recursive logging<commit_after>
|
import logging
import sys
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
# Avoid typical config issues by overriding loggers behavior
if record.name == 'sentry.errors':
print >> sys.stderr, record.message
return
get_client().create_from_record(record)
|
import logging
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
get_client().create_from_record(record)Add a safety net for recursive loggingimport logging
import sys
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
# Avoid typical config issues by overriding loggers behavior
if record.name == 'sentry.errors':
print >> sys.stderr, record.message
return
get_client().create_from_record(record)
|
<commit_before>import logging
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
get_client().create_from_record(record)<commit_msg>Add a safety net for recursive logging<commit_after>import logging
import sys
class SentryHandler(logging.Handler):
def emit(self, record):
from sentry.client.models import get_client
# Avoid typical config issues by overriding loggers behavior
if record.name == 'sentry.errors':
print >> sys.stderr, record.message
return
get_client().create_from_record(record)
|
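Aside (not part of the record above): the patch breaks the feedback loop by diverting records from the internal 'sentry.errors' logger to stderr before they can re-enter the handler. A self-contained sketch of that guard; getMessage() is used here because record.message is only set after formatting:

import logging
import sys

class GuardedHandler(logging.Handler):
    def emit(self, record):
        # Divert the internal error logger instead of handling it, so a
        # failure inside emit() cannot loop back through this handler.
        if record.name == 'sentry.errors':
            sys.stderr.write(record.getMessage() + '\n')
            return
        sys.stdout.write('captured: %s\n' % record.getMessage())

log = logging.getLogger('sentry.errors')
log.addHandler(GuardedHandler())
log.propagate = False
log.error('goes to stderr, no recursion')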
e4d7375b2706dd46cf08831e5f005ce3a3c17553
|
embedding_server/embedding_server.py
|
embedding_server/embedding_server.py
|
from flask import Flask
from flask import request, abort, jsonify
import numpy as np
import gensim
import re
import string
app = Flask(__name__)
model = None
maxSeqLength = 70
numFeatures = 300
def get_tokens(s):
return re.sub('[{}]'.format(string.punctuation), '', s).lower().split()
def get_sequence_matrix(tokens, maxSeqLength, numFeatures, model):
matrix = []
for i in range(maxSeqLength):
if i < len(tokens) and tokens[i] in model:
matrix.append(model[tokens[i]])
else:
pass
matrix = np.array(matrix)
if matrix.shape[0] == 0:
matrix = np.zeros((maxSeqLength, numFeatures))
elif matrix.shape[0] < maxSeqLength:
matrix = np.concatenate((matrix, np.zeros((maxSeqLength - matrix.shape[0], numFeatures))))
return matrix
@app.route("/init", methods=['POST'])
def init():
global model
try:
model_path = request.form['filepath']
is_binary = bool(request.form['is_binary'])
model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=is_binary)
except PermissionError:
abort(400, 'Could not load vectors - invalid path (%s)' % model_path)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return 'OK'
@app.route("/check/<string:word>", methods=['GET'])
def check(word):
if model is None:
abort(500, 'Model is not initialized.')
if word not in model:
abort(500, 'Word was not found.')
return jsonify({word: model[word].tolist()})
@app.route("/transform", methods=['POST'])
def transform():
if model is None:
abort(500, 'Model is not initialized.')
global maxSeqLength
global numFeatures
global model
response = {}
for item in request.data['texts']:
try:
id = item['id']
vector_sequence = get_sequence_matrix(get_tokens(item['text']), maxSeqLength, numFeatures, model).tolist()
response[id] = vector_sequence
except KeyError as e:
abort(400, 'Wrong JSON format, key %s' % e)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return jsonify(response)
|
Add server for embedding transformation
|
Add server for embedding transformation
|
Python
|
mit
|
DolphinBlockchainIntelligence/bitcointalk-sentiment
|
Add server for embedding transformation
|
from flask import Flask
from flask import request, abort, jsonify
import numpy as np
import gensim
import re
import string
app = Flask(__name__)
model = None
maxSeqLength = 70
numFeatures = 300
def get_tokens(s):
return re.sub('[{}]'.format(string.punctuation), '', s).lower().split()
def get_sequence_matrix(tokens, maxSeqLength, numFeatures, model):
matrix = []
for i in range(maxSeqLength):
if i < len(tokens) and tokens[i] in model:
matrix.append(model[tokens[i]])
else:
pass
matrix = np.array(matrix)
if matrix.shape[0] == 0:
matrix = np.zeros((maxSeqLength, numFeatures))
elif matrix.shape[0] < maxSeqLength:
matrix = np.concatenate((matrix, np.zeros((maxSeqLength - matrix.shape[0], numFeatures))))
return matrix
@app.route("/init", methods=['POST'])
def init():
global model
try:
model_path = request.form['filepath']
is_binary = bool(request.form['is_binary'])
model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=is_binary)
except PermissionError:
abort(400, 'Could not load vectors - invalid path (%s)' % model_path)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return 'OK'
@app.route("/check/<string:word>", methods=['GET'])
def check(word):
if model is None:
abort(500, 'Model is not initialized.')
if word not in model:
abort(500, 'Word was not found.')
return jsonify({word: model[word].tolist()})
@app.route("/transform", methods=['POST'])
def transform():
if model is None:
abort(500, 'Model is not initialized.')
global maxSeqLength
global numFeatures
global model
response = {}
for item in request.data['texts']:
try:
id = item['id']
vector_sequence = get_sequence_matrix(get_tokens(item['text']), maxSeqLength, numFeatures, model).tolist()
response[id] = vector_sequence
except KeyError as e:
abort(400, 'Wrong JSON format, key %s' % e)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return jsonify(response)
|
<commit_before><commit_msg>Add server for embedding transformation<commit_after>
|
from flask import Flask
from flask import request, abort, jsonify
import numpy as np
import gensim
import re
import string
app = Flask(__name__)
model = None
maxSeqLength = 70
numFeatures = 300
def get_tokens(s):
return re.sub('[{}]'.format(string.punctuation), '', s).lower().split()
def get_sequence_matrix(tokens, maxSeqLength, numFeatures, model):
matrix = []
for i in range(maxSeqLength):
if i < len(tokens) and tokens[i] in model:
matrix.append(model[tokens[i]])
else:
pass
matrix = np.array(matrix)
if matrix.shape[0] == 0:
matrix = np.zeros((maxSeqLength, numFeatures))
elif matrix.shape[0] < maxSeqLength:
matrix = np.concatenate((matrix, np.zeros((maxSeqLength - matrix.shape[0], numFeatures))))
return matrix
@app.route("/init", methods=['POST'])
def init():
global model
try:
model_path = request.form['filepath']
is_binary = bool(request.form['is_binary'])
model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=is_binary)
except PermissionError:
abort(400, 'Could not load vectors - invalid path (%s)' % model_path)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return 'OK'
@app.route("/check/<string:word>", methods=['GET'])
def check(word):
if model is None:
abort(500, 'Model is not initialized.')
if word not in model:
abort(500, 'Word was not found.')
return jsonify({word: model[word].tolist()})
@app.route("/transform", methods=['POST'])
def transform():
if model is None:
abort(500, 'Model is not initialized.')
global maxSeqLength
global numFeatures
global model
response = {}
for item in request.data['texts']:
try:
id = item['id']
vector_sequence = get_sequence_matrix(get_tokens(item['text']), maxSeqLength, numFeatures, model).tolist()
response[id] = vector_sequence
except KeyError as e:
abort(400, 'Wrong JSON format, key %s' % e)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return jsonify(response)
|
Add server for embedding transformationfrom flask import Flask
from flask import request, abort, jsonify
import numpy as np
import gensim
import re
import string
app = Flask(__name__)
model = None
maxSeqLength = 70
numFeatures = 300
def get_tokens(s):
return re.sub('[{}]'.format(string.punctuation), '', s).lower().split()
def get_sequence_matrix(tokens, maxSeqLength, numFeatures, model):
matrix = []
for i in range(maxSeqLength):
if i < len(tokens) and tokens[i] in model:
matrix.append(model[tokens[i]])
else:
pass
matrix = np.array(matrix)
if matrix.shape[0] == 0:
matrix = np.zeros((maxSeqLength, numFeatures))
elif matrix.shape[0] < maxSeqLength:
matrix = np.concatenate((matrix, np.zeros((maxSeqLength - matrix.shape[0], numFeatures))))
return matrix
@app.route("/init", methods=['POST'])
def init():
global model
try:
model_path = request.form['filepath']
is_binary = bool(request.form['is_binary'])
model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=is_binary)
except PermissionError:
abort(400, 'Could not load vectors - invalid path (%s)' % model_path)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return 'OK'
@app.route("/check/<string:word>", methods=['GET'])
def check(word):
if model is None:
abort(500, 'Model is not initialized.')
if word not in model:
abort(500, 'Word was not found.')
return jsonify({word: model[word].tolist()})
@app.route("/transform", methods=['POST'])
def transform():
if model is None:
abort(500, 'Model is not initialized.')
global maxSeqLength
global numFeatures
global model
response = {}
for item in request.data['texts']:
try:
id = item['id']
vector_sequence = get_sequence_matrix(get_tokens(item['text']), maxSeqLength, numFeatures, model).tolist()
response[id] = vector_sequence
except KeyError as e:
abort(400, 'Wrong JSON format, key %s' % e)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return jsonify(response)
|
<commit_before><commit_msg>Add server for embedding transformation<commit_after>from flask import Flask
from flask import request, abort, jsonify
import numpy as np
import gensim
import re
import string
app = Flask(__name__)
model = None
maxSeqLength = 70
numFeatures = 300
def get_tokens(s):
return re.sub('[{}]'.format(string.punctuation), '', s).lower().split()
def get_sequence_matrix(tokens, maxSeqLength, numFeatures, model):
matrix = []
for i in range(maxSeqLength):
if i < len(tokens) and tokens[i] in model:
matrix.append(model[tokens[i]])
else:
pass
matrix = np.array(matrix)
if matrix.shape[0] == 0:
matrix = np.zeros((maxSeqLength, numFeatures))
elif matrix.shape[0] < maxSeqLength:
matrix = np.concatenate((matrix, np.zeros((maxSeqLength - matrix.shape[0], numFeatures))))
return matrix
@app.route("/init", methods=['POST'])
def init():
global model
try:
model_path = request.form['filepath']
is_binary = bool(request.form['is_binary'])
model = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=is_binary)
except PermissionError:
abort(400, 'Could not load vectors - invalid path (%s)' % model_path)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return 'OK'
@app.route("/check/<string:word>", methods=['GET'])
def check(word):
if model is None:
abort(500, 'Model is not initialized.')
if word not in model:
abort(500, 'Word was not found.')
return jsonify({word: model[word].tolist()})
@app.route("/transform", methods=['POST'])
def transform():
if model is None:
abort(500, 'Model is not initialized.')
global maxSeqLength
global numFeatures
global model
response = {}
for item in request.data['texts']:
try:
id = item['id']
vector_sequence = get_sequence_matrix(get_tokens(item['text']), maxSeqLength, numFeatures, model).tolist()
response[id] = vector_sequence
except KeyError as e:
abort(400, 'Wrong JSON format, key %s' % e)
except Exception as e:
abort(500, 'Internal server error: %s' % str(e))
return jsonify(response)
|
|
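Aside (not part of the record above): get_sequence_matrix skips out-of-vocabulary tokens and zero-pads the tail so every text yields a fixed (maxSeqLength, numFeatures) matrix. The same step standalone, with a toy two-feature vocabulary in place of real word2vec vectors:

import numpy as np

model = {'btc': np.array([1.0, 2.0]), 'moon': np.array([3.0, 4.0])}
max_len, n_feat = 4, 2

tokens = ['btc', 'unknown', 'moon']     # OOV tokens are skipped
rows = [model[t] for t in tokens if t in model]
matrix = np.array(rows) if rows else np.zeros((0, n_feat))
if matrix.shape[0] < max_len:
    # Zero-pad the tail so every text has the same shape.
    pad = np.zeros((max_len - matrix.shape[0], n_feat))
    matrix = np.concatenate((matrix, pad))
print(matrix.shape)  # (4, 2)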
02ef9c6e77ce4ba1521eaf590b2bcba278ab2814
|
tests/test_wendy.py
|
tests/test_wendy.py
|
# test_wendy.py: some basic tests
import numpy
import wendy
def test_energy_conservation():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,1.,1.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses_python():
# Test that energy is conserved for a simple problem, using Python method
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody_python(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_momentum_conservation_unequalmasses():
# Test that momentum is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
p= wendy.momentum(v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.momentum(tv,m)-p) < 10.**-10., "Momentum not conserved during simple N-body integration"
cnt+= 1
return None
|
Add a bunch of simple tests
|
Add a bunch of simple tests
|
Python
|
mit
|
jobovy/wendy,jobovy/wendy
|
Add a bunch of simple tests
|
# test_wendy.py: some basic tests
import numpy
import wendy
def test_energy_conservation():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,1.,1.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses_python():
# Test that energy is conserved for a simple problem, using Python method
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody_python(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_momentum_conservation_unequalmasses():
# Test that momentum is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
p= wendy.momentum(v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.momentum(tv,m)-p) < 10.**-10., "Momentum not conserved during simple N-body integration"
cnt+= 1
return None
|
<commit_before><commit_msg>Add a bunch of simple tests<commit_after>
|
# test_wendy.py: some basic tests
import numpy
import wendy
def test_energy_conservation():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,1.,1.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses_python():
# Test that energy is conserved for a simple problem, using Python method
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody_python(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_momentum_conservation_unequalmasses():
# Test that momentum is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
p= wendy.momentum(v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.momentum(tv,m)-p) < 10.**-10., "Momentum not conserved during simple N-body integration"
cnt+= 1
return None
|
Add a bunch of simple tests# test_wendy.py: some basic tests
import numpy
import wendy
def test_energy_conservation():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,1.,1.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses_python():
# Test that energy is conserved for a simple problem, using Python method
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody_python(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_momentum_conservation_unequalmasses():
# Test that momentum is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
p= wendy.momentum(v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.momentum(tv,m)-p) < 10.**-10., "Momentum not conserved during simple N-body integration"
cnt+= 1
return None
|
<commit_before><commit_msg>Add a bunch of simple tests<commit_after># test_wendy.py: some basic tests
import numpy
import wendy
def test_energy_conservation():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,1.,1.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses():
# Test that energy is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_energy_conservation_unequalmasses_python():
# Test that energy is conserved for a simple problem, using Python method
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody_python(x,v,m,0.05)
E= wendy.energy(x,v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.energy(tx,tv,m)-E) < 10.**-10., "Energy not conserved during simple N-body integration"
cnt+= 1
return None
def test_momentum_conservation_unequalmasses():
# Test that momentum is conserved for a simple problem
x= numpy.array([-1.1,0.1,1.3])
v= numpy.array([3.,2.,-5.])
m= numpy.array([1.,2.,3.])
g= wendy.nbody(x,v,m,0.05)
p= wendy.momentum(v,m)
cnt= 0
while cnt < 100:
tx,tv= g.next()
assert numpy.fabs(wendy.momentum(tv,m)-p) < 10.**-10., "Momentum not conserved during simple N-body integration"
cnt+= 1
return None
|
|
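Aside (not part of the record above): the tests all follow one idiom -- record the initial invariant, step the integrator, and assert a tight drift bound each step. The same idiom on a toy 1-d harmonic oscillator with a leapfrog step (this toy is not part of wendy):

import numpy

def energy(x, v):
    return 0.5 * v ** 2 + 0.5 * x ** 2   # unit mass, unit spring

x, v, dt = 1.0, 0.0, 0.001
E0 = energy(x, v)
for _ in range(1000):
    v += 0.5 * dt * (-x)   # half kick
    x += dt * v            # drift
    v += 0.5 * dt * (-x)   # half kick
    assert numpy.fabs(energy(x, v) - E0) < 1e-6, "energy drifted"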
529d72ff62f3d4b8ab18a26beadd20322a118a28
|
client/scripts/osutil.py
|
client/scripts/osutil.py
|
import sys
class OSUtil():
def __init__(self):
pass
def platform(self):
platform = sys.platform # Map from python platform name to ue4 platform name
names = {
'cygwin': 'Win', # could be win32 also
'win32': 'Win',
'win64': 'Win',
'linux2': 'Linux',
'darwin': 'Mac',
}
return names[platform]
|
import sys, platform
class OSUtil():
def __init__(self):
pass
def platform(self):
win = 'Win'
mac = 'Mac'
linux = 'Linux'
if platform.release().endswith('Microsoft'):
# This is a hacky way to check whether I am running Ubuntu on Windows
return win
# Map from python platform name to ue4 platform name
names = {
'cygwin': win, # could be win32 also
'win32': win,
'win64': win,
'linux2': linux,
'darwin': mac,
}
return names[sys.platform]
|
Fix the platform check for windows.
|
Fix the platform check for windows.
|
Python
|
mit
|
qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv
|
import sys
class OSUtil():
def __init__(self):
pass
def platform(self):
platform = sys.platform # Map from python platform name to ue4 platform name
names = {
'cygwin': 'Win', # could be win32 also
'win32': 'Win',
'win64': 'Win',
'linux2': 'Linux',
'darwin': 'Mac',
}
return names[platform]
Fix the platform check for windows.
|
import sys, platform
class OSUtil():
def __init__(self):
pass
def platform(self):
win = 'Win'
mac = 'Mac'
linux = 'Linux'
if platform.release().endswith('Microsoft'):
# This is a hacky way to check whether I am running Ubuntu on Windows
return win
# Map from python platform name to ue4 platform name
names = {
'cygwin': win, # could be win32 also
'win32': win,
'win64': win,
'linux2': linux,
'darwin': mac,
}
return names[sys.platform]
|
<commit_before>import sys
class OSUtil():
def __init__(self):
pass
def platform(self):
platform = sys.platform # Map from python platform name to ue4 platform name
names = {
'cygwin': 'Win', # could be win32 also
'win32': 'Win',
'win64': 'Win',
'linux2': 'Linux',
'darwin': 'Mac',
}
return names[platform]
<commit_msg>Fix the platform check for windows.<commit_after>
|
import sys, platform
class OSUtil():
def __init__(self):
pass
def platform(self):
win = 'Win'
mac = 'Mac'
linux = 'Linux'
if platform.release().endswith('Microsoft'):
# This is a hacky way to check whether I am running Ubuntu on Windows
return win
# Map from python platform name to ue4 platform name
names = {
'cygwin': win, # could be win32 also
'win32': win,
'win64': win,
'linux2': linux,
'darwin': mac,
}
return names[sys.platform]
|
import sys
class OSUtil():
def __init__(self):
pass
def platform(self):
platform = sys.platform # Map from python platform name to ue4 platform name
names = {
'cygwin': 'Win', # could be win32 also
'win32': 'Win',
'win64': 'Win',
'linux2': 'Linux',
'darwin': 'Mac',
}
return names[platform]
Fix the platform check for windows.import sys, platform
class OSUtil():
def __init__(self):
pass
def platform(self):
win = 'Win'
mac = 'Mac'
linux = 'Linux'
if platform.release().endswith('Microsoft'):
# This is a hacky way to check whether I am running Ubuntu on Windows
return win
# Map from python platform name to ue4 platform name
names = {
'cygwin': win, # could be win32 also
'win32': win,
'win64': win,
'linux2': linux,
'darwin': mac,
}
return names[sys.platform]
|
<commit_before>import sys
class OSUtil():
def __init__(self):
pass
def platform(self):
platform = sys.platform # Map from python platform name to ue4 platform name
names = {
'cygwin': 'Win', # could be win32 also
'win32': 'Win',
'win64': 'Win',
'linux2': 'Linux',
'darwin': 'Mac',
}
return names[platform]
<commit_msg>Fix the platform check for windows.<commit_after>import sys, platform
class OSUtil():
def __init__(self):
pass
def platform(self):
win = 'Win'
mac = 'Mac'
linux = 'Linux'
if platform.release().endswith('Microsoft'):
# This is a hacky way to check whether I am running Ubuntu on Windows
return win
# Map from python platform name to ue4 platform name
names = {
'cygwin': win, # could be win32 also
'win32': win,
'win64': win,
'linux2': linux,
'darwin': mac,
}
return names[sys.platform]
|
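Aside (not part of the record above): a sketch extending the record's mapping to Python 3, where sys.platform reports 'linux' rather than 'linux2' -- an assumption beyond what this commit targets. The WSL kernel check is kept as-is ('win64' is dropped here, as it is not a value sys.platform actually reports):

import sys
import platform

def ue4_platform():
    # Ubuntu-on-Windows reports a Linux platform but a Microsoft kernel.
    if platform.release().endswith('Microsoft'):
        return 'Win'
    if sys.platform.startswith('linux'):   # covers 'linux' and 'linux2'
        return 'Linux'
    return {'cygwin': 'Win', 'win32': 'Win', 'darwin': 'Mac'}[sys.platform]

print(ue4_platform())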
37848cc0a6cffe9c12dda905715fd1e347603560
|
routing_table.py
|
routing_table.py
|
import pandas
class DynamicTable(object):
"""
Dynamically sized table.
"""
def __init__(self, *args, **kwargs):
self._data = pandas.DataFrame(*args, **kwargs)
def __getitem__(self, key):
"""
Retrieve a value from the table.
Note
----
The first index in the key specifies the column.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
return self._data.__getitem__(col).__getitem__(row)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
if row not in self.data.index:
new_row = pandas.DataFrame(index=[row],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if col not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[col])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def table(self):
"""
Return a view of the current table.
"""
return self._data
def __repr__(self):
return self._data.__repr__()
class RoutingTable(DynamicTable):
"""
Routing table.
"""
def __init__(self):
DynamicTable.__init__(self)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
# Since the routing table must describe routes between all
# recognized entities, adding a hitherto unrecognized row or
# column identifier must cause that identifier to be added to
# both the list of rows and columns:
Nc = len(self._data.columns)
Nr = len(self._data.index)
for k in (col, row):
if k not in self._data.index:
new_row = pandas.DataFrame(index=[k],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if k not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[k])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def ids(self):
"""
Identifiers of rows and columns in the routing table.
"""
return self._data.index.tolist()
|
Add routing table class based on pandas.DataFrame.
|
Add routing table class based on pandas.DataFrame.
|
Python
|
bsd-3-clause
|
cerrno/neurokernel
|
Add routing table class based on pandas.DataFrame.
|
import pandas
class DynamicTable(object):
"""
Dynamically sized table.
"""
def __init__(self, *args, **kwargs):
self._data = pandas.DataFrame(*args, **kwargs)
def __getitem__(self, key):
"""
Retrieve a value from the table.
Note
----
The first index in the key specifies the column.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
return self._data.__getitem__(col).__getitem__(row)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
if row not in self.data.index:
new_row = pandas.DataFrame(index=[row],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if col not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[col])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def table(self):
"""
Return a view of the current table.
"""
return self._data
def __repr__(self):
return self._data.__repr__()
class RoutingTable(DynamicTable):
"""
Routing table.
"""
def __init__(self):
DynamicTable.__init__(self)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
# Since the routing table must describe routes between all
# recognized entities, adding a hitherto unrecognized row or
# column identifier must cause that identifier to be added to
# both the list of rows and columns:
Nc = len(self._data.columns)
Nr = len(self._data.index)
for k in (col, row):
if k not in self._data.index:
new_row = pandas.DataFrame(index=[k],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if k not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[k])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def ids(self):
"""
Identifiers of rows and columns in the routing table.
"""
return self._data.index.tolist()
|
<commit_before><commit_msg>Add routing table class based on pandas.DataFrame.<commit_after>
|
import pandas
class DynamicTable(object):
"""
Dynamically sized table.
"""
def __init__(self, *args, **kwargs):
self._data = pandas.DataFrame(*args, **kwargs)
def __getitem__(self, key):
"""
Retrieve a value from the table.
Note
----
The first index in the key specifies the column.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
return self._data.__getitem__(col).__getitem__(row)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
if row not in self.data.index:
new_row = pandas.DataFrame(index=[row],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if col not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[col])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def table(self):
"""
Return a view of the current table.
"""
return self._data
def __repr__(self):
return self._data.__repr__()
class RoutingTable(DynamicTable):
"""
Routing table.
"""
def __init__(self):
DynamicTable.__init__(self)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
# Since the routing table must describe routes between all
# recognized entities, adding a hitherto unrecognized row or
# column identifier must cause that identifier to be added to
# both the list of rows and columns:
Nc = len(self._data.columns)
Nr = len(self._data.index)
for k in (col, row):
if k not in self._data.index:
new_row = pandas.DataFrame(index=[k],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if k not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[k])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def ids(self):
"""
Identifiers of rows and columns in the routing table.
"""
return self._data.index.tolist()
|
Add routing table class based on pandas.DataFrame.import pandas
class DynamicTable(object):
"""
Dynamically sized table.
"""
def __init__(self, *args, **kwargs):
self._data = pandas.DataFrame(*args, **kwargs)
def __getitem__(self, key):
"""
Retrieve a value from the table.
Note
----
The first index in the key specifies the column.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
return self._data.__getitem__(col).__getitem__(row)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
if row not in self.data.index:
new_row = pandas.DataFrame(index=[row],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if col not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[col])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def table(self):
"""
Return a view of the current table.
"""
return self._data
def __repr__(self):
return self._data.__repr__()
class RoutingTable(DynamicTable):
"""
Routing table.
"""
def __init__(self):
DynamicTable.__init__(self)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
# Since the routing table must describe routes between all
# recognized entities, adding a hitherto unrecognized row or
# column identifier must cause that identifier to be added to
# both the list of rows and columns:
Nc = len(self._data.columns)
Nr = len(self._data.index)
for k in (col, row):
if k not in self._data.index:
new_row = pandas.DataFrame(index=[k],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if k not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[k])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def ids(self):
"""
Identifiers of rows and columns in the routing table.
"""
return self._data.index.tolist()
|
<commit_before><commit_msg>Add routing table class based on pandas.DataFrame.<commit_after>import pandas
class DynamicTable(object):
"""
Dynamically sized table.
"""
def __init__(self, *args, **kwargs):
self._data = pandas.DataFrame(*args, **kwargs)
def __getitem__(self, key):
"""
Retrieve a value from the table.
Note
----
The first index in the key specifies the column.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
return self._data.__getitem__(col).__getitem__(row)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
if row not in self.data.index:
new_row = pandas.DataFrame(index=[row],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if col not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[col])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def table(self):
"""
Return a view of the current table.
"""
return self._data
def __repr__(self):
return self._data.__repr__()
class RoutingTable(DynamicTable):
"""
Routing table.
"""
def __init__(self):
DynamicTable.__init__(self)
def __setitem__(self, key, value):
"""
Set the specified entry in the table.
Notes
-----
The first index in the key specifies the column.
If the specified row or column identifiers do not exist, the
table is expanded to include rows or columns with those
identifiers.
"""
if len(key) != 2:
raise KeyError('invalid key')
col, row = key
# Since the routing table must describe routes between all
# recognized entities, adding a hitherto unrecognized row or
# column identifier must cause that identifier to be added to
# both the list of rows and columns:
Nc = len(self._data.columns)
Nr = len(self._data.index)
for k in (col, row):
if k not in self._data.index:
new_row = pandas.DataFrame(index=[k],
columns=self._data.columns)
self._data = pandas.concat([self._data, new_row])
if k not in self._data.columns:
new_col = pandas.DataFrame(index=self._data.index,
columns=[k])
self._data = pandas.concat([self._data, new_col], axis=1)
self._data[col][row] = value
@property
def ids(self):
"""
Identifiers of rows and columns in the routing table.
"""
return self._data.index.tolist()
|
|
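Aside (not part of the record above): RoutingTable.__setitem__ grows both axes on write so every known identifier appears as both a row and a column. The same grow-on-write behaviour shown directly with pandas.concat on toy identifiers, using .loc for the final assignment rather than the chained indexing the record uses:

import pandas

df = pandas.DataFrame()
for ident in ('a', 'b'):
    # Each new identifier becomes both a row and a column.
    if ident not in df.index:
        df = pandas.concat([df, pandas.DataFrame(index=[ident], columns=df.columns)])
    if ident not in df.columns:
        df = pandas.concat([df, pandas.DataFrame(index=df.index, columns=[ident])],
                           axis=1)
df.loc['b', 'a'] = 1   # route from column 'a' to row 'b'
print(df)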
4df85e49d48a76246d97383ac6a5d63e1ce2be60
|
tests/basics/builtin_hash.py
|
tests/basics/builtin_hash.py
|
# test builtin hash function
class A:
def __hash__(self):
return 123
def __repr__(self):
return "a instance"
print(hash(A()))
print({A():1})
|
Add test for hash of user defined class.
|
tests: Add test for hash of user defined class.
|
Python
|
mit
|
Peetz0r/micropython-esp32,henriknelson/micropython,drrk/micropython,dxxb/micropython,kerneltask/micropython,SHA2017-badge/micropython-esp32,turbinenreiter/micropython,martinribelotta/micropython,tobbad/micropython,misterdanb/micropython,Peetz0r/micropython-esp32,ganshun666/micropython,galenhz/micropython,mgyenik/micropython,adafruit/circuitpython,infinnovation/micropython,jimkmc/micropython,pfalcon/micropython,AriZuu/micropython,deshipu/micropython,suda/micropython,ChuckM/micropython,hosaka/micropython,cnoviello/micropython,tdautc19841202/micropython,neilh10/micropython,ceramos/micropython,jmarcelino/pycom-micropython,neilh10/micropython,aethaniel/micropython,henriknelson/micropython,oopy/micropython,omtinez/micropython,jmarcelino/pycom-micropython,chrisdearman/micropython,pfalcon/micropython,adamkh/micropython,stonegithubs/micropython,MrSurly/micropython,mgyenik/micropython,hiway/micropython,trezor/micropython,adafruit/circuitpython,kerneltask/micropython,galenhz/micropython,danicampora/micropython,tdautc19841202/micropython,paul-xxx/micropython,hiway/micropython,vriera/micropython,orionrobots/micropython,ryannathans/micropython,emfcamp/micropython,hiway/micropython,ryannathans/micropython,torwag/micropython,omtinez/micropython,stonegithubs/micropython,firstval/micropython,misterdanb/micropython,danicampora/micropython,HenrikSolver/micropython,dinau/micropython,kerneltask/micropython,supergis/micropython,matthewelse/micropython,paul-xxx/micropython,HenrikSolver/micropython,swegener/micropython,adamkh/micropython,dmazzella/micropython,noahchense/micropython,oopy/micropython,xhat/micropython,redbear/micropython,jimkmc/micropython,tralamazza/micropython,emfcamp/micropython,adafruit/micropython,cnoviello/micropython,blazewicz/micropython,mpalomer/micropython,slzatz/micropython,orionrobots/micropython,alex-march/micropython,slzatz/micropython,pramasoul/micropython,lowRISC/micropython,drrk/micropython,jlillest/micropython,rubencabrera/micropython,jlillest/micropython,blazewicz/micropython,pramasoul/micropython,toolmacher/micropython,pozetroninc/micropython,vitiral/micropython,mianos/micropython,turbinenreiter/micropython,jlillest/micropython,adafruit/circuitpython,pozetroninc/micropython,suda/micropython,aethaniel/micropython,bvernoux/micropython,adafruit/circuitpython,deshipu/micropython,warner83/micropython,ChuckM/micropython,trezor/micropython,mpalomer/micropython,misterdanb/micropython,feilongfl/micropython,redbear/micropython,infinnovation/micropython,mgyenik/micropython,ruffy91/micropython,firstval/micropython,chrisdearman/micropython,deshipu/micropython,pramasoul/micropython,bvernoux/micropython,Vogtinator/micropython,swegener/micropython,skybird6672/micropython,martinribelotta/micropython,hosaka/micropython,EcmaXp/micropython,AriZuu/micropython,bvernoux/micropython,jlillest/micropython,MrSurly/micropython,noahwilliamsson/micropython,noahchense/micropython,cwyark/micropython,henriknelson/micropython,micropython/micropython-esp32,PappaPeppar/micropython,supergis/micropython,ganshun666/micropython,blmorris/micropython,SungEun-Steve-Kim/test-mp,alex-robbins/micropython,SHA2017-badge/micropython-esp32,neilh10/micropython,omtinez/micropython,skybird6672/micropython,praemdonck/micropython,ahotam/micropython,mhoffma/micropython,xuxiaoxin/micropython,mpalomer/micropython,warner83/micropython,KISSMonX/micropython,EcmaXp/micropython,supergis/micropython,jmarcelino/pycom-micropython,dhylands/micropython,vitiral/micropython,toolmacher/micropython,rubencabrera/micropython,cnoviello/micropython,orionro
bots/micropython,TDAbboud/micropython,ceramos/micropython,TDAbboud/micropython,aethaniel/micropython,dmazzella/micropython,noahwilliamsson/micropython,danicampora/micropython,feilongfl/micropython,Timmenem/micropython,torwag/micropython,aethaniel/micropython,vriera/micropython,Timmenem/micropython,jlillest/micropython,mgyenik/micropython,tobbad/micropython,drrk/micropython,ruffy91/micropython,blazewicz/micropython,skybird6672/micropython,PappaPeppar/micropython,dinau/micropython,praemdonck/micropython,MrSurly/micropython-esp32,mpalomer/micropython,turbinenreiter/micropython,cwyark/micropython,mhoffma/micropython,tdautc19841202/micropython,lbattraw/micropython,chrisdearman/micropython,xuxiaoxin/micropython,micropython/micropython-esp32,mhoffma/micropython,adafruit/micropython,tdautc19841202/micropython,xyb/micropython,tuc-osg/micropython,infinnovation/micropython,TDAbboud/micropython,firstval/micropython,cloudformdesign/micropython,PappaPeppar/micropython,kostyll/micropython,supergis/micropython,dmazzella/micropython,hiway/micropython,ahotam/micropython,pfalcon/micropython,matthewelse/micropython,orionrobots/micropython,ganshun666/micropython,cwyark/micropython,ryannathans/micropython,kostyll/micropython,blazewicz/micropython,vitiral/micropython,slzatz/micropython,chrisdearman/micropython,selste/micropython,dmazzella/micropython,suda/micropython,SungEun-Steve-Kim/test-mp,MrSurly/micropython,KISSMonX/micropython,SungEun-Steve-Kim/test-mp,lbattraw/micropython,ericsnowcurrently/micropython,hosaka/micropython,dxxb/micropython,pramasoul/micropython,mianos/micropython,dxxb/micropython,selste/micropython,warner83/micropython,torwag/micropython,heisewangluo/micropython,xhat/micropython,heisewangluo/micropython,dinau/micropython,cnoviello/micropython,ruffy91/micropython,ernesto-g/micropython,praemdonck/micropython,dhylands/micropython,KISSMonX/micropython,lowRISC/micropython,KISSMonX/micropython,adamkh/micropython,trezor/micropython,feilongfl/micropython,micropython/micropython-esp32,toolmacher/micropython,ernesto-g/micropython,ernesto-g/micropython,ericsnowcurrently/micropython,utopiaprince/micropython,kerneltask/micropython,tuc-osg/micropython,suda/micropython,puuu/micropython,mhoffma/micropython,warner83/micropython,rubencabrera/micropython,utopiaprince/micropython,mhoffma/micropython,swegener/micropython,SHA2017-badge/micropython-esp32,xhat/micropython,dhylands/micropython,praemdonck/micropython,emfcamp/micropython,pfalcon/micropython,bvernoux/micropython,PappaPeppar/micropython,cloudformdesign/micropython,firstval/micropython,cloudformdesign/micropython,ceramos/micropython,alex-march/micropython,dhylands/micropython,ceramos/micropython,Timmenem/micropython,paul-xxx/micropython,hiway/micropython,slzatz/micropython,xyb/micropython,kostyll/micropython,lbattraw/micropython,galenhz/micropython,blazewicz/micropython,puuu/micropython,Vogtinator/micropython,utopiaprince/micropython,HenrikSolver/micropython,ahotam/micropython,henriknelson/micropython,kostyll/micropython,MrSurly/micropython,Vogtinator/micropython,alex-robbins/micropython,vitiral/micropython,blmorris/micropython,redbear/micropython,vriera/micropython,xuxiaoxin/micropython,henriknelson/micropython,adafruit/circuitpython,toolmacher/micropython,hosaka/micropython,oopy/micropython,micropython/micropython-esp32,cwyark/micropython,swegener/micropython,tralamazza/micropython,ryannathans/micropython,dhylands/micropython,warner83/micropython,TDAbboud/micropython,mianos/micropython,bvernoux/micropython,ernesto-g/micropython,KISSMonX/micropython,slza
tz/micropython,AriZuu/micropython,aethaniel/micropython,xhat/micropython,pozetroninc/micropython,HenrikSolver/micropython,emfcamp/micropython,martinribelotta/micropython,firstval/micropython,micropython/micropython-esp32,adafruit/micropython,MrSurly/micropython,feilongfl/micropython,oopy/micropython,deshipu/micropython,martinribelotta/micropython,jimkmc/micropython,Timmenem/micropython,galenhz/micropython,xhat/micropython,puuu/micropython,toolmacher/micropython,misterdanb/micropython,noahwilliamsson/micropython,pramasoul/micropython,xuxiaoxin/micropython,xyb/micropython,AriZuu/micropython,Peetz0r/micropython-esp32,SungEun-Steve-Kim/test-mp,pozetroninc/micropython,MrSurly/micropython-esp32,lowRISC/micropython,mianos/micropython,cnoviello/micropython,redbear/micropython,paul-xxx/micropython,ganshun666/micropython,selste/micropython,TDAbboud/micropython,noahwilliamsson/micropython,noahwilliamsson/micropython,AriZuu/micropython,rubencabrera/micropython,noahchense/micropython,adafruit/micropython,tralamazza/micropython,rubencabrera/micropython,SHA2017-badge/micropython-esp32,matthewelse/micropython,ChuckM/micropython,pfalcon/micropython,cloudformdesign/micropython,vitiral/micropython,drrk/micropython,lbattraw/micropython,praemdonck/micropython,tralamazza/micropython,omtinez/micropython,ChuckM/micropython,infinnovation/micropython,misterdanb/micropython,ericsnowcurrently/micropython,EcmaXp/micropython,mpalomer/micropython,lowRISC/micropython,alex-march/micropython,neilh10/micropython,HenrikSolver/micropython,utopiaprince/micropython,galenhz/micropython,oopy/micropython,SHA2017-badge/micropython-esp32,danicampora/micropython,heisewangluo/micropython,noahchense/micropython,ruffy91/micropython,alex-robbins/micropython,Vogtinator/micropython,mgyenik/micropython,Peetz0r/micropython-esp32,selste/micropython,alex-march/micropython,puuu/micropython,pozetroninc/micropython,blmorris/micropython,ahotam/micropython,ahotam/micropython,paul-xxx/micropython,vriera/micropython,martinribelotta/micropython,deshipu/micropython,feilongfl/micropython,skybird6672/micropython,matthewelse/micropython,skybird6672/micropython,alex-robbins/micropython,stonegithubs/micropython,emfcamp/micropython,heisewangluo/micropython,dinau/micropython,swegener/micropython,torwag/micropython,matthewelse/micropython,drrk/micropython,EcmaXp/micropython,PappaPeppar/micropython,redbear/micropython,matthewelse/micropython,orionrobots/micropython,kerneltask/micropython,xuxiaoxin/micropython,adamkh/micropython,lbattraw/micropython,MrSurly/micropython-esp32,adamkh/micropython,tuc-osg/micropython,selste/micropython,ceramos/micropython,dinau/micropython,chrisdearman/micropython,jimkmc/micropython,alex-march/micropython,utopiaprince/micropython,turbinenreiter/micropython,MrSurly/micropython-esp32,adafruit/circuitpython,ericsnowcurrently/micropython,neilh10/micropython,EcmaXp/micropython,kostyll/micropython,jmarcelino/pycom-micropython,Timmenem/micropython,infinnovation/micropython,tuc-osg/micropython,noahchense/micropython,dxxb/micropython,adafruit/micropython,tdautc19841202/micropython,tuc-osg/micropython,turbinenreiter/micropython,alex-robbins/micropython,tobbad/micropython,ganshun666/micropython,supergis/micropython,heisewangluo/micropython,jmarcelino/pycom-micropython,Vogtinator/micropython,xyb/micropython,torwag/micropython,MrSurly/micropython-esp32,blmorris/micropython,lowRISC/micropython,omtinez/micropython,stonegithubs/micropython,ernesto-g/micropython,SungEun-Steve-Kim/test-mp,cwyark/micropython,ericsnowcurrently/micropython,hosaka/micropy
thon,dxxb/micropython,suda/micropython,Peetz0r/micropython-esp32,vriera/micropython,tobbad/micropython,xyb/micropython,mianos/micropython,ruffy91/micropython,tobbad/micropython,cloudformdesign/micropython,puuu/micropython,blmorris/micropython,ryannathans/micropython,trezor/micropython,jimkmc/micropython,stonegithubs/micropython,danicampora/micropython,trezor/micropython,ChuckM/micropython
|
tests: Add test for hash of user defined class.
|
# test builtin hash function
class A:
def __hash__(self):
return 123
def __repr__(self):
return "a instance"
print(hash(A()))
print({A():1})
|
<commit_before><commit_msg>tests: Add test for hash of user defined class.<commit_after>
|
# test builtin hash function
class A:
def __hash__(self):
return 123
def __repr__(self):
return "a instance"
print(hash(A()))
print({A():1})
|
tests: Add test for hash of user defined class.# test builtin hash function
class A:
def __hash__(self):
return 123
def __repr__(self):
return "a instance"
print(hash(A()))
print({A():1})
|
<commit_before><commit_msg>tests: Add test for hash of user defined class.<commit_after># test builtin hash function
class A:
def __hash__(self):
return 123
def __repr__(self):
return "a instance"
print(hash(A()))
print({A():1})
|
|
e149bddfe280e37adf2b67500c0696a86c9341dc
|
tests/test_in_serializers.py
|
tests/test_in_serializers.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
# Django settings:
import os
os.environ['DJANGO_SETTINGS_MODULE'] = __name__
from django.conf.global_settings import CACHES
SECRET_KEY = 's3cr3t'
import unittest
from rest_framework import serializers
from rest_framework.compat import six
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
class DemoSerializer(serializers.Serializer):
name = ListField(serializers.CharField(), required=False)
emails = ListField(serializers.EmailField(), required=False)
class TestSerializerListField(unittest.TestCase):
"""
Tests for the ListField behavior
"""
def test_non_list_not_valid(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertFalse(serializer.is_valid())
def test_non_list_errors(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertIn('name', serializer.errors)
        # assertTrue's second argument is only a failure message, so the
        # original type check was a no-op; just assert an error was recorded
        self.assertTrue(serializer.errors['name'])
|
Add tests of fields in serializers
|
Add tests of fields in serializers
|
Python
|
bsd-3-clause
|
estebistec/drf-compound-fields,pombredanne/drf-compound-fields
|
Add tests of fields in serializers
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
# Django settings:
import os
os.environ['DJANGO_SETTINGS_MODULE'] = __name__
from django.conf.global_settings import CACHES
SECRET_KEY = 's3cr3t'
import unittest
from rest_framework import serializers
from rest_framework.compat import six
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
class DemoSerializer(serializers.Serializer):
name = ListField(serializers.CharField(), required=False)
emails = ListField(serializers.EmailField(), required=False)
class TestSerializerListField(unittest.TestCase):
"""
Tests for the ListField behavior
"""
def test_non_list_not_valid(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertFalse(serializer.is_valid())
def test_non_list_errors(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertIn('name', serializer.errors)
        # assertTrue's second argument is only a failure message, so the
        # original type check was a no-op; just assert an error was recorded
        self.assertTrue(serializer.errors['name'])
|
<commit_before><commit_msg>Add tests of fields in serializers<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
# Django settings:
import os
os.environ['DJANGO_SETTINGS_MODULE'] = __name__
from django.conf.global_settings import CACHES
SECRET_KEY = 's3cr3t'
import unittest
from rest_framework import serializers
from rest_framework.compat import six
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
class DemoSerializer(serializers.Serializer):
name = ListField(serializers.CharField(), required=False)
emails = ListField(serializers.EmailField(), required=False)
class TestSerializerListField(unittest.TestCase):
"""
Tests for the ListField behavior
"""
def test_non_list_not_valid(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertFalse(serializer.is_valid())
def test_non_list_errors(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertIn('name', serializer.errors)
        # assertTrue's second argument is only a failure message, so the
        # original type check was a no-op; just assert an error was recorded
        self.assertTrue(serializer.errors['name'])
|
Add tests of fields in serializers#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
# Django settings:
import os
os.environ['DJANGO_SETTINGS_MODULE'] = __name__
from django.conf.global_settings import CACHES
SECRET_KEY = 's3cr3t'
import unittest
from rest_framework import serializers
from rest_framework.compat import six
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
class DemoSerializer(serializers.Serializer):
name = ListField(serializers.CharField(), required=False)
emails = ListField(serializers.EmailField(), required=False)
class TestSerializerListField(unittest.TestCase):
"""
Tests for the ListField behavior
"""
def test_non_list_not_valid(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertFalse(serializer.is_valid())
def test_non_list_errors(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertIn('name', serializer.errors)
        # assertTrue's second argument is only a failure message, so the
        # original type check was a no-op; just assert an error was recorded
        self.assertTrue(serializer.errors['name'])
|
<commit_before><commit_msg>Add tests of fields in serializers<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_in_serializers
-------------------
Tests of the fields cooperation in the serializer interfaces for serialization, de-serialization,
and validation.
"""
# Django settings:
import os
os.environ['DJANGO_SETTINGS_MODULE'] = __name__
from django.conf.global_settings import CACHES
SECRET_KEY = 's3cr3t'
import unittest
from rest_framework import serializers
from rest_framework.compat import six
from drf_compound_fields.fields import DictField
from drf_compound_fields.fields import ListField
class DemoSerializer(serializers.Serializer):
name = ListField(serializers.CharField(), required=False)
emails = ListField(serializers.EmailField(), required=False)
class TestSerializerListField(unittest.TestCase):
"""
Tests for the ListField behavior
"""
def test_non_list_not_valid(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertFalse(serializer.is_valid())
def test_non_list_errors(self):
serializer = DemoSerializer(data={'name': 'notAList'})
self.assertIn('name', serializer.errors)
        # assertTrue's second argument is only a failure message, so the
        # original type check was a no-op; just assert an error was recorded
        self.assertTrue(serializer.errors['name'])
|
|
51b8e860e7edf77c7333c19d7de7654f22cd1b09
|
load_manifest.py
|
load_manifest.py
|
import argparse
import json
import os
import mcbench.client
def parse_args():
parser = argparse.ArgumentParser(
description='load benchmarks into McBench redis instance')
parser.add_argument(
'--redis_url',
default='redis://localhost:6379',
help='URL of redis instance.'
)
parser.add_argument(
'--manifest', required=True,
help='Path to manifest.json.')
return parser.parse_args()
def main():
args = parse_args()
mcbench_client = mcbench.client.from_redis_url(args.redis_url)
with open(os.path.expanduser(args.manifest)) as f:
manifest = json.load(f)
for project in manifest['projects']:
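        # flatten the tag list into a single comma-separated string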
project['tags'] = ','.join(project['tags'])
benchmark = mcbench.client.Benchmark(**project)
mcbench_client.insert_benchmark(benchmark)
if __name__ == '__main__':
main()
|
Add script to load data from scraper manifest.
|
Add script to load data from scraper manifest.
|
Python
|
mit
|
isbadawi/mcbench,isbadawi/mcbench
|
Add script to load data from scraper manifest.
|
import argparse
import json
import os
import mcbench.client
def parse_args():
parser = argparse.ArgumentParser(
description='load benchmarks into McBench redis instance')
parser.add_argument(
'--redis_url',
default='redis://localhost:6379',
help='URL of redis instance.'
)
parser.add_argument(
'--manifest', required=True,
help='Path to manifest.json.')
return parser.parse_args()
def main():
args = parse_args()
mcbench_client = mcbench.client.from_redis_url(args.redis_url)
with open(os.path.expanduser(args.manifest)) as f:
manifest = json.load(f)
for project in manifest['projects']:
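        # flatten the tag list into a single comma-separated string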
project['tags'] = ','.join(project['tags'])
benchmark = mcbench.client.Benchmark(**project)
mcbench_client.insert_benchmark(benchmark)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to load data from scraper manifest.<commit_after>
|
import argparse
import json
import os
import mcbench.client
def parse_args():
parser = argparse.ArgumentParser(
description='load benchmarks into McBench redis instance')
parser.add_argument(
'--redis_url',
default='redis://localhost:6379',
help='URL of redis instance.'
)
parser.add_argument(
'--manifest', required=True,
help='Path to manifest.json.')
return parser.parse_args()
def main():
args = parse_args()
mcbench_client = mcbench.client.from_redis_url(args.redis_url)
with open(os.path.expanduser(args.manifest)) as f:
manifest = json.load(f)
for project in manifest['projects']:
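        # flatten the tag list into a single comma-separated string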
project['tags'] = ','.join(project['tags'])
benchmark = mcbench.client.Benchmark(**project)
mcbench_client.insert_benchmark(benchmark)
if __name__ == '__main__':
main()
|
Add script to load data from scraper manifest.import argparse
import json
import os
import mcbench.client
def parse_args():
parser = argparse.ArgumentParser(
description='load benchmarks into McBench redis instance')
parser.add_argument(
'--redis_url',
default='redis://localhost:6379',
help='URL of redis instance.'
)
parser.add_argument(
'--manifest', required=True,
help='Path to manifest.json.')
return parser.parse_args()
def main():
args = parse_args()
mcbench_client = mcbench.client.from_redis_url(args.redis_url)
with open(os.path.expanduser(args.manifest)) as f:
manifest = json.load(f)
for project in manifest['projects']:
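        # flatten the tag list into a single comma-separated string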
project['tags'] = ','.join(project['tags'])
benchmark = mcbench.client.Benchmark(**project)
mcbench_client.insert_benchmark(benchmark)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to load data from scraper manifest.<commit_after>import argparse
import json
import os
import mcbench.client
def parse_args():
parser = argparse.ArgumentParser(
description='load benchmarks into McBench redis instance')
parser.add_argument(
'--redis_url',
default='redis://localhost:6379',
help='URL of redis instance.'
)
parser.add_argument(
'--manifest', required=True,
help='Path to manifest.json.')
return parser.parse_args()
def main():
args = parse_args()
mcbench_client = mcbench.client.from_redis_url(args.redis_url)
with open(os.path.expanduser(args.manifest)) as f:
manifest = json.load(f)
for project in manifest['projects']:
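        # flatten the tag list into a single comma-separated string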
project['tags'] = ','.join(project['tags'])
benchmark = mcbench.client.Benchmark(**project)
mcbench_client.insert_benchmark(benchmark)
if __name__ == '__main__':
main()
|
|
40da5ab867f49f5db64d0c58e19723149d2f8243
|
cms/tests/plugins_mti.py
|
cms/tests/plugins_mti.py
|
# -*- coding: utf-8 -*-
from django.conf import settings
from cms.models import Page
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.testcases import (
URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD,
URL_CMS_PLUGIN_EDIT,
)
from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
from .plugins import PluginsTestBaseCase
# class CustomPluginsTestCase(PluginsTestBaseCase):
# def test_add_edit_plugin(self):
# """
# Test that we can instantiate and use a MTI plugin
# """
# INSTALLED_APPS = settings.INSTALLED_APPS
# INSTALLED_APPS = INSTALLED_APPS + ['cms.test_utils.project.mti_pluginapp']
# with SettingsOverride(INSTALLED_APPS):
# # Create a page
# page_data = self.get_new_page_data()
# self.client.post(URL_CMS_PAGE_ADD, page_data)
# page = Page.objects.all()[0]
# # Add the MTI plugin
# plugin_data = {
# 'plugin_type': "TestPluginBeta",
# 'plugin_language': settings.LANGUAGES[0][0],
# 'placeholder_id': page.placeholders.get(slot="body").pk,
# 'plugin_parent': '',
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# plugin_id = self.get_response_pk(response)
# self.assertEqual(plugin_id, CMSPlugin.objects.all()[0].pk)
# # Test we can open the change form for the MTI plugin
# edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
# response = self.client.get(edit_url)
# self.assertEqual(response.status_code, 200)
# # Edit the MTI plugin
# data = {
# "alpha": "ALPHA",
# "beta": "BETA"
# }
# response = self.client.post(edit_url, data)
# self.assertEqual(response.status_code, 200)
# # Test that the change was properly stored in the DB
# plugin_model = TestPluginBetaModel.objects.all()[0]
# self.assertEqual("BETA", plugin_model.body)
|
Create test case for MTI Plugins
|
Create test case for MTI Plugins
|
Python
|
bsd-3-clause
|
rryan/django-cms,datakortet/django-cms,owers19856/django-cms,qnub/django-cms,mkoistinen/django-cms,rryan/django-cms,stefanfoulis/django-cms,petecummings/django-cms,wyg3958/django-cms,jeffreylu9/django-cms,nimbis/django-cms,netzkolchose/django-cms,jeffreylu9/django-cms,petecummings/django-cms,vxsx/django-cms,jsma/django-cms,yakky/django-cms,AlexProfi/django-cms,divio/django-cms,kk9599/django-cms,intip/django-cms,netzkolchose/django-cms,bittner/django-cms,SmithsonianEnterprises/django-cms,farhaadila/django-cms,czpython/django-cms,isotoma/django-cms,netzkolchose/django-cms,datakortet/django-cms,SachaMPS/django-cms,stefanw/django-cms,Livefyre/django-cms,AlexProfi/django-cms,Livefyre/django-cms,irudayarajisawa/django-cms,qnub/django-cms,stefanfoulis/django-cms,philippze/django-cms,intip/django-cms,iddqd1/django-cms,vad/django-cms,rsalmaso/django-cms,jproffitt/django-cms,netzkolchose/django-cms,yakky/django-cms,isotoma/django-cms,owers19856/django-cms,Vegasvikk/django-cms,intip/django-cms,datakortet/django-cms,rryan/django-cms,kk9599/django-cms,frnhr/django-cms,SofiaReis/django-cms,DylannCordel/django-cms,AlexProfi/django-cms,saintbird/django-cms,chkir/django-cms,wuzhihui1123/django-cms,chkir/django-cms,jsma/django-cms,Livefyre/django-cms,philippze/django-cms,saintbird/django-cms,liuyisiyisi/django-cms,DylannCordel/django-cms,mkoistinen/django-cms,czpython/django-cms,dhorelik/django-cms,robmagee/django-cms,frnhr/django-cms,DylannCordel/django-cms,keimlink/django-cms,chmberl/django-cms,evildmp/django-cms,divio/django-cms,czpython/django-cms,bittner/django-cms,keimlink/django-cms,stefanfoulis/django-cms,liuyisiyisi/django-cms,wuzhihui1123/django-cms,stefanw/django-cms,SachaMPS/django-cms,vxsx/django-cms,jeffreylu9/django-cms,chmberl/django-cms,sznekol/django-cms,jproffitt/django-cms,FinalAngel/django-cms,rsalmaso/django-cms,wuzhihui1123/django-cms,SofiaReis/django-cms,bittner/django-cms,jproffitt/django-cms,sephii/django-cms,benzkji/django-cms,Vegasvikk/django-cms,wuzhihui1123/django-cms,chkir/django-cms,takeshineshiro/django-cms,irudayarajisawa/django-cms,jeffreylu9/django-cms,evildmp/django-cms,rsalmaso/django-cms,rsalmaso/django-cms,datakortet/django-cms,wyg3958/django-cms,farhaadila/django-cms,andyzsf/django-cms,Vegasvikk/django-cms,youprofit/django-cms,webu/django-cms,vad/django-cms,donce/django-cms,FinalAngel/django-cms,Jaccorot/django-cms,takeshineshiro/django-cms,benzkji/django-cms,irudayarajisawa/django-cms,petecummings/django-cms,rscnt/django-cms,leture/django-cms,philippze/django-cms,timgraham/django-cms,timgraham/django-cms,mkoistinen/django-cms,cyberintruder/django-cms,wyg3958/django-cms,youprofit/django-cms,czpython/django-cms,mkoistinen/django-cms,stefanw/django-cms,isotoma/django-cms,SachaMPS/django-cms,keimlink/django-cms,robmagee/django-cms,sznekol/django-cms,frnhr/django-cms,vad/django-cms,SofiaReis/django-cms,kk9599/django-cms,yakky/django-cms,andyzsf/django-cms,jsma/django-cms,owers19856/django-cms,qnub/django-cms,sephii/django-cms,divio/django-cms,dhorelik/django-cms,Jaccorot/django-cms,benzkji/django-cms,timgraham/django-cms,rscnt/django-cms,robmagee/django-cms,SmithsonianEnterprises/django-cms,webu/django-cms,cyberintruder/django-cms,liuyisiyisi/django-cms,jsma/django-cms,FinalAngel/django-cms,andyzsf/django-cms,nimbis/django-cms,sephii/django-cms,vxsx/django-cms,rscnt/django-cms,Livefyre/django-cms,chmberl/django-cms,leture/django-cms,bittner/django-cms,webu/django-cms,josjevv/django-cms,iddqd1/django-cms,andyzsf/django-cms,frnhr/django-cms,evildmp/django-cms,donce/django-c
ms,memnonila/django-cms,farhaadila/django-cms,Jaccorot/django-cms,iddqd1/django-cms,saintbird/django-cms,takeshineshiro/django-cms,cyberintruder/django-cms,benzkji/django-cms,evildmp/django-cms,intip/django-cms,isotoma/django-cms,nimbis/django-cms,SmithsonianEnterprises/django-cms,stefanw/django-cms,vad/django-cms,jproffitt/django-cms,sznekol/django-cms,rryan/django-cms,sephii/django-cms,memnonila/django-cms,josjevv/django-cms,dhorelik/django-cms,memnonila/django-cms,vxsx/django-cms,donce/django-cms,stefanfoulis/django-cms,yakky/django-cms,FinalAngel/django-cms,youprofit/django-cms,leture/django-cms,josjevv/django-cms,nimbis/django-cms,divio/django-cms
|
Create test case for MTI Plugins
|
# -*- coding: utf-8 -*-
from django.conf import settings
from cms.models import Page
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.testcases import (
URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD,
URL_CMS_PLUGIN_EDIT,
)
from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
from .plugins import PluginsTestBaseCase
# class CustomPluginsTestCase(PluginsTestBaseCase):
# def test_add_edit_plugin(self):
# """
# Test that we can instantiate and use a MTI plugin
# """
# INSTALLED_APPS = settings.INSTALLED_APPS
# INSTALLED_APPS = INSTALLED_APPS + ['cms.test_utils.project.mti_pluginapp']
# with SettingsOverride(INSTALLED_APPS):
# # Create a page
# page_data = self.get_new_page_data()
# self.client.post(URL_CMS_PAGE_ADD, page_data)
# page = Page.objects.all()[0]
# # Add the MTI plugin
# plugin_data = {
# 'plugin_type': "TestPluginBeta",
# 'plugin_language': settings.LANGUAGES[0][0],
# 'placeholder_id': page.placeholders.get(slot="body").pk,
# 'plugin_parent': '',
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# plugin_id = self.get_response_pk(response)
# self.assertEqual(plugin_id, CMSPlugin.objects.all()[0].pk)
# # Test we can open the change form for the MTI plugin
# edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
# response = self.client.get(edit_url)
# self.assertEqual(response.status_code, 200)
# # Edit the MTI plugin
# data = {
# "alpha": "ALPHA",
# "beta": "BETA"
# }
# response = self.client.post(edit_url, data)
# self.assertEqual(response.status_code, 200)
# # Test that the change was properly stored in the DB
# plugin_model = TestPluginBetaModel.objects.all()[0]
# self.assertEqual("BETA", plugin_model.body)
|
<commit_before><commit_msg>Create test case for MTI Plugins<commit_after>
|
# -*- coding: utf-8 -*-
from django.conf import settings
from cms.models import Page
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.testcases import (
URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD,
URL_CMS_PLUGIN_EDIT,
)
from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
from .plugins import PluginsTestBaseCase
# class CustomPluginsTestCase(PluginsTestBaseCase):
# def test_add_edit_plugin(self):
# """
# Test that we can instantiate and use a MTI plugin
# """
# INSTALLED_APPS = settings.INSTALLED_APPS
# INSTALLED_APPS = INSTALLED_APPS + ['cms.test_utils.project.mti_pluginapp']
# with SettingsOverride(INSTALLED_APPS):
# # Create a page
# page_data = self.get_new_page_data()
# self.client.post(URL_CMS_PAGE_ADD, page_data)
# page = Page.objects.all()[0]
# # Add the MTI plugin
# plugin_data = {
# 'plugin_type': "TestPluginBeta",
# 'plugin_language': settings.LANGUAGES[0][0],
# 'placeholder_id': page.placeholders.get(slot="body").pk,
# 'plugin_parent': '',
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# plugin_id = self.get_response_pk(response)
# self.assertEqual(plugin_id, CMSPlugin.objects.all()[0].pk)
# # Test we can open the change form for the MTI plugin
# edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
# response = self.client.get(edit_url)
# self.assertEqual(response.status_code, 200)
# # Edit the MTI plugin
# data = {
# "alpha": "ALPHA",
# "beta": "BETA"
# }
# response = self.client.post(edit_url, data)
# self.assertEqual(response.status_code, 200)
# # Test that the change was properly stored in the DB
# plugin_model = TestPluginBetaModel.objects.all()[0]
# self.assertEqual("BETA", plugin_model.body)
|
Create test case for MTI Plugins# -*- coding: utf-8 -*-
from django.conf import settings
from cms.models import Page
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.testcases import (
URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD,
URL_CMS_PLUGIN_EDIT,
)
from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
from .plugins import PluginsTestBaseCase
# class CustomPluginsTestCase(PluginsTestBaseCase):
# def test_add_edit_plugin(self):
# """
# Test that we can instantiate and use a MTI plugin
# """
# INSTALLED_APPS = settings.INSTALLED_APPS
# INSTALLED_APPS = INSTALLED_APPS + ['cms.test_utils.project.mti_pluginapp']
# with SettingsOverride(INSTALLED_APPS):
# # Create a page
# page_data = self.get_new_page_data()
# self.client.post(URL_CMS_PAGE_ADD, page_data)
# page = Page.objects.all()[0]
# # Add the MTI plugin
# plugin_data = {
# 'plugin_type': "TestPluginBeta",
# 'plugin_language': settings.LANGUAGES[0][0],
# 'placeholder_id': page.placeholders.get(slot="body").pk,
# 'plugin_parent': '',
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# plugin_id = self.get_response_pk(response)
# self.assertEqual(plugin_id, CMSPlugin.objects.all()[0].pk)
# # Test we can open the change form for the MTI plugin
# edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
# response = self.client.get(edit_url)
# self.assertEqual(response.status_code, 200)
# # Edit the MTI plugin
# data = {
# "alpha": "ALPHA",
# "beta": "BETA"
# }
# response = self.client.post(edit_url, data)
# self.assertEqual(response.status_code, 200)
# # Test that the change was properly stored in the DB
# plugin_model = TestPluginBetaModel.objects.all()[0]
# self.assertEqual("BETA", plugin_model.body)
|
<commit_before><commit_msg>Create test case for MTI Plugins<commit_after># -*- coding: utf-8 -*-
from django.conf import settings
from cms.models import Page
from cms.models.pluginmodel import CMSPlugin
from cms.test_utils.util.context_managers import SettingsOverride
from cms.test_utils.testcases import (
URL_CMS_PAGE_ADD,
URL_CMS_PLUGIN_ADD,
URL_CMS_PLUGIN_EDIT,
)
from cms.test_utils.project.mti_pluginapp.models import TestPluginBetaModel
from .plugins import PluginsTestBaseCase
# class CustomPluginsTestCase(PluginsTestBaseCase):
# def test_add_edit_plugin(self):
# """
# Test that we can instantiate and use a MTI plugin
# """
# INSTALLED_APPS = settings.INSTALLED_APPS
# INSTALLED_APPS = INSTALLED_APPS + ['cms.test_utils.project.mti_pluginapp']
# with SettingsOverride(INSTALLED_APPS):
# # Create a page
# page_data = self.get_new_page_data()
# self.client.post(URL_CMS_PAGE_ADD, page_data)
# page = Page.objects.all()[0]
# # Add the MTI plugin
# plugin_data = {
# 'plugin_type': "TestPluginBeta",
# 'plugin_language': settings.LANGUAGES[0][0],
# 'placeholder_id': page.placeholders.get(slot="body").pk,
# 'plugin_parent': '',
# }
# response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# self.assertEqual(response.status_code, 200)
# plugin_id = self.get_response_pk(response)
# self.assertEqual(plugin_id, CMSPlugin.objects.all()[0].pk)
# # Test we can open the change form for the MTI plugin
# edit_url = "%s%s/" % (URL_CMS_PLUGIN_EDIT, plugin_id)
# response = self.client.get(edit_url)
# self.assertEqual(response.status_code, 200)
# # Edit the MTI plugin
# data = {
# "alpha": "ALPHA",
# "beta": "BETA"
# }
# response = self.client.post(edit_url, data)
# self.assertEqual(response.status_code, 200)
# # Test that the change was properly stored in the DB
# plugin_model = TestPluginBetaModel.objects.all()[0]
# self.assertEqual("BETA", plugin_model.body)
|
|
1458b5a02df4e983dfa54b474d1796d530d536a7
|
normalize_spelling.py
|
normalize_spelling.py
|
"""Normalize the spelling of the data used for machine learning
Usage: python normalize_spelling <train set> <test set> <output dir>
"""
import codecs
import json
import argparse
from collections import Counter
import os
import string
with codecs.open('hist2modern_liwc.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
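    # keep only words whose modern alternatives differ from the historic form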
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
parser = argparse.ArgumentParser()
parser.add_argument('train', help='the name of the file containing the train '
'data.')
parser.add_argument('test', help='the name of the file containing the test '
'data.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
data_files = [args.train, args.test]
num_words = 0
num_replaced = 0
replacements = Counter()
for df in data_files:
new_lines = []
with codecs.open(df, 'rb', 'utf-8') as f:
for line in f.readlines():
l, label = line.rsplit(None, 1)
words = l.split()
result = []
for word in words:
w = word.lower()
if w not in string.punctuation:
num_words += 1
if w in hist2modern.keys():
result.append(hist2modern[w])
num_replaced += 1
replacements[w] += 1
else:
result.append(word)
new_lines.append('{}\t{}'.format(' '.join(result).encode('utf-8'),
label))
# write output
_head, tail = os.path.split(df)
out_file_name = '{}-normalized_spelling.txt'.format(tail.split('.')[0])
out_file = os.path.join(args.output_dir, out_file_name)
with codecs.open(out_file, 'wb', 'utf-8') as f:
f.write('\n'.join(new_lines).decode('utf-8'))
# print number of replacements
print 'total words\t{}\ntotal replaced\t{}'.format(num_words, num_replaced)
for replaced, freq in replacements.most_common():
print '{}\t{}\t{}'.format(replaced.encode('utf-8'),
hist2modern[replaced].encode('utf-8'),
freq)
|
Add script to normalize spelling
|
Add script to normalize spelling
Added a script that takes as input a training and test set containing
sentences with words separated by spaces, normalizes the spelling using
the hist2modern dictionary and writes files containing the spelling
normalized train and test set. It also outputs to std out the counts of
words replaced.
|
Python
|
apache-2.0
|
NLeSC/embodied-emotions-scripts,NLeSC/embodied-emotions-scripts
|
Add script to normalize spelling
Added a script that takes as input a training and test set containing
sentences with words separated by spaces, normalizes the spelling using
the hist2modern dictionary and writes files containing the spelling
normalized train and test set. It also outputs to std out the counts of
words replaced.
|
"""Normalize the spelling of the data used for machine learning
Usage: python normalize_spelling <train set> <test set> <output dir>
"""
import codecs
import json
import argparse
from collections import Counter
import os
import string
with codecs.open('hist2modern_liwc.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
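    # keep only words whose modern alternatives differ from the historic form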
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
parser = argparse.ArgumentParser()
parser.add_argument('train', help='the name of the file containing the train '
'data.')
parser.add_argument('test', help='the name of the file containing the test '
'data.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
data_files = [args.train, args.test]
num_words = 0
num_replaced = 0
replacements = Counter()
for df in data_files:
new_lines = []
with codecs.open(df, 'rb', 'utf-8') as f:
for line in f.readlines():
l, label = line.rsplit(None, 1)
words = l.split()
result = []
for word in words:
w = word.lower()
if w not in string.punctuation:
num_words += 1
if w in hist2modern.keys():
result.append(hist2modern[w])
num_replaced += 1
replacements[w] += 1
else:
result.append(word)
new_lines.append('{}\t{}'.format(' '.join(result).encode('utf-8'),
label))
# write output
_head, tail = os.path.split(df)
out_file_name = '{}-normalized_spelling.txt'.format(tail.split('.')[0])
out_file = os.path.join(args.output_dir, out_file_name)
with codecs.open(out_file, 'wb', 'utf-8') as f:
f.write('\n'.join(new_lines).decode('utf-8'))
# print number of replacements
print 'total words\t{}\ntotal replaced\t{}'.format(num_words, num_replaced)
for replaced, freq in replacements.most_common():
print '{}\t{}\t{}'.format(replaced.encode('utf-8'),
hist2modern[replaced].encode('utf-8'),
freq)
|
<commit_before><commit_msg>Add script to normalize spelling
Added a script that takes as input a training and test set containing
sentences with words separated by spaces, normalizes the spelling using
the hist2modern dictionary and writes files containing the spelling
normalized train and test set. It also outputs to std out the counts of
words replaced.<commit_after>
|
"""Normalize the spelling of the data used for machine learning
Usage: python normalize_spelling <train set> <test set> <output dir>
"""
import codecs
import json
import argparse
from collections import Counter
import os
import string
with codecs.open('hist2modern_liwc.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
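    # keep only words whose modern alternatives differ from the historic form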
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
parser = argparse.ArgumentParser()
parser.add_argument('train', help='the name of the file containing the train '
'data.')
parser.add_argument('test', help='the name of the file containing the test '
'data.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
data_files = [args.train, args.test]
num_words = 0
num_replaced = 0
replacements = Counter()
for df in data_files:
new_lines = []
with codecs.open(df, 'rb', 'utf-8') as f:
for line in f.readlines():
l, label = line.rsplit(None, 1)
words = l.split()
result = []
for word in words:
w = word.lower()
if w not in string.punctuation:
num_words += 1
if w in hist2modern.keys():
result.append(hist2modern[w])
num_replaced += 1
replacements[w] += 1
else:
result.append(word)
new_lines.append('{}\t{}'.format(' '.join(result).encode('utf-8'),
label))
# write output
_head, tail = os.path.split(df)
out_file_name = '{}-normalized_spelling.txt'.format(tail.split('.')[0])
out_file = os.path.join(args.output_dir, out_file_name)
with codecs.open(out_file, 'wb', 'utf-8') as f:
f.write('\n'.join(new_lines).decode('utf-8'))
# print number of replacements
print 'total words\t{}\ntotal replaced\t{}'.format(num_words, num_replaced)
for replaced, freq in replacements.most_common():
print '{}\t{}\t{}'.format(replaced.encode('utf-8'),
hist2modern[replaced].encode('utf-8'),
freq)
|
Add script to normalize spelling
Added a script that takes as input a training and test set containing
sentences with words separated by spaces, normalizes the spelling using
the hist2modern dictionary and writes files containing the spelling
normalized train and test set. It also outputs to std out the counts of
words replaced."""Normalize the spelling of the data used for machine learning
Usage: python normalize_spelling <train set> <test set> <output dir>
"""
import codecs
import json
import argparse
from collections import Counter
import os
import string
with codecs.open('hist2modern_liwc.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
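    # keep only words whose modern alternatives differ from the historic form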
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
parser = argparse.ArgumentParser()
parser.add_argument('train', help='the name of the file containing the train '
'data.')
parser.add_argument('test', help='the name of the file containing the test '
'data.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
data_files = [args.train, args.test]
num_words = 0
num_replaced = 0
replacements = Counter()
for df in data_files:
new_lines = []
with codecs.open(df, 'rb', 'utf-8') as f:
for line in f.readlines():
l, label = line.rsplit(None, 1)
words = l.split()
result = []
for word in words:
w = word.lower()
if w not in string.punctuation:
num_words += 1
if w in hist2modern.keys():
result.append(hist2modern[w])
num_replaced += 1
replacements[w] += 1
else:
result.append(word)
new_lines.append('{}\t{}'.format(' '.join(result).encode('utf-8'),
label))
# write output
_head, tail = os.path.split(df)
out_file_name = '{}-normalized_spelling.txt'.format(tail.split('.')[0])
out_file = os.path.join(args.output_dir, out_file_name)
with codecs.open(out_file, 'wb', 'utf-8') as f:
f.write('\n'.join(new_lines).decode('utf-8'))
# print number of replacements
print 'total words\t{}\ntotal replaced\t{}'.format(num_words, num_replaced)
for replaced, freq in replacements.most_common():
print '{}\t{}\t{}'.format(replaced.encode('utf-8'),
hist2modern[replaced].encode('utf-8'),
freq)
|
<commit_before><commit_msg>Add script to normalize spelling
Added a script that takes as input a training and test set containing
sentences with words separated by spaces, normalizes the spelling using
the hist2modern dictionary and writes files containing the spelling
normalized train and test set. It also outputs to std out the counts of
words replaced.<commit_after>"""Normalize the spelling of the data used for machine learning
Usage: python normalize_spelling <train set> <test set> <output dir>
"""
import codecs
import json
import argparse
from collections import Counter
import os
import string
with codecs.open('hist2modern_liwc.json', 'rb', 'utf-8') as f:
full_dict = json.load(f, 'utf-8')
# create simple historic word -> modern word mapping
# (full_dict may contain multiple alternatives for a word)
hist2modern = {}
for w in full_dict.keys():
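    # keep only words whose modern alternatives differ from the historic form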
if w not in full_dict[w]:
c = Counter(full_dict[w])
hist2modern[w] = c.most_common()[0][0]
print '#words in dict: {}'.format(len(hist2modern))
parser = argparse.ArgumentParser()
parser.add_argument('train', help='the name of the file containing the train '
'data.')
parser.add_argument('test', help='the name of the file containing the test '
'data.')
parser.add_argument('output_dir', help='the directory where the '
'generated text files should be saved')
args = parser.parse_args()
data_files = [args.train, args.test]
num_words = 0
num_replaced = 0
replacements = Counter()
for df in data_files:
new_lines = []
with codecs.open(df, 'rb', 'utf-8') as f:
for line in f.readlines():
l, label = line.rsplit(None, 1)
words = l.split()
result = []
for word in words:
w = word.lower()
if w not in string.punctuation:
num_words += 1
if w in hist2modern.keys():
result.append(hist2modern[w])
num_replaced += 1
replacements[w] += 1
else:
result.append(word)
new_lines.append('{}\t{}'.format(' '.join(result).encode('utf-8'),
label))
# write output
_head, tail = os.path.split(df)
out_file_name = '{}-normalized_spelling.txt'.format(tail.split('.')[0])
out_file = os.path.join(args.output_dir, out_file_name)
with codecs.open(out_file, 'wb', 'utf-8') as f:
f.write('\n'.join(new_lines).decode('utf-8'))
# print number of replacements
print 'total words\t{}\ntotal replaced\t{}'.format(num_words, num_replaced)
for replaced, freq in replacements.most_common():
print '{}\t{}\t{}'.format(replaced.encode('utf-8'),
hist2modern[replaced].encode('utf-8'),
freq)
|
|
d339980773fb9e6b61c1f5dfab935aeb2daf8d5d
|
tests/tests_scanimage.py
|
tests/tests_scanimage.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import unittest
class TestScanimage(unittest.TestCase):
"""
TODO: Docstring
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_tesseract_binary_in_path(self):
self.assertIsNotNone(shutil.which('tesseract'))
if __name__ == "__main__":
unittest.main()
|
Add test for checking if tesseract binary is available
|
Add test for checking if tesseract binary is available
|
Python
|
bsd-2-clause
|
sjktje/sjkscan,sjktje/sjkscan
|
Add test for checking if tesseract binary is available
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import unittest
class TestScanimage(unittest.TestCase):
"""
TODO: Docstring
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_tesseract_binary_in_path(self):
self.assertIsNotNone(shutil.which('tesseract'))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for checking if tesseract binary is available<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import unittest
class TestScanimage(unittest.TestCase):
"""
TODO: Docstring
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_tesseract_binary_in_path(self):
self.assertIsNotNone(shutil.which('tesseract'))
if __name__ == "__main__":
unittest.main()
|
Add test for checking if tesseract binary is available#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import unittest
class TestScanimage(unittest.TestCase):
"""
TODO: Docstring
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_tesseract_binary_in_path(self):
self.assertIsNotNone(shutil.which('tesseract'))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for checking if tesseract binary is available<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import shutil
import unittest
class TestScanimage(unittest.TestCase):
"""
TODO: Docstring
"""
def setUp(self):
pass
def tearDown(self):
pass
def test_tesseract_binary_in_path(self):
self.assertIsNotNone(shutil.which('tesseract'))
if __name__ == "__main__":
unittest.main()
|
|
830227bad5e32683180d7d8df12f20e5935d81ed
|
test/test_shortcuts.py
|
test/test_shortcuts.py
|
from nose.tools import *
from lctools.shortcuts import get_node_or_fail
class TestShortCuts(object):
def setup(self):
class MyNode(object):
def __init__(self, id):
self.id = id
class MyConn(object):
def __init__(self, nodes):
self.nodes = nodes
def list_nodes(self):
return self.nodes
self.node_cls = MyNode
self.conn_cls = MyConn
def test_that_get_node_or_fail_returns_node_object_for_existing_node(self):
conn = self.conn_cls([self.node_cls("15"), self.node_cls("21")])
node = get_node_or_fail(conn, "15")
assert_equal(node.id, "15")
def test_that_get_node_or_fail_returns_none_in_fail_case(self):
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15")
assert_true(node is None)
def test_that_get_node_or_fail_calls_coroutine_in_fail_case(self):
class CallableCheck(object):
called = False
args = None
kwargs = None
def __call__(self, *args, **kwargs):
self.called = True
self.args = args
self.kwargs = kwargs
coroutine = CallableCheck()
cargs = ("Error happened",)
ckwargs = {"node_id": "15"}
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15",
coroutine, cargs, ckwargs)
assert_true(coroutine.called)
assert_equal(coroutine.args, cargs)
assert_equal(coroutine.kwargs, ckwargs)
|
Add tests for new shortcuts.
|
Add tests for new shortcuts.
|
Python
|
apache-2.0
|
novel/lc-tools,novel/lc-tools
|
Add tests for new shortcuts.
|
from nose.tools import *
from lctools.shortcuts import get_node_or_fail
class TestShortCuts(object):
def setup(self):
class MyNode(object):
def __init__(self, id):
self.id = id
class MyConn(object):
def __init__(self, nodes):
self.nodes = nodes
def list_nodes(self):
return self.nodes
self.node_cls = MyNode
self.conn_cls = MyConn
def test_that_get_node_or_fail_returns_node_object_for_existing_node(self):
conn = self.conn_cls([self.node_cls("15"), self.node_cls("21")])
node = get_node_or_fail(conn, "15")
assert_equal(node.id, "15")
def test_that_get_node_or_fail_returns_none_in_fail_case(self):
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15")
assert_true(node is None)
def test_that_get_node_or_fail_calls_coroutine_in_fail_case(self):
class CallableCheck(object):
called = False
args = None
kwargs = None
def __call__(self, *args, **kwargs):
self.called = True
self.args = args
self.kwargs = kwargs
coroutine = CallableCheck()
cargs = ("Error happened",)
ckwargs = {"node_id": "15"}
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15",
coroutine, cargs, ckwargs)
assert_true(coroutine.called)
assert_equal(coroutine.args, cargs)
assert_equal(coroutine.kwargs, ckwargs)
|
<commit_before><commit_msg>Add tests for new shortcuts.<commit_after>
|
from nose.tools import *
from lctools.shortcuts import get_node_or_fail
class TestShortCuts(object):
def setup(self):
class MyNode(object):
def __init__(self, id):
self.id = id
class MyConn(object):
def __init__(self, nodes):
self.nodes = nodes
def list_nodes(self):
return self.nodes
self.node_cls = MyNode
self.conn_cls = MyConn
def test_that_get_node_or_fail_returns_node_object_for_existing_node(self):
conn = self.conn_cls([self.node_cls("15"), self.node_cls("21")])
node = get_node_or_fail(conn, "15")
assert_equal(node.id, "15")
def test_that_get_node_or_fail_returns_none_in_fail_case(self):
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15")
assert_true(node is None)
def test_that_get_node_or_fail_calls_coroutine_in_fail_case(self):
class CallableCheck(object):
called = False
args = None
kwargs = None
def __call__(self, *args, **kwargs):
self.called = True
self.args = args
self.kwargs = kwargs
coroutine = CallableCheck()
cargs = ("Error happened",)
ckwargs = {"node_id": "15"}
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15",
coroutine, cargs, ckwargs)
assert_true(coroutine.called)
assert_equal(coroutine.args, cargs)
assert_equal(coroutine.kwargs, ckwargs)
|
Add tests for new shortcuts.from nose.tools import *
from lctools.shortcuts import get_node_or_fail
class TestShortCuts(object):
def setup(self):
class MyNode(object):
def __init__(self, id):
self.id = id
class MyConn(object):
def __init__(self, nodes):
self.nodes = nodes
def list_nodes(self):
return self.nodes
self.node_cls = MyNode
self.conn_cls = MyConn
def test_that_get_node_or_fail_returns_node_object_for_existing_node(self):
conn = self.conn_cls([self.node_cls("15"), self.node_cls("21")])
node = get_node_or_fail(conn, "15")
assert_equal(node.id, "15")
def test_that_get_node_or_fail_returns_none_in_fail_case(self):
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15")
assert_true(node is None)
def test_that_get_node_or_fail_calls_coroutine_in_fail_case(self):
class CallableCheck(object):
called = False
args = None
kwargs = None
def __call__(self, *args, **kwargs):
self.called = True
self.args = args
self.kwargs = kwargs
coroutine = CallableCheck()
cargs = ("Error happened",)
ckwargs = {"node_id": "15"}
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15",
coroutine, cargs, ckwargs)
assert_true(coroutine.called)
assert_equal(coroutine.args, cargs)
assert_equal(coroutine.kwargs, ckwargs)
|
<commit_before><commit_msg>Add tests for new shortcuts.<commit_after>from nose.tools import *
from lctools.shortcuts import get_node_or_fail
class TestShortCuts(object):
def setup(self):
class MyNode(object):
def __init__(self, id):
self.id = id
class MyConn(object):
def __init__(self, nodes):
self.nodes = nodes
def list_nodes(self):
return self.nodes
self.node_cls = MyNode
self.conn_cls = MyConn
def test_that_get_node_or_fail_returns_node_object_for_existing_node(self):
conn = self.conn_cls([self.node_cls("15"), self.node_cls("21")])
node = get_node_or_fail(conn, "15")
assert_equal(node.id, "15")
def test_that_get_node_or_fail_returns_none_in_fail_case(self):
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15")
assert_true(node is None)
def test_that_get_node_or_fail_calls_coroutine_in_fail_case(self):
class CallableCheck(object):
called = False
args = None
kwargs = None
def __call__(self, *args, **kwargs):
self.called = True
self.args = args
self.kwargs = kwargs
coroutine = CallableCheck()
cargs = ("Error happened",)
ckwargs = {"node_id": "15"}
conn = self.conn_cls([])
node = get_node_or_fail(conn, "15",
coroutine, cargs, ckwargs)
assert_true(coroutine.called)
assert_equal(coroutine.args, cargs)
assert_equal(coroutine.kwargs, ckwargs)
|
|
ad68c13a4080c80c88b039e3033f6b94421f4a27
|
sms_send_demo.py
|
sms_send_demo.py
|
"""
Basic demo of summit-python that sends a simple SMS message from the
command-line.
"""
import argparse
from summit.rest import SummitRestClient
def main():
parser = argparse.ArgumentParser(
description="Command-line SMS sender using Corvisa's Summit API.")
parser.add_argument('--key', required=True,
help="Your application's API key.")
parser.add_argument('--secret', required=True,
help="Your application's API key secret.")
parser.add_argument('--from', required=True, help=(
"Number to send from. Must be an authorized number "
"for your Summit app."))
parser.add_argument('--to', required=True, help="Recipient of SMS message.")
parser.add_argument('--message', required=True,
help="Body of the SMS message.")
args = parser.parse_args()
client = SummitRestClient(account=args.key, token=args.secret)
    # 'from' is a Python keyword, so argparse exposes the --from value as
    # an attribute that must be read with getattr
    resp, inst = client.messages.create(from_=getattr(args, 'from'),
                                        to=args.to, body=args.message)
print 'Responded with code: {}'.format(resp.status_code)
print inst
if __name__ == '__main__':
main()
|
Add demo script to send SMS message from command_line
|
Add demo script to send SMS message from command_line
|
Python
|
mit
|
josephl/summit-python
|
Add demo script to send SMS message from command_line
|
"""
Basic demo of summit-python that sends a simple SMS message from the
command-line.
"""
import argparse
from summit.rest import SummitRestClient
def main():
parser = argparse.ArgumentParser(
description="Command-line SMS sender using Corvisa's Summit API.")
parser.add_argument('--key', required=True,
help="Your application's API key.")
parser.add_argument('--secret', required=True,
help="Your application's API key secret.")
parser.add_argument('--from', required=True, help=(
"Number to send from. Must be an authorized number "
"for your Summit app."))
parser.add_argument('--to', required=True, help="Recipient of SMS message.")
parser.add_argument('--message', required=True,
help="Body of the SMS message.")
args = parser.parse_args()
client = SummitRestClient(account=args.key, token=args.secret)
    # 'from' is a Python keyword, so argparse exposes the --from value as
    # an attribute that must be read with getattr
    resp, inst = client.messages.create(from_=getattr(args, 'from'),
                                        to=args.to, body=args.message)
print 'Responded with code: {}'.format(resp.status_code)
print inst
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add demo script to send SMS message from command_line<commit_after>
|
"""
Basic demo of summit-python that sends a simple SMS message from the
command-line.
"""
import argparse
from summit.rest import SummitRestClient
def main():
parser = argparse.ArgumentParser(
description="Command-line SMS sender using Corvisa's Summit API.")
parser.add_argument('--key', required=True,
help="Your application's API key.")
parser.add_argument('--secret', required=True,
help="Your application's API key secret.")
parser.add_argument('--from', required=True, help=(
"Number to send from. Must be an authorized number "
"for your Summit app."))
parser.add_argument('--to', required=True, help="Recipient of SMS message.")
parser.add_argument('--message', required=True,
help="Body of the SMS message.")
args = parser.parse_args()
client = SummitRestClient(account=args.key, token=args.secret)
    # 'from' is a Python keyword, so argparse exposes the --from value as
    # an attribute that must be read with getattr
    resp, inst = client.messages.create(from_=getattr(args, 'from'),
                                        to=args.to, body=args.message)
print 'Responded with code: {}'.format(resp.status_code)
print inst
if __name__ == '__main__':
main()
|
Add demo script to send SMS message from command_line"""
Basic demo of summit-python that sends a simple SMS message from the
command-line.
"""
import argparse
from summit.rest import SummitRestClient
def main():
parser = argparse.ArgumentParser(
description="Command-line SMS sender using Corvisa's Summit API.")
parser.add_argument('--key', required=True,
help="Your application's API key.")
parser.add_argument('--secret', required=True,
help="Your application's API key secret.")
parser.add_argument('--from', required=True, help=(
"Number to send from. Must be an authorized number "
"for your Summit app."))
parser.add_argument('--to', required=True, help="Recipient of SMS message.")
parser.add_argument('--message', required=True,
help="Body of the SMS message.")
args = parser.parse_args()
client = SummitRestClient(account=args.key, token=args.secret)
    resp, inst = client.messages.create(from_=getattr(args, 'from'),
                                        to=args.to, body=args.message)
print 'Responded with code: {}'.format(resp.status_code)
print inst
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add demo script to send SMS message from command_line<commit_after>"""
Basic demo of summit-python that sends a simple SMS message from the
command-line.
"""
import argparse
from summit.rest import SummitRestClient
def main():
parser = argparse.ArgumentParser(
description="Command-line SMS sender using Corvisa's Summit API.")
parser.add_argument('--key', required=True,
help="Your application's API key.")
parser.add_argument('--secret', required=True,
help="Your application's API key secret.")
parser.add_argument('--from', required=True, help=(
"Number to send from. Must be an authorized number "
"for your Summit app."))
parser.add_argument('--to', required=True, help="Recipient of SMS message.")
parser.add_argument('--message', required=True,
help="Body of the SMS message.")
args = parser.parse_args()
client = SummitRestClient(account=args.key, token=args.secret)
    resp, inst = client.messages.create(from_=getattr(args, 'from'),
                                        to=args.to, body=args.message)
print 'Responded with code: {}'.format(resp.status_code)
print inst
if __name__ == '__main__':
main()
|
|
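One subtlety in the SMS script above: argparse stores the --from flag under the attribute name from, a Python keyword, so args.from is a syntax error and the value must be read with getattr (or remapped via dest=). A minimal standard-library sketch of both options, using a made-up phone number:
import argparse
# Option 1: keep the flag and read the keyword-named attribute via getattr().
parser = argparse.ArgumentParser()
parser.add_argument('--from', required=True)
args = parser.parse_args(['--from', '+15551234567'])  # hypothetical number
sender = getattr(args, 'from')
# Option 2: remap the destination so the attribute gets a normal name.
parser2 = argparse.ArgumentParser()
parser2.add_argument('--from', dest='from_', required=True)
args2 = parser2.parse_args(['--from', '+15551234567'])
assert sender == args2.from_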
a60ee78c65cc1920dc57bf26781cde5836271147
|
indra/sources/bbn/make_bbn_ontology.py
|
indra/sources/bbn/make_bbn_ontology.py
|
import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
bb_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/resource/'
'ontologies/hume_ontology.yaml')
eidos_ns = Namespace('https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node, prefix):
node = node.replace(' ', '_')
path = prefix + '/' + node if prefix else node
return eidos_ns.term(path)
def build_relations(G, node, tree, prefix):
this_term = get_term(node, prefix)
node = node.replace(' ', '_')
if prefix is not None:
prefix = prefix.replace(' ', '_')
this_prefix = prefix + '/' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_' and child != 'examples' \
and any(isinstance(entry[child], t) for t in [list, dict]):
build_relations(G, child, entry[child], this_prefix)
if child[0] != '_' and child != 'examples':
child_term = get_term(child, this_prefix)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(bb_ont_url).content
root = yaml.load(yml)
G = Graph()
for top_entry in root:
for node in top_entry.keys():
build_relations(G, node, top_entry[node], None)
rdf_path = join(dirname(abspath(__file__)), 'bbn_ontology.rdf')
save_hierarchy(G, rdf_path)
|
Create separate bbn ontology retriever.
|
Create separate bbn ontology retriever.
|
Python
|
bsd-2-clause
|
johnbachman/belpy,sorgerlab/belpy,johnbachman/belpy,johnbachman/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,sorgerlab/indra,pvtodorov/indra,sorgerlab/belpy,bgyori/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,johnbachman/belpy,sorgerlab/indra,johnbachman/indra,bgyori/indra,pvtodorov/indra
|
Create separate bbn ontology retriever.
|
import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
bb_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/resource/'
'ontologies/hume_ontology.yaml')
eidos_ns = Namespace('https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node, prefix):
node = node.replace(' ', '_')
path = prefix + '/' + node if prefix else node
return eidos_ns.term(path)
def build_relations(G, node, tree, prefix):
this_term = get_term(node, prefix)
node = node.replace(' ', '_')
if prefix is not None:
prefix = prefix.replace(' ', '_')
this_prefix = prefix + '/' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_' and child != 'examples' \
and any(isinstance(entry[child], t) for t in [list, dict]):
build_relations(G, child, entry[child], this_prefix)
if child[0] != '_' and child != 'examples':
child_term = get_term(child, this_prefix)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(bb_ont_url).content
root = yaml.load(yml)
G = Graph()
for top_entry in root:
for node in top_entry.keys():
build_relations(G, node, top_entry[node], None)
rdf_path = join(dirname(abspath(__file__)), 'bbn_ontology.rdf')
save_hierarchy(G, rdf_path)
|
<commit_before><commit_msg>Create separate bbn ontology retriever.<commit_after>
|
import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
bb_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/resource/'
'ontologies/hume_ontology.yaml')
eidos_ns = Namespace('https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node, prefix):
node = node.replace(' ', '_')
path = prefix + '/' + node if prefix else node
return eidos_ns.term(path)
def build_relations(G, node, tree, prefix):
this_term = get_term(node, prefix)
node = node.replace(' ', '_')
if prefix is not None:
prefix = prefix.replace(' ', '_')
this_prefix = prefix + '/' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_' and child != 'examples' \
and any(isinstance(entry[child], t) for t in [list, dict]):
build_relations(G, child, entry[child], this_prefix)
if child[0] != '_' and child != 'examples':
child_term = get_term(child, this_prefix)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(bb_ont_url).content
root = yaml.load(yml)
G = Graph()
for top_entry in root:
for node in top_entry.keys():
build_relations(G, node, top_entry[node], None)
rdf_path = join(dirname(abspath(__file__)), 'bbn_ontology.rdf')
save_hierarchy(G, rdf_path)
|
Create separate bbn ontology retriever.import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
bb_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/resource/'
'ontologies/hume_ontology.yaml')
eidos_ns = Namespace('https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node, prefix):
node = node.replace(' ', '_')
path = prefix + '/' + node if prefix else node
return eidos_ns.term(path)
def build_relations(G, node, tree, prefix):
this_term = get_term(node, prefix)
node = node.replace(' ', '_')
if prefix is not None:
prefix = prefix.replace(' ', '_')
this_prefix = prefix + '/' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_' and child != 'examples' \
and any(isinstance(entry[child], t) for t in [list, dict]):
build_relations(G, child, entry[child], this_prefix)
if child[0] != '_' and child != 'examples':
child_term = get_term(child, this_prefix)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(bb_ont_url).content
root = yaml.load(yml)
G = Graph()
for top_entry in root:
for node in top_entry.keys():
build_relations(G, node, top_entry[node], None)
rdf_path = join(dirname(abspath(__file__)), 'bbn_ontology.rdf')
save_hierarchy(G, rdf_path)
|
<commit_before><commit_msg>Create separate bbn ontology retriever.<commit_after>import yaml
import requests
from os.path import join, dirname, abspath
from rdflib import Graph, Namespace, Literal
bb_ont_url = ('https://raw.githubusercontent.com/BBN-E/Hume/master/resource/'
'ontologies/hume_ontology.yaml')
eidos_ns = Namespace('https://github.com/clulab/eidos/wiki/JSON-LD/Grounding#')
indra_ns = 'http://sorger.med.harvard.edu/indra/'
indra_rel_ns = Namespace(indra_ns + 'relations/')
isa = indra_rel_ns.term('isa')
def save_hierarchy(g, path):
with open(path, 'wb') as out_file:
g_bytes = g.serialize(format='nt')
# Replace extra new lines in string and get rid of empty line at end
g_bytes = g_bytes.replace(b'\n\n', b'\n').strip()
# Split into rows and sort
rows = g_bytes.split(b'\n')
rows.sort()
g_bytes = b'\n'.join(rows)
out_file.write(g_bytes)
def get_term(node, prefix):
node = node.replace(' ', '_')
path = prefix + '/' + node if prefix else node
return eidos_ns.term(path)
def build_relations(G, node, tree, prefix):
this_term = get_term(node, prefix)
node = node.replace(' ', '_')
if prefix is not None:
prefix = prefix.replace(' ', '_')
this_prefix = prefix + '/' + node if prefix else node
for entry in tree:
if isinstance(entry, str) and entry[0] != '_':
child = entry
elif isinstance(entry, dict):
for child in entry.keys():
if child[0] != '_' and child != 'examples' \
and any(isinstance(entry[child], t) for t in [list, dict]):
build_relations(G, child, entry[child], this_prefix)
if child[0] != '_' and child != 'examples':
child_term = get_term(child, this_prefix)
rel = (child_term, isa, this_term)
G.add(rel)
if __name__ == '__main__':
yml = requests.get(bb_ont_url).content
root = yaml.load(yml)
G = Graph()
for top_entry in root:
for node in top_entry.keys():
build_relations(G, node, top_entry[node], None)
rdf_path = join(dirname(abspath(__file__)), 'bbn_ontology.rdf')
save_hierarchy(G, rdf_path)
|
|
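For orientation, build_relations above expects the Hume ontology YAML to be a nested structure in which each node maps to a list whose entries are either leaf strings or single-key dicts of child subtrees. A small sketch with invented node names (not taken from the real ontology) showing that shape:
import yaml
fragment = """
- event:
  - conflict:
    - attack
    - protest
  - migration
"""
root = yaml.safe_load(fragment)
# Walking this with build_relations(G, 'event', root[0]['event'], None) from
# the module above would emit isa triples such as (event/conflict, isa, event)
# and (event/conflict/attack, isa, event/conflict).
print(root)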
bddd57dd75b81887cda3f82f44eaf548b6bba405
|
tests/grammar_term-nonterm_test/NonterminalIterationTest.py
|
tests/grammar_term-nonterm_test/NonterminalIterationTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import Grammar
from grammpy import Nonterminal
class TempClass(Nonterminal):
pass
class Second(Nonterminal):
pass
class Third(Nonterminal):
pass
class NonterminalIterationTest(TestCase):
def test_oneTerminalTerms(self):
gr = Grammar()
gr.add_term('a')
for i in gr.terms():
self.assertEqual(i.s, 'a')
def test_oneTerminalGetTerm(self):
gr = Grammar()
gr.add_term('a')
for i in gr.get_term():
self.assertEqual(i.s, 'a')
def test_ThreeTerminalTerms(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.terms())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
def test_ThreeTerminalGetTerm(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.get_term())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
if __name__ == '__main__':
main()
|
Add file for nonterms iteration tests
|
Add file for nonterms iteration tests
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add file for nonterms iteration tests
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import Grammar
from grammpy import Nonterminal
class TempClass(Nonterminal):
pass
class Second(Nonterminal):
pass
class Third(Nonterminal):
pass
class NonterminalIterationTest(TestCase):
def test_oneTerminalTerms(self):
gr = Grammar()
gr.add_term('a')
for i in gr.terms():
self.assertEqual(i.s, 'a')
def test_oneTerminalGetTerm(self):
gr = Grammar()
gr.add_term('a')
for i in gr.get_term():
self.assertEqual(i.s, 'a')
def test_ThreeTerminalTerms(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.terms())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
def test_ThreeTerminalGetTerm(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.get_term())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for nonterms iteration tests<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import Grammar
from grammpy import Nonterminal
class TempClass(Nonterminal):
pass
class Second(Nonterminal):
pass
class Third(Nonterminal):
pass
class NonterminalIterationTest(TestCase):
def test_oneTerminalTerms(self):
gr = Grammar()
gr.add_term('a')
for i in gr.terms():
self.assertEqual(i.s, 'a')
def test_oneTerminalGetTerm(self):
gr = Grammar()
gr.add_term('a')
for i in gr.get_term():
self.assertEqual(i.s, 'a')
def test_ThreeTerminalTerms(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.terms())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
def test_ThreeTerminalGetTerm(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.get_term())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
if __name__ == '__main__':
main()
|
Add file for nonterms iteration tests#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import Grammar
from grammpy import Nonterminal
class TempClass(Nonterminal):
pass
class Second(Nonterminal):
pass
class Third(Nonterminal):
pass
class NonterminalIterationTest(TestCase):
def test_oneTerminalTerms(self):
gr = Grammar()
gr.add_term('a')
for i in gr.terms():
self.assertEqual(i.s, 'a')
def test_oneTerminalGetTerm(self):
gr = Grammar()
gr.add_term('a')
for i in gr.get_term():
self.assertEqual(i.s, 'a')
def test_ThreeTerminalTerms(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.terms())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
def test_ThreeTerminalGetTerm(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.get_term())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add file for nonterms iteration tests<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import Grammar
from grammpy import Nonterminal
class TempClass(Nonterminal):
pass
class Second(Nonterminal):
pass
class Third(Nonterminal):
pass
class NonterminalIterationTest(TestCase):
def test_oneTerminalTerms(self):
gr = Grammar()
gr.add_term('a')
for i in gr.terms():
self.assertEqual(i.s, 'a')
def test_oneTerminalGetTerm(self):
gr = Grammar()
gr.add_term('a')
for i in gr.get_term():
self.assertEqual(i.s, 'a')
def test_ThreeTerminalTerms(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.terms())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
def test_ThreeTerminalGetTerm(self):
gr = Grammar()
gr.add_term([0, 'a', TempClass])
s = set(term.s for term in gr.get_term())
for i in [0, 'a', TempClass]:
self.assertTrue(i in s)
if __name__ == '__main__':
main()
|
|
af3577a1ab0f7005d95c64be640550263efcb416
|
indra/tools/reading/util/reporter.py
|
indra/tools/reading/util/reporter.py
|
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
class Reporter(object):
def __init__(self, name):
self.styles = getSampleStyleSheet()
self.styles.add(ParagraphStyle(name="Justify", alignment=TA_JUSTIFY))
self.story = []
self.name = name
return
def make_report(self):
doc = SimpleDocTemplate(self.name + '.pdf', pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=18)
doc.build(self.story)
return doc
def add_story_text(self, text, style='Normal', space=None, fontsize=12):
if space is None:
space=(1,12)
        ptext = '<font size=%d>%s</font>' % (fontsize, text)
self.story.append(Paragraph(ptext, self.styles[style]))
self.story.append(Spacer(*space))
return
def add_story_image(self, image_path, width=None, height=None):
if width is not None:
width = width*inch
if height is not None:
height = height*inch
im = Image(image_path, width, height)
self.story.append(im)
|
Add a simple class to work with canvas tools.
|
Add a simple class to work with canvas tools.
|
Python
|
bsd-2-clause
|
johnbachman/indra,bgyori/indra,johnbachman/indra,johnbachman/belpy,sorgerlab/belpy,johnbachman/belpy,pvtodorov/indra,sorgerlab/belpy,johnbachman/belpy,sorgerlab/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy
|
Add a simple class to work with canvas tools.
|
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
class Reporter(object):
def __init__(self, name):
self.styles = getSampleStyleSheet()
self.styles.add(ParagraphStyle(name="Justify", alignment=TA_JUSTIFY))
self.story = []
self.name = name
return
def make_report(self):
doc = SimpleDocTemplate(self.name + '.pdf', pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=18)
doc.build(self.story)
return doc
def add_story_text(self, text, style='Normal', space=None, fontsize=12):
if space is None:
space=(1,12)
        ptext = '<font size=%d>%s</font>' % (fontsize, text)
self.story.append(Paragraph(ptext, self.styles[style]))
self.story.append(Spacer(*space))
return
def add_story_image(self, image_path, width=None, height=None):
if width is not None:
width = width*inch
if height is not None:
height = height*inch
im = Image(image_path, width, height)
self.story.append(im)
|
<commit_before><commit_msg>Add a simple class to work with canvas tools.<commit_after>
|
from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
class Reporter(object):
def __init__(self, name):
self.styles = getSampleStyleSheet()
self.styles.add(ParagraphStyle(name="Justify", alignment=TA_JUSTIFY))
self.story = []
self.name = name
return
def make_report(self):
doc = SimpleDocTemplate(self.name + '.pdf', pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=18)
doc.build(self.story)
return doc
def add_story_text(self, text, style='Normal', space=None, fontsize=12):
if space is None:
space=(1,12)
        ptext = '<font size=%d>%s</font>' % (fontsize, text)
self.story.append(Paragraph(ptext, self.styles[style]))
self.story.append(Spacer(*space))
return
def add_story_image(self, image_path, width=None, height=None):
if width is not None:
width = width*inch
if height is not None:
height = height*inch
im = Image(image_path, width, height)
self.story.append(im)
|
Add a simple class to work with canvas tools.from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
class Reporter(object):
def __init__(self, name):
self.styles = getSampleStyleSheet()
self.styles.add(ParagraphStyle(name="Justify", alignment=TA_JUSTIFY))
self.story = []
self.name = name
return
def make_report(self):
doc = SimpleDocTemplate(self.name + '.pdf', pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=18)
doc.build(self.story)
return doc
def add_story_text(self, text, style='Normal', space=None, fontsize=12):
if space is None:
space=(1,12)
        ptext = '<font size=%d>%s</font>' % (fontsize, text)
self.story.append(Paragraph(ptext, self.styles[style]))
self.story.append(Spacer(*space))
return
def add_story_image(self, image_path, width=None, height=None):
if width is not None:
width = width*inch
if height is not None:
height = height*inch
im = Image(image_path, width, height)
self.story.append(im)
|
<commit_before><commit_msg>Add a simple class to work with canvas tools.<commit_after>from reportlab.lib.enums import TA_JUSTIFY
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
class Reporter(object):
def __init__(self, name):
self.styles = getSampleStyleSheet()
self.styles.add(ParagraphStyle(name="Justify", alignment=TA_JUSTIFY))
self.story = []
self.name = name
return
def make_report(self):
doc = SimpleDocTemplate(self.name + '.pdf', pagesize=letter,
rightMargin=72, leftMargin=72,
topMargin=72, bottomMargin=18)
doc.build(self.story)
return doc
def add_story_text(self, text, style='Normal', space=None, fontsize=12):
if space is None:
space=(1,12)
        ptext = '<font size=%d>%s</font>' % (fontsize, text)
self.story.append(Paragraph(ptext, self.styles[style]))
self.story.append(Spacer(*space))
return
def add_story_image(self, image_path, width=None, height=None):
if width is not None:
width = width*inch
if height is not None:
height = height*inch
im = Image(image_path, width, height)
self.story.append(im)
|
|
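A quick usage sketch for the Reporter class above; the report name, text, and image path are invented, and reportlab plus the class definition must be importable:
# Assuming the Reporter class defined above is on the path.
rep = Reporter('demo_report')  # hypothetical name; make_report() writes demo_report.pdf
rep.add_story_text('Reading results', style='Title', fontsize=16)
rep.add_story_text('Placeholder body text for the summary section.')
rep.add_story_image('stats.png', width=4)  # hypothetical image, 4 inches wide
rep.make_report()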
05d7332d6305ed7d1bc75a843f91603c5997b14e
|
tests/test_learning.py
|
tests/test_learning.py
|
"""
Learning tests.
"""
# pylint: disable=no-member
# pylint: disable=missing-docstring
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import numpy.testing as nt
import pygp
import pygp.demos.basic as demo
def test_optimization():
# load the data.
cdir = os.path.abspath(os.path.dirname(demo.__file__))
data = np.load(os.path.join(cdir, 'xy.npz'))
X = data['X']
y = data['y']
# create the model and add data.
gp = pygp.BasicGP(sn=.1, sf=1, ell=.1, mu=0)
gp.add_data(X, y)
# optimize the model
pygp.optimize(gp, {'sn': None})
# make sure our constraint is satisfied
nt.assert_equal(gp.get_hyper()[0], np.log(0.1))
|
Add test for the optimizer.
|
Add test for the optimizer.
|
Python
|
bsd-2-clause
|
mwhoffman/pygp
|
Add test for the optimizer.
|
"""
Learning tests.
"""
# pylint: disable=no-member
# pylint: disable=missing-docstring
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import numpy.testing as nt
import pygp
import pygp.demos.basic as demo
def test_optimization():
# load the data.
cdir = os.path.abspath(os.path.dirname(demo.__file__))
data = np.load(os.path.join(cdir, 'xy.npz'))
X = data['X']
y = data['y']
# create the model and add data.
gp = pygp.BasicGP(sn=.1, sf=1, ell=.1, mu=0)
gp.add_data(X, y)
# optimize the model
pygp.optimize(gp, {'sn': None})
# make sure our constraint is satisfied
nt.assert_equal(gp.get_hyper()[0], np.log(0.1))
|
<commit_before><commit_msg>Add test for the optimizer.<commit_after>
|
"""
Learning tests.
"""
# pylint: disable=no-member
# pylint: disable=missing-docstring
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import numpy.testing as nt
import pygp
import pygp.demos.basic as demo
def test_optimization():
# load the data.
cdir = os.path.abspath(os.path.dirname(demo.__file__))
data = np.load(os.path.join(cdir, 'xy.npz'))
X = data['X']
y = data['y']
# create the model and add data.
gp = pygp.BasicGP(sn=.1, sf=1, ell=.1, mu=0)
gp.add_data(X, y)
# optimize the model
pygp.optimize(gp, {'sn': None})
# make sure our constraint is satisfied
nt.assert_equal(gp.get_hyper()[0], np.log(0.1))
|
Add test for the optimizer."""
Learning tests.
"""
# pylint: disable=no-member
# pylint: disable=missing-docstring
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import numpy.testing as nt
import pygp
import pygp.demos.basic as demo
def test_optimization():
# load the data.
cdir = os.path.abspath(os.path.dirname(demo.__file__))
data = np.load(os.path.join(cdir, 'xy.npz'))
X = data['X']
y = data['y']
# create the model and add data.
gp = pygp.BasicGP(sn=.1, sf=1, ell=.1, mu=0)
gp.add_data(X, y)
# optimize the model
pygp.optimize(gp, {'sn': None})
# make sure our constraint is satisfied
nt.assert_equal(gp.get_hyper()[0], np.log(0.1))
|
<commit_before><commit_msg>Add test for the optimizer.<commit_after>"""
Learning tests.
"""
# pylint: disable=no-member
# pylint: disable=missing-docstring
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import os
import numpy as np
import numpy.testing as nt
import pygp
import pygp.demos.basic as demo
def test_optimization():
# load the data.
cdir = os.path.abspath(os.path.dirname(demo.__file__))
data = np.load(os.path.join(cdir, 'xy.npz'))
X = data['X']
y = data['y']
# create the model and add data.
gp = pygp.BasicGP(sn=.1, sf=1, ell=.1, mu=0)
gp.add_data(X, y)
# optimize the model
pygp.optimize(gp, {'sn': None})
# make sure our constraint is satisfied
nt.assert_equal(gp.get_hyper()[0], np.log(0.1))
|
|
58b9d66a10c88fb141570f7aca9ea6f471e2f82d
|
statsmodels/tsa/statespace/tests/test_forecasting.py
|
statsmodels/tsa/statespace/tests/test_forecasting.py
|
r"""
Tests for forecasting-related features not tested elsewhere
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_, assert_equal, assert_allclose
from statsmodels.tsa.statespace import sarimax
@pytest.mark.parametrize('data', ['list', 'numpy', 'range', 'date', 'period'])
def test_append_multistep(data):
# Test that `MLEResults.append` works when called repeatedly
endog = [1., 0.5, 1.5, 0.9, 0.2, 0.34]
if data == 'numpy':
endog = np.array(endog)
elif data == 'range':
endog = pd.Series(endog)
elif data == 'date':
index = pd.date_range(start='2000-01-01', periods=6, freq='MS')
endog = pd.Series(endog, index=index)
elif data == 'period':
index = pd.period_range(start='2000-01', periods=6, freq='M')
endog = pd.Series(endog, index=index)
# Base model fitting
mod = sarimax.SARIMAX(endog[:2], order=(1, 0, 0))
res = mod.smooth([0.5, 1.0])
assert_allclose(res.model.endog[:, 0], [1., 0.5])
assert_allclose(res.forecast(1), 0.25)
# First append
res1 = res.append(endog[2:3])
assert_allclose(res1.model.endog[:, 0], [1., 0.5, 1.5])
assert_allclose(res1.forecast(1), 0.75)
# Second append
res2 = res1.append(endog[3:5])
assert_allclose(res2.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2])
assert_allclose(res2.forecast(1), 0.1)
# Third append
res3 = res2.append(endog[5:6])
print(res3.model.endog)
assert_allclose(res3.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2, 0.34])
assert_allclose(res3.forecast(1), 0.17)
|
Add test for multi-step append
|
TST: Add test for multi-step append
|
Python
|
bsd-3-clause
|
jseabold/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,bashtage/statsmodels,jseabold/statsmodels,josef-pkt/statsmodels,jseabold/statsmodels,josef-pkt/statsmodels,josef-pkt/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,statsmodels/statsmodels,bashtage/statsmodels,bashtage/statsmodels,bashtage/statsmodels,josef-pkt/statsmodels,jseabold/statsmodels,jseabold/statsmodels
|
TST: Add test for multi-step append
|
r"""
Tests for forecasting-related features not tested elsewhere
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_, assert_equal, assert_allclose
from statsmodels.tsa.statespace import sarimax
@pytest.mark.parametrize('data', ['list', 'numpy', 'range', 'date', 'period'])
def test_append_multistep(data):
# Test that `MLEResults.append` works when called repeatedly
endog = [1., 0.5, 1.5, 0.9, 0.2, 0.34]
if data == 'numpy':
endog = np.array(endog)
elif data == 'range':
endog = pd.Series(endog)
elif data == 'date':
index = pd.date_range(start='2000-01-01', periods=6, freq='MS')
endog = pd.Series(endog, index=index)
elif data == 'period':
index = pd.period_range(start='2000-01', periods=6, freq='M')
endog = pd.Series(endog, index=index)
# Base model fitting
mod = sarimax.SARIMAX(endog[:2], order=(1, 0, 0))
res = mod.smooth([0.5, 1.0])
assert_allclose(res.model.endog[:, 0], [1., 0.5])
assert_allclose(res.forecast(1), 0.25)
# First append
res1 = res.append(endog[2:3])
assert_allclose(res1.model.endog[:, 0], [1., 0.5, 1.5])
assert_allclose(res1.forecast(1), 0.75)
# Second append
res2 = res1.append(endog[3:5])
assert_allclose(res2.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2])
assert_allclose(res2.forecast(1), 0.1)
# Third append
res3 = res2.append(endog[5:6])
print(res3.model.endog)
assert_allclose(res3.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2, 0.34])
assert_allclose(res3.forecast(1), 0.17)
|
<commit_before><commit_msg>TST: Add test for multi-step append<commit_after>
|
r"""
Tests for forecasting-related features not tested elsewhere
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_, assert_equal, assert_allclose
from statsmodels.tsa.statespace import sarimax
@pytest.mark.parametrize('data', ['list', 'numpy', 'range', 'date', 'period'])
def test_append_multistep(data):
# Test that `MLEResults.append` works when called repeatedly
endog = [1., 0.5, 1.5, 0.9, 0.2, 0.34]
if data == 'numpy':
endog = np.array(endog)
elif data == 'range':
endog = pd.Series(endog)
elif data == 'date':
index = pd.date_range(start='2000-01-01', periods=6, freq='MS')
endog = pd.Series(endog, index=index)
elif data == 'period':
index = pd.period_range(start='2000-01', periods=6, freq='M')
endog = pd.Series(endog, index=index)
# Base model fitting
mod = sarimax.SARIMAX(endog[:2], order=(1, 0, 0))
res = mod.smooth([0.5, 1.0])
assert_allclose(res.model.endog[:, 0], [1., 0.5])
assert_allclose(res.forecast(1), 0.25)
# First append
res1 = res.append(endog[2:3])
assert_allclose(res1.model.endog[:, 0], [1., 0.5, 1.5])
assert_allclose(res1.forecast(1), 0.75)
# Second append
res2 = res1.append(endog[3:5])
assert_allclose(res2.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2])
assert_allclose(res2.forecast(1), 0.1)
# Third append
res3 = res2.append(endog[5:6])
print(res3.model.endog)
assert_allclose(res3.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2, 0.34])
assert_allclose(res3.forecast(1), 0.17)
|
TST: Add test for multi-step appendr"""
Tests for forecasting-related features not tested elsewhere
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_, assert_equal, assert_allclose
from statsmodels.tsa.statespace import sarimax
@pytest.mark.parametrize('data', ['list', 'numpy', 'range', 'date', 'period'])
def test_append_multistep(data):
# Test that `MLEResults.append` works when called repeatedly
endog = [1., 0.5, 1.5, 0.9, 0.2, 0.34]
if data == 'numpy':
endog = np.array(endog)
elif data == 'range':
endog = pd.Series(endog)
elif data == 'date':
index = pd.date_range(start='2000-01-01', periods=6, freq='MS')
endog = pd.Series(endog, index=index)
elif data == 'period':
index = pd.period_range(start='2000-01', periods=6, freq='M')
endog = pd.Series(endog, index=index)
# Base model fitting
mod = sarimax.SARIMAX(endog[:2], order=(1, 0, 0))
res = mod.smooth([0.5, 1.0])
assert_allclose(res.model.endog[:, 0], [1., 0.5])
assert_allclose(res.forecast(1), 0.25)
# First append
res1 = res.append(endog[2:3])
assert_allclose(res1.model.endog[:, 0], [1., 0.5, 1.5])
assert_allclose(res1.forecast(1), 0.75)
# Second append
res2 = res1.append(endog[3:5])
assert_allclose(res2.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2])
assert_allclose(res2.forecast(1), 0.1)
# Third append
res3 = res2.append(endog[5:6])
print(res3.model.endog)
assert_allclose(res3.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2, 0.34])
assert_allclose(res3.forecast(1), 0.17)
|
<commit_before><commit_msg>TST: Add test for multi-step append<commit_after>r"""
Tests for forecasting-related features not tested elsewhere
"""
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_, assert_equal, assert_allclose
from statsmodels.tsa.statespace import sarimax
@pytest.mark.parametrize('data', ['list', 'numpy', 'range', 'date', 'period'])
def test_append_multistep(data):
# Test that `MLEResults.append` works when called repeatedly
endog = [1., 0.5, 1.5, 0.9, 0.2, 0.34]
if data == 'numpy':
endog = np.array(endog)
elif data == 'range':
endog = pd.Series(endog)
elif data == 'date':
index = pd.date_range(start='2000-01-01', periods=6, freq='MS')
endog = pd.Series(endog, index=index)
elif data == 'period':
index = pd.period_range(start='2000-01', periods=6, freq='M')
endog = pd.Series(endog, index=index)
# Base model fitting
mod = sarimax.SARIMAX(endog[:2], order=(1, 0, 0))
res = mod.smooth([0.5, 1.0])
assert_allclose(res.model.endog[:, 0], [1., 0.5])
assert_allclose(res.forecast(1), 0.25)
# First append
res1 = res.append(endog[2:3])
assert_allclose(res1.model.endog[:, 0], [1., 0.5, 1.5])
assert_allclose(res1.forecast(1), 0.75)
# Second append
res2 = res1.append(endog[3:5])
assert_allclose(res2.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2])
assert_allclose(res2.forecast(1), 0.1)
# Third append
res3 = res2.append(endog[5:6])
print(res3.model.endog)
assert_allclose(res3.model.endog[:, 0], [1., 0.5, 1.5, 0.9, 0.2, 0.34])
assert_allclose(res3.forecast(1), 0.17)
|
|
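The expected values in the append test above follow from the AR(1) recursion: mod.smooth([0.5, 1.0]) pins the autoregressive coefficient at 0.5 (and the innovation variance at 1.0), so each one-step forecast is half the last appended observation. A tiny self-contained check of that arithmetic:
# One-step-ahead AR(1) forecast with coefficient 0.5: y_hat = 0.5 * y_last.
for last_obs, expected in [(0.5, 0.25), (1.5, 0.75), (0.2, 0.1), (0.34, 0.17)]:
    assert abs(0.5 * last_obs - expected) < 1e-12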
8d1b806e6b28bcb80bed64d1449716a218008529
|
test/418-wof-l10n_name.py
|
test/418-wof-l10n_name.py
|
# Hollywood (wof neighbourhood)
# https://whosonfirst.mapzen.com/data/858/260/37/85826037.geojson
assert_has_feature(
16, 11227, 26157, 'places',
{ 'id': 85826037, 'kind': 'neighbourhood',
'source': "whosonfirst.mapzen.com",
'name': 'Hollywood',
'name:ko': '\xed\x97\x90\xeb\xa6\xac\xec\x9a\xb0\xeb\x93\x9c' })
|
Add l10n test for Hollywood neighbourhood
|
Add l10n test for Hollywood neighbourhood
|
Python
|
mit
|
mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource
|
Add l10n test for Hollywood neighbourhood
|
# Hollywood (wof neighbourhood)
# https://whosonfirst.mapzen.com/data/858/260/37/85826037.geojson
assert_has_feature(
16, 11227, 26157, 'places',
{ 'id': 85826037, 'kind': 'neighbourhood',
'source': "whosonfirst.mapzen.com",
'name': 'Hollywood',
'name:ko': '\xed\x97\x90\xeb\xa6\xac\xec\x9a\xb0\xeb\x93\x9c' })
|
<commit_before><commit_msg>Add l10n test for Hollywood neighbourhood<commit_after>
|
# Hollywood (wof neighbourhood)
# https://whosonfirst.mapzen.com/data/858/260/37/85826037.geojson
assert_has_feature(
16, 11227, 26157, 'places',
{ 'id': 85826037, 'kind': 'neighbourhood',
'source': "whosonfirst.mapzen.com",
'name': 'Hollywood',
'name:ko': '\xed\x97\x90\xeb\xa6\xac\xec\x9a\xb0\xeb\x93\x9c' })
|
Add l10n test for Hollywood neighbourhood# Hollywood (wof neighbourhood)
# https://whosonfirst.mapzen.com/data/858/260/37/85826037.geojson
assert_has_feature(
16, 11227, 26157, 'places',
{ 'id': 85826037, 'kind': 'neighbourhood',
'source': "whosonfirst.mapzen.com",
'name': 'Hollywood',
'name:ko': '\xed\x97\x90\xeb\xa6\xac\xec\x9a\xb0\xeb\x93\x9c' })
|
<commit_before><commit_msg>Add l10n test for Hollywood neighbourhood<commit_after># Hollywood (wof neighbourhood)
# https://whosonfirst.mapzen.com/data/858/260/37/85826037.geojson
assert_has_feature(
16, 11227, 26157, 'places',
{ 'id': 85826037, 'kind': 'neighbourhood',
'source': "whosonfirst.mapzen.com",
'name': 'Hollywood',
'name:ko': '\xed\x97\x90\xeb\xa6\xac\xec\x9a\xb0\xeb\x93\x9c' })
|
|
102481bc47e22424cd4d623fd84f1eb317b1d55f
|
zerver/migrations/0398_tsvector_statistics.py
|
zerver/migrations/0398_tsvector_statistics.py
|
# Generated by Django 4.0.6 on 2022-07-18 23:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0397_remove_custom_field_values_for_deleted_options"),
]
operations = [
# The "most common values" list for a tsvector is 10x this
# number, which defaults to 100. Increasing it allows for
# better query planning, at a small cost of size, and
# `ANALYZE` time. It only takes effect after the next
# `ANALYZE`, which we run immediately.
migrations.RunSQL(
sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS 10000",
            reverse_sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS -1",
),
migrations.RunSQL(
sql="ANALYZE zerver_message",
reverse_sql=migrations.RunSQL.noop,
),
]
|
Adjust stats size for tsvector to 10k, from 100.
|
migrations: Adjust stats size for tsvector to 10k, from 100.
PostgreSQL's `default_statistics_target` controls how many
"most common values" ("MCVs") are kept for a column when performing an
`ANALYZE`. For `tsvector` columns, the number of values is actually
10x this number, because each row contains multiple values for the
column[1]. The `default_statistics_target` defaults to 100[2], and
Zulip does not adjust this at the server level.
This translates to 1000 entries in the MCV for tsvectors. For
large tables like `zerver_messages`, a too-small value can cause
mis-planned query plans. The query planner assumes that any
entry *not* found in the MCV list is *half* as likely as the
least-likely value in it. If the table is large, and the MCV list is
too short (as 1000 values is for large deployments), arbitrary
no-in-the-MCV words will often be estimated by the query planner to
occur comparatively quite frequently in the index. Based on this, the
planner will instead choose to scan all messages accessible by the
user, filtering by word in tsvector, instead of using the tsvector
index and filtering by being accessible to the user. This results in
degraded performance for word searching.
However, PostgreSQL allows adjustment of this value on a per-column
basis. Add a migration to adjust the value up to 10k for
`search_tsvector` on `zerver_message`, which results in 100k entries
in that MCV list.
PostgreSQL's documentation says[3]:
> Raising the limit might allow more accurate planner estimates to be
> made, particularly for columns with irregular data distributions, at
> the price of consuming more space in `pg_statistic` and slightly
> more time to compute the estimates.
These costs seem adequate for the utility of having better search.
In the event that the pgroonga backend is in use, these larger index
statistics are simply wasted space and `VACUUM` computational time,
but the costs are likely still reasonable -- even 100k values are
dwarfed by the size of the database needed to generate 100k unique
entries in tsvectors.
[1]: https://github.com/postgres/postgres/blob/REL_14_4/src/backend/utils/adt/array_typanalyze.c#L261-L267
[2]: https://www.postgresql.org/docs/14/runtime-config-query.html#GUC-DEFAULT-STATISTICS-TARGET
[3]: https://www.postgresql.org/docs/14/planner-stats.html#id-1.5.13.5.3
|
Python
|
apache-2.0
|
zulip/zulip,rht/zulip,rht/zulip,andersk/zulip,andersk/zulip,zulip/zulip,andersk/zulip,andersk/zulip,rht/zulip,andersk/zulip,andersk/zulip,rht/zulip,rht/zulip,zulip/zulip,rht/zulip,andersk/zulip,zulip/zulip,rht/zulip,zulip/zulip,zulip/zulip,zulip/zulip
|
migrations: Adjust stats size for tsvector to 10k, from 100.
PostgreSQL's `default_statistics_target` controls how many
"most common values" ("MCVs") are kept for a column when performing an
`ANALYZE`. For `tsvector` columns, the number of values is actually
10x this number, because each row contains multiple values for the
column[1]. The `default_statistics_target` defaults to 100[2], and
Zulip does not adjust this at the server level.
This translates to 1000 entries in the MCV for tsvectors. For
large tables like `zerver_messages`, a too-small value can cause
mis-planned query plans. The query planner assumes that any
entry *not* found in the MCV list is *half* as likely as the
least-likely value in it. If the table is large, and the MCV list is
too short (as 1000 values is for large deployments), arbitrary
not-in-the-MCV words will often be estimated by the query planner to
occur comparatively quite frequently in the index. Based on this, the
planner will instead choose to scan all messages accessible by the
user, filtering by word in tsvector, instead of using the tsvector
index and filtering by being accessible to the user. This results in
degraded performance for word searching.
However, PostgreSQL allows adjustment of this value on a per-column
basis. Add a migration to adjust the value up to 10k for
`search_tsvector` on `zerver_message`, which results in 100k entries
in that MCV list.
PostgreSQL's documentation says[3]:
> Raising the limit might allow more accurate planner estimates to be
> made, particularly for columns with irregular data distributions, at
> the price of consuming more space in `pg_statistic` and slightly
> more time to compute the estimates.
These costs seem adequate for the utility of having better search.
In the event that the pgroonga backend is in use, these larger index
statistics are simply wasted space and `VACUUM` computational time,
but the costs are likely still reasonable -- even 100k values are
dwarfed by the size of the database needed to generate 100k unique
entries in tsvectors.
[1]: https://github.com/postgres/postgres/blob/REL_14_4/src/backend/utils/adt/array_typanalyze.c#L261-L267
[2]: https://www.postgresql.org/docs/14/runtime-config-query.html#GUC-DEFAULT-STATISTICS-TARGET
[3]: https://www.postgresql.org/docs/14/planner-stats.html#id-1.5.13.5.3
|
# Generated by Django 4.0.6 on 2022-07-18 23:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0397_remove_custom_field_values_for_deleted_options"),
]
operations = [
# The "most common values" list for a tsvector is 10x this
# number, which defaults to 100. Increasing it allows for
# better query planning, at a small cost of size, and
# `ANALYZE` time. It only takes effect after the next
# `ANALYZE`, which we run immediately.
migrations.RunSQL(
sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS 10000",
            reverse_sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS -1",
),
migrations.RunSQL(
sql="ANALYZE zerver_message",
reverse_sql=migrations.RunSQL.noop,
),
]
|
<commit_before><commit_msg>migrations: Adjust stats size for tsvector to 10k, from 100.
PostgreSQL's `default_statistics_target` controls how many
"most common values" ("MCVs") are kept for a column when performing an
`ANALYZE`. For `tsvector` columns, the number of values is actually
10x this number, because each row contains multiple values for the
column[1]. The `default_statistics_target` defaults to 100[2], and
Zulip does not adjust this at the server level.
This translates to 1000 entries in the MCV for tsvectors. For
large tables like `zerver_messages`, a too-small value can cause
mis-planned query plans. The query planner assumes that any
entry *not* found in the MCV list is *half* as likely as the
least-likely value in it. If the table is large, and the MCV list is
too short (as 1000 values is for large deployments), arbitrary
not-in-the-MCV words will often be estimated by the query planner to
occur comparatively quite frequently in the index. Based on this, the
planner will instead choose to scan all messages accessible by the
user, filtering by word in tsvector, instead of using the tsvector
index and filtering by being accessible to the user. This results in
degraded performance for word searching.
However, PostgreSQL allows adjustment of this value on a per-column
basis. Add a migration to adjust the value up to 10k for
`search_tsvector` on `zerver_message`, which results in 100k entries
in that MCV list.
PostgreSQL's documentation says[3]:
> Raising the limit might allow more accurate planner estimates to be
> made, particularly for columns with irregular data distributions, at
> the price of consuming more space in `pg_statistic` and slightly
> more time to compute the estimates.
These costs seem adequate for the utility of having better search.
In the event that the pgroonga backend is in use, these larger index
statistics are simply wasted space and `VACUUM` computational time,
but the costs are likely still reasonable -- even 100k values are
dwarfed by the size of the database needed to generate 100k unique
entries in tsvectors.
[1]: https://github.com/postgres/postgres/blob/REL_14_4/src/backend/utils/adt/array_typanalyze.c#L261-L267
[2]: https://www.postgresql.org/docs/14/runtime-config-query.html#GUC-DEFAULT-STATISTICS-TARGET
[3]: https://www.postgresql.org/docs/14/planner-stats.html#id-1.5.13.5.3<commit_after>
|
# Generated by Django 4.0.6 on 2022-07-18 23:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0397_remove_custom_field_values_for_deleted_options"),
]
operations = [
# The "most common values" list for a tsvector is 10x this
# number, which defaults to 100. Increasing it allows for
# better query planning, at a small cost of size, and
# `ANALYZE` time. It only takes effect after the next
# `ANALYZE`, which we run immediately.
migrations.RunSQL(
sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS 10000",
            reverse_sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS -1",
),
migrations.RunSQL(
sql="ANALYZE zerver_message",
reverse_sql=migrations.RunSQL.noop,
),
]
|
migrations: Adjust stats size for tsvector to 10k, from 100.
PostgreSQL's `default_statistics_target` controls how many
"most common values" ("MCVs") are kept for a column when performing an
`ANALYZE`. For `tsvector` columns, the number of values is actually
10x this number, because each row contains multiple values for the
column[1]. The `default_statistics_target` defaults to 100[2], and
Zulip does not adjust this at the server level.
This translates to 1000 entries in the MCV for tsvectors. For
large tables like `zerver_messages`, a too-small value can cause
mis-planned query plans. The query planner assumes that any
entry *not* found in the MCV list is *half* as likely as the
least-likely value in it. If the table is large, and the MCV list is
too short (as 1000 values is for large deployments), arbitrary
not-in-the-MCV words will often be estimated by the query planner to
occur comparatively quite frequently in the index. Based on this, the
planner will instead choose to scan all messages accessible by the
user, filtering by word in tsvector, instead of using the tsvector
index and filtering by being accessible to the user. This results in
degraded performance for word searching.
However, PostgreSQL allows adjustment of this value on a per-column
basis. Add a migration to adjust the value up to 10k for
`search_tsvector` on `zerver_message`, which results in 100k entries
in that MCV list.
PostgreSQL's documentation says[3]:
> Raising the limit might allow more accurate planner estimates to be
> made, particularly for columns with irregular data distributions, at
> the price of consuming more space in `pg_statistic` and slightly
> more time to compute the estimates.
These costs seem adequate for the utility of having better search.
In the event that the pgroonga backend is in use, these larger index
statistics are simply wasted space and `VACUUM` computational time,
but the costs are likely still reasonable -- even 100k values are
dwarfed by the size of the database needed to generate 100k unique
entries in tsvectors.
[1]: https://github.com/postgres/postgres/blob/REL_14_4/src/backend/utils/adt/array_typanalyze.c#L261-L267
[2]: https://www.postgresql.org/docs/14/runtime-config-query.html#GUC-DEFAULT-STATISTICS-TARGET
[3]: https://www.postgresql.org/docs/14/planner-stats.html#id-1.5.13.5.3# Generated by Django 4.0.6 on 2022-07-18 23:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0397_remove_custom_field_values_for_deleted_options"),
]
operations = [
# The "most common values" list for a tsvector is 10x this
# number, which defaults to 100. Increasing it allows for
# better query planning, at a small cost of size, and
# `ANALYZE` time. It only takes effect after the next
# `ANALYZE`, which we run immediately.
migrations.RunSQL(
sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS 10000",
            reverse_sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS -1",
),
migrations.RunSQL(
sql="ANALYZE zerver_message",
reverse_sql=migrations.RunSQL.noop,
),
]
|
<commit_before><commit_msg>migrations: Adjust stats size for tsvector to 10k, from 100.
PostgreSQL's `default_statistics_target` controls how many
"most common values" ("MCVs") are kept for a column when performing an
`ANALYZE`. For `tsvector` columns, the number of values is actually
10x this number, because each row contains multiple values for the
column[1]. The `default_statistics_target` defaults to 100[2], and
Zulip does not adjust this at the server level.
This translates to 1000 entries in the MCV for tsvectors. For
large tables like `zerver_messages`, a too-small value can cause
mis-planned query plans. The query planner assumes that any
entry *not* found in the MCV list is *half* as likely as the
least-likely value in it. If the table is large, and the MCV list is
too short (as 1000 values is for large deployments), arbitrary
not-in-the-MCV words will often be estimated by the query planner to
occur comparatively quite frequently in the index. Based on this, the
planner will instead choose to scan all messages accessible by the
user, filtering by word in tsvector, instead of using the tsvector
index and filtering by being accessible to the user. This results in
degraded performance for word searching.
However, PostgreSQL allows adjustment of this value on a per-column
basis. Add a migration to adjust the value up to 10k for
`search_tsvector` on `zerver_message`, which results in 100k entries
in that MCV list.
PostgreSQL's documentation says[3]:
> Raising the limit might allow more accurate planner estimates to be
> made, particularly for columns with irregular data distributions, at
> the price of consuming more space in `pg_statistic` and slightly
> more time to compute the estimates.
These costs seem adequate for the utility of having better search.
In the event that the pgroonga backend is in use, these larger index
statistics are simply wasted space and `VACUUM` computational time,
but the costs are likely still reasonable -- even 100k values are
dwarfed by the size of the database needed to generate 100k unique
entries in tsvectors.
[1]: https://github.com/postgres/postgres/blob/REL_14_4/src/backend/utils/adt/array_typanalyze.c#L261-L267
[2]: https://www.postgresql.org/docs/14/runtime-config-query.html#GUC-DEFAULT-STATISTICS-TARGET
[3]: https://www.postgresql.org/docs/14/planner-stats.html#id-1.5.13.5.3<commit_after># Generated by Django 4.0.6 on 2022-07-18 23:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("zerver", "0397_remove_custom_field_values_for_deleted_options"),
]
operations = [
# The "most common values" list for a tsvector is 10x this
# number, which defaults to 100. Increasing it allows for
# better query planning, at a small cost of size, and
# `ANALYZE` time. It only takes effect after the next
# `ANALYZE`, which we run immediately.
migrations.RunSQL(
sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS 10000",
            reverse_sql="ALTER TABLE zerver_message ALTER COLUMN search_tsvector SET STATISTICS -1",
),
migrations.RunSQL(
sql="ANALYZE zerver_message",
reverse_sql=migrations.RunSQL.noop,
),
]
|
|
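For anyone verifying the migration above: the per-column override lands in the attstattarget field of PostgreSQL's pg_attribute catalog, which reads -1 (meaning "use default_statistics_target") until SET STATISTICS changes it, at least on the PostgreSQL versions this migration targeted. A hedged sketch of the check from a Django shell:
from django.db import connection
# Inspect the per-column statistics target for zerver_message.search_tsvector.
with connection.cursor() as cursor:
    cursor.execute(
        "SELECT attstattarget FROM pg_attribute "
        "WHERE attrelid = 'zerver_message'::regclass "
        "AND attname = 'search_tsvector'"
    )
    print(cursor.fetchone())  # expected: (10000,) once the migration has run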
0d3ff29b122d01fa34eaf639fde091618e15cd4d
|
survey/migrations/0013_auto_20200609_0748.py
|
survey/migrations/0013_auto_20200609_0748.py
|
# Generated by Django 3.0.7 on 2020-06-09 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("survey", "0012_add_display_by_category")]
operations = [
migrations.AlterField(
model_name="question",
name="type",
field=models.CharField(
choices=[
("text", "text (multiple line)"),
("short-text", "short text (one line)"),
("radio", "radio"),
("select", "select"),
("select-multiple", "Select Multiple"),
("select_image", "Select Image"),
("integer", "integer"),
("float", "float"),
("date", "date"),
],
default="text",
max_length=200,
verbose_name="Type",
),
)
]
|
Create old migrations that were not done before
|
Create old migrations that were not done before
|
Python
|
agpl-3.0
|
Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey
|
Create old migrations that were not done before
|
# Generated by Django 3.0.7 on 2020-06-09 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("survey", "0012_add_display_by_category")]
operations = [
migrations.AlterField(
model_name="question",
name="type",
field=models.CharField(
choices=[
("text", "text (multiple line)"),
("short-text", "short text (one line)"),
("radio", "radio"),
("select", "select"),
("select-multiple", "Select Multiple"),
("select_image", "Select Image"),
("integer", "integer"),
("float", "float"),
("date", "date"),
],
default="text",
max_length=200,
verbose_name="Type",
),
)
]
|
<commit_before><commit_msg>Create old migrations that were not done before<commit_after>
|
# Generated by Django 3.0.7 on 2020-06-09 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("survey", "0012_add_display_by_category")]
operations = [
migrations.AlterField(
model_name="question",
name="type",
field=models.CharField(
choices=[
("text", "text (multiple line)"),
("short-text", "short text (one line)"),
("radio", "radio"),
("select", "select"),
("select-multiple", "Select Multiple"),
("select_image", "Select Image"),
("integer", "integer"),
("float", "float"),
("date", "date"),
],
default="text",
max_length=200,
verbose_name="Type",
),
)
]
|
Create old migrations that were not done before# Generated by Django 3.0.7 on 2020-06-09 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("survey", "0012_add_display_by_category")]
operations = [
migrations.AlterField(
model_name="question",
name="type",
field=models.CharField(
choices=[
("text", "text (multiple line)"),
("short-text", "short text (one line)"),
("radio", "radio"),
("select", "select"),
("select-multiple", "Select Multiple"),
("select_image", "Select Image"),
("integer", "integer"),
("float", "float"),
("date", "date"),
],
default="text",
max_length=200,
verbose_name="Type",
),
)
]
|
<commit_before><commit_msg>Create old migrations that were not done before<commit_after># Generated by Django 3.0.7 on 2020-06-09 07:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("survey", "0012_add_display_by_category")]
operations = [
migrations.AlterField(
model_name="question",
name="type",
field=models.CharField(
choices=[
("text", "text (multiple line)"),
("short-text", "short text (one line)"),
("radio", "radio"),
("select", "select"),
("select-multiple", "Select Multiple"),
("select_image", "Select Image"),
("integer", "integer"),
("float", "float"),
("date", "date"),
],
default="text",
max_length=200,
verbose_name="Type",
),
)
]
|
|
7b6e503924525046932ad671d1a39990492c37ff
|
convert_velocity.py
|
convert_velocity.py
|
from astropy import units as u
import numpy as np
def optical_to_radio(vel, freq, rest):
return freq * vel / rest
def radio_to_optical(vel, freq, rest):
return rest * vel / freq
def test_opt_to_rad():
opt_vel = -2126.1876453900204 * (u.km/u.s)
rad_vel = -2141.374699999949 * (u.km/u.s)
rest = 1.4 * u.GHz
freq = 1.41 * u.GHz
np.testing.assert_almost_equal(opt_vel.value,
radio_to_optical(rad_vel, freq, rest).value)
np.testing.assert_almost_equal(rad_vel.value,
optical_to_radio(opt_vel, freq, rest).value)
|
Convert between optical and radio velocities.
|
Convert between optical and radio velocities.
|
Python
|
mit
|
e-koch/ewky_scripts,e-koch/ewky_scripts
|
Convert between optical and radio velocities.
|
from astropy import units as u
import numpy as np
def optical_to_radio(vel, freq, rest):
return freq * vel / rest
def radio_to_optical(vel, freq, rest):
return rest * vel / freq
def test_opt_to_rad():
opt_vel = -2126.1876453900204 * (u.km/u.s)
rad_vel = -2141.374699999949 * (u.km/u.s)
rest = 1.4 * u.GHz
freq = 1.41 * u.GHz
np.testing.assert_almost_equal(opt_vel.value,
radio_to_optical(rad_vel, freq, rest).value)
np.testing.assert_almost_equal(rad_vel.value,
optical_to_radio(opt_vel, freq, rest).value)
|
<commit_before><commit_msg>Convert between optical and radio velocities.<commit_after>
|
from astropy import units as u
import numpy as np
def optical_to_radio(vel, freq, rest):
return freq * vel / rest
def radio_to_optical(vel, freq, rest):
return rest * vel / freq
def test_opt_to_rad():
opt_vel = -2126.1876453900204 * (u.km/u.s)
rad_vel = -2141.374699999949 * (u.km/u.s)
rest = 1.4 * u.GHz
freq = 1.41 * u.GHz
np.testing.assert_almost_equal(opt_vel.value,
radio_to_optical(rad_vel, freq, rest).value)
np.testing.assert_almost_equal(rad_vel.value,
optical_to_radio(opt_vel, freq, rest).value)
|
Convert between optical and radio velocities.
from astropy import units as u
import numpy as np
def optical_to_radio(vel, freq, rest):
return freq * vel / rest
def radio_to_optical(vel, freq, rest):
return rest * vel / freq
def test_opt_to_rad():
opt_vel = -2126.1876453900204 * (u.km/u.s)
rad_vel = -2141.374699999949 * (u.km/u.s)
rest = 1.4 * u.GHz
freq = 1.41 * u.GHz
np.testing.assert_almost_equal(opt_vel.value,
radio_to_optical(rad_vel, freq, rest).value)
np.testing.assert_almost_equal(rad_vel.value,
optical_to_radio(opt_vel, freq, rest).value)
|
<commit_before><commit_msg>Convert between optical and radio velocities.<commit_after>
from astropy import units as u
import numpy as np
def optical_to_radio(vel, freq, rest):
return freq * vel / rest
def radio_to_optical(vel, freq, rest):
return rest * vel / freq
def test_opt_to_rad():
opt_vel = -2126.1876453900204 * (u.km/u.s)
rad_vel = -2141.374699999949 * (u.km/u.s)
rest = 1.4 * u.GHz
freq = 1.41 * u.GHz
np.testing.assert_almost_equal(opt_vel.value,
radio_to_optical(rad_vel, freq, rest).value)
np.testing.assert_almost_equal(rad_vel.value,
optical_to_radio(opt_vel, freq, rest).value)
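The fixtures above follow from the standard conventions v_opt = c*(f0 - f)/f and v_rad = c*(f0 - f)/f0, which is why both helpers reduce to a bare frequency ratio; a minimal sketch (the light-speed constant is an assumption, chosen to reproduce the test values):
c = 299792.458         # speed of light in km/s
f0, f = 1.4, 1.41      # rest and observed frequency in GHz, as in the test
v_opt = c * (f0 - f) / f     # -2126.1876... km/s
v_rad = c * (f0 - f) / f0    # -2141.3747... km/s
assert abs(v_rad - (f / f0) * v_opt) < 1e-9   # i.e. freq * vel / rest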
|
|
1cb22dfdf9882de7aa6e977c79d80eac7158873e
|
tools/win32build/doall.py
|
tools/win32build/doall.py
|
import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
Add top script to generate binaries from scratch.
|
Add top script to generate binaries from scratch.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@5553 94b884b6-d6fd-0310-90d3-974f1d3f35e1
|
Python
|
bsd-3-clause
|
efiring/numpy-work,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,efiring/numpy-work,efiring/numpy-work,illume/numpy3k,chadnetzer/numpy-gaurdro,jasonmccampbell/numpy-refactor-sprint,illume/numpy3k,illume/numpy3k,jasonmccampbell/numpy-refactor-sprint,efiring/numpy-work,jasonmccampbell/numpy-refactor-sprint,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,Ademan/NumPy-GSoC,Ademan/NumPy-GSoC,teoliphant/numpy-refactor,chadnetzer/numpy-gaurdro,illume/numpy3k,teoliphant/numpy-refactor,chadnetzer/numpy-gaurdro,jasonmccampbell/numpy-refactor-sprint,chadnetzer/numpy-gaurdro
|
Add top script to generate binaries from scratch.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@5553 94b884b6-d6fd-0310-90d3-974f1d3f35e1
|
import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
<commit_before><commit_msg>Add top script to generate binaries from scratch.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@5553 94b884b6-d6fd-0310-90d3-974f1d3f35e1<commit_after>
|
import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
Add top script to generate binaries from scratch.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@5553 94b884b6-d6fd-0310-90d3-974f1d3f35e1import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
<commit_before><commit_msg>Add top script to generate binaries from scratch.
git-svn-id: 77a43f9646713b91fea7788fad5dfbf67e151ece@5553 94b884b6-d6fd-0310-90d3-974f1d3f35e1<commit_after>import subprocess
import os
PYVER = "2.5"
# Bootstrap
subprocess.check_call(['python', 'prepare_bootstrap.py'])
# Build binaries
subprocess.check_call(['python', 'build.py', '-p', PYVER], cwd = 'bootstrap-%s' % PYVER)
# Build installer using nsis
subprocess.check_call(['makensis', 'numpy-superinstaller.nsi'], cwd = 'bootstrap-%s' % PYVER)
|
|
34ec07ef4601a7702f8d606efa0d861b11cca393
|
code/animatefuse.py
|
code/animatefuse.py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from mpl_toolkits.mplot3d import Axes3D
import sys
import numpy.ma as ma
i=0
fuz = 1;
minLength = 500.0
if (len(sys.argv) < 2) :
print("Usage: \n dataplot.py path_to_binfile [clamp value]")
sys.exit()
elif (len(sys.argv) > 2) :
fuz = float(sys.argv[2])
binfile = sys.argv[1]
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
datasize = int(np.sqrt(lengthdata.shape[0]))
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
fig = plt.figure()
img = plt.imshow(masked.filled(0), clim=((1-dev*fuz)*clampVal,(1+dev*fuz)*clampVal), animated=True)
img.set_cmap('hot')
plt.colorbar()
ims = []
for i in range(149) :
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
img = plt.imshow(masked.filled(0),
clim=[(1-np.sign(clampVal)*dev*fuz)*clampVal,(1+np.sign(clampVal)*dev*fuz)*clampVal],
animated=True,
cmap='hot')
# img = plt.imshow(masked.filled(0), clim=(-1,0), animated=True, cmap='hot')
ims.append([img])
ani = anim.ArtistAnimation(fig, ims, interval=100, blit=True, repeat_delay=1000)
ani.save('animation.mp4')
plt.show()
# plt.savefig(sys.argv[1].rsplit(".",1)[0]+'_fig.png')
|
Create an animated figure from a series of data
|
Create an animated figure from a series of data
|
Python
|
mit
|
TAdeJong/plasma-analysis,TAdeJong/plasma-analysis
|
Create an animated figure from a series of data
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from mpl_toolkits.mplot3d import Axes3D
import sys
import numpy.ma as ma
i=0
fuz = 1;
minLength = 500.0
if (len(sys.argv) < 2) :
print("Usage: \n dataplot.py path_to_binfile [clamp value]")
sys.exit()
elif (len(sys.argv) > 2) :
fuz = float(sys.argv[2])
binfile = sys.argv[1]
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
datasize = int(np.sqrt(lengthdata.shape[0]))
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
fig = plt.figure()
img = plt.imshow(masked.filled(0), clim=((1-dev*fuz)*clampVal,(1+dev*fuz)*clampVal), animated=True)
img.set_cmap('hot')
plt.colorbar()
ims = []
for i in range(149) :
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
img = plt.imshow(masked.filled(0),
clim=[(1-np.sign(clampVal)*dev*fuz)*clampVal,(1+np.sign(clampVal)*dev*fuz)*clampVal],
animated=True,
cmap='hot')
# img = plt.imshow(masked.filled(0), clim=(-1,0), animated=True, cmap='hot')
ims.append([img])
ani = anim.ArtistAnimation(fig, ims, interval=100, blit=True, repeat_delay=1000)
ani.save('animation.mp4')
plt.show()
# plt.savefig(sys.argv[1].rsplit(".",1)[0]+'_fig.png')
|
<commit_before><commit_msg>Create an animated figure from a series of data<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from mpl_toolkits.mplot3d import Axes3D
import sys
import numpy.ma as ma
i=0
fuz = 1;
minLength = 500.0
if (len(sys.argv) < 2) :
print("Usage: \n dataplot.py path_to_binfile [clamp value]")
sys.exit()
elif (len(sys.argv) > 2) :
fuz = float(sys.argv[2])
binfile = sys.argv[1]
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
datasize = int(np.sqrt(lengthdata.shape[0]))
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
fig = plt.figure()
img = plt.imshow(masked.filled(0), clim=((1-dev*fuz)*clampVal,(1+dev*fuz)*clampVal), animated=True)
img.set_cmap('hot')
plt.colorbar()
ims = []
for i in range(149) :
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
img = plt.imshow(masked.filled(0),
clim=[(1-np.sign(clampVal)*dev*fuz)*clampVal,(1+np.sign(clampVal)*dev*fuz)*clampVal],
animated=True,
cmap='hot')
# img = plt.imshow(masked.filled(0), clim=(-1,0), animated=True, cmap='hot')
ims.append([img])
ani = anim.ArtistAnimation(fig, ims, interval=100, blit=True, repeat_delay=1000)
ani.save('animation.mp4')
plt.show()
# plt.savefig(sys.argv[1].rsplit(".",1)[0]+'_fig.png')
|
Create an animated figure from a series of dataimport numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from mpl_toolkits.mplot3d import Axes3D
import sys
import numpy.ma as ma
i=0
fuz = 1;
minLength = 500.0
if (len(sys.argv) < 2) :
print("Usage: \n dataplot.py path_to_binfile [clamp value]")
sys.exit()
elif (len(sys.argv) > 2) :
fuz = float(sys.argv[2])
binfile = sys.argv[1]
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
datasize = int(np.sqrt(lengthdata.shape[0]))
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
fig = plt.figure()
img = plt.imshow(masked.filled(0), clim=((1-dev*fuz)*clampVal,(1+dev*fuz)*clampVal), animated=True)
img.set_cmap('hot')
plt.colorbar()
ims = []
for i in range(149) :
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
img = plt.imshow(masked.filled(0),
clim=[(1-np.sign(clampVal)*dev*fuz)*clampVal,(1+np.sign(clampVal)*dev*fuz)*clampVal],
animated=True,
cmap='hot')
# img = plt.imshow(masked.filled(0), clim=(-1,0), animated=True, cmap='hot')
ims.append([img])
ani = anim.ArtistAnimation(fig, ims, interval=100, blit=True, repeat_delay=1000)
ani.save('animation.mp4')
plt.show()
# plt.savefig(sys.argv[1].rsplit(".",1)[0]+'_fig.png')
|
<commit_before><commit_msg>Create an animated figure from a series of data<commit_after>import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as anim
from mpl_toolkits.mplot3d import Axes3D
import sys
import numpy.ma as ma
i=0
fuz = 1;
minLength = 500.0
if (len(sys.argv) < 2) :
print("Usage: \n dataplot.py path_to_binfile [clamp value]")
sys.exit()
elif (len(sys.argv) > 2) :
fuz = float(sys.argv[2])
binfile = sys.argv[1]
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
datasize = int(np.sqrt(lengthdata.shape[0]))
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
fig = plt.figure()
img = plt.imshow(masked.filled(0), clim=((1-dev*fuz)*clampVal,(1+dev*fuz)*clampVal), animated=True)
img.set_cmap('hot')
plt.colorbar()
ims = []
for i in range(149) :
lengthdata=np.fromfile(binfile+str(i)+'_lengths.bin', dtype="float32")
winddata=np.fromfile(binfile+str(i)+'_windings.bin', dtype="float32")
lengthdata=lengthdata.reshape(datasize, datasize)
winddata=winddata.reshape(datasize, datasize)
masked= ma.masked_where(lengthdata<minLength,winddata)
clampVal = np.mean(masked)
dev = ma.std(masked)
img = plt.imshow(masked.filled(0),
clim=[(1-np.sign(clampVal)*dev*fuz)*clampVal,(1+np.sign(clampVal)*dev*fuz)*clampVal],
animated=True,
cmap='hot')
# img = plt.imshow(masked.filled(0), clim=(-1,0), animated=True, cmap='hot')
ims.append([img])
ani = anim.ArtistAnimation(fig, ims, interval=100, blit=True, repeat_delay=1000)
ani.save('animation.mp4')
plt.show()
# plt.savefig(sys.argv[1].rsplit(".",1)[0]+'_fig.png')
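A hedged variant of the save call above: pin the writer and frame rate explicitly rather than letting ani.save pick a default encoder (assumes ffmpeg is installed; the fps and bitrate values are illustrative):
import matplotlib.animation as anim
writer = anim.FFMpegWriter(fps=10, bitrate=1800)  # 10 fps matches the 100 ms frame interval
ani.save('animation.mp4', writer=writer)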
|
|
a37521ef7b6ce40b6a182c6ff42c62f30fb12537
|
tests/changes/api/test_build_details.py
|
tests/changes/api/test_build_details.py
|
from datetime import datetime
from changes.constants import Status
from changes.testutils import APITestCase
class BuildDetailsTest(APITestCase):
def test_simple(self):
previous_build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 23),
status=Status.finished)
build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 24))
job1 = self.create_job(build)
job2 = self.create_job(build)
path = '/api/0/builds/{0}/'.format(build.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['build']['id'] == build.id.hex
assert data['project']['id'] == self.project.id.hex
assert len(data['jobs']) == 2
assert data['jobs'][0]['id'] == job1.id.hex
assert data['jobs'][1]['id'] == job2.id.hex
assert len(data['previousRuns']) == 1
assert data['previousRuns'][0]['id'] == previous_build.id.hex
assert data['seenBy'] == []
assert data['testFailures']['total'] == 0
assert data['testFailures']['testGroups'] == []
|
Add simple test for build details
|
Add simple test for build details
|
Python
|
apache-2.0
|
bowlofstew/changes,wfxiang08/changes,wfxiang08/changes,dropbox/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,bowlofstew/changes,wfxiang08/changes,dropbox/changes,dropbox/changes,bowlofstew/changes
|
Add simple test for build details
|
from datetime import datetime
from changes.constants import Status
from changes.testutils import APITestCase
class BuildDetailsTest(APITestCase):
def test_simple(self):
previous_build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 23),
status=Status.finished)
build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 24))
job1 = self.create_job(build)
job2 = self.create_job(build)
path = '/api/0/builds/{0}/'.format(build.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['build']['id'] == build.id.hex
assert data['project']['id'] == self.project.id.hex
assert len(data['jobs']) == 2
assert data['jobs'][0]['id'] == job1.id.hex
assert data['jobs'][1]['id'] == job2.id.hex
assert len(data['previousRuns']) == 1
assert data['previousRuns'][0]['id'] == previous_build.id.hex
assert data['seenBy'] == []
assert data['testFailures']['total'] == 0
assert data['testFailures']['testGroups'] == []
|
<commit_before><commit_msg>Add simple test for build details<commit_after>
|
from datetime import datetime
from changes.constants import Status
from changes.testutils import APITestCase
class BuildDetailsTest(APITestCase):
def test_simple(self):
previous_build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 23),
status=Status.finished)
build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 24))
job1 = self.create_job(build)
job2 = self.create_job(build)
path = '/api/0/builds/{0}/'.format(build.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['build']['id'] == build.id.hex
assert data['project']['id'] == self.project.id.hex
assert len(data['jobs']) == 2
assert data['jobs'][0]['id'] == job1.id.hex
assert data['jobs'][1]['id'] == job2.id.hex
assert len(data['previousRuns']) == 1
assert data['previousRuns'][0]['id'] == previous_build.id.hex
assert data['seenBy'] == []
assert data['testFailures']['total'] == 0
assert data['testFailures']['testGroups'] == []
|
Add simple test for build detailsfrom datetime import datetime
from changes.constants import Status
from changes.testutils import APITestCase
class BuildDetailsTest(APITestCase):
def test_simple(self):
previous_build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 23),
status=Status.finished)
build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 24))
job1 = self.create_job(build)
job2 = self.create_job(build)
path = '/api/0/builds/{0}/'.format(build.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['build']['id'] == build.id.hex
assert data['project']['id'] == self.project.id.hex
assert len(data['jobs']) == 2
assert data['jobs'][0]['id'] == job1.id.hex
assert data['jobs'][1]['id'] == job2.id.hex
assert len(data['previousRuns']) == 1
assert data['previousRuns'][0]['id'] == previous_build.id.hex
assert data['seenBy'] == []
assert data['testFailures']['total'] == 0
assert data['testFailures']['testGroups'] == []
|
<commit_before><commit_msg>Add simple test for build details<commit_after>from datetime import datetime
from changes.constants import Status
from changes.testutils import APITestCase
class BuildDetailsTest(APITestCase):
def test_simple(self):
previous_build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 23),
status=Status.finished)
build = self.create_build(
self.project, date_created=datetime(2013, 9, 19, 22, 15, 24))
job1 = self.create_job(build)
job2 = self.create_job(build)
path = '/api/0/builds/{0}/'.format(build.id.hex)
resp = self.client.get(path)
assert resp.status_code == 200
data = self.unserialize(resp)
assert data['build']['id'] == build.id.hex
assert data['project']['id'] == self.project.id.hex
assert len(data['jobs']) == 2
assert data['jobs'][0]['id'] == job1.id.hex
assert data['jobs'][1]['id'] == job2.id.hex
assert len(data['previousRuns']) == 1
assert data['previousRuns'][0]['id'] == previous_build.id.hex
assert data['seenBy'] == []
assert data['testFailures']['total'] == 0
assert data['testFailures']['testGroups'] == []
|
|
c95ff949ea70e5466cf6bd0775f1033d0ad6caab
|
pysyte/types/times.py
|
pysyte/types/times.py
|
"""Handle datetime for pysyte's types"""
import datetime
epoch = datetime.datetime.fromtimestamp(0)
def taken(diff, days=True):
seconds = diff.seconds * 1_000 + diff.microseconds
if days:
return seconds
result = abs(diff.days * 24 * 60 * 60 + seconds)
return result
|
Add method to find time taken
|
Add method to find time taken
|
Python
|
mit
|
jalanb/dotsite
|
Add method to find time taken
|
"""Handle datetime for pysyte's types"""
import datetime
epoch = datetime.datetime.fromtimestamp(0)
def taken(diff, days=True):
seconds = diff.seconds * 1_000 + diff.microseconds
if days:
return seconds
result = abs(diff.days * 24 * 60 * 60 + seconds)
return result
|
<commit_before><commit_msg>Add method to find time taken<commit_after>
|
"""Handle datetime for pysyte's types"""
import datetime
epoch = datetime.datetime.fromtimestamp(0)
def taken(diff, days=True):
seconds = diff.seconds * 1_000 + diff.microseconds
if days:
return seconds
result = abs(diff.days * 24 * 60 * 60 + seconds)
return result
|
Add method to find time taken"""Handle datetime for pysyte's types"""
import datetime
epoch = datetime.datetime.fromtimestamp(0)
def taken(diff, days=True):
seconds = diff.seconds * 1_000 + diff.microseconds
if days:
return seconds
result = abs(diff.days * 24 * 60 * 60 + seconds)
return result
|
<commit_before><commit_msg>Add method to find time taken<commit_after>"""Handle datetime for pysyte's types"""
import datetime
epoch = datetime.datetime.fromtimestamp(0)
def taken(diff, days=True):
seconds = diff.seconds * 1_000 + diff.microseconds
if days:
return seconds
result = abs(diff.days * 24 * 60 * 60 + seconds)
return result
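A minimal alternative sketch, assuming the intent is elapsed seconds (an assumption: the helper above multiplies seconds by 1_000 yet adds raw microseconds, so its unit is ambiguous):
import datetime
def taken_seconds(diff: datetime.timedelta) -> float:
    # total_seconds() folds days, seconds and microseconds into one
    # signed float; abs() mirrors the abs() used above.
    return abs(diff.total_seconds())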
|
|
ca61d3c9ca598dfb5bd685b6dcb81a6f933c2e36
|
scripts/dist-standalone-collector.py
|
scripts/dist-standalone-collector.py
|
#!/usr/bin/python
import os
import shutil
import glob
distPath = "./dist/standalone-collector"
if not os.path.exists(distPath):
os.makedirs(distPath)
if not os.path.exists(distPath + "/simplejson"):
os.mkdir(distPath + "/simplejson")
shutil.copy("./docs/README-standalone-collector.txt", distPath + "/README.txt")
toplevel = ["standalone_collector.py", "config.py", "uuid.py"]
for name in toplevel:
shutil.copy("./webapp/socorro/lib/" + name, distPath + "/" + name)
simplejsonFiles = glob.glob("./webapp/socorro/lib/simplejson/*.py")
for name in simplejsonFiles:
shutil.copy(name, distPath + "/simplejson/" + os.path.basename(name))
|
Add distribution script for standalone collector
|
Add distribution script for standalone collector
git-svn-id: 0429bb2eb6f71f8cd87fe9410a915008a087abca@34 efcc4bdc-162a-0410-a8af-5b4b46c5376d
|
Python
|
mpl-2.0
|
luser/socorro,Tchanders/socorro,luser/socorro,luser/socorro,pcabido/socorro,Tchanders/socorro,twobraids/socorro,cliqz/socorro,KaiRo-at/socorro,m8ttyB/socorro,Tchanders/socorro,AdrianGaudebert/socorro,pcabido/socorro,rhelmer/socorro,adngdb/socorro,luser/socorro,KaiRo-at/socorro,mozilla/socorro,m8ttyB/socorro,pcabido/socorro,linearregression/socorro,linearregression/socorro,lonnen/socorro,adngdb/socorro,linearregression/socorro,Serg09/socorro,spthaolt/socorro,Tayamarn/socorro,Tayamarn/socorro,twobraids/socorro,Serg09/socorro,cliqz/socorro,luser/socorro,mozilla/socorro,Serg09/socorro,rhelmer/socorro,cliqz/socorro,AdrianGaudebert/socorro,yglazko/socorro,m8ttyB/socorro,adngdb/socorro,twobraids/socorro,AdrianGaudebert/socorro,spthaolt/socorro,Tchanders/socorro,adngdb/socorro,AdrianGaudebert/socorro,twobraids/socorro,KaiRo-at/socorro,yglazko/socorro,Tayamarn/socorro,yglazko/socorro,KaiRo-at/socorro,Tchanders/socorro,linearregression/socorro,twobraids/socorro,Tayamarn/socorro,mozilla/socorro,pcabido/socorro,Serg09/socorro,Tayamarn/socorro,yglazko/socorro,bsmedberg/socorro,bsmedberg/socorro,mozilla/socorro,spthaolt/socorro,adngdb/socorro,mozilla/socorro,rhelmer/socorro,lonnen/socorro,Serg09/socorro,mozilla/socorro,Tchanders/socorro,lonnen/socorro,KaiRo-at/socorro,linearregression/socorro,linearregression/socorro,cliqz/socorro,adngdb/socorro,spthaolt/socorro,twobraids/socorro,rhelmer/socorro,KaiRo-at/socorro,Serg09/socorro,pcabido/socorro,cliqz/socorro,yglazko/socorro,m8ttyB/socorro,rhelmer/socorro,m8ttyB/socorro,AdrianGaudebert/socorro,yglazko/socorro,bsmedberg/socorro,AdrianGaudebert/socorro,bsmedberg/socorro,spthaolt/socorro,rhelmer/socorro,cliqz/socorro,luser/socorro,pcabido/socorro,lonnen/socorro,m8ttyB/socorro,Tayamarn/socorro,spthaolt/socorro,bsmedberg/socorro
|
Add distribution script for standalone collector
git-svn-id: 0429bb2eb6f71f8cd87fe9410a915008a087abca@34 efcc4bdc-162a-0410-a8af-5b4b46c5376d
|
#!/usr/bin/python
import os
import shutil
import glob
distPath = "./dist/standalone-collector"
if not os.path.exists(distPath):
os.makedirs(distPath)
if not os.path.exists(distPath + "/simplejson"):
os.mkdir(distPath + "/simplejson")
shutil.copy("./docs/README-standalone-collector.txt", distPath + "/README.txt")
toplevel = ["standalone_collector.py", "config.py", "uuid.py"]
for name in toplevel:
shutil.copy("./webapp/socorro/lib/" + name, distPath + "/" + name)
simplejsonFiles = glob.glob("./webapp/socorro/lib/simplejson/*.py")
for name in simplejsonFiles:
shutil.copy(name, distPath + "/simplejson/" + os.path.basename(name))
|
<commit_before><commit_msg>Add distribution script for standalone collector
git-svn-id: 0429bb2eb6f71f8cd87fe9410a915008a087abca@34 efcc4bdc-162a-0410-a8af-5b4b46c5376d<commit_after>
|
#!/usr/bin/python
import os
import shutil
import glob
distPath = "./dist/standalone-collector"
if not os.path.exists(distPath):
os.makedirs(distPath)
if not os.path.exists(distPath + "/simplejson"):
os.mkdir(distPath + "/simplejson")
shutil.copy("./docs/README-standalone-collector.txt", distPath + "/README.txt")
toplevel = ["standalone_collector.py", "config.py", "uuid.py"]
for name in toplevel:
shutil.copy("./webapp/socorro/lib/" + name, distPath + "/" + name)
simplejsonFiles = glob.glob("./webapp/socorro/lib/simplejson/*.py")
for name in simplejsonFiles:
shutil.copy(name, distPath + "/simplejson/" + os.path.basename(name))
|
Add distribution script for standalone collector
git-svn-id: 0429bb2eb6f71f8cd87fe9410a915008a087abca@34 efcc4bdc-162a-0410-a8af-5b4b46c5376d#!/usr/bin/python
import os
import shutil
import glob
distPath = "./dist/standalone-collector"
if not os.path.exists(distPath):
os.makedirs(distPath)
if not os.path.exists(distPath + "/simplejson"):
os.mkdir(distPath + "/simplejson")
shutil.copy("./docs/README-standalone-collector.txt", distPath + "/README.txt")
toplevel = ["standalone_collector.py", "config.py", "uuid.py"]
for name in toplevel:
shutil.copy("./webapp/socorro/lib/" + name, distPath + "/" + name)
simplejsonFiles = glob.glob("./webapp/socorro/lib/simplejson/*.py")
for name in simplejsonFiles:
shutil.copy(name, distPath + "/simplejson/" + os.path.basename(name))
|
<commit_before><commit_msg>Add distribution script for standalone collector
git-svn-id: 0429bb2eb6f71f8cd87fe9410a915008a087abca@34 efcc4bdc-162a-0410-a8af-5b4b46c5376d<commit_after>#!/usr/bin/python
import os
import shutil
import glob
distPath = "./dist/standalone-collector"
if not os.path.exists(distPath):
os.makedirs(distPath)
if not os.path.exists(distPath + "/simplejson"):
os.mkdir(distPath + "/simplejson")
shutil.copy("./docs/README-standalone-collector.txt", distPath + "/README.txt")
toplevel = ["standalone_collector.py", "config.py", "uuid.py"]
for name in toplevel:
shutil.copy("./webapp/socorro/lib/" + name, distPath + "/" + name)
simplejsonFiles = glob.glob("./webapp/socorro/lib/simplejson/*.py")
for name in simplejsonFiles:
shutil.copy(name, distPath + "/simplejson/" + os.path.basename(name))
|
|
8cfc4d0fda1bbc3301953cd7bb03073042f7c38b
|
telethon_tests/higher_level_test.py
|
telethon_tests/higher_level_test.py
|
import unittest
import os
from io import BytesIO
from random import randint
from hashlib import sha256
from telethon import TelegramClient
# Fill in your api_id and api_hash when running the tests
# and REMOVE THEM once you've finished testing them.
api_id = None
api_hash = None
if not api_id or not api_hash:
raise ValueError('Please fill in both your api_id and api_hash.')
class HigherLevelTests(unittest.TestCase):
@staticmethod
def test_cdn_download():
client = TelegramClient(None, api_id, api_hash)
client.session.server_address = '149.154.167.40'
assert client.connect()
try:
phone = '+999662' + str(randint(0, 9999)).zfill(4)
client.send_code_request(phone)
client.sign_up(phone, '22222', 'Test', 'DC')
me = client.get_me()
data = os.urandom(2 ** 17)
client.send_file(
me, data,
progress_callback=lambda c, t:
print('test_cdn_download:uploading {:.2%}...'.format(c/t))
)
msg = client.get_message_history(me)[1][0]
out = BytesIO()
client.download_media(msg, out)
assert sha256(data).digest() == sha256(out.getvalue()).digest()
client.log_out()
finally:
client.disconnect()
|
Add a unit test for CDN-downloads
|
Add a unit test for CDN-downloads
|
Python
|
mit
|
LonamiWebs/Telethon,andr-04/Telethon,LonamiWebs/Telethon,expectocode/Telethon,LonamiWebs/Telethon,LonamiWebs/Telethon
|
Add a unit test for CDN-downloads
|
import unittest
import os
from io import BytesIO
from random import randint
from hashlib import sha256
from telethon import TelegramClient
# Fill in your api_id and api_hash when running the tests
# and REMOVE THEM once you've finished testing them.
api_id = None
api_hash = None
if not api_id or not api_hash:
raise ValueError('Please fill in both your api_id and api_hash.')
class HigherLevelTests(unittest.TestCase):
@staticmethod
def test_cdn_download():
client = TelegramClient(None, api_id, api_hash)
client.session.server_address = '149.154.167.40'
assert client.connect()
try:
phone = '+999662' + str(randint(0, 9999)).zfill(4)
client.send_code_request(phone)
client.sign_up(phone, '22222', 'Test', 'DC')
me = client.get_me()
data = os.urandom(2 ** 17)
client.send_file(
me, data,
progress_callback=lambda c, t:
print('test_cdn_download:uploading {:.2%}...'.format(c/t))
)
msg = client.get_message_history(me)[1][0]
out = BytesIO()
client.download_media(msg, out)
assert sha256(data).digest() == sha256(out.getvalue()).digest()
client.log_out()
finally:
client.disconnect()
|
<commit_before><commit_msg>Add a unit test for CDN-downloads<commit_after>
|
import unittest
import os
from io import BytesIO
from random import randint
from hashlib import sha256
from telethon import TelegramClient
# Fill in your api_id and api_hash when running the tests
# and REMOVE THEM once you've finished testing them.
api_id = None
api_hash = None
if not api_id or not api_hash:
raise ValueError('Please fill in both your api_id and api_hash.')
class HigherLevelTests(unittest.TestCase):
@staticmethod
def test_cdn_download():
client = TelegramClient(None, api_id, api_hash)
client.session.server_address = '149.154.167.40'
assert client.connect()
try:
phone = '+999662' + str(randint(0, 9999)).zfill(4)
client.send_code_request(phone)
client.sign_up(phone, '22222', 'Test', 'DC')
me = client.get_me()
data = os.urandom(2 ** 17)
client.send_file(
me, data,
progress_callback=lambda c, t:
print('test_cdn_download:uploading {:.2%}...'.format(c/t))
)
msg = client.get_message_history(me)[1][0]
out = BytesIO()
client.download_media(msg, out)
assert sha256(data).digest() == sha256(out.getvalue()).digest()
client.log_out()
finally:
client.disconnect()
|
Add a unit test for CDN-downloadsimport unittest
import os
from io import BytesIO
from random import randint
from hashlib import sha256
from telethon import TelegramClient
# Fill in your api_id and api_hash when running the tests
# and REMOVE THEM once you've finished testing them.
api_id = None
api_hash = None
if not api_id or not api_hash:
raise ValueError('Please fill in both your api_id and api_hash.')
class HigherLevelTests(unittest.TestCase):
@staticmethod
def test_cdn_download():
client = TelegramClient(None, api_id, api_hash)
client.session.server_address = '149.154.167.40'
assert client.connect()
try:
phone = '+999662' + str(randint(0, 9999)).zfill(4)
client.send_code_request(phone)
client.sign_up(phone, '22222', 'Test', 'DC')
me = client.get_me()
data = os.urandom(2 ** 17)
client.send_file(
me, data,
progress_callback=lambda c, t:
print('test_cdn_download:uploading {:.2%}...'.format(c/t))
)
msg = client.get_message_history(me)[1][0]
out = BytesIO()
client.download_media(msg, out)
assert sha256(data).digest() == sha256(out.getvalue()).digest()
client.log_out()
finally:
client.disconnect()
|
<commit_before><commit_msg>Add a unit test for CDN-downloads<commit_after>import unittest
import os
from io import BytesIO
from random import randint
from hashlib import sha256
from telethon import TelegramClient
# Fill in your api_id and api_hash when running the tests
# and REMOVE THEM once you've finished testing them.
api_id = None
api_hash = None
if not api_id or not api_hash:
raise ValueError('Please fill in both your api_id and api_hash.')
class HigherLevelTests(unittest.TestCase):
@staticmethod
def test_cdn_download():
client = TelegramClient(None, api_id, api_hash)
client.session.server_address = '149.154.167.40'
assert client.connect()
try:
phone = '+999662' + str(randint(0, 9999)).zfill(4)
client.send_code_request(phone)
client.sign_up(phone, '22222', 'Test', 'DC')
me = client.get_me()
data = os.urandom(2 ** 17)
client.send_file(
me, data,
progress_callback=lambda c, t:
print('test_cdn_download:uploading {:.2%}...'.format(c/t))
)
msg = client.get_message_history(me)[1][0]
out = BytesIO()
client.download_media(msg, out)
assert sha256(data).digest() == sha256(out.getvalue()).digest()
client.log_out()
finally:
client.disconnect()
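For context, a sketch of the test-account convention the test leans on, inferred from the '+999662' prefix and the '22222' code (an assumption: numbers of the form +99966XYYYY map to Telegram's test DC X, whose login code is the digit X repeated five times):
from random import randint
dc = 2                                               # assumed test data center
phone = '+99966{}{}'.format(dc, str(randint(0, 9999)).zfill(4))
code = str(dc) * 5                                   # '22222'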
|
|
fa4cdc8cf87d45b2f3fe38d3a99ad9ac70f70a17
|
util/check_pireps_against_aviationwx.py
|
util/check_pireps_against_aviationwx.py
|
"""
Compare our PIREPs data against what is at aviation wx JSON service
"""
import json
import urllib2
import datetime
import psycopg2
pgconn = psycopg2.connect(database='postgis', user='nobody', host='iemdb')
cursor = pgconn.cursor()
avwx = urllib2.urlopen("http://aviationweather.gov/gis/scripts/AirepJSON.php")
avwx = json.loads(avwx.read())
mine = {}
cursor.execute("""SELECT valid at time zone 'UTC', ST_x(geom::geometry),
ST_y(geom::geometry), report
from pireps WHERE valid > '2015-01-21 07:00'""")
for row in cursor:
key = "/".join(row[3].replace(" ", "").split("/")[:3])
mine[key] = row
floor = None
for feature in avwx['features']:
if feature['properties']['airepType'] != 'PIREP':
continue
ts = datetime.datetime.strptime(feature['properties']['obsTime'],
'%Y-%m-%dT%H:%M:%SZ')
if floor is None:
floor = ts
lon, lat = feature['geometry']['coordinates']
key = "/".join(feature['properties']['rawOb'].replace(" ", "").split("/")[:3])
if not mine.has_key(key):
print 'IEM MISS', ts, feature['properties']['rawOb']
else:
error = ((mine[key][1]-lon)**2 + (mine[key][2]-lat)**2)**.5
if error > 0.1:
location = "/".join(feature['properties']['rawOb'].split("/")[:2])
print "ERROR: %5.2f IEM: %8.3f %6.3f AWX: %7.2f %5.2f %s" % (error,
mine[key][1], mine[key][2],
lon, lat, location)
del(mine[key])
for report in mine:
if mine[report][0] < floor:
continue
print 'AVWX MISS', mine[report][0], mine[report][3]
|
Add diagnostic comparing aviationweather.gov to IEM
|
Add diagnostic comparing aviationweather.gov to IEM
|
Python
|
mit
|
akrherz/pyWWA,akrherz/pyWWA
|
Add diagnostic comparing aviationweather.gov to IEM
|
"""
Compare our PIREPs data against what is at aviation wx JSON service
"""
import json
import urllib2
import datetime
import psycopg2
pgconn = psycopg2.connect(database='postgis', user='nobody', host='iemdb')
cursor = pgconn.cursor()
avwx = urllib2.urlopen("http://aviationweather.gov/gis/scripts/AirepJSON.php")
avwx = json.loads(avwx.read())
mine = {}
cursor.execute("""SELECT valid at time zone 'UTC', ST_x(geom::geometry),
ST_y(geom::geometry), report
from pireps WHERE valid > '2015-01-21 07:00'""")
for row in cursor:
key = "/".join(row[3].replace(" ", "").split("/")[:3])
mine[key] = row
floor = None
for feature in avwx['features']:
if feature['properties']['airepType'] != 'PIREP':
continue
ts = datetime.datetime.strptime(feature['properties']['obsTime'],
'%Y-%m-%dT%H:%M:%SZ')
if floor is None:
floor = ts
lon, lat = feature['geometry']['coordinates']
key = "/".join(feature['properties']['rawOb'].replace(" ", "").split("/")[:3])
if not mine.has_key(key):
print 'IEM MISS', ts, feature['properties']['rawOb']
else:
error = ((mine[key][1]-lon)**2 + (mine[key][2]-lat)**2)**.5
if error > 0.1:
location = "/".join(feature['properties']['rawOb'].split("/")[:2])
print "ERROR: %5.2f IEM: %8.3f %6.3f AWX: %7.2f %5.2f %s" % (error,
mine[key][1], mine[key][2],
lon, lat, location)
del(mine[key])
for report in mine:
if mine[report][0] < floor:
continue
print 'AVWX MISS', mine[report][0], mine[report][3]
|
<commit_before><commit_msg>Add diagnostic comparing aviationweather.gov to IEM <commit_after>
|
"""
Compare our PIREPs data against what is at aviation wx JSON service
"""
import json
import urllib2
import datetime
import psycopg2
pgconn = psycopg2.connect(database='postgis', user='nobody', host='iemdb')
cursor = pgconn.cursor()
avwx = urllib2.urlopen("http://aviationweather.gov/gis/scripts/AirepJSON.php")
avwx = json.loads(avwx.read())
mine = {}
cursor.execute("""SELECT valid at time zone 'UTC', ST_x(geom::geometry),
ST_y(geom::geometry), report
from pireps WHERE valid > '2015-01-21 07:00'""")
for row in cursor:
key = "/".join(row[3].replace(" ", "").split("/")[:3])
mine[key] = row
floor = None
for feature in avwx['features']:
if feature['properties']['airepType'] != 'PIREP':
continue
ts = datetime.datetime.strptime(feature['properties']['obsTime'],
'%Y-%m-%dT%H:%M:%SZ')
if floor is None:
floor = ts
lon, lat = feature['geometry']['coordinates']
key = "/".join(feature['properties']['rawOb'].replace(" ", "").split("/")[:3])
if not mine.has_key(key):
print 'IEM MISS', ts, feature['properties']['rawOb']
else:
error = ((mine[key][1]-lon)**2 + (mine[key][2]-lat)**2)**.5
if error > 0.1:
location = "/".join(feature['properties']['rawOb'].split("/")[:2])
print "ERROR: %5.2f IEM: %8.3f %6.3f AWX: %7.2f %5.2f %s" % (error,
mine[key][1], mine[key][2],
lon, lat, location)
del(mine[key])
for report in mine:
if mine[report][0] < floor:
continue
print 'AVWX MISS', mine[report][0], mine[report][3]
|
Add diagnostic comparing aviationweather.gov to IEM """
Compare our PIREPs data against what is at aviation wx JSON service
"""
import json
import urllib2
import datetime
import psycopg2
pgconn = psycopg2.connect(database='postgis', user='nobody', host='iemdb')
cursor = pgconn.cursor()
avwx = urllib2.urlopen("http://aviationweather.gov/gis/scripts/AirepJSON.php")
avwx = json.loads(avwx.read())
mine = {}
cursor.execute("""SELECT valid at time zone 'UTC', ST_x(geom::geometry),
ST_y(geom::geometry), report
from pireps WHERE valid > '2015-01-21 07:00'""")
for row in cursor:
key = "/".join(row[3].replace(" ", "").split("/")[:3])
mine[key] = row
floor = None
for feature in avwx['features']:
if feature['properties']['airepType'] != 'PIREP':
continue
ts = datetime.datetime.strptime(feature['properties']['obsTime'],
'%Y-%m-%dT%H:%M:%SZ')
if floor is None:
floor = ts
lon, lat = feature['geometry']['coordinates']
key = "/".join(feature['properties']['rawOb'].replace(" ", "").split("/")[:3])
if not mine.has_key(key):
print 'IEM MISS', ts, feature['properties']['rawOb']
else:
error = ((mine[key][1]-lon)**2 + (mine[key][2]-lat)**2)**.5
if error > 0.1:
location = "/".join(feature['properties']['rawOb'].split("/")[:2])
print "ERROR: %5.2f IEM: %8.3f %6.3f AWX: %7.2f %5.2f %s" % (error,
mine[key][1], mine[key][2],
lon, lat, location)
del(mine[key])
for report in mine:
if mine[report][0] < floor:
continue
print 'AVWX MISS', mine[report][0], mine[report][3]
|
<commit_before><commit_msg>Add diagnostic comparing aviationweather.gov to IEM <commit_after>"""
Compare our PIREPs data against what is at aviation wx JSON service
"""
import json
import urllib2
import datetime
import psycopg2
pgconn = psycopg2.connect(database='postgis', user='nobody', host='iemdb')
cursor = pgconn.cursor()
avwx = urllib2.urlopen("http://aviationweather.gov/gis/scripts/AirepJSON.php")
avwx = json.loads(avwx.read())
mine = {}
cursor.execute("""SELECT valid at time zone 'UTC', ST_x(geom::geometry),
ST_y(geom::geometry), report
from pireps WHERE valid > '2015-01-21 07:00'""")
for row in cursor:
key = "/".join(row[3].replace(" ", "").split("/")[:3])
mine[key] = row
floor = None
for feature in avwx['features']:
if feature['properties']['airepType'] != 'PIREP':
continue
ts = datetime.datetime.strptime(feature['properties']['obsTime'],
'%Y-%m-%dT%H:%M:%SZ')
if floor is None:
floor = ts
lon, lat = feature['geometry']['coordinates']
key = "/".join(feature['properties']['rawOb'].replace(" ", "").split("/")[:3])
if not mine.has_key(key):
print 'IEM MISS', ts, feature['properties']['rawOb']
else:
error = ((mine[key][1]-lon)**2 + (mine[key][2]-lat)**2)**.5
if error > 0.1:
location = "/".join(feature['properties']['rawOb'].split("/")[:2])
print "ERROR: %5.2f IEM: %8.3f %6.3f AWX: %7.2f %5.2f %s" % (error,
mine[key][1], mine[key][2],
lon, lat, location)
del(mine[key])
for report in mine:
if mine[report][0] < floor:
continue
print 'AVWX MISS', mine[report][0], mine[report][3]
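A hedged refinement of the 0.1-degree check above, which treats degrees of longitude and latitude as equal distances; a great-circle distance in kilometres avoids that (sketch only, compatible with the script's Python 2):
import math
def haversine_km(lon1, lat1, lon2, lat2):
    # Standard haversine formula with a 6371 km mean Earth radius.
    lon1, lat1, lon2, lat2 = map(math.radians, (lon1, lat1, lon2, lat2))
    a = (math.sin((lat2 - lat1) / 2.0) ** 2
         + math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2.0) ** 2)
    return 2 * 6371.0 * math.asin(math.sqrt(a))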
|
|
b3d863bd43ee53ed4fee32c4ebcd4e935852a752
|
pychat/pychat.py
|
pychat/pychat.py
|
"""
A simple chat server using TCP/IP and the publisher-subscriber pattern.
@author Steven Briggs
@version 2015.05.25
"""
import sys
import argparse
import socket
DEFAULT_PORT = 8080
DEFAULT_BACKLOG = 5
def get_args():
"""
Parse and return the arguments given at the command line
@returns the command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default=DEFAULT_PORT, type=int, help="port number for the server")
return parser.parse_args()
def main():
args = get_args()
port = args.port
# Prepare the listening socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind((socket.gethostname(), port))
server_socket.listen(DEFAULT_BACKLOG)
# Accept connections until the program is killed
running = True
while running:
(client_socket, address) = server_socket.accept()
client_socket.close()
print("Got a client @ address {0}".format(address))
server_socket.close()
if __name__ == "__main__":
sys.exit(main())
|
Add a simple accept loop for clients
|
Add a simple accept loop for clients
|
Python
|
mit
|
Broar/PyChat
|
Add a simple accept loop for clients
|
"""
A simple chat server using TCP/IP and the publisher-subscriber pattern.
@author Steven Briggs
@version 2015.05.25
"""
import sys
import argparse
import socket
DEFAULT_PORT = 8080
DEFAULT_BACKLOG = 5
def get_args():
"""
Parse and return the arguments given at the command line
@returns the command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default=DEFAULT_PORT, type=int, help="port number for the server")
return parser.parse_args()
def main():
args = get_args()
port = args.port
# Prepare the listening socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind((socket.gethostname(), port))
server_socket.listen(DEFAULT_BACKLOG)
# Accept connections until the program is killed
running = True
while running:
(client_socket, address) = server_socket.accept()
client_socket.close()
print("Got a client @ address {0}".format(address))
server_socket.close()
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a simple accept loop for clients<commit_after>
|
"""
A simple chat server using TCP/IP and the publisher-subscriber pattern.
@author Steven Briggs
@version 2015.05.25
"""
import sys
import argparse
import socket
DEFAULT_PORT = 8080
DEFAULT_BACKLOG = 5
def get_args():
"""
Parse and return the arguments given at the command line
@returns the command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default=DEFAULT_PORT, type=int, help="port number for the server")
return parser.parse_args()
def main():
args = get_args()
port = args.port
# Prepare the listening socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind((socket.gethostname(), port))
server_socket.listen(DEFAULT_BACKLOG)
# Accept connections until the program is killed
running = True
while running:
(client_socket, address) = server_socket.accept()
client_socket.close()
print("Got a client @ address {0}".format(address))
server_socket.close()
if __name__ == "__main__":
sys.exit(main())
|
Add a simple accept loop for clients"""
A simple chat server using TCP/IP and the publisher-subscriber pattern.
@author Steven Briggs
@version 2015.05.25
"""
import sys
import argparse
import socket
DEFAULT_PORT = 8080
DEFAULT_BACKLOG = 5
def get_args():
"""
Parse and return the arguments given at the command line
@returns the command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default=DEFAULT_PORT, type=int, help="port number for the server")
return parser.parse_args()
def main():
args = get_args()
port = args.port
# Prepare the listening socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind((socket.gethostname(), port))
server_socket.listen(DEFAULT_BACKLOG)
# Accept connections until the program is killed
running = True
while running:
(client_socket, address) = server_socket.accept()
client_socket.close()
print("Got a client @ address {0}".format(address))
server_socket.close()
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add a simple accept loop for clients<commit_after>"""
A simple chat server using TCP/IP and the publisher-subscriber pattern.
@author Steven Briggs
@version 2015.05.25
"""
import sys
import argparse
import socket
DEFAULT_PORT = 8080
DEFAULT_BACKLOG = 5
def get_args():
"""
Parse and return the arguments given at the command line
@returns the command line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument("-p", "--port", default=DEFAULT_PORT, type=int, help="port number for the server")
return parser.parse_args()
def main():
args = get_args()
port = args.port
# Prepare the listening socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind((socket.gethostname(), port))
server_socket.listen(DEFAULT_BACKLOG)
# Accept connections until the program is killed
running = True
while running:
(client_socket, address) = server_socket.accept()
client_socket.close()
print("Got a client @ address {0}".format(address))
server_socket.close()
if __name__ == "__main__":
sys.exit(main())
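A hypothetical client for exercising the accept loop above (assumes the server is running on the same host with the default port 8080):
import socket
# Connect once; the server prints the peer address and closes its side.
with socket.create_connection((socket.gethostname(), 8080)) as sock:
    pass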
|
|
0cd84e16ae6159097900e743160c6b2efa66ea0e
|
test/test_orm.py
|
test/test_orm.py
|
# encoding: utf-8
from __future__ import print_function, unicode_literals
import sys
import pytest
from simplesqlite import connect_memdb
from simplesqlite.model import Blob, Integer, Model, Real, Text
class Hoge(Model):
hoge_id = Integer()
name = Text()
class Foo(Model):
foo_id = Integer(not_null=True)
name = Text(not_null=True)
value = Real(not_null=True)
blob = Blob()
def test_orm():
con = connect_memdb()
Hoge.attach(con, is_hidden=True)
Hoge.create()
hoge_inputs = [Hoge(hoge_id=10, name="a"), Hoge(hoge_id=20, name="b")]
for hoge_input in hoge_inputs:
Hoge.insert(hoge_input)
Foo.attach(con)
Foo.create()
foo_inputs = [Foo(foo_id=11, name="aq", value=0.1), Foo(foo_id=22, name="bb", value=1.11)]
for foo_input in foo_inputs:
Foo.insert(foo_input)
for record, hoge_input in zip(Hoge.select(), hoge_inputs):
assert record == hoge_input
for record, foo_input in zip(Foo.select(), foo_inputs):
assert record == foo_input
|
Add a test case for ORM
|
Add a test case for ORM
|
Python
|
mit
|
thombashi/SimpleSQLite,thombashi/SimpleSQLite
|
Add a test case for ORM
|
# encoding: utf-8
from __future__ import print_function, unicode_literals
import sys
import pytest
from simplesqlite import connect_memdb
from simplesqlite.model import Blob, Integer, Model, Real, Text
class Hoge(Model):
hoge_id = Integer()
name = Text()
class Foo(Model):
foo_id = Integer(not_null=True)
name = Text(not_null=True)
value = Real(not_null=True)
blob = Blob()
def test_orm():
con = connect_memdb()
Hoge.attach(con, is_hidden=True)
Hoge.create()
hoge_inputs = [Hoge(hoge_id=10, name="a"), Hoge(hoge_id=20, name="b")]
for hoge_input in hoge_inputs:
Hoge.insert(hoge_input)
Foo.attach(con)
Foo.create()
foo_inputs = [Foo(foo_id=11, name="aq", value=0.1), Foo(foo_id=22, name="bb", value=1.11)]
for foo_input in foo_inputs:
Foo.insert(foo_input)
for record, hoge_input in zip(Hoge.select(), hoge_inputs):
assert record == hoge_input
for record, foo_input in zip(Foo.select(), foo_inputs):
assert record == foo_input
|
<commit_before><commit_msg>Add a test case for ORM<commit_after>
|
# encoding: utf-8
from __future__ import print_function, unicode_literals
import sys
import pytest
from simplesqlite import connect_memdb
from simplesqlite.model import Blob, Integer, Model, Real, Text
class Hoge(Model):
hoge_id = Integer()
name = Text()
class Foo(Model):
foo_id = Integer(not_null=True)
name = Text(not_null=True)
value = Real(not_null=True)
blob = Blob()
def test_orm():
con = connect_memdb()
Hoge.attach(con, is_hidden=True)
Hoge.create()
hoge_inputs = [Hoge(hoge_id=10, name="a"), Hoge(hoge_id=20, name="b")]
for hoge_input in hoge_inputs:
Hoge.insert(hoge_input)
Foo.attach(con)
Foo.create()
foo_inputs = [Foo(foo_id=11, name="aq", value=0.1), Foo(foo_id=22, name="bb", value=1.11)]
for foo_input in foo_inputs:
Foo.insert(foo_input)
for record, hoge_input in zip(Hoge.select(), hoge_inputs):
assert record == hoge_input
for record, foo_input in zip(Foo.select(), foo_inputs):
assert record == foo_input
|
Add a test case for ORM# encoding: utf-8
from __future__ import print_function, unicode_literals
import sys
import pytest
from simplesqlite import connect_memdb
from simplesqlite.model import Blob, Integer, Model, Real, Text
class Hoge(Model):
hoge_id = Integer()
name = Text()
class Foo(Model):
foo_id = Integer(not_null=True)
name = Text(not_null=True)
value = Real(not_null=True)
blob = Blob()
def test_orm():
con = connect_memdb()
Hoge.attach(con, is_hidden=True)
Hoge.create()
hoge_inputs = [Hoge(hoge_id=10, name="a"), Hoge(hoge_id=20, name="b")]
for hoge_input in hoge_inputs:
Hoge.insert(hoge_input)
Foo.attach(con)
Foo.create()
foo_inputs = [Foo(foo_id=11, name="aq", value=0.1), Foo(foo_id=22, name="bb", value=1.11)]
for foo_input in foo_inputs:
Foo.insert(foo_input)
for record, hoge_input in zip(Hoge.select(), hoge_inputs):
assert record == hoge_input
for record, foo_input in zip(Foo.select(), foo_inputs):
assert record == foo_input
|
<commit_before><commit_msg>Add a test case for ORM<commit_after># encoding: utf-8
from __future__ import print_function, unicode_literals
import sys
import pytest
from simplesqlite import connect_memdb
from simplesqlite.model import Blob, Integer, Model, Real, Text
class Hoge(Model):
hoge_id = Integer()
name = Text()
class Foo(Model):
foo_id = Integer(not_null=True)
name = Text(not_null=True)
value = Real(not_null=True)
blob = Blob()
def test_orm():
con = connect_memdb()
Hoge.attach(con, is_hidden=True)
Hoge.create()
hoge_inputs = [Hoge(hoge_id=10, name="a"), Hoge(hoge_id=20, name="b")]
for hoge_input in hoge_inputs:
Hoge.insert(hoge_input)
Foo.attach(con)
Foo.create()
foo_inputs = [Foo(foo_id=11, name="aq", value=0.1), Foo(foo_id=22, name="bb", value=1.11)]
for foo_input in foo_inputs:
Foo.insert(foo_input)
for record, hoge_input in zip(Hoge.select(), hoge_inputs):
assert record == hoge_input
for record, foo_input in zip(Foo.select(), foo_inputs):
assert record == foo_input
|
|
a95e5532468bbdc8bb667e377a6f515854474d79
|
tests/test_configuration/test_menu.py
|
tests/test_configuration/test_menu.py
|
'''
Test menu creation
'''
import unittest
from wirecurly.configuration import menu
from nose import tools
class testMenuCreation(unittest.TestCase):
'''
Test menu creation
'''
def setUp(self):
'''
menu fixtures for tests
'''
self.menu = menu.Menu('on_hours')
def test_menu_dict_ok(self):
'''
Test that menu is properly serialized
'''
assert isinstance(self.menu.todict(), dict)
def test_adding_attr(self):
'''
Test if an attr is properly add to a menu
'''
self.menu.addAttr('digit-len','4')
assert self.menu.get('digit-len') == '4'
@tools.raises(ValueError)
def test_adding_existing_attr(self):
'''
Test adding an existing attr
'''
self.menu.addAttr('digit-len','4')
self.menu.addAttr('digit-len','4')
def test_adding_entry(self):
'''
Test if an entry is properly added to a menu
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
assert self.menu.getEntry('menu-exec-app','1') == 'transfer 1001 XML default'
@tools.raises(ValueError)
def test_adding_existing_entry(self):
'''
Test adding an existing entry
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
|
Add tests for IVR menu
|
Add tests for IVR menu
|
Python
|
mpl-2.0
|
IndiciumSRL/wirecurly
|
Add tests for IVR menu
|
'''
Test menu creation
'''
import unittest
from wirecurly.configuration import menu
from nose import tools
class testMenuCreation(unittest.TestCase):
'''
Test menu creation
'''
def setUp(self):
'''
menu fixtures for tests
'''
self.menu = menu.Menu('on_hours')
def test_menu_dict_ok(self):
'''
Test that menu is properly serialized
'''
assert isinstance(self.menu.todict(), dict)
def test_adding_attr(self):
'''
Test if an attr is properly added to a menu
'''
self.menu.addAttr('digit-len','4')
assert self.menu.get('digit-len') == '4'
@tools.raises(ValueError)
def test_adding_existing_attr(self):
'''
Test adding an existing attr
'''
self.menu.addAttr('digit-len','4')
self.menu.addAttr('digit-len','4')
def test_adding_entry(self):
'''
Test if an entry is properly added to a menu
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
assert self.menu.getEntry('menu-exec-app','1') == 'transfer 1001 XML default'
@tools.raises(ValueError)
def test_adding_existing_entry(self):
'''
Test adding an existing entry
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
|
<commit_before><commit_msg>Add tests for IVR menu<commit_after>
|
'''
Test menu creation
'''
import unittest
from wirecurly.configuration import menu
from nose import tools
class testMenuCreation(unittest.TestCase):
'''
Test menu creation
'''
def setUp(self):
'''
menu fixtures for tests
'''
self.menu = menu.Menu('on_hours')
def test_menu_dict_ok(self):
'''
Test that menu is properly serialized
'''
assert isinstance(self.menu.todict(), dict)
def test_adding_attr(self):
'''
Test if an attr is properly added to a menu
'''
self.menu.addAttr('digit-len','4')
assert self.menu.get('digit-len') == '4'
@tools.raises(ValueError)
def test_adding_existing_attr(self):
'''
Test adding an existing attr
'''
self.menu.addAttr('digit-len','4')
self.menu.addAttr('digit-len','4')
def test_adding_entry(self):
'''
Test if an entry is properly added to a menu
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
assert self.menu.getEntry('menu-exec-app','1') == 'transfer 1001 XML default'
@tools.raises(ValueError)
def test_adding_existing_entry(self):
'''
Test adding an existing entry
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
|
Add tests for IVR menu'''
Test menu creation
'''
import unittest
from wirecurly.configuration import menu
from nose import tools
class testMenuCreation(unittest.TestCase):
'''
Test menu creation
'''
def setUp(self):
'''
menu fixtures for tests
'''
self.menu = menu.Menu('on_hours')
def test_menu_dict_ok(self):
'''
Test that menu is properly serialized
'''
assert isinstance(self.menu.todict(), dict)
def test_adding_attr(self):
'''
Test if an attr is properly added to a menu
'''
self.menu.addAttr('digit-len','4')
assert self.menu.get('digit-len') == '4'
@tools.raises(ValueError)
def test_adding_existing_attr(self):
'''
Test adding an existing attr
'''
self.menu.addAttr('digit-len','4')
self.menu.addAttr('digit-len','4')
def test_adding_entry(self):
'''
Test if an entry is properly added to a menu
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
assert self.menu.getEntry('menu-exec-app','1') == 'transfer 1001 XML default'
@tools.raises(ValueError)
def test_adding_existing_entry(self):
'''
Test adding an existing entry
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
|
<commit_before><commit_msg>Add tests for IVR menu<commit_after>'''
Test menu creation
'''
import unittest
from wirecurly.configuration import menu
from nose import tools
class testMenuCreation(unittest.TestCase):
'''
Test menu creation
'''
def setUp(self):
'''
menu fixtures for tests
'''
self.menu = menu.Menu('on_hours')
def test_menu_dict_ok(self):
'''
Test that menu is properly serialized
'''
assert isinstance(self.menu.todict(), dict)
def test_adding_attr(self):
'''
Test if an attr is properly added to a menu
'''
self.menu.addAttr('digit-len','4')
assert self.menu.get('digit-len') == '4'
@tools.raises(ValueError)
def test_adding_existing_attr(self):
'''
Test adding an existing attr
'''
self.menu.addAttr('digit-len','4')
self.menu.addAttr('digit-len','4')
def test_adding_entry(self):
'''
Test if an entry is properly added to a menu
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
assert self.menu.getEntry('menu-exec-app','1') == 'transfer 1001 XML default'
@tools.raises(ValueError)
def test_adding_existing_entry(self):
'''
Test adding an existing entry
'''
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
self.menu.addEntry('menu-exec-app','1','transfer 1001 XML default')
|
|
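The tests above pin down a small surface: addAttr/addEntry must reject duplicates with ValueError, get/getEntry must read values back, and todict() must serialize. A minimal sketch that would satisfy exactly these tests (the real wirecurly Menu surely differs in detail; all field names below are assumptions):

class Menu(object):
    """Hypothetical minimal Menu implementing only what the tests check."""
    def __init__(self, name):
        self.name = name
        self._attrs = {}
        self._entries = {}  # keyed by (action, digits)

    def addAttr(self, key, value):
        if key in self._attrs:
            raise ValueError("attribute %r already set" % key)
        self._attrs[key] = value

    def get(self, key):
        return self._attrs[key]

    def addEntry(self, action, digits, param):
        if (action, digits) in self._entries:
            raise ValueError("entry for digits %r already set" % (digits,))
        self._entries[(action, digits)] = param

    def getEntry(self, action, digits):
        return self._entries[(action, digits)]

    def todict(self):
        return {"name": self.name,
                "attrs": dict(self._attrs),
                "entries": [{"action": a, "digits": d, "param": p}
                            for (a, d), p in self._entries.items()]}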
4e90a2fd424eeb078957b779a211d9643c516566
|
tests/commands/test_settings.py
|
tests/commands/test_settings.py
|
# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
validate_cliresult(result)
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
|
# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
assert result.exit_code == 0
assert not result.exception
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
|
Fix test for settings command
|
Fix test for settings command
|
Python
|
apache-2.0
|
platformio/platformio-core,eiginn/platformio,platformio/platformio-core,ZachMassia/platformio,platformio/platformio,valeros/platformio
|
# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
validate_cliresult(result)
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
Fix test for settings command
|
# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
assert result.exit_code == 0
assert not result.exception
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
|
<commit_before># Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
validate_cliresult(result)
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
<commit_msg>Fix test for settings command<commit_after>
|
# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
assert result.exit_code == 0
assert not result.exception
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
|
# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
validate_cliresult(result)
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
Fix test for settings command# Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
assert result.exit_code == 0
assert not result.exception
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
|
<commit_before># Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
validate_cliresult(result)
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
<commit_msg>Fix test for settings command<commit_after># Copyright 2014-2015 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from platformio.commands.settings import cli
from platformio import app
def test_settings_check(clirunner, validate_cliresult):
result = clirunner.invoke(cli, ["get"])
assert result.exit_code == 0
assert not result.exception
assert len(result.output)
for item in app.DEFAULT_SETTINGS.items():
assert item[0] in result.output
|
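The fix above inlines the two checks that a Click CliRunner result validator performs. A hypothetical helper capturing the same contract, had the fixture been kept:

def validate_cliresult(result):
    # A successful CLI invocation exits 0 and raises no exception.
    assert result.exit_code == 0
    assert not result.exception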
f2bdbe29395f1fc875d097200821f0bfd099ee68
|
tests/test_config_milestones.py
|
tests/test_config_milestones.py
|
from tg.configuration.milestones import _ConfigMilestoneTracker
class Action:
called = 0
def __call__(self):
self.called += 1
class TestMilestones(object):
def setup(self):
self.milestone = _ConfigMilestoneTracker('test_milestone')
def test_multiple_registration(self):
a = Action()
self.milestone.register(a)
self.milestone.register(a)
self.milestone.register(a)
self.milestone.reach()
assert a.called == 1
def test_register_after_reach(self):
a = Action()
self.milestone.reach()
self.milestone.register(a)
assert a.called == 1
def test_call_all(self):
a = Action()
a2 = Action()
a3 = Action()
self.milestone.register(a)
self.milestone.register(a2)
self.milestone.register(a3)
self.milestone.reach()
assert a.called == a2.called == a3.called == 1
def test_register_func_unique(self):
called = []
def f():
called.append(True)
self.milestone.register(f)
self.milestone.register(f)
self.milestone.reach()
assert len(called) == 1
|
Add tests for config milestones
|
Add tests for config milestones
|
Python
|
mit
|
lucius-feng/tg2,lucius-feng/tg2
|
Add tests for config milestones
|
from tg.configuration.milestones import _ConfigMilestoneTracker
class Action:
called = 0
def __call__(self):
self.called += 1
class TestMilestones(object):
def setup(self):
self.milestone = _ConfigMilestoneTracker('test_milestone')
def test_multiple_registration(self):
a = Action()
self.milestone.register(a)
self.milestone.register(a)
self.milestone.register(a)
self.milestone.reach()
assert a.called == 1
def test_register_after_reach(self):
a = Action()
self.milestone.reach()
self.milestone.register(a)
assert a.called == 1
def test_call_all(self):
a = Action()
a2 = Action()
a3 = Action()
self.milestone.register(a)
self.milestone.register(a2)
self.milestone.register(a3)
self.milestone.reach()
assert a.called == a2.called == a3.called == 1
def test_register_func_unique(self):
called = []
def f():
called.append(True)
self.milestone.register(f)
self.milestone.register(f)
self.milestone.reach()
assert len(called) == 1
|
<commit_before><commit_msg>Add tests for config milestones<commit_after>
|
from tg.configuration.milestones import _ConfigMilestoneTracker
class Action:
called = 0
def __call__(self):
self.called += 1
class TestMilestones(object):
def setup(self):
self.milestone = _ConfigMilestoneTracker('test_milestone')
def test_multiple_registration(self):
a = Action()
self.milestone.register(a)
self.milestone.register(a)
self.milestone.register(a)
self.milestone.reach()
assert a.called == 1
def test_register_after_reach(self):
a = Action()
self.milestone.reach()
self.milestone.register(a)
assert a.called == 1
def test_call_all(self):
a = Action()
a2 = Action()
a3 = Action()
self.milestone.register(a)
self.milestone.register(a2)
self.milestone.register(a3)
self.milestone.reach()
assert a.called == a2.called == a3.called == 1
def test_register_func_unique(self):
called = []
def f():
called.append(True)
self.milestone.register(f)
self.milestone.register(f)
self.milestone.reach()
assert len(called) == 1
|
Add tests for config milestonesfrom tg.configuration.milestones import _ConfigMilestoneTracker
class Action:
called = 0
def __call__(self):
self.called += 1
class TestMilestones(object):
def setup(self):
self.milestone = _ConfigMilestoneTracker('test_milestone')
def test_multiple_registration(self):
a = Action()
self.milestone.register(a)
self.milestone.register(a)
self.milestone.register(a)
self.milestone.reach()
assert a.called == 1
def test_register_after_reach(self):
a = Action()
self.milestone.reach()
self.milestone.register(a)
assert a.called == 1
def test_call_all(self):
a = Action()
a2 = Action()
a3 = Action()
self.milestone.register(a)
self.milestone.register(a2)
self.milestone.register(a3)
self.milestone.reach()
assert a.called == a2.called == a3.called == 1
def test_register_func_unique(self):
called = []
def f():
called.append(True)
self.milestone.register(f)
self.milestone.register(f)
self.milestone.reach()
assert len(called) == 1
|
<commit_before><commit_msg>Add tests for config milestones<commit_after>from tg.configuration.milestones import _ConfigMilestoneTracker
class Action:
called = 0
def __call__(self):
self.called += 1
class TestMilestones(object):
def setup(self):
self.milestone = _ConfigMilestoneTracker('test_milestone')
def test_multiple_registration(self):
a = Action()
self.milestone.register(a)
self.milestone.register(a)
self.milestone.register(a)
self.milestone.reach()
assert a.called == 1
def test_register_after_reach(self):
a = Action()
self.milestone.reach()
self.milestone.register(a)
assert a.called == 1
def test_call_all(self):
a = Action()
a2 = Action()
a3 = Action()
self.milestone.register(a)
self.milestone.register(a2)
self.milestone.register(a3)
self.milestone.reach()
assert a.called == a2.called == a3.called == 1
def test_register_func_unique(self):
called = []
def f():
called.append(True)
self.milestone.register(f)
self.milestone.register(f)
self.milestone.reach()
assert len(called) == 1
|
|
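The assertions above amount to a compact contract: registered callables are deduplicated, reach() fires each of them exactly once, and registering after the milestone was reached fires the callable immediately. A minimal tracker satisfying that contract (a sketch, not TurboGears' actual _ConfigMilestoneTracker):

class MilestoneTracker(object):
    """Hypothetical tracker implementing only the tested behavior."""
    def __init__(self, name):
        self.name = name
        self._reached = False
        self._actions = []

    def register(self, action):
        if self._reached:
            # Late registrations fire immediately.
            action()
        elif action not in self._actions:
            # Duplicate registrations of the same callable are ignored.
            self._actions.append(action)

    def reach(self):
        self._reached = True
        for action in self._actions:
            action()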
b41e5552fe5abf75e6bfda9814eed4798695c01b
|
tools/rm_trailing_whitespace.py
|
tools/rm_trailing_whitespace.py
|
import glob
import os
import sys
extensions = [".cpp", ".hpp"]
PATH = "/home/youngmit/git/mocc/src"
def strip( filename ):
with open(filename, 'r') as f:
new = []
n_white = 0
for line in f:
strip_line = line.rstrip()
# Minus 1 for the newline
n_white += len(line) - len(strip_line) - 1
new.append(strip_line)
with open(filename, 'w') as f:
[f.write('%s\n' % line) for line in new]
print "Stripped " + str(n_white) + " characters of whitespace."
return
for path, dirs, files in os.walk(PATH):
for f in files:
file_name, file_extension = os.path.splitext(f)
if file_extension in extensions:
while True:
inp = raw_input("Treat file: " + f + "? [Y/n/q]: ")
if( inp == "" or inp == "y" or inp == "Y" ):
strip(os.path.join(path, f))
break
if( inp == "n" or inp == "N" ):
# skip this file and move on to the next one
break
if( inp == "q" ):
exit()
|
Remove tabs and trailing whitespace
|
Remove tabs and trailing whitespace
Also added a simple python script for stripping the whitespace. Tabs, just grep
and remove manually
|
Python
|
apache-2.0
|
youngmit/mocc,youngmit/mocc,youngmit/mocc,youngmit/mocc
|
Remove tabs and trailing whitespace
Also added a simple python script for stripping the whitespace. Tabs, just grep
and remove manually
|
import glob
import os
import sys
extensions = [".cpp", ".hpp"]
PATH = "/home/youngmit/git/mocc/src"
def strip( filename ):
with open(filename, 'r') as f:
new = []
n_white = 0
for line in f:
strip_line = line.rstrip()
# Minus 1 for the newline
n_white += len(line) - len(strip_line) - 1
new.append(strip_line)
with open(filename, 'w') as f:
[f.write('%s\n' % line) for line in new]
print "Stripped " + str(n_white) + " characters of whitespace."
return
for path, dirs, files in os.walk(PATH):
for f in files:
file_name, file_extension = os.path.splitext(f)
if file_extension in extensions:
while True:
inp = raw_input("Treat file: " + f + "? [Y/n/q]: ")
if( inp == "" or inp == "y" or inp == "Y" ):
strip(os.path.join(path, f))
break
if( inp == "n" or inp == "N" ):
# skip this file and move on to the next one
break
if( inp == "q" ):
exit()
|
<commit_before><commit_msg>Remove tabs and trailing whitespace
Also added a simple python script for stripping the whitespace. Tabs, just grep
and remove manually<commit_after>
|
import glob
import os
import sys
extensions = [".cpp", ".hpp"]
PATH = "/home/youngmit/git/mocc/src"
def strip( filename ):
with open(filename, 'r') as f:
new = []
n_white = 0
for line in f:
strip_line = line.rstrip()
# Minus 1 for the newline
n_white += len(line) - len(strip_line) - 1
new.append(strip_line)
with open(filename, 'w') as f:
[f.write('%s\n' % line) for line in new]
print "Stripped " + str(n_white) + " characters of whitespace."
return
for path, dirs, files in os.walk(PATH):
for f in files:
file_name, file_extension = os.path.splitext(f)
if file_extension in extensions:
while True:
inp = raw_input("Treat file: " + f + "? [Y/n/q]: ")
if( inp == "" or inp == "y" or inp == "Y" ):
strip(os.path.join(path, f))
break
if( inp == "n" or inp == "N" ):
# skip this file and move on to the next one
break
if( inp == "q" ):
exit()
|
Remove tabs and trailing whitespace
Also added a simple python script for stripping the whitespace. Tabs, just grep
and remove manuallyimport glob
import os
import sys
extensions = [".cpp", ".hpp"]
PATH = "/home/youngmit/git/mocc/src"
def strip( filename ):
with open(filename, 'r') as f:
new = []
n_white = 0
for line in f:
strip_line = line.rstrip()
# Minus 1 for the newline
n_white += len(line) - len(strip_line) - 1
new.append(strip_line)
with open(filename, 'w') as f:
[f.write('%s\n' % line) for line in new]
print "Stripped " + str(n_white) + " characters of whitespace."
return
for path, dirs, files in os.walk(PATH):
for f in files:
file_name, file_extension = os.path.splitext(f)
if file_extension in extensions:
while True:
inp = raw_input("Treat file: " + f + "? [Y/n/q]: ")
if( inp == "" or inp == "y" or inp == "Y" ):
strip(os.path.join(path, f))
break
if( inp == "n" or inp == "N" ):
# skip this file and move on to the next one
break
if( inp == "q" ):
exit()
|
<commit_before><commit_msg>Remove tabs and trailing whitespace
Also added a simple python script for stripping the whitespace. Tabs, just grep
and remove manually<commit_after>import glob
import os
import sys
extensions = [".cpp", ".hpp"]
PATH = "/home/youngmit/git/mocc/src"
def strip( filename ):
with open(filename, 'r') as f:
new = []
n_white = 0
for line in f:
strip_line = line.rstrip()
# Minus 1 for the newline
n_white += len(line) - len(strip_line) - 1
new.append(strip_line)
with open(filename, 'w') as f:
[f.write('%s\n' % line) for line in new]
print "Stripped " + str(n_white) + " characters of whitespace."
return
for path, dirs, files in os.walk(PATH):
for f in files:
file_name, file_extension = os.path.splitext(f)
if file_extension in extensions:
while True:
inp = raw_input("Treat file: " + f + "? [Y/n/q]: ")
if( inp == "" or inp == "y" or inp == "Y" ):
strip(os.path.join(path, f))
break
if( inp == "n" or inp == "N" ):
# skip this file and move on to the next one
break
if( inp == "q" ):
exit()
|
|
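The walker above is Python 2 (print statement, raw_input) and prompts once per file; answering "n" was meant to skip the file, which is what the break in that branch does. A non-interactive Python 3 sketch of the same stripping pass (ROOT and EXTENSIONS are placeholders, not values from the original script):

import os

EXTENSIONS = {".cpp", ".hpp"}  # placeholder set of file types to treat
ROOT = "src"                   # placeholder root directory to walk

def strip_trailing_whitespace(filename):
    with open(filename, "r", encoding="utf-8") as f:
        lines = f.readlines()
    stripped = [line.rstrip() for line in lines]
    # Count removed whitespace characters, excluding the newlines themselves.
    removed = sum(len(a.rstrip("\n")) - len(b) for a, b in zip(lines, stripped))
    with open(filename, "w", encoding="utf-8") as f:
        f.write("\n".join(stripped) + "\n")
    print("Stripped %d characters of whitespace from %s" % (removed, filename))

for path, dirs, files in os.walk(ROOT):
    for name in files:
        if os.path.splitext(name)[1] in EXTENSIONS:
            strip_trailing_whitespace(os.path.join(path, name))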
86cd7dff1ab6d0c47b51f14d8784b8bf5f61e762
|
update-readme.py
|
update-readme.py
|
from hammock import Hammock as Github
import json
import base64
from pprint import pprint
# Let's create the first chain of hammock using base api url
github = Github('https://api.github.com')
user = 'holtzermann17'
# In the future this will be running on a server
# somewhere, so password can just be hard coded
password = raw_input("Enter your github password: ")
repo = 'py-blot'
# The most interesting things for us are in this part of the API
# https://developer.github.com/v3/repos/contents/
# get the contents of a given file, the README in this case
resp = github.repos(user, repo).contents.GET('README.md')
# examine the file we retrieved,
# here's how to have a look at everything:
#pprint(vars(resp))
# And from that, the most interesting part(s) can be extracted:
text = base64.b64decode(json.loads(resp._content)['content'])
# (we will need the sha later)
sha = json.loads(resp._content)['sha']
print text
# Now to further explore the API, let's loop back and
# update the file. Let's prompt the user to add
# some text to the file.
delta = raw_input("Add text: ")
newtext = ''.join([text, '\n', delta])
newcontent = base64.b64encode(newtext)
#message = base64.b64encode()
data={'message':'Adding "'+delta+'".',
'committer':{'name':'Joe Corneli',
'email':'holtzermann17@gmail.com'},
'content':newcontent,
'sha':sha,
'branch':'master'}
# Forming the correct URL (by following the instructions from Hammock docs!)
# In particular, we need to specify the :path as part of the url, whereas
# other arguments are passed along as a JSON body
resptwo = github.repos(user, repo).contents('README.md').PUT(
auth=(user, password),
headers = {'Content-type': 'application/json'},
data=json.dumps(data))
pprint(vars(resptwo))
# Here's how to do something similar with curl:
# curl -i -X PUT -H 'Authorization: <token>' -d '{"path": "test4.txt", "message": "Initial Test", "committer": {"name": "Joe Corneli", "email": "holtzermann17@gmail.com"}, "content": "bXkgbmV3IGZpbGUgY29udGVudHM=", "branch": "master"}' https://api.github.com/repos/holtzermann17/py-blot/contents/test4.txt
|
Add a proof of concept for reading and writing from github api
|
Add a proof of concept for reading and writing from github api
|
Python
|
mit
|
maddyloo/miniBibServer,maddyloo/miniBibServer
|
Add a proof of concept for reading and writing from github api
|
from hammock import Hammock as Github
import json
import base64
from pprint import pprint
# Let's create the first chain of hammock using base api url
github = Github('https://api.github.com')
user = 'holtzermann17'
# In the future this will be running on a server
# somewhere, so password can just be hard coded
password = raw_input("Enter your github password: ")
repo = 'py-blot'
# The most interesting things for us are in this part of the API
# https://developer.github.com/v3/repos/contents/
# get the contents of a given file, the README in this case
resp = github.repos(user, repo).contents.GET('README.md')
# examine the file we retrieved,
# here's how to have a look at everything:
#pprint(vars(resp))
# And from that, the most interesting part(s) can be extracted:
text = base64.b64decode(json.loads(resp._content)['content'])
# (we will need the sha later)
sha = json.loads(resp._content)['sha']
print text
# Now to further explore the API, let's loop back and
# update the file. Let's prompt the user to add
# some text to the file.
delta = raw_input("Add text: ")
newtext = ''.join([text, '\n', delta])
newcontent = base64.b64encode(newtext)
#message = base64.b64encode()
data={'message':'Adding "'+delta+'".',
'committer':{'name':'Joe Corneli',
'email':'holtzermann17@gmail.com'},
'content':newcontent,
'sha':sha,
'branch':'master'}
# Forming the correct URL (by following the instructions from Hammock docs!)
# In particular, we need to specify the :path as part of the url, whereas
# other arguments are passed along as a JSON body
resptwo = github.repos(user, repo).contents('README.md').PUT(
auth=(user, password),
headers = {'Content-type': 'application/json'},
data=json.dumps(data))
pprint(vars(resptwo))
# Here's how to do something similar with curl:
# curl -i -X PUT -H 'Authorization: <token>' -d '{"path": "test4.txt", "message": "Initial Test", "committer": {"name": "Joe Corneli", "email": "holtzermann17@gmail.com"}, "content": "bXkgbmV3IGZpbGUgY29udGVudHM=", "branch": "master"}' https://api.github.com/repos/holtzermann17/py-blot/contents/test4.txt
|
<commit_before><commit_msg>Add a proof of concept for reading and writing from github api<commit_after>
|
from hammock import Hammock as Github
import json
import base64
from pprint import pprint
# Let's create the first chain of hammock using base api url
github = Github('https://api.github.com')
user = 'holtzermann17'
# In the future this will be running on a server
# somewhere, so password can just be hard coded
password = raw_input("Enter your github password: ")
repo = 'py-blot'
# The most interesting things for us are in this part of the API
# https://developer.github.com/v3/repos/contents/
# get the contents of a given file, the README in this case
resp = github.repos(user, repo).contents.GET('README.md')
# examine the file we retrieved,
# here's how to have a look at everything:
#pprint(vars(resp))
# And from that, the most interesting part(s) can be extracted:
text = base64.b64decode(json.loads(resp._content)['content'])
# (we will need the sha later)
sha = json.loads(resp._content)['sha']
print text
# Now to further explore the API, let's loop back and
# update the file. Let's prompt the user to add
# some text to the file.
delta = raw_input("Add text: ")
newtext = ''.join([text, '\n', delta])
newcontent = base64.b64encode(newtext)
#message = base64.b64encode()
data={'message':'Adding "'+delta+'".',
'committer':{'name':'Joe Corneli',
'email':'holtzermann17@gmail.com'},
'content':newcontent,
'sha':sha,
'branch':'master'}
# Forming the correct URL (by following the instructions from Hammock docs!)
# In particular, we need to specify the :path as part of the url, whereas
# other arguments are passed along as a JSON body
resptwo = github.repos(user, repo).contents('README.md').PUT(
auth=(user, password),
headers = {'Content-type': 'application/json'},
data=json.dumps(data))
pprint(vars(resptwo))
# Here's how to do something similar with curl:
# curl -i -X PUT -H 'Authorization: <token>' -d '{"path": "test4.txt", "message": "Initial Test", "committer": {"name": "Joe Corneli", "email": "holtzermann17@gmail.com"}, "content": "bXkgbmV3IGZpbGUgY29udGVudHM=", "branch": "master"}' https://api.github.com/repos/holtzermann17/py-blot/contents/test4.txt
|
Add a proof of concept for reading and writing from github apifrom hammock import Hammock as Github
import json
import base64
from pprint import pprint
# Let's create the first chain of hammock using base api url
github = Github('https://api.github.com')
user = 'holtzermann17'
# In the future this will be running on a server
# somewhere, so password can just be hard coded
password = raw_input("Enter your github password: ")
repo = 'py-blot'
# The most interesting things for us are in this part of the API
# https://developer.github.com/v3/repos/contents/
# get the contents of a given file, the README in this case
resp = github.repos(user, repo).contents.GET('README.md')
# examine the file we retrieved,
# here's how to have a look at everything:
#pprint(vars(resp))
# And from that, the most interesting part(s) can be extracted:
text = base64.b64decode(json.loads(resp._content)['content'])
# (we will need the sha later)
sha = json.loads(resp._content)['sha']
print text
# Now to further explore the API, let's loop back and
# update the file. Let's prompt the user to add
# some text to the file.
delta = raw_input("Add text: ")
newtext = ''.join([text, '\n', delta])
newcontent = base64.b64encode(newtext)
#message = base64.b64encode()
data={'message':'Adding "'+delta+'".',
'committer':{'name':'Joe Corneli',
'email':'holtzermann17@gmail.com'},
'content':newcontent,
'sha':sha,
'branch':'master'}
# Forming the correct URL (by following the instructions from Hammock docs!)
# In particular, we need to specify the :path as part of the url, whereas
# other arguments are passed along as a JSON body
resptwo = github.repos(user, repo).contents('README.md').PUT(
auth=(user, password),
headers = {'Content-type': 'application/json'},
data=json.dumps(data))
pprint(vars(resptwo))
# Here's how to do something similar with curl:
# curl -i -X PUT -H 'Authorization: <token>' -d '{"path": "test4.txt", "message": "Initial Test", "committer": {"name": "Joe Corneli", "email": "holtzermann17@gmail.com"}, "content": "bXkgbmV3IGZpbGUgY29udGVudHM=", "branch": "master"}' https://api.github.com/repos/holtzermann17/py-blot/contents/test4.txt
|
<commit_before><commit_msg>Add a proof of concept for reading and writing from github api<commit_after>from hammock import Hammock as Github
import json
import base64
from pprint import pprint
# Let's create the first chain of hammock using base api url
github = Github('https://api.github.com')
user = 'holtzermann17'
# In the future this will be running on a server
# somewhere, so password can just be hard coded
password = raw_input("Enter your github password: ")
repo = 'py-blot'
# The most interesting things for us are in this part of the API
# https://developer.github.com/v3/repos/contents/
# get the contents of a given file, the README in this case
resp = github.repos(user, repo).contents.GET('README.md')
# examine the file we retrieved,
# here's how to have a look at everything:
#pprint(vars(resp))
# And from that, the most interesting part(s) can be extracted:
text = base64.b64decode(json.loads(resp._content)['content'])
# (we will need the sha later)
sha = json.loads(resp._content)['sha']
print text
# Now to further explore the API, let's loop back and
# update the file. Let's prompt the user to add
# some text to the file.
delta = raw_input("Add text: ")
newtext = ''.join([text, '\n', delta])
newcontent = base64.b64encode(newtext)
#message = base64.b64encode()
data={'message':'Adding "'+delta+'".',
'committer':{'name':'Joe Corneli',
'email':'holtzermann17@gmail.com'},
'content':newcontent,
'sha':sha,
'branch':'master'}
# Forming the correct URL (by following the instructions from Hammock docs!)
# In particular, we need to specify the :path as part of the url, whereas
# other arguments are passed along as a JSON body
resptwo = github.repos(user, repo).contents('README.md').PUT(
auth=(user, password),
headers = {'Content-type': 'application/json'},
data=json.dumps(data))
pprint(vars(resptwo))
# Here's how to do something similar with curl:
# curl -i -X PUT -H 'Authorization: <token>' -d '{"path": "test4.txt", "message": "Initial Test", "committer": {"name": "Joe Corneli", "email": "holtzermann17@gmail.com"}, "content": "bXkgbmV3IGZpbGUgY29udGVudHM=", "branch": "master"}' https://api.github.com/repos/holtzermann17/py-blot/contents/test4.txt
|
|
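The round trip above is the standard GitHub contents-API dance: GET the file, base64-decode it, then PUT the new content together with the previous blob's sha. The same flow with requests directly, as a sketch; TOKEN, USER, REPO and the appended line are placeholders:

import base64
import requests

TOKEN, USER, REPO, PATH = "<token>", "<user>", "<repo>", "README.md"  # placeholders
url = "https://api.github.com/repos/%s/%s/contents/%s" % (USER, REPO, PATH)
headers = {"Authorization": "token " + TOKEN}

# Read: fetch the current content plus the sha required for an update.
doc = requests.get(url, headers=headers).json()
text = base64.b64decode(doc["content"]).decode("utf-8")

# Write: append a line and PUT it back, referencing the old sha.
payload = {
    "message": "Append a line via the contents API",
    "content": base64.b64encode((text + "\nnew line").encode("utf-8")).decode("ascii"),
    "sha": doc["sha"],
}
requests.put(url, headers=headers, json=payload)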
1c2fc6d3e60cf36bc66aac96baa31074d52e3695
|
CodeFights/checkPassword.py
|
CodeFights/checkPassword.py
|
#!/usr/local/bin/python
# Code Fights Check Password Problem
def checkPassword(attempts, password):
def check():
while True:
tmp = yield
yield tmp == password
checker = check()
for i, attempt in enumerate(attempts):
next(checker)
if checker.send(attempt):
return i + 1
return -1
def main():
tests = [
[["hello", "world", "I", "like", "coding"], "like", 4],
[["hello", "world", "I", "like", "coding"], "qwerty123", -1],
[["codefights"], "codefights", 1],
[["123", "456", "qwerty", "zzz", "password", "genius239", "password"],
"password", 5],
[["warrior", "ninja", "trainee"], "recruit", -1],
[[], "igiveup", -1]
]
for t in tests:
res = checkPassword(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: checkPassword({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: checkPassword({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights check password problem
|
Solve Code Fights check password problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights check password problem
|
#!/usr/local/bin/python
# Code Fights Check Password Problem
def checkPassword(attempts, password):
def check():
while True:
tmp = yield
yield tmp == password
checker = check()
for i, attempt in enumerate(attempts):
next(checker)
if checker.send(attempt):
return i + 1
return -1
def main():
tests = [
[["hello", "world", "I", "like", "coding"], "like", 4],
[["hello", "world", "I", "like", "coding"], "qwerty123", -1],
[["codefights"], "codefights", 1],
[["123", "456", "qwerty", "zzz", "password", "genius239", "password"],
"password", 5],
[["warrior", "ninja", "trainee"], "recruit", -1],
[[], "igiveup", -1]
]
for t in tests:
res = checkPassword(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: checkPassword({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: checkPassword({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights check password problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Check Password Problem
def checkPassword(attempts, password):
def check():
while True:
tmp = yield
yield tmp == password
checker = check()
for i, attempt in enumerate(attempts):
next(checker)
if checker.send(attempt):
return i + 1
return -1
def main():
tests = [
[["hello", "world", "I", "like", "coding"], "like", 4],
[["hello", "world", "I", "like", "coding"], "qwerty123", -1],
[["codefights"], "codefights", 1],
[["123", "456", "qwerty", "zzz", "password", "genius239", "password"],
"password", 5],
[["warrior", "ninja", "trainee"], "recruit", -1],
[[], "igiveup", -1]
]
for t in tests:
res = checkPassword(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: checkPassword({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: checkPassword({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights check password problem#!/usr/local/bin/python
# Code Fights Check Password Problem
def checkPassword(attempts, password):
def check():
while True:
tmp = yield
yield tmp == password
checker = check()
for i, attempt in enumerate(attempts):
next(checker)
if checker.send(attempt):
return i + 1
return -1
def main():
tests = [
[["hello", "world", "I", "like", "coding"], "like", 4],
[["hello", "world", "I", "like", "coding"], "qwerty123", -1],
[["codefights"], "codefights", 1],
[["123", "456", "qwerty", "zzz", "password", "genius239", "password"],
"password", 5],
[["warrior", "ninja", "trainee"], "recruit", -1],
[[], "igiveup", -1]
]
for t in tests:
res = checkPassword(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: checkPassword({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: checkPassword({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights check password problem<commit_after>#!/usr/local/bin/python
# Code Fights Check Password Problem
def checkPassword(attempts, password):
def check():
while True:
tmp = yield
yield tmp == password
checker = check()
for i, attempt in enumerate(attempts):
next(checker)
if checker.send(attempt):
return i + 1
return -1
def main():
tests = [
[["hello", "world", "I", "like", "coding"], "like", 4],
[["hello", "world", "I", "like", "coding"], "qwerty123", -1],
[["codefights"], "codefights", 1],
[["123", "456", "qwerty", "zzz", "password", "genius239", "password"],
"password", 5],
[["warrior", "ninja", "trainee"], "recruit", -1],
[[], "igiveup", -1]
]
for t in tests:
res = checkPassword(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: checkPassword({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print("FAILED: checkPassword({}, {}) returned {}, answer: {}"
.format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
|
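The generator in checkPassword is coroutine gymnastics; functionally the whole routine reduces to returning the 1-based position of the first matching attempt. An equivalent plain sketch:

def check_password_simple(attempts, password):
    # 1-based index of the first correct attempt, or -1 if none match.
    for i, attempt in enumerate(attempts, start=1):
        if attempt == password:
            return i
    return -1

assert check_password_simple(["123", "zzz", "password"], "password") == 3
assert check_password_simple([], "igiveup") == -1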
5f5f5df6f76e6f82960f7cd0050266e4f523d481
|
tests/gl.py
|
tests/gl.py
|
def test_mesh():
from pyquante2 import grid,h2o
from pyquante2.viewer.viewer import Shapes,Viewer
h2o_mesh = grid(h2o)
shapes = Shapes(h2o)
shapes.add_points(h2o_mesh.points[:,:3])
win = Viewer()
win.calllist(shapes.shapelist)
win.run()
return
if __name__ == '__main__': test_mesh()
|
Test example viewing a dft mesh
|
Test example viewing a dft mesh
|
Python
|
bsd-3-clause
|
Konjkov/pyquante2,Konjkov/pyquante2,Konjkov/pyquante2
|
Test example viewing a dft mesh
|
def test_mesh():
from pyquante2 import grid,h2o
from pyquante2.viewer.viewer import Shapes,Viewer
h2o_mesh = grid(h2o)
shapes = Shapes(h2o)
shapes.add_points(h2o_mesh.points[:,:3])
win = Viewer()
win.calllist(shapes.shapelist)
win.run()
return
if __name__ == '__main__': test_mesh()
|
<commit_before><commit_msg>Test example viewing a dft mesh<commit_after>
|
def test_mesh():
from pyquante2 import grid,h2o
from pyquante2.viewer.viewer import Shapes,Viewer
h2o_mesh = grid(h2o)
shapes = Shapes(h2o)
shapes.add_points(h2o_mesh.points[:,:3])
win = Viewer()
win.calllist(shapes.shapelist)
win.run()
return
if __name__ == '__main__': test_mesh()
|
Test example viewing a dft mesh
def test_mesh():
from pyquante2 import grid,h2o
from pyquante2.viewer.viewer import Shapes,Viewer
h2o_mesh = grid(h2o)
shapes = Shapes(h2o)
shapes.add_points(h2o_mesh.points[:,:3])
win = Viewer()
win.calllist(shapes.shapelist)
win.run()
return
if __name__ == '__main__': test_mesh()
|
<commit_before><commit_msg>Test example viewing a dft mesh<commit_after>
def test_mesh():
from pyquante2 import grid,h2o
from pyquante2.viewer.viewer import Shapes,Viewer
h2o_mesh = grid(h2o)
shapes = Shapes(h2o)
shapes.add_points(h2o_mesh.points[:,:3])
win = Viewer()
win.calllist(shapes.shapelist)
win.run()
return
if __name__ == '__main__': test_mesh()
|
|
2a964e2030fd3764c0cdebfd98701f6ea381fd40
|
euler007.py
|
euler007.py
|
#!/usr/bin/python
from math import sqrt, ceil
def isPrime (x):
for i in range (3, ceil (sqrt (x) + 1), 2):
if x % i == 0:
return 0
return 1
# 2 is already counted
count = 1
# we start from 3
test = 3
while (count < 10001):
if isPrime (test):
count += 1
test += 2
print (test - 2)
|
Add solution for problem 7
|
Add solution for problem 7
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 7
|
#!/usr/bin/python
from math import sqrt, ceil
def isPrime (x):
for i in range (3, ceil (sqrt (x) + 1), 2):
if x % i == 0:
return 0
return 1
# 2 is already counted
count = 1
# we start from 3
test = 3
while (count < 10001):
if isPrime (test):
count += 1
test += 2
print (test - 2)
|
<commit_before><commit_msg>Add solution for problem 7<commit_after>
|
#!/usr/bin/python
from math import sqrt, ceil
def isPrime (x):
for i in range (3, ceil (sqrt (x) + 1), 2):
if x % i == 0:
return 0
return 1
# 2 is already counted
count = 1
# we start from 3
test = 3
while (count < 10001):
if isPrime (test):
count += 1
test += 2
print (test - 2)
|
Add solution for problem 7#!/usr/bin/python
from math import sqrt, ceil
def isPrime (x):
for i in range (3, ceil (sqrt (x) + 1), 2):
if x % i == 0:
return 0
return 1
# 2 is already counted
count = 1
# we start from 3
test = 3
while (count < 10001):
if isPrime (test):
count += 1
test += 2
print (test - 2)
|
<commit_before><commit_msg>Add solution for problem 7<commit_after>#!/usr/bin/python
from math import sqrt, ceil
def isPrime (x):
for i in range (3, ceil (sqrt (x) + 1), 2):
if x % i == 0:
return 0
return 1
# 2 is already counted
count = 1
# we start from 3
test = 3
while (count < 10001):
if isPrime (test):
count += 1
test += 2
print (test - 2)
|
|
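Note that isPrime above is only safe for the odd candidates the driver loop feeds it: an even number greater than 2 would slip through as prime. A self-contained sketch of the same nth-prime search with the guards made explicit (math.isqrt needs Python 3.8+):

from math import isqrt

def is_prime(x):
    # Correct for all integers, not just odd x > 2.
    if x < 2:
        return False
    if x % 2 == 0:
        return x == 2
    return all(x % d for d in range(3, isqrt(x) + 1, 2))

def nth_prime(n):
    count, candidate = 0, 1
    while count < n:
        candidate += 1
        if is_prime(candidate):
            count += 1
    return candidate

print(nth_prime(10001))  # Project Euler 7; prints 104743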
ba9f62379b4f6b0dd8546d843840a011ce7edef2
|
tests/RemoveEpsilonRules/__init__.py
|
tests/RemoveEpsilonRules/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 17:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
Add directory for tests of removing epsilon rules
|
Add directory for tests of removing epsilon rules
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add directory for tests of removing epsilon rules
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 17:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Add directory for tests of removing epsilon rules<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 17:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
Add directory for tests of removing epsilon rules#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 17:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Add directory for tests of removing epsilon rules<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 20.08.2017 17:38
:Licence GNUv3
Part of grammpy-transforms
"""
|
|
0a9d56d7e99324bf09cbb0ce6d6893e716b99f4f
|
api_tests/registrations/views/test_registration_embeds.py
|
api_tests/registrations/views/test_registration_embeds.py
|
from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestRegistrationEmbeds(ApiTestCase):
def setUp(self):
super(TestRegistrationEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
self.registration = RegistrationFactory(project=self.root_node, is_public=True)
self.registration_child = RegistrationFactory(project=self.child1, is_public=True)
def test_embed_children(self):
url = '/{0}registrations/{1}/?embed=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
assert_equal(len(embeds['children']['data']), 2)
titles = [self.child1.title, self.child2.title]
for child in embeds['children']['data']:
assert_in(child['attributes']['title'], titles)
def test_embed_contributors(self):
url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_attributes_not_relationships(self):
url = '/{}registrations/{}/?embed=title'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
|
Add a couple registrations embeds tests.
|
Add a couple registrations embeds tests.
|
Python
|
apache-2.0
|
cwisecarver/osf.io,mattclark/osf.io,Ghalko/osf.io,samchrisinger/osf.io,billyhunt/osf.io,billyhunt/osf.io,chrisseto/osf.io,SSJohns/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,chennan47/osf.io,binoculars/osf.io,cwisecarver/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,pattisdr/osf.io,zamattiac/osf.io,alexschiller/osf.io,crcresearch/osf.io,KAsante95/osf.io,Johnetordoff/osf.io,abought/osf.io,doublebits/osf.io,icereval/osf.io,erinspace/osf.io,doublebits/osf.io,emetsger/osf.io,brianjgeiger/osf.io,TomBaxter/osf.io,Nesiehr/osf.io,brandonPurvis/osf.io,mfraezz/osf.io,RomanZWang/osf.io,rdhyee/osf.io,abought/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,jnayak1/osf.io,cslzchen/osf.io,acshi/osf.io,brandonPurvis/osf.io,kwierman/osf.io,jnayak1/osf.io,kwierman/osf.io,hmoco/osf.io,cwisecarver/osf.io,RomanZWang/osf.io,zachjanicki/osf.io,monikagrabowska/osf.io,asanfilippo7/osf.io,amyshi188/osf.io,GageGaskins/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,zachjanicki/osf.io,GageGaskins/osf.io,laurenrevere/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,DanielSBrown/osf.io,hmoco/osf.io,zamattiac/osf.io,ticklemepierce/osf.io,alexschiller/osf.io,zamattiac/osf.io,doublebits/osf.io,mfraezz/osf.io,brandonPurvis/osf.io,adlius/osf.io,chrisseto/osf.io,samchrisinger/osf.io,caneruguz/osf.io,KAsante95/osf.io,zamattiac/osf.io,monikagrabowska/osf.io,TomHeatwole/osf.io,ticklemepierce/osf.io,erinspace/osf.io,caneruguz/osf.io,chrisseto/osf.io,hmoco/osf.io,kch8qx/osf.io,abought/osf.io,CenterForOpenScience/osf.io,GageGaskins/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,asanfilippo7/osf.io,TomHeatwole/osf.io,samchrisinger/osf.io,billyhunt/osf.io,mluo613/osf.io,icereval/osf.io,ticklemepierce/osf.io,emetsger/osf.io,amyshi188/osf.io,asanfilippo7/osf.io,TomBaxter/osf.io,KAsante95/osf.io,Nesiehr/osf.io,KAsante95/osf.io,kwierman/osf.io,sloria/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,jnayak1/osf.io,Ghalko/osf.io,ticklemepierce/osf.io,laurenrevere/osf.io,wearpants/osf.io,Ghalko/osf.io,mattclark/osf.io,acshi/osf.io,GageGaskins/osf.io,zachjanicki/osf.io,RomanZWang/osf.io,cwisecarver/osf.io,KAsante95/osf.io,kch8qx/osf.io,Nesiehr/osf.io,brandonPurvis/osf.io,kch8qx/osf.io,jnayak1/osf.io,mluo613/osf.io,TomHeatwole/osf.io,adlius/osf.io,Ghalko/osf.io,monikagrabowska/osf.io,abought/osf.io,Johnetordoff/osf.io,acshi/osf.io,aaxelb/osf.io,sloria/osf.io,pattisdr/osf.io,rdhyee/osf.io,binoculars/osf.io,laurenrevere/osf.io,HalcyonChimera/osf.io,TomHeatwole/osf.io,acshi/osf.io,sloria/osf.io,monikagrabowska/osf.io,mattclark/osf.io,leb2dg/osf.io,DanielSBrown/osf.io,felliott/osf.io,RomanZWang/osf.io,asanfilippo7/osf.io,wearpants/osf.io,kwierman/osf.io,SSJohns/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,acshi/osf.io,aaxelb/osf.io,felliott/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,billyhunt/osf.io,monikagrabowska/osf.io,cslzchen/osf.io,mluke93/osf.io,doublebits/osf.io,binoculars/osf.io,mluke93/osf.io,billyhunt/osf.io,mluo613/osf.io,cslzchen/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,doublebits/osf.io,rdhyee/osf.io,samchrisinger/osf.io,mfraezz/osf.io,alexschiller/osf.io,mfraezz/osf.io,SSJohns/osf.io,brandonPurvis/osf.io,amyshi188/osf.io,baylee-d/osf.io,wearpants/osf.io,Johnetordoff/osf.io,emetsger/osf.io,zachjanicki/osf.io,amyshi188/osf.io,leb2dg/osf.io,mluke93/osf.io,chennan47/osf.io,erinspace/osf.io,leb2dg/osf.io,hmoco/osf.io,felliott/osf.io,cwisecarver/osf.io,mluo613/osf.io,RomanZWang/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,caseyrollins/osf.io,leb2dg/osf.io,crcresearch/osf.io,chennan47/osf.io,kch8qx/osf.io,saradbowman/osf.io,mluke93/osf.io,GageGaskins/osf.io,wearpants/osf.io,brianjgeiger/osf.io,adlius/osf.io,caseyrollins/osf.io,caneruguz/osf.io,emetsger/osf.io,alexschiller/osf.io,aaxelb/osf.io,caseyrollins/osf.io
|
Add a couple registrations embeds tests.
|
from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestRegistrationEmbeds(ApiTestCase):
def setUp(self):
super(TestRegistrationEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
self.registration = RegistrationFactory(project=self.root_node, is_public=True)
self.registration_child = RegistrationFactory(project=self.child1, is_public=True)
def test_embed_children(self):
url = '/{0}registrations/{1}/?embed=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
assert_equal(len(embeds['children']['data']), 2)
titles = [self.child1.title, self.child2.title]
for child in embeds['children']['data']:
assert_in(child['attributes']['title'], titles)
def test_embed_contributors(self):
url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_attributes_not_relationships(self):
url = '/{}registrations/{}/?embed=title'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
|
<commit_before><commit_msg>Add a couple registrations embeds tests.<commit_after>
|
from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestRegistrationEmbeds(ApiTestCase):
def setUp(self):
super(TestRegistrationEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
self.registration = RegistrationFactory(project=self.root_node, is_public=True)
self.registration_child = RegistrationFactory(project=self.child1, is_public=True)
def test_embed_children(self):
url = '/{0}registrations/{1}/?embed=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
assert_equal(len(embeds['children']['data']), 2)
titles = [self.child1.title, self.child2.title]
for child in embeds['children']['data']:
assert_in(child['attributes']['title'], titles)
def test_embed_contributors(self):
url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_attributes_not_relationships(self):
url = '/{}registrations/{}/?embed=title'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
|
Add a couple registrations embeds tests.from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestRegistrationEmbeds(ApiTestCase):
def setUp(self):
super(TestRegistrationEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
self.registration = RegistrationFactory(project=self.root_node, is_public=True)
self.registration_child = RegistrationFactory(project=self.child1, is_public=True)
def test_embed_children(self):
url = '/{0}registrations/{1}/?embed=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
assert_equal(len(embeds['children']['data']), 2)
titles = [self.child1.title, self.child2.title]
for child in embeds['children']['data']:
assert_in(child['attributes']['title'], titles)
def test_embed_contributors(self):
url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_attributes_not_relationships(self):
url = '/{}registrations/{}/?embed=title'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
|
<commit_before><commit_msg>Add a couple registrations embeds tests.<commit_after>from nose.tools import * # flake8: noqa
import functools
from framework.auth.core import Auth
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from tests.factories import (
ProjectFactory,
AuthUserFactory,
RegistrationFactory
)
class TestRegistrationEmbeds(ApiTestCase):
def setUp(self):
super(TestRegistrationEmbeds, self).setUp()
self.user = AuthUserFactory()
self.auth = Auth(self.user)
make_public_node = functools.partial(ProjectFactory, is_public=False, creator=self.user)
self.root_node = make_public_node()
self.child1 = make_public_node(parent=self.root_node)
self.child2 = make_public_node(parent=self.root_node)
self.contribs = [AuthUserFactory() for i in range(2)]
for contrib in self.contribs:
self.root_node.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.child1.add_contributor(contrib, ['read', 'write'], auth=self.auth, save=True)
self.contrib1 = self.contribs[0]
self.contrib2 = self.contribs[1]
self.subchild = ProjectFactory(parent=self.child2, creator=self.contrib1)
self.registration = RegistrationFactory(project=self.root_node, is_public=True)
self.registration_child = RegistrationFactory(project=self.child1, is_public=True)
def test_embed_children(self):
url = '/{0}registrations/{1}/?embed=children'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
assert_equal(len(embeds['children']['data']), 2)
titles = [self.child1.title, self.child2.title]
for child in embeds['children']['data']:
assert_in(child['attributes']['title'], titles)
def test_embed_contributors(self):
url = '/{0}registrations/{1}/?embed=contributors'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.user.auth)
embeds = res.json['data']['embeds']
ids = [c._id for c in self.contribs] + [self.user._id]
for contrib in embeds['contributors']['data']:
assert_in(contrib['id'], ids)
def test_embed_attributes_not_relationships(self):
url = '/{}registrations/{}/?embed=title'.format(API_BASE, self.registration._id)
res = self.app.get(url, auth=self.contrib1.auth, expect_errors=True)
assert_equal(res.status_code, 400)
assert_equal(res.json['errors'][0]['detail'], "The following fields are not embeddable: title")
|
|
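Editor's note: a minimal sketch of the embed round-trip exercised in the record above, assuming the same webtest fixture (app, API_BASE, registration, user) — anything beyond the record's own names is illustrative, not part of the dataset.
# Request one embedded relationship and walk the embedded documents;
# each embed mirrors the top-level JSON-API document shape.
url = '/{}registrations/{}/?embed=children'.format(API_BASE, registration._id)
res = app.get(url, auth=user.auth)
embeds = res.json['data']['embeds']
for child in embeds['children']['data']:
    print(child['attributes']['title'])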
dcb8bc2a4bb06c9a6fd5d76d90a6c7621a2f7011
|
moniker/tests/test_backend/test_mysqlbind9.py
|
moniker/tests/test_backend/test_mysqlbind9.py
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.openstack.common import log as logging
from moniker.tests.test_backend import BackendDriverTestCase
LOG = logging.getLogger(__name__)
class MySQLBind9BackendDriverTestCase(BackendDriverTestCase):
__test__ = True
def setUp(self):
super(MySQLBind9BackendDriverTestCase, self).setUp()
self.config(backend_driver='mysqlbind9')
|
Add empty testcase for the MySQL Bind9 backend
|
Add empty testcase for the MySQL Bind9 backend
Change-Id: I55ef157b0c3dbf5f353553336d15c1f9681a92d4
|
Python
|
apache-2.0
|
openstack/designate,richm/designate,melodous/designate,cneill/designate,NeCTAR-RC/designate,tonyli71/designate,cneill/designate-testing,muraliselva10/designate,ionrock/designate,ramsateesh/designate,grahamhayes/designate,richm/designate,ramsateesh/designate,ionrock/designate,cneill/designate-testing,melodous/designate,kiall/designate-py3,ramsateesh/designate,kiall/designate-py3,grahamhayes/designate,muraliselva10/designate,cneill/designate-testing,kiall/designate-py3,openstack/designate,grahamhayes/designate,NeCTAR-RC/designate,tonyli71/designate,ionrock/designate,kiall/designate-py3,melodous/designate,muraliselva10/designate,openstack/designate,tonyli71/designate,cneill/designate,cneill/designate,cneill/designate,cneill/designate,kiall/designate-py3,melodous/designate
|
Add empty testcase for the MySQL Bind9 backend
Change-Id: I55ef157b0c3dbf5f353553336d15c1f9681a92d4
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.openstack.common import log as logging
from moniker.tests.test_backend import BackendDriverTestCase
LOG = logging.getLogger(__name__)
class MySQLBind9BackendDriverTestCase(BackendDriverTestCase):
__test__ = True
def setUp(self):
super(MySQLBind9BackendDriverTestCase, self).setUp()
self.config(backend_driver='mysqlbind9')
|
<commit_before><commit_msg>Add empty testcase for the MySQL Bind9 backend
Change-Id: I55ef157b0c3dbf5f353553336d15c1f9681a92d4<commit_after>
|
# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.openstack.common import log as logging
from moniker.tests.test_backend import BackendDriverTestCase
LOG = logging.getLogger(__name__)
class MySQLBind9BackendDriverTestCase(BackendDriverTestCase):
__test__ = True
def setUp(self):
super(MySQLBind9BackendDriverTestCase, self).setUp()
self.config(backend_driver='mysqlbind9')
|
Add empty testcase for the MySQL Bind9 backend
Change-Id: I55ef157b0c3dbf5f353553336d15c1f9681a92d4# Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.openstack.common import log as logging
from moniker.tests.test_backend import BackendDriverTestCase
LOG = logging.getLogger(__name__)
class MySQLBind9BackendDriverTestCase(BackendDriverTestCase):
__test__ = True
def setUp(self):
super(MySQLBind9BackendDriverTestCase, self).setUp()
self.config(backend_driver='mysqlbind9')
|
<commit_before><commit_msg>Add empty testcase for the MySQL Bind9 backend
Change-Id: I55ef157b0c3dbf5f353553336d15c1f9681a92d4<commit_after># Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.openstack.common import log as logging
from moniker.tests.test_backend import BackendDriverTestCase
LOG = logging.getLogger(__name__)
class MySQLBind9BackendDriverTestCase(BackendDriverTestCase):
__test__ = True
def setUp(self):
super(MySQLBind9BackendDriverTestCase, self).setUp()
self.config(backend_driver='mysqlbind9')
|
|
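Editor's note: the __test__ flag in the record above is a nose collection convention; a hedged sketch of the surrounding pattern — the base-class body and the second driver name are assumptions (in the record the base comes from moniker.tests.test_backend).
from unittest import TestCase

class BackendDriverTestCase(TestCase):
    __test__ = False  # shared assertions live here; nose skips the base itself

class Bind9BackendDriverTestCase(BackendDriverTestCase):
    __test__ = True   # concrete subclass opts back into collection
    def setUp(self):
        super(Bind9BackendDriverTestCase, self).setUp()
        # config() is provided by the project's fixture base class (assumed);
        # it overrides the oslo-style backend_driver option for this case.
        self.config(backend_driver='bind9')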
83328bdc1ce69a8a3b8f6f7465e226ef524bc728
|
obfsproxy/test/transports/test_bananaphone.py
|
obfsproxy/test/transports/test_bananaphone.py
|
#!/usr/bin/env python
import unittest
import twisted.trial.unittest
from struct import pack
from obfsproxy.network.buffer import Buffer
from obfsproxy.transports.bananaphone import rh_encoder, rh_decoder
class test_Bananaphone(twisted.trial.unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.encodingSpec = 'words,sha1,4'
cls.modelName = 'markov'
cls.order = 1
cls.corpus = '/usr/share/dict/words'
cls.abridged = None
def writeEncodeBuffer(self, buff):
self.encodeBuffer.write(buff)
def writeDecodeBuffer(self, buff):
self.decodeBuffer.write(buff)
def test_1(self):
self.encodeBuffer = Buffer()
self.decodeBuffer = Buffer()
if self.modelName == 'markov':
args = [ self.corpus, self.order, self.abridged ]
elif self.modelName == 'random':
args = [ self.corpus ]
self.encoder = rh_encoder(self.encodingSpec, self.modelName, *args) > self.writeEncodeBuffer
self.decoder = rh_decoder(self.encodingSpec) > self.writeDecodeBuffer
orig_message = 'War is peace. Freedom is slavery. Ignorance is strength.'
self.encoder.send(orig_message)
encoded_message = self.encodeBuffer.read()
self.decoder.send(encoded_message)
self.assertEqual(orig_message, self.decodeBuffer.read())
if __name__ == '__main__':
unittest.main()
|
Add simple unit test for Bananaphone codec
|
Add simple unit test for Bananaphone codec
|
Python
|
bsd-3-clause
|
david415/obfsproxy
|
Add simple unit test for Bananaphone codec
|
#!/usr/bin/env python
import unittest
import twisted.trial.unittest
from struct import pack
from obfsproxy.network.buffer import Buffer
from obfsproxy.transports.bananaphone import rh_encoder, rh_decoder
class test_Bananaphone(twisted.trial.unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.encodingSpec = 'words,sha1,4'
cls.modelName = 'markov'
cls.order = 1
cls.corpus = '/usr/share/dict/words'
cls.abridged = None
def writeEncodeBuffer(self, buff):
self.encodeBuffer.write(buff)
def writeDecodeBuffer(self, buff):
self.decodeBuffer.write(buff)
def test_1(self):
self.encodeBuffer = Buffer()
self.decodeBuffer = Buffer()
if self.modelName == 'markov':
args = [ self.corpus, self.order, self.abridged ]
elif self.modelName == 'random':
args = [ self.corpus ]
self.encoder = rh_encoder(self.encodingSpec, self.modelName, *args) > self.writeEncodeBuffer
self.decoder = rh_decoder(self.encodingSpec) > self.writeDecodeBuffer
orig_message = 'War is peace. Freedom is slavery. Ignorance is strength.'
self.encoder.send(orig_message)
encoded_message = self.encodeBuffer.read()
self.decoder.send(encoded_message)
self.assertEqual(orig_message, self.decodeBuffer.read())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add simple unit test for Bananaphone codec<commit_after>
|
#!/usr/bin/env python
import unittest
import twisted.trial.unittest
from struct import pack
from obfsproxy.network.buffer import Buffer
from obfsproxy.transports.bananaphone import rh_encoder, rh_decoder
class test_Bananaphone(twisted.trial.unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.encodingSpec = 'words,sha1,4'
cls.modelName = 'markov'
cls.order = 1
cls.corpus = '/usr/share/dict/words'
cls.abridged = None
def writeEncodeBuffer(self, buff):
self.encodeBuffer.write(buff)
def writeDecodeBuffer(self, buff):
self.decodeBuffer.write(buff)
def test_1(self):
self.encodeBuffer = Buffer()
self.decodeBuffer = Buffer()
if self.modelName == 'markov':
args = [ self.corpus, self.order, self.abridged ]
elif self.modelName == 'random':
args = [ self.corpus ]
self.encoder = rh_encoder(self.encodingSpec, self.modelName, *args) > self.writeEncodeBuffer
self.decoder = rh_decoder(self.encodingSpec) > self.writeDecodeBuffer
orig_message = 'War is peace. Freedom is slavery. Ignorance is strength.'
self.encoder.send(orig_message)
encoded_message = self.encodeBuffer.read()
self.decoder.send(encoded_message)
self.assertEqual(orig_message, self.decodeBuffer.read())
if __name__ == '__main__':
unittest.main()
|
Add simple unit test for Bananaphone codec#!/usr/bin/env python
import unittest
import twisted.trial.unittest
from struct import pack
from obfsproxy.network.buffer import Buffer
from obfsproxy.transports.bananaphone import rh_encoder, rh_decoder
class test_Bananaphone(twisted.trial.unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.encodingSpec = 'words,sha1,4'
cls.modelName = 'markov'
cls.order = 1
cls.corpus = '/usr/share/dict/words'
cls.abridged = None
def writeEncodeBuffer(self, buff):
self.encodeBuffer.write(buff)
def writeDecodeBuffer(self, buff):
self.decodeBuffer.write(buff)
def test_1(self):
self.encodeBuffer = Buffer()
self.decodeBuffer = Buffer()
if self.modelName == 'markov':
args = [ self.corpus, self.order, self.abridged ]
elif self.modelName == 'random':
args = [ self.corpus ]
self.encoder = rh_encoder(self.encodingSpec, self.modelName, *args) > self.writeEncodeBuffer
self.decoder = rh_decoder(self.encodingSpec) > self.writeDecodeBuffer
orig_message = 'War is peace. Freedom is slavery. Ignorance is strength.'
self.encoder.send(orig_message)
encoded_message = self.encodeBuffer.read()
self.decoder.send(encoded_message)
self.assertEqual(orig_message, self.decodeBuffer.read())
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add simple unit test for Bananaphone codec<commit_after>#!/usr/bin/env python
import unittest
import twisted.trial.unittest
from struct import pack
from obfsproxy.network.buffer import Buffer
from obfsproxy.transports.bananaphone import rh_encoder, rh_decoder
class test_Bananaphone(twisted.trial.unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.encodingSpec = 'words,sha1,4'
cls.modelName = 'markov'
cls.order = 1
cls.corpus = '/usr/share/dict/words'
cls.abridged = None
def writeEncodeBuffer(self, buff):
self.encodeBuffer.write(buff)
def writeDecodeBuffer(self, buff):
self.decodeBuffer.write(buff)
def test_1(self):
self.encodeBuffer = Buffer()
self.decodeBuffer = Buffer()
if self.modelName == 'markov':
args = [ self.corpus, self.order, self.abridged ]
elif self.modelName == 'random':
args = [ self.corpus ]
self.encoder = rh_encoder(self.encodingSpec, self.modelName, *args) > self.writeEncodeBuffer
self.decoder = rh_decoder(self.encodingSpec) > self.writeDecodeBuffer
orig_message = 'War is peace. Freedom is slavery. Ignorance is strength.'
self.encoder.send(orig_message)
encoded_message = self.encodeBuffer.read()
self.decoder.send(encoded_message)
self.assertEqual(orig_message, self.decodeBuffer.read())
if __name__ == '__main__':
unittest.main()
|
|
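Editor's note: the > operator in the record above chains a bananaphone coroutine pipeline into a write callback; a minimal sketch of driving the round trip directly, assuming the same obfsproxy modules — the corpus path and message are illustrative.
from obfsproxy.network.buffer import Buffer
from obfsproxy.transports.bananaphone import rh_encoder, rh_decoder

wire, plain = Buffer(), Buffer()
encoder = rh_encoder('words,sha1,4', 'markov',
                     '/usr/share/dict/words', 1, None) > wire.write
decoder = rh_decoder('words,sha1,4') > plain.write
encoder.send('hello')       # encoded tokens land in `wire` via the callback
decoder.send(wire.read())   # decoding recovers the original payload
assert plain.read() == 'hello'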
7b7f723ca2c7e4569e24d79d16693b6113824780
|
ida/anim_to_dict.py
|
ida/anim_to_dict.py
|
from collections import namedtuple
from struct import unpack
import sark
ImagePosition = namedtuple('ImagePosition', 'spritesheet image_number y collide x')
def hex_to_sign(value):
if value >= 0x8000:
value -= 0x10000
return value
def byte_to_sign(value):
if value >= 0x80:
value -= 0x100
return value
spritesheets = {
0: 'kn1.ob',
4: 'kn2.ob',
8: 'kn3.ob',
12: 'kn4.ob',
16: 'kn4.ob',
}
collide_type = {
0: 'Collide.NON_SOLID',
1: 'Collide.COLLIDEE',
2: 'Collide.COLLIDER',
}
line = sark.Line()
next_line = True
while next_line:
if line.disasm.startswith("AnimationFrame"):
sprite, img_num, y, collide, x = unpack('<4BH', line.bytes)
y = byte_to_sign(y)
x = hex_to_sign(x)
sprite = spritesheets[sprite]
collide = collide_type[collide]
test = "ImagePosition('{}', {}, {}, {}, {}),".format(sprite, img_num, y, x, collide)
print test
line = line.next
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0>'):
line = line.next
print '),'
print '('
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0FFh>'):
next_line = False
else:
line = line.next
|
Add IDA script to convert IDA anim to Python anim
|
Add IDA script to convert IDA anim to Python anim
Select the address of the AnimationFrame in IDA and run this script and it'll output ImagePositions that I can copy-paste into animations
|
Python
|
agpl-3.0
|
joetsoi/moonstone,joetsoi/moonstone
|
Add IDA script to convert IDA anim to Python anim
Select the address of the AnimationFrame in IDA and run this script and it'll output ImagePositions that I can copy-paste into animations
|
from collections import namedtuple
from struct import unpack
import sark
ImagePosition = namedtuple('ImagePosition', 'spritesheet image_number y collide x')
def hex_to_sign(value):
if value >= 0x8000:
value -= 0x10000
return value
def byte_to_sign(value):
if value >= 0x80:
value -= 0x100
return value
spritesheets = {
0: 'kn1.ob',
4: 'kn2.ob',
8: 'kn3.ob',
12: 'kn4.ob',
16: 'kn4.ob',
}
collide_type = {
0: 'Collide.NON_SOLID',
1: 'Collide.COLLIDEE',
2: 'Collide.COLLIDER',
}
line = sark.Line()
next_line = True
while next_line:
if line.disasm.startswith("AnimationFrame"):
sprite, img_num, y, collide, x = unpack('<4BH', line.bytes)
y = byte_to_sign(y)
x = hex_to_sign(x)
sprite = spritesheets[sprite]
collide = collide_type[collide]
test = "ImagePosition('{}', {}, {}, {}, {}),".format(sprite, img_num, y, x, collide)
print test
line = line.next
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0>'):
line = line.next
print '),'
print '('
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0FFh>'):
next_line = False
else:
line = line.next
|
<commit_before><commit_msg>Add IDA script to convert IDA anim to Python anim
Select the address of the AnimationFrame in IDA and run this script and it'll output ImagePositions that I can copy-paste into animations<commit_after>
|
from collections import namedtuple
from struct import unpack
import sark
ImagePosition = namedtuple('ImagePosition', 'spritesheet image_number y collide x')
def hex_to_sign(value):
if value >= 0x8000:
value -= 0x10000
return value
def byte_to_sign(value):
if value >= 0x80:
value -= 0x100
return value
spritesheets = {
0: 'kn1.ob',
4: 'kn2.ob',
8: 'kn3.ob',
12: 'kn4.ob',
16: 'kn4.ob',
}
collide_type = {
0: 'Collide.NON_SOLID',
1: 'Collide.COLLIDEE',
2: 'Collide.COLLIDER',
}
line = sark.Line()
next_line = True
while next_line:
if line.disasm.startswith("AnimationFrame"):
sprite, img_num, y, collide, x = unpack('<4BH', line.bytes)
y = byte_to_sign(y)
x = hex_to_sign(x)
sprite = spritesheets[sprite]
collide = collide_type[collide]
test = "ImagePosition('{}', {}, {}, {}, {}),".format(sprite, img_num, y, x, collide)
print test
line = line.next
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0>'):
line = line.next
print '),'
print '('
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0FFh>'):
next_line = False
else:
line = line.next
|
Add IDA script to convert IDA anim to Python anim
Select the address of the AnimationFrame in IDA and run this script and it'll output ImagePositions that I can copy-paste into animationsfrom collections import namedtuple
from struct import unpack
import sark
ImagePosition = namedtuple('ImagePosition', 'spritesheet image_number y collide x')
def hex_to_sign(value):
if value >= 0x8000:
value -= 0x10000
return value
def byte_to_sign(value):
if value >= 0x80:
value -= 0x100
return value
spritesheets = {
0: 'kn1.ob',
4: 'kn2.ob',
8: 'kn3.ob',
12: 'kn4.ob',
16: 'kn4.ob',
}
collide_type = {
0: 'Collide.NON_SOLID',
1: 'Collide.COLLIDEE',
2: 'Collide.COLLIDER',
}
line = sark.Line()
next_line = True
while next_line:
if line.disasm.startswith("AnimationFrame"):
sprite, img_num, y, collide, x = unpack('<4BH', line.bytes)
y = byte_to_sign(y)
x = hex_to_sign(x)
sprite = spritesheets[sprite]
collide = collide_type[collide]
test = "ImagePosition('{}', {}, {}, {}, {}),".format(sprite, img_num, y, x, collide)
print test
line = line.next
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0>'):
line = line.next
print '),'
print '('
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0FFh>'):
next_line = False
else:
line = line.next
|
<commit_before><commit_msg>Add IDA script to convert IDA anim to Python anim
Select the address of the AnimationFrame in IDA and run this script and it'll output ImagePositions that I can copy-paste into animations<commit_after>from collections import namedtuple
from struct import unpack
import sark
ImagePosition = namedtuple('ImagePosition', 'spritesheet image_number y collide x')
def hex_to_sign(value):
if value >= 0x8000:
value -= 0x10000
return value
def byte_to_sign(value):
if value >= 0x80:
value -= 0x100
return value
spritesheets = {
0: 'kn1.ob',
4: 'kn2.ob',
8: 'kn3.ob',
12: 'kn4.ob',
16: 'kn4.ob',
}
collide_type = {
0: 'Collide.NON_SOLID',
1: 'Collide.COLLIDEE',
2: 'Collide.COLLIDER',
}
line = sark.Line()
next_line = True
while next_line:
if line.disasm.startswith("AnimationFrame"):
sprite, img_num, y, collide, x = unpack('<4BH', line.bytes)
y = byte_to_sign(y)
x = hex_to_sign(x)
sprite = spritesheets[sprite]
collide = collide_type[collide]
test = "ImagePosition('{}', {}, {}, {}, {}),".format(sprite, img_num, y, x, collide)
print test
line = line.next
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0>'):
line = line.next
print '),'
print '('
elif line.disasm.startswith('EndOfAnimFrame <0FFh, 0FFh>'):
next_line = False
else:
line = line.next
|
|
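Editor's note: a worked example of the two's-complement helpers from the record above; the inputs are illustrative.
hex_to_sign(0xFFFE)  # 65534 >= 0x8000, so 65534 - 0x10000 == -2
hex_to_sign(0x0005)  # below the threshold, returned unchanged: 5
byte_to_sign(0xF0)   # 240 >= 0x80, so 240 - 0x100 == -16
byte_to_sign(0x7F)   # 127, unchanged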
e081d62fb4a715ceafc753ad278579885e6e69da
|
chapter1/mouse_pos.py
|
chapter1/mouse_pos.py
|
#!/bin/env python
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "My Frame", size=(300, 300))
panel = wx.Panel(self, -1)
panel.Bind(wx.EVT_MOTION, self.OnMove)
wx.StaticText(panel, -1, "Pos:", pos=(10, 12))
self.posCtrl = wx.TextCtrl(panel, -1, "", pos=(40, 10))
def OnMove(self, event):
pos = event.GetPosition()
self.posCtrl.SetValue("%s, %s" %(pos.x, pos.y))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = MyFrame()
frame.Show(True)
app.MainLoop()
|
Add chapter1 folder and mouse_pos.py
|
Add chapter1 folder and mouse_pos.py
Signed-off-by: sdphome <e1de489c79848a5fdae5f6d6adb3887195897185@live.cn>
|
Python
|
apache-2.0
|
sdphome/wxPython_training
|
Add chapter1 folder and mouse_pos.py
Signed-off-by: sdphome <e1de489c79848a5fdae5f6d6adb3887195897185@live.cn>
|
#!/bin/env python
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "My Frame", size=(300, 300))
panel = wx.Panel(self, -1)
panel.Bind(wx.EVT_MOTION, self.OnMove)
wx.StaticText(panel, -1, "Pos:", pos=(10, 12))
self.posCtrl = wx.TextCtrl(panel, -1, "", pos=(40, 10))
def OnMove(self, event):
pos = event.GetPosition()
self.posCtrl.SetValue("%s, %s" %(pos.x, pos.y))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = MyFrame()
frame.Show(True)
app.MainLoop()
|
<commit_before><commit_msg>Add chapter1 folder and mouse_pos.py
Signed-off-by: sdphome <e1de489c79848a5fdae5f6d6adb3887195897185@live.cn><commit_after>
|
#!/bin/env python
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "My Frame", size=(300, 300))
panel = wx.Panel(self, -1)
panel.Bind(wx.EVT_MOTION, self.OnMove)
wx.StaticText(panel, -1, "Pos:", pos=(10, 12))
self.posCtrl = wx.TextCtrl(panel, -1, "", pos=(40, 10))
def OnMove(self, event):
pos = event.GetPosition()
self.posCtrl.SetValue("%s, %s" %(pos.x, pos.y))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = MyFrame()
frame.Show(True)
app.MainLoop()
|
Add chapter1 folder and mouse_pos.py
Signed-off-by: sdphome <e1de489c79848a5fdae5f6d6adb3887195897185@live.cn>#!/bin/env python
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "My Frame", size=(300, 300))
panel = wx.Panel(self, -1)
panel.Bind(wx.EVT_MOTION, self.OnMove)
wx.StaticText(panel, -1, "Pos:", pos=(10, 12))
self.posCtrl = wx.TextCtrl(panel, -1, "", pos=(40, 10))
def OnMove(self, event):
pos = event.GetPosition()
self.posCtrl.SetValue("%s, %s" %(pos.x, pos.y))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = MyFrame()
frame.Show(True)
app.MainLoop()
|
<commit_before><commit_msg>Add chapter1 folder and mouse_pos.py
Signed-off-by: sdphome <e1de489c79848a5fdae5f6d6adb3887195897185@live.cn><commit_after>#!/bin/env python
import wx
class MyFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, -1, "My Frame", size=(300, 300))
panel = wx.Panel(self, -1)
panel.Bind(wx.EVT_MOTION, self.OnMove)
wx.StaticText(panel, -1, "Pos:", pos=(10, 12))
self.posCtrl = wx.TextCtrl(panel, -1, "", pos=(40, 10))
def OnMove(self, event):
pos = event.GetPosition()
self.posCtrl.SetValue("%s, %s" %(pos.x, pos.y))
if __name__ == '__main__':
app = wx.PySimpleApp()
frame = MyFrame()
frame.Show(True)
app.MainLoop()
|
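Editor's note: wx.PySimpleApp in the record above is deprecated in modern wxPython (Phoenix); a sketch of the equivalent entry point, otherwise unchanged — not part of the dataset.
import wx

if __name__ == '__main__':
    app = wx.App(False)  # False: do not redirect stdout/stderr to a window
    frame = MyFrame()    # MyFrame as defined in the record above
    frame.Show(True)
    app.MainLoop()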