Column schema of the dump, one record per (commit, file) pair:

| column          | type   | values / length range |
|-----------------|--------|-----------------------|
| commit          | string | 40-40 chars           |
| old_file        | string | 4-118 chars           |
| new_file        | string | 4-118 chars           |
| old_contents    | string | 0-2.94k chars         |
| new_contents    | string | 1-4.43k chars         |
| subject         | string | 15-444 chars          |
| message         | string | 16-3.45k chars        |
| lang            | class  | 1 value (Python)      |
| license         | class  | 13 values             |
| repos           | string | 5-43.2k chars         |
| prompt          | string | 17-4.58k chars        |
| response        | string | 1-4.43k chars         |
| prompt_tagged   | string | 58-4.62k chars        |
| response_tagged | string | 1-4.43k chars         |
| text            | string | 132-7.29k chars       |
| text_tagged     | string | 173-7.33k chars       |
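Each record pairs one commit with one changed file, plus license and repository metadata. Below is a minimal sketch of loading and inspecting a dump with this schema using the Hugging Face `datasets` library; the repository id is a hypothetical placeholder, not the actual id of this dump.

```python
# Minimal sketch: load a dump with this schema and look at one record.
# ASSUMPTION: "user/python-commits" is a placeholder repository id.
from datasets import load_dataset

ds = load_dataset("user/python-commits", split="train")

row = ds[0]
print(row["commit"])                      # 40-character commit SHA
print(row["old_file"], "->", row["new_file"])
print(row["subject"])                     # first line of the commit message
print(row["new_contents"][:200])          # post-commit file contents, truncated
```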
3d6e79c4099015d556e58d63d73f9ee0fac71754
|
BBBC017-mongo.py
|
BBBC017-mongo.py
|
#!/usr/bin/env python
# BBBC017-image.py <metadata.json> <features-mongo.json> <screen-info.json>
import json
import os
from pymongo import MongoClient
import sys


def main(argv=None):
    metadata_file = argv[1]
    metadata = []
    with open(metadata_file) as f:
        for line in f:
            metadata.append(json.loads(line))
    feature_file = argv[2]
    feature = []
    with open(feature_file) as f:
        for line in f:
            feature.append(json.loads(line))
    info_file = argv[3]
    info = []
    with open(info_file) as f:
        info = eval(f.read())
    for document in metadata:
        col = document['column']
        _id = document['_id']
        document['column'] = "{0:02d}".format(col)
        document['_id'] = 'BBBC017-' + _id
        document['screen'] = 'BBBC017'
        document['control_pos'] = False
    for document in feature:
        _id = document['_id']
        document['_id'] = 'BBBC017-' + _id
        if 'neighbours' in document:
            new_array = []
            for item in document['neighbours']:
                new_array.append('BBBC017-' + item)
            document['neighbours'] = new_array
    client = MongoClient('localhost', 27017)
    db_micro = client['microscopium']
    micro_images = db_micro['samples']
    micro_screens = db_micro['screens']
    foo = micro_images.insert(metadata)
    for document in feature:
        current_id = document['_id']
        micro_images.update({'_id': current_id}, {"$set": document})
    micro_screens.insert(info)

if __name__ == '__main__':
    main(sys.argv)
|
Add script used to add BBBC017 sample documents
|
Add script used to add BBBC017 sample documents
|
Python
|
bsd-3-clause
|
microscopium/microscopium-scripts,microscopium/microscopium-scripts
|
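The six trailing columns in the schema (prompt, response, prompt_tagged, response_tagged, text, text_tagged) carry no independent information: in each record they are verbatim recombinations of message, old_contents, and new_contents, with the tagged variants wrapped in <commit_before>/<commit_msg>/<commit_after> tokens. A sketch of the assembly, inferred from the records themselves; the exact separators used when joining the parts are an assumption:

```python
# Rebuild the derived columns of a record from its base fields.
# ASSUMPTION: parts are joined with no separator; the dump may insert newlines.
def derive_columns(row):
    old = row["old_contents"]   # empty string for newly added files
    new = row["new_contents"]
    msg = row["message"]
    return {
        "prompt": old + msg,
        "response": new,
        "prompt_tagged": ("<commit_before>" + old +
                          "<commit_msg>" + msg + "<commit_after>"),
        "response_tagged": new,
        "text": old + msg + new,
        "text_tagged": ("<commit_before>" + old + "<commit_msg>" +
                        msg + "<commit_after>" + new),
    }
```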
378f3f87b68ca054bbdd86adee4b887a5aaa8374
|
tests/test_utils.py
|
tests/test_utils.py
|
#!/usr/bin/env python3

from __future__ import print_function, division

import numpy as np

from sht.utils import l_to_lm


def test_l_to_lm():
    L = 10
    x = np.random.randn(L)
    # Test without fill_zeros
    y = l_to_lm(x)
    for i in range(L):
        assert np.array_equal(y[i**2:(i+1)**2], x[i] * np.ones(2 * i + 1))
    # Test with fill_zeros
    z = l_to_lm(x, fill_zeros=True)
    l = np.arange(L)
    assert np.array_equal(z[l**2 + l], x)
    z = np.delete(z, l**2 + l)
    assert np.array_equal(z, np.zeros(L**2 - L))
|
Add a test for l_to_lm.
|
Add a test for l_to_lm.
|
Python
|
mit
|
praveenv253/sht,praveenv253/sht
|
92cc938b83d56f70b45d0ba3856cae56fa6db77e
|
skyfield/constellationlib.py
|
skyfield/constellationlib.py
|
"""
http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42
https://iopscience.iop.org/article/10.1086/132034/pdf
"""
|
Add reference to constellation paper
|
Add reference to constellation paper
|
Python
|
mit
|
skyfielders/python-skyfield,skyfielders/python-skyfield
|
b06d6acf77c1894a1fbe1c92b8f46af1d9f4dfb3
|
tool/greedy_test.py
|
tool/greedy_test.py
|
#!/usr/bin/env python
#
# This script tests the functions in GreedyEmbedding. It also shows
# how to calculate the distance of any two points in Poincare disk.
#
# Reference:
#   R. Kleinberg - Geographic routing using hyperbolic space
#   A. Cvetkovski - Hyperbolic Embedding and Routing for Dynamic Graphs
#
# Liang Wang @ Dept. of Computer Science, Univ. of Helsinki, Finland
# 2012.10.07 created
#

import re
import os
import sys
import cmath

from greedy_embedding import GreedyEmbedding

if __name__=="__main__":
    greed = GreedyEmbedding()
    sys.exit(0)
|
Add test module for GreedyEmbedding
|
Add test module for GreedyEmbedding
|
Python
|
lgpl-2.1
|
ryanrhymes/mobiccnx,ryanrhymes/mobiccnx,ryanrhymes/mobiccnx,ryanrhymes/mobiccnx,ryanrhymes/mobiccnx,ryanrhymes/mobiccnx
|
68ff568d4761b2aa3adc2c077f8cd6c9fc893c1e
|
src/pretix/urls.py
|
src/pretix/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings

import pretixcontrol.urls
import pretixpresale.urls

urlpatterns = patterns('',
    url(r'^control/', include(pretixcontrol.urls, namespace='control')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'', include(pretixpresale.urls, namespace='presale')),
)

if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings

import pretixcontrol.urls
import pretixpresale.urls

urlpatterns = patterns('',
    url(r'^control/', include(pretixcontrol.urls, namespace='control')),
    url(r'^admin/', include(admin.site.urls)),
    # The pretixpresale namespace is configured at the bottom of this file, because it
    # contains a wildcard-style URL which has to be configured _after_ debug settings.
)

if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )

urlpatterns += patterns('',
    url(r'', include(pretixpresale.urls, namespace='presale'))
)
|
Move URLconfig around to fix Django Debug Toolbar
|
Move URLconfig around to fix Django Debug Toolbar
|
Python
|
apache-2.0
|
awg24/pretix,Unicorn-rzl/pretix,lab2112/pretix,lab2112/pretix,awg24/pretix,Flamacue/pretix,Unicorn-rzl/pretix,lab2112/pretix,lab2112/pretix,Flamacue/pretix,akuks/pretix,Unicorn-rzl/pretix,Flamacue/pretix,akuks/pretix,Flamacue/pretix,Unicorn-rzl/pretix,awg24/pretix,akuks/pretix,awg24/pretix,akuks/pretix
|
5a2774bb90b98e413e69a2ea53afb1b0a6fafff4
|
mxreverse.py
|
mxreverse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import dns.resolver
import dns.reversename
import json
import urlparse


def resolve_mx(hostname):
    response = []
    for data in dns.resolver.query(hostname, 'MX'):
        response.append((data.exchange.to_text(), data.preference))
    return response


def resolve_ip(hostname):
    response = []
    for data in dns.resolver.query(hostname, 'A'):
        response.append(data.address)
    return response


def resolve_ptr(ip):
    try:
        for data in dns.resolver.query(dns.reversename.from_address(ip), 'PTR'):
            return data.target.to_text()
    except dns.resolver.NXDOMAIN:
        pass
    return ''


def check_mx(domainname):
    mails = resolve_mx(domainname)
    in_all = True
    in_any = False
    response = []
    for mx in mails:
        ips = []
        for ip in resolve_ip(mx[0]):
            ptr = resolve_ptr(ip)
            host_in = ptr == mx[0]
            if host_in:
                in_any = True
            else:
                in_all = False
            ips.append({'ip': ip, 'ptr': ptr, 'check': host_in})
        response.append({'name': mx[0], 'preference': mx[1], 'ips': ips})
    return {'domain': domainname, 'mx': response, 'all': in_all, 'any': in_any}


def application(environ, start_response):
    query = urlparse.parse_qs(environ.get('QUERY_STRING', ''))
    status = '200 OK'
    response_headers = [('Content-type', 'application/json')]
    try:
        content = check_mx(query['domain'][0])
    except Exception as e:
        status = '400 ERROR'
        content = {'message': e.message}
    start_response(status, response_headers)
    return [json.dumps(content).encode('utf-8')]
|
Add API for check MX
|
Add API for check MX
|
Python
|
mit
|
eduardoklosowski/mxreverse,eduardoklosowski/mxreverse,eduardoklosowski/mxreverse
|
e111ef984c06202f9c7c3b0e882121befa33bb47
|
Control/controlFromMobile.py
|
Control/controlFromMobile.py
|
import os
import sys
import threading
import time

import numpy as np

sys.path.append(os.getcwd())
sys.path.append('/media/bat/DATA/Baptiste/Nautilab/kite_project/robokite/ObjectTracking')
import mobileState
import serial

mobile = mobileState.mobileState()
a = threading.Thread(None, mobileState.mobileState.checkUpdate, None, (mobile,))
a.start()
ser = serial.Serial('/dev/ttyACM1', 9600)
dt = 0.1
time.sleep(dt)
ser.write('i1')
while True:
    mobile.computeRPY()
    ser.write(str(np.floor(100*mobile.roll/3)/100.0))
    print mobile.roll
    time.sleep(dt)
|
Add a script to be able to control the motor based on mobile phone inclination. This can be used to control the kite while launching it if alone
|
Add a script to be able to control the motor based on mobile phone inclination. This can be used to control the kite while launching it if alone
|
Python
|
mit
|
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
|
4b344091370263a98630623fdb6a083b680a9403
|
update_manifest.py
|
update_manifest.py
|
#!/usr/bin/python

import json
import os
import sys
import tempfile
import urllib2
import zipfile

# Get the manifest urls.
req = urllib2.Request(
    "https://www.bungie.net//platform/Destiny/Manifest/",
    headers={'X-API-Key': sys.argv[1]},
)
resp = json.loads(urllib2.urlopen(req).read())
if resp['ErrorCode'] != 1:
    raise Exception("error: %s", resp)

with tempfile.TemporaryFile() as tf:
    # Download the zipped database.
    path = resp['Response']['mobileWorldContentPaths']['en']
    resp = urllib2.urlopen("https://www.bungie.net%s" % path)
    while True:
        chunk = resp.read(16 << 10)
        if not chunk:
            break
        tf.write(chunk)
    # Unzip the database to the current directory.
    tf.seek(0)
    with zipfile.ZipFile(tf, 'r') as f:
        f.extractall()
|
Add a script to download the manifest database.
|
Add a script to download the manifest database.
|
Python
|
mit
|
zhirsch/destinykioskstatus,zhirsch/destinykioskstatus
|
567e12bfb8d0f4e2a4f6fddf0fab9ffbcbf6d49f
|
requests/_bug.py
|
requests/_bug.py
|
"""Module containing bug report helper(s)."""
from __future__ import print_function
import json
import platform
import sys
import ssl
from . import __version__ as requests_version
try:
from .packages.urllib3.contrib import pyopenssl
except ImportError:
pyopenssl = None
OpenSSL = None
cryptography = None
else:
import OpenSSL
import cryptography
def _implementation():
"""Return a dict with the Python implementation and verison.
Provide both the name and the version of the Python implementation
currently running. For example, on CPython 2.7.5 it will return
{'name': 'CPython', 'version': '2.7.5'}.
This function works best on CPython and PyPy: in particular, it probably
doesn't work for Jython or IronPython. Future investigation should be done
to work out the correct shape of the code for those platforms.
"""
implementation = platform.python_implementation()
if implementation == 'CPython':
implementation_version = platform.python_version()
elif implementation == 'PyPy':
implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
implementation_version = ''.join([
implementation_version, sys.pypy_version_info.releaselevel
])
elif implementation == 'Jython':
implementation_version = platform.python_version() # Complete Guess
elif implementation == 'IronPython':
implementation_version = platform.python_version() # Complete Guess
else:
implementation_version = 'Unknown'
return {'name': implementation, 'version': implementation_version}
def information():
"""Generate information for a bug report."""
try:
platform_info = {
'system': platform.system(),
'release': platform.release(),
}
except IOError:
platform_info = {
'system': 'Unknown',
'release': 'Unknown',
}
implementation_info = _implementation()
pyopenssl_info = {
'version': None,
'openssl_version': '',
}
if OpenSSL:
pyopenssl_info = {
'version': OpenSSL.__version__,
'openssl_version': '%x' % OpenSSL.SSL.OPENSSL_VERSION_NUMBER,
}
cryptography_info = {
'version': getattr(cryptography, '__version__', ''),
}
return {
'platform': platform_info,
'implementation': implementation_info,
'system_ssl': {
'version': '%x' % ssl.OPENSSL_VERSION_NUMBER,
},
'using_pyopenssl': pyopenssl is not None,
'pyOpenSSL': pyopenssl_info,
'cryptography': cryptography_info,
'requests': {
'version': requests_version,
},
}
def print_information():
"""Pretty-print the bug information as JSON."""
print(json.dumps(information(), sort_keys=True, indent=2))
|
Add debugging submodule for bug reporters
|
Add debugging submodule for bug reporters
The suggested usage in a bug report would be
python -c 'from requests import _bug; _bug.print_information()'
This should generate most of the information we tend to ask for
repeatedly from bug reporters.
|
Python
|
apache-2.0
|
psf/requests
|
7989d123453e7d0f2898f2ee4c229f9295ef17cb
|
src/ggrc_basic_permissions/migrations/versions/20170311142655_3ab8b37b04_clear_user_roles_with_invalid_context.py
|
src/ggrc_basic_permissions/migrations/versions/20170311142655_3ab8b37b04_clear_user_roles_with_invalid_context.py
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""
Clear user roles with invalid context

Create Date: 2017-03-11 14:26:55.133169
"""

# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name

from alembic import op

# revision identifiers, used by Alembic.
revision = '3ab8b37b04'
down_revision = '53831d153d8e'


def upgrade():
    """Delete audit user roles from bad audits.

    This will remove all auditors from audits that had the program context
    instead of their own context. In that case there is no way of knowing to
    which audit any given auditor belonged in the first place, so from a
    security standpoint it is safer to remove those roles and manually add
    them back if needed.
    """
    sql = """
        DELETE user_roles
        FROM user_roles
        JOIN roles as r on user_roles.role_id = r.id
        JOIN contexts as c on user_roles.context_id = c.id
        WHERE
            r.name = "Auditor" AND
            c.related_object_type = "Program"
    """
    op.execute(sql)


def downgrade():
    """Downgrade database schema and/or data back to the previous revision."""
|
Remove invalid user role entries
|
Remove invalid user role entries
|
Python
|
apache-2.0
|
plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core
|
Remove invalid user role entries
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Clear user roles with invalid context
Create Date: 2017-03-11 14:26:55.133169
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ab8b37b04'
down_revision = '53831d153d8e'
def upgrade():
"""Delete audit user roles from bad audits.
This will remove all auditors from audits that had the program context
instead of their own context. In that case there is no way of knowing t
which audit the any given auditor belonged to in the first place, so from
security standpoint it is safer to remove those roles and manually add them
back if needed.
"""
sql = """
DELETE user_roles
FROM user_roles
JOIN roles as r on user_roles.role_id = r.id
JOIN contexts as c on user_roles.context_id = c.id
WHERE
r.name = "Auditor" AND
c.related_object_type = "Program"
"""
op.execute(sql)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
|
<commit_before><commit_msg>Remove invalid user role entries<commit_after>
|
# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Clear user roles with invalid context
Create Date: 2017-03-11 14:26:55.133169
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ab8b37b04'
down_revision = '53831d153d8e'
def upgrade():
"""Delete audit user roles from bad audits.
This will remove all auditors from audits that had the program context
instead of their own context. In that case there is no way of knowing t
which audit the any given auditor belonged to in the first place, so from
security standpoint it is safer to remove those roles and manually add them
back if needed.
"""
sql = """
DELETE user_roles
FROM user_roles
JOIN roles as r on user_roles.role_id = r.id
JOIN contexts as c on user_roles.context_id = c.id
WHERE
r.name = "Auditor" AND
c.related_object_type = "Program"
"""
op.execute(sql)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
|
Remove invalid user role entries# Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Clear user roles with invalid context
Create Date: 2017-03-11 14:26:55.133169
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ab8b37b04'
down_revision = '53831d153d8e'
def upgrade():
"""Delete audit user roles from bad audits.
This will remove all auditors from audits that had the program context
instead of their own context. In that case there is no way of knowing t
which audit the any given auditor belonged to in the first place, so from
security standpoint it is safer to remove those roles and manually add them
back if needed.
"""
sql = """
DELETE user_roles
FROM user_roles
JOIN roles as r on user_roles.role_id = r.id
JOIN contexts as c on user_roles.context_id = c.id
WHERE
r.name = "Auditor" AND
c.related_object_type = "Program"
"""
op.execute(sql)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
|
<commit_before><commit_msg>Remove invalid user role entries<commit_after># Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Clear user roles with invalid context
Create Date: 2017-03-11 14:26:55.133169
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from alembic import op
# revision identifiers, used by Alembic.
revision = '3ab8b37b04'
down_revision = '53831d153d8e'
def upgrade():
"""Delete audit user roles from bad audits.
This will remove all auditors from audits that had the program context
instead of their own context. In that case there is no way of knowing to
which audit any given auditor belonged in the first place, so from a
security standpoint it is safer to remove those roles and manually add them
back if needed.
"""
sql = """
DELETE user_roles
FROM user_roles
JOIN roles as r on user_roles.role_id = r.id
JOIN contexts as c on user_roles.context_id = c.id
WHERE
r.name = "Auditor" AND
c.related_object_type = "Program"
"""
op.execute(sql)
def downgrade():
"""Downgrade database schema and/or data back to the previous revision."""
|
|
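A cautious complement to a destructive data migration like the one above is to count the affected rows before deleting them. The sketch below is illustrative only: it assumes a reachable database URL and uses plain SQLAlchemy rather than Alembic, with the table and role names taken from the migration's own SQL.

from sqlalchemy import create_engine, text

def count_bad_auditor_roles(db_url):
    """Count the user_roles rows that the DELETE above would remove."""
    engine = create_engine(db_url)
    query = text("""
        SELECT COUNT(*)
        FROM user_roles
        JOIN roles AS r ON user_roles.role_id = r.id
        JOIN contexts AS c ON user_roles.context_id = c.id
        WHERE r.name = 'Auditor' AND c.related_object_type = 'Program'
    """)
    with engine.connect() as conn:
        return conn.execute(query).scalar()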
3f2a9a7a7438b72459fb2f0fbf0fb960c685cbe2
|
plumeria/plugins/myanimelist.py
|
plumeria/plugins/myanimelist.py
|
import aiohttp
from bs4 import BeautifulSoup
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.message import strip_html
from plumeria.util.ratelimit import rate_limit
username = config.create("myanimelist", "username", fallback="",
comment="Account username for API requests on myanimelist.net")
password = config.create("myanimelist", "password", fallback="",
comment="Account password for API requests on myanimelist.net")
@commands.register("anime", category="Search")
@rate_limit()
async def anime(message):
"""
Gets information about an anime using myanimelist.net.
Example::
/anime code geass
"""
query = message.content.strip()
if not len(query):
raise CommandError("Supply the name of an anime to search.")
auth = aiohttp.BasicAuth(username(), password())
r = await http.get("https://myanimelist.net/api/anime/search.xml", params=[
('q', query)
], auth=auth)
doc = BeautifulSoup(r.text(), features="lxml")
entries = doc.anime.find_all("entry", recursive=False)
if not len(entries):
raise CommandError("No results found.")
entry = entries[0]
return "{image}\n\n" \
"**{name}** ({type})\n\n" \
"**Score:** {score}\n" \
"**Episodes:** {ep_count}\n" \
"**Air Dates:** {start}-{end}\n\n" \
"{synopsis}\n".format(
image=entry.image.text,
type=entry.type.text,
name=entry.title.text,
score=entry.score.text,
ep_count=entry.episodes.text,
start=entry.start_date.text,
end=entry.end_date.text,
synopsis=strip_html(entry.synopsis.text),
)
|
Add MyAnimeList plugin for searching anime.
|
Add MyAnimeList plugin for searching anime.
|
Python
|
mit
|
sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria
|
Add MyAnimeList plugin for searching anime.
|
import aiohttp
from bs4 import BeautifulSoup
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.message import strip_html
from plumeria.util.ratelimit import rate_limit
username = config.create("myanimelist", "username", fallback="",
comment="Account username for API requests on myanimelist.net")
password = config.create("myanimelist", "password", fallback="",
comment="Account password for API requests on myanimelist.net")
@commands.register("anime", category="Search")
@rate_limit()
async def anime(message):
"""
Gets information about an anime using myanimelist.net.
Example::
/anime code geass
"""
query = message.content.strip()
if not len(query):
raise CommandError("Supply the name of an anime to search.")
auth = aiohttp.BasicAuth(username(), password())
r = await http.get("https://myanimelist.net/api/anime/search.xml", params=[
('q', query)
], auth=auth)
doc = BeautifulSoup(r.text(), features="lxml")
entries = doc.anime.find_all("entry", recursive=False)
if not len(entries):
raise CommandError("No results found.")
entry = entries[0]
return "{image}\n\n" \
"**{name}** ({type})\n\n" \
"**Score:** {score}\n" \
"**Episodes:** {ep_count}\n" \
"**Air Dates:** {start}-{end}\n\n" \
"{synopsis}\n".format(
image=entry.image.text,
type=entry.type.text,
name=entry.title.text,
score=entry.score.text,
ep_count=entry.episodes.text,
start=entry.start_date.text,
end=entry.end_date.text,
synopsis=strip_html(entry.synopsis.text),
)
|
<commit_before><commit_msg>Add MyAnimeList plugin for searching anime.<commit_after>
|
import aiohttp
from bs4 import BeautifulSoup
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.message import strip_html
from plumeria.util.ratelimit import rate_limit
username = config.create("myanimelist", "username", fallback="",
comment="Account username for API requests on myanimelist.net")
password = config.create("myanimelist", "password", fallback="",
comment="Account password for API requests on myanimelist.net")
@commands.register("anime", category="Search")
@rate_limit()
async def anime(message):
"""
Gets information about an anime using myanimelist.net.
Example::
/anime code geass
"""
query = message.content.strip()
if not len(query):
raise CommandError("Supply the name of an anime to search.")
auth = aiohttp.BasicAuth(username(), password())
r = await http.get("https://myanimelist.net/api/anime/search.xml", params=[
('q', query)
], auth=auth)
doc = BeautifulSoup(r.text(), features="lxml")
entries = doc.anime.find_all("entry", recursive=False)
if not len(entries):
raise CommandError("No results found.")
entry = entries[0]
return "{image}\n\n" \
"**{name}** ({type})\n\n" \
"**Score:** {score}\n" \
"**Episodes:** {ep_count}\n" \
"**Air Dates:** {start}-{end}\n\n" \
"{synopsis}\n".format(
image=entry.image.text,
type=entry.type.text,
name=entry.title.text,
score=entry.score.text,
ep_count=entry.episodes.text,
start=entry.start_date.text,
end=entry.end_date.text,
synopsis=strip_html(entry.synopsis.text),
)
|
Add MyAnimeList plugin for searching anime.import aiohttp
from bs4 import BeautifulSoup
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.message import strip_html
from plumeria.util.ratelimit import rate_limit
username = config.create("myanimelist", "username", fallback="",
comment="Account username for API requests on myanimelist.net")
password = config.create("myanimelist", "password", fallback="",
comment="Account password for API requests on myanimelist.net")
@commands.register("anime", category="Search")
@rate_limit()
async def anime(message):
"""
Gets information about an anime using myanimelist.net.
Example::
/anime code geass
"""
query = message.content.strip()
if not len(query):
raise CommandError("Supply the name of an anime to search.")
auth = aiohttp.BasicAuth(username(), password())
r = await http.get("https://myanimelist.net/api/anime/search.xml", params=[
('q', query)
], auth=auth)
doc = BeautifulSoup(r.text(), features="lxml")
entries = doc.anime.find_all("entry", recursive=False)
if not len(entries):
raise CommandError("No results found.")
entry = entries[0]
return "{image}\n\n" \
"**{name}** ({type})\n\n" \
"**Score:** {score}\n" \
"**Episodes:** {ep_count}\n" \
"**Air Dates:** {start}-{end}\n\n" \
"{synopsis}\n".format(
image=entry.image.text,
type=entry.type.text,
name=entry.title.text,
score=entry.score.text,
ep_count=entry.episodes.text,
start=entry.start_date.text,
end=entry.end_date.text,
synopsis=strip_html(entry.synopsis.text),
)
|
<commit_before><commit_msg>Add MyAnimeList plugin for searching anime.<commit_after>import aiohttp
from bs4 import BeautifulSoup
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.util import http
from plumeria.util.message import strip_html
from plumeria.util.ratelimit import rate_limit
username = config.create("myanimelist", "username", fallback="",
comment="Account username for API requests on myanimelist.net")
password = config.create("myanimelist", "password", fallback="",
comment="Account password for API requests on myanimelist.net")
@commands.register("anime", category="Search")
@rate_limit()
async def anime(message):
"""
Gets information about an anime using myanimelist.net.
Example::
/anime code geass
"""
query = message.content.strip()
if not len(query):
raise CommandError("Supply the name of an anime to search.")
auth = aiohttp.BasicAuth(username(), password())
r = await http.get("https://myanimelist.net/api/anime/search.xml", params=[
('q', query)
], auth=auth)
doc = BeautifulSoup(r.text(), features="lxml")
entries = doc.anime.find_all("entry", recursive=False)
if not len(entries):
raise CommandError("No results found.")
entry = entries[0]
return "{image}\n\n" \
"**{name}** ({type})\n\n" \
"**Score:** {score}\n" \
"**Episodes:** {ep_count}\n" \
"**Air Dates:** {start}-{end}\n\n" \
"{synopsis}\n".format(
image=entry.image.text,
type=entry.type.text,
name=entry.title.text,
score=entry.score.text,
ep_count=entry.episodes.text,
start=entry.start_date.text,
end=entry.end_date.text,
synopsis=strip_html(entry.synopsis.text),
)
|
|
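Because the command depends on plumeria's HTTP wrapper and a myanimelist.net account, the XML-parsing step is easiest to study on its own. This standalone sketch (assuming lxml is installed) feeds BeautifulSoup a made-up fragment in the shape the old MAL search API returned; every field value here is invented for illustration.

from bs4 import BeautifulSoup

sample_xml = """
<anime>
  <entry>
    <title>Code Geass</title>
    <type>TV</type>
    <score>8.7</score>
    <episodes>25</episodes>
    <start_date>2006-10-06</start_date>
    <end_date>2007-07-29</end_date>
    <image>http://example.com/poster.jpg</image>
    <synopsis>An exiled prince gains the power of kings.</synopsis>
  </entry>
</anime>
"""

# Parse as XML so the element tree keeps its original structure.
doc = BeautifulSoup(sample_xml, features="xml")
entry = doc.anime.find_all("entry", recursive=False)[0]
# Prints: Code Geass (TV) scored 8.7
print("{} ({}) scored {}".format(entry.title.text, entry.type.text, entry.score.text))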
8298b23f48028861985d7431fb8514ba5a4bfff6
|
17B-162/spw_setup.py
|
17B-162/spw_setup.py
|
# Line SPW setup for 17B-162 w/ rest frequencies
linespw_dict = {0: ["HI", "1.420405752GHz"],
1: ["H166alp", "1.42473GHz"],
2: ["H164alp", "1.47734GHz"],
3: ["OH1612", "1.612231GHz"],
4: ["H158alp", "1.65154GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
8: ["H153alp", "1.81825GHz"],
9: ["H152alp", "1.85425GHz"]}
|
Add dict of rest freqs for 17B
|
Add dict of rest freqs for 17B
|
Python
|
mit
|
e-koch/VLA_Lband,e-koch/VLA_Lband
|
Add dict of rest freqs for 17B
|
# Line SPW setup for 17B-162 w/ rest frequencies
linespw_dict = {0: ["HI", "1.420405752GHz"],
1: ["H166alp", "1.42473GHz"],
2: ["H164alp", "1.47734GHz"],
3: ["OH1612", "1.612231GHz"],
4: ["H158alp", "1.65154GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
8: ["H153alp", "1.81825GHz"],
9: ["H152alp", "1.85425GHz"]}
|
<commit_before><commit_msg>Add dict of rest freqs for 17B<commit_after>
|
# Line SPW setup for 17B-162 w/ rest frequencies
linespw_dict = {0: ["HI", "1.420405752GHz"],
1: ["H166alp", "1.42473GHz"],
2: ["H164alp", "1.47734GHz"],
3: ["OH1612", "1.612231GHz"],
4: ["H158alp", "1.65154GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
8: ["H153alp", "1.81825GHz"],
9: ["H152alp", "1.85425GHz"]}
|
Add dict of rest freqs for 17B
# Line SPW setup for 17B-162 w/ rest frequencies
linespw_dict = {0: ["HI", "1.420405752GHz"],
1: ["H166alp", "1.42473GHz"],
2: ["H164alp", "1.47734GHz"],
3: ["OH1612", "1.612231GHz"],
4: ["H158alp", "1.65154GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
8: ["H153alp", "1.81825GHz"],
9: ["H152alp", "1.85425GHz"]}
|
<commit_before><commit_msg>Add dict of rest freqs for 17B<commit_after>
# Line SPW setup for 17B-162 w/ rest frequencies
linespw_dict = {0: ["HI", "1.420405752GHz"],
1: ["H166alp", "1.42473GHz"],
2: ["H164alp", "1.47734GHz"],
3: ["OH1612", "1.612231GHz"],
4: ["H158alp", "1.65154GHz"],
5: ["OH1665", "1.6654018GHz"],
6: ["OH1667", "1.667359GHz"],
7: ["OH1720", "1.72053GHz"],
8: ["H153alp", "1.81825GHz"],
9: ["H152alp", "1.85425GHz"]}
|
|
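A quick sketch (not from the VLA_Lband repo) of how a mapping like this might be consumed, for example turning the rest-frequency strings into floats for later arithmetic:

linespw_dict = {0: ["HI", "1.420405752GHz"],
                5: ["OH1665", "1.6654018GHz"]}  # abbreviated copy for the demo

def rest_freq_ghz(freq_string):
    """Drop the 'GHz' suffix and return the value as a float."""
    return float(freq_string[:-len("GHz")])

for spw, (line_name, freq) in sorted(linespw_dict.items()):
    print("SPW {0}: {1} at {2:.7f} GHz".format(spw, line_name, rest_freq_ghz(freq)))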
5eaa607ce4e0b04dfd6ee050d964119064ad68f1
|
scripts/create_dataset_toml.py
|
scripts/create_dataset_toml.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os
import re
import argparse
import h5py
import numpy as np
import pytoml as toml
from diluvian.util import get_nonzero_aabb
def create_dataset_conf_from_files(path, file_pattern, name_regex, name_format, mask_bounds=True):
pathspec = path + file_pattern
name_regex = re.compile(name_regex)
datasets = []
for pathname in glob.iglob(pathspec):
filename = os.path.basename(pathname)
name = name_format.format(*name_regex.match(filename).groups())
ds = {
'name': name,
'hdf5_file': pathname,
'image_dataset': 'volumes/raw',
'label_dataset': 'volumes/labels/neuron_ids',
'mask_dataset': 'volumes/labels/mask',
'resolution': [40, 4, 4],
}
if mask_bounds:
print('Finding mask bounds for {}'.format(filename))
f = h5py.File(pathname, 'r')
d = f[ds['mask_dataset']]
mask_data = d[:]
mask_min, mask_max = get_nonzero_aabb(mask_data)
ds['mask_bounds'] = [mask_min, mask_max]
f.close()
datasets.append(ds)
return {'dataset': datasets}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a dataset TOML from a directory of HDF5 files.')
parser.add_argument(
'--file-pattern', dest='file_pattern', default='sample_[ABC]*hdf',
help='Glob for HDF5 volume filenames.')
parser.add_argument(
'--name-regex', dest='name_regex', default=r'sample_([ABC])(.*).hdf',
help='Regex for extracting volume name from filenames.')
parser.add_argument(
'--name-format', dest='name_format', default='Sample {} ({})',
help='Format string for creating volume names from name regex matches.')
parser.add_argument(
'path', default=None,
help='Path to the HDF5 volume files.')
parser.add_argument(
'dataset_file', default=None,
help='Name for the TOML dataset file that will be created.')
args = parser.parse_args()
conf = create_dataset_conf_from_files(args.path, args.file_pattern, args.name_regex, args.name_format)
print('Found {} datasets.'.format(len(conf['dataset'])))
with open(args.dataset_file, 'w') as tomlfile:
tomlfile.write(toml.dumps(conf))
|
Add old script for creating dataset TOML from HDF5
|
Add old script for creating dataset TOML from HDF5
|
Python
|
mit
|
aschampion/diluvian
|
Add old script for creating dataset TOML from HDF5
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os
import re
import argparse
import h5py
import numpy as np
import pytoml as toml
from diluvian.util import get_nonzero_aabb
def create_dataset_conf_from_files(path, file_pattern, name_regex, name_format, mask_bounds=True):
pathspec = path + file_pattern
name_regex = re.compile(name_regex)
datasets = []
for pathname in glob.iglob(pathspec):
filename = os.path.basename(pathname)
name = name_format.format(*name_regex.match(filename).groups())
ds = {
'name': name,
'hdf5_file': pathname,
'image_dataset': 'volumes/raw',
'label_dataset': 'volumes/labels/neuron_ids',
'mask_dataset': 'volumes/labels/mask',
'resolution': [40, 4, 4],
}
if mask_bounds:
print('Finding mask bounds for {}'.format(filename))
f = h5py.File(pathname, 'r')
d = f[ds['mask_dataset']]
mask_data = d[:]
mask_min, mask_max = get_nonzero_aabb(mask_data)
ds['mask_bounds'] = [mask_min, mask_max]
f.close()
datasets.append(ds)
return {'dataset': datasets}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a dataset TOML from a directory of HDF5 files.')
parser.add_argument(
'--file-pattern', dest='file_pattern', default='sample_[ABC]*hdf',
help='Glob for HDF5 volume filenames.')
parser.add_argument(
'--name-regex', dest='name_regex', default=r'sample_([ABC])(.*).hdf',
help='Regex for extracting volume name from filenames.')
parser.add_argument(
'--name-format', dest='name_format', default='Sample {} ({})',
help='Format string for creating volume names from name regex matches.')
parser.add_argument(
'path', default=None,
help='Path to the HDF5 volume files.')
parser.add_argument(
'dataset_file', default=None,
help='Name for the TOML dataset file that will be created.')
args = parser.parse_args()
conf = create_dataset_conf_from_files(args.path, args.file_pattern, args.name_regex, args.name_format)
print('Found {} datasets.'.format(len(conf['dataset'])))
with open(args.dataset_file, 'w') as tomlfile:
tomlfile.write(toml.dumps(conf))
|
<commit_before><commit_msg>Add old script for creating dataset TOML from HDF5<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os
import re
import argparse
import h5py
import numpy as np
import pytoml as toml
from diluvian.util import get_nonzero_aabb
def create_dataset_conf_from_files(path, file_pattern, name_regex, name_format, mask_bounds=True):
pathspec = path + file_pattern
name_regex = re.compile(name_regex)
datasets = []
for pathname in glob.iglob(pathspec):
filename = os.path.basename(pathname)
name = name_format.format(*name_regex.match(filename).groups())
ds = {
'name': name,
'hdf5_file': pathname,
'image_dataset': 'volumes/raw',
'label_dataset': 'volumes/labels/neuron_ids',
'mask_dataset': 'volumes/labels/mask',
'resolution': [40, 4, 4],
}
if mask_bounds:
print('Finding mask bounds for {}'.format(filename))
f = h5py.File(pathname, 'r')
d = f[ds['mask_dataset']]
mask_data = d[:]
mask_min, mask_max = get_nonzero_aabb(mask_data)
ds['mask_bounds'] = [mask_min, mask_max]
f.close()
datasets.append(ds)
return {'dataset': datasets}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a dataset TOML from a directory of HDF5 files.')
parser.add_argument(
'--file-pattern', dest='file_pattern', default='sample_[ABC]*hdf',
help='Glob for HDF5 volume filenames.')
parser.add_argument(
'--name-regex', dest='name_regex', default=r'sample_([ABC])(.*).hdf',
help='Regex for extracting volume name from filenames.')
parser.add_argument(
'--name-format', dest='name_format', default='Sample {} ({})',
help='Format string for creating volume names from name regex matches.')
parser.add_argument(
'path', default=None,
help='Path to the HDF5 volume files.')
parser.add_argument(
'dataset_file', default=None,
help='Name for the TOML dataset file that will be created.')
args = parser.parse_args()
conf = create_dataset_conf_from_files(args.path, args.file_pattern, args.name_regex, args.name_format)
print('Found {} datasets.'.format(len(conf['dataset'])))
with open(args.dataset_file, 'w') as tomlfile:
tomlfile.write(toml.dumps(conf))
|
Add old script for creating dataset TOML from HDF5#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os
import re
import argparse
import h5py
import numpy as np
import pytoml as toml
from diluvian.util import get_nonzero_aabb
def create_dataset_conf_from_files(path, file_pattern, name_regex, name_format, mask_bounds=True):
pathspec = path + file_pattern
name_regex = re.compile(name_regex)
datasets = []
for pathname in glob.iglob(pathspec):
filename = os.path.basename(pathname)
name = name_format.format(*name_regex.match(filename).groups())
ds = {
'name': name,
'hdf5_file': pathname,
'image_dataset': 'volumes/raw',
'label_dataset': 'volumes/labels/neuron_ids',
'mask_dataset': 'volumes/labels/mask',
'resolution': [40, 4, 4],
}
if mask_bounds:
print('Finding mask bounds for {}'.format(filename))
f = h5py.File(pathname, 'r')
d = f[ds['mask_dataset']]
mask_data = d[:]
mask_min, mask_max = get_nonzero_aabb(mask_data)
ds['mask_bounds'] = [mask_min, mask_max]
f.close()
datasets.append(ds)
return {'dataset': datasets}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a dataset TOML from a directory of HDF5 files.')
parser.add_argument(
'--file-pattern', dest='file_pattern', default='sample_[ABC]*hdf',
help='Glob for HDF5 volume filenames.')
parser.add_argument(
'--name-regex', dest='name_regex', default=r'sample_([ABC])(.*).hdf',
help='Regex for extracting volume name from filenames.')
parser.add_argument(
'--name-format', dest='name_format', default='Sample {} ({})',
help='Format string for creating volume names from name regex matches.')
parser.add_argument(
'path', default=None,
help='Path to the HDF5 volume files.')
parser.add_argument(
'dataset_file', default=None,
help='Name for the TOML dataset file that will be created.')
args = parser.parse_args()
conf = create_dataset_conf_from_files(args.path, args.file_pattern, args.name_regex, args.name_format)
print('Found {} datasets.'.format(len(conf['dataset'])))
with open(args.dataset_file, 'w') as tomlfile:
tomlfile.write(toml.dumps(conf))
|
<commit_before><commit_msg>Add old script for creating dataset TOML from HDF5<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import glob
import os
import re
import argparse
import h5py
import numpy as np
import pytoml as toml
from diluvian.util import get_nonzero_aabb
def create_dataset_conf_from_files(path, file_pattern, name_regex, name_format, mask_bounds=True):
pathspec = path + file_pattern
name_regex = re.compile(name_regex)
datasets = []
for pathname in glob.iglob(pathspec):
filename = os.path.basename(pathname)
name = name_format.format(*name_regex.match(filename).groups())
ds = {
'name': name,
'hdf5_file': pathname,
'image_dataset': 'volumes/raw',
'label_dataset': 'volumes/labels/neuron_ids',
'mask_dataset': 'volumes/labels/mask',
'resolution': [40, 4, 4],
}
if mask_bounds:
print('Finding mask bounds for {}'.format(filename))
f = h5py.File(pathname, 'r')
d = f[ds['mask_dataset']]
mask_data = d[:]
mask_min, mask_max = get_nonzero_aabb(mask_data)
ds['mask_bounds'] = [mask_min, mask_max]
f.close()
datasets.append(ds)
return {'dataset': datasets}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Create a dataset TOML from a directory of HDF5 files.')
parser.add_argument(
'--file-pattern', dest='file_pattern', default='sample_[ABC]*hdf',
help='Glob for HDF5 volume filenames.')
parser.add_argument(
'--name-regex', dest='name_regex', default=r'sample_([ABC])(.*).hdf',
help='Regex for extracting volume name from filenames.')
parser.add_argument(
'--name-format', dest='name_format', default='Sample {} ({})',
help='Format string for creating volume names from name regex matches.')
parser.add_argument(
'path', default=None,
help='Path to the HDF5 volume files.')
parser.add_argument(
'dataset_file', default=None,
help='Name for the TOML dataset file that will be created.')
args = parser.parse_args()
conf = create_dataset_conf_from_files(args.path, args.file_pattern, args.name_regex, args.name_format)
print('Found {} datasets.'.format(len(conf['dataset'])))
with open(args.dataset_file, 'w') as tomlfile:
tomlfile.write(toml.dumps(conf))
|
|
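The default --name-regex and --name-format arguments above interact in a way that is easy to check in isolation. This standalone snippet uses made-up filenames to show the volume names the script would generate:

import re

name_regex = re.compile(r'sample_([ABC])(.*).hdf')
name_format = 'Sample {} ({})'

for filename in ['sample_A_padded.hdf', 'sample_C_20170101.hdf']:
    groups = name_regex.match(filename).groups()
    print(name_format.format(*groups))
# Prints:
#   Sample A (_padded)
#   Sample C (_20170101)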
c51062a8e4e621fc50c3ed393f6d3dad8e19b4c3
|
cs4teachers/tests/events/models/test_location.py
|
cs4teachers/tests/events/models/test_location.py
|
from tests.BaseTest import BaseTest
from events.models import Location
class LocationModelTest(BaseTest):
def test_location(self):
location = self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result,
location
)
def test_location_slug(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.slug,
"location-1"
)
def test_location_slug_unique(self):
self.event_data.create_location(1)
self.event_data.create_location(1)
self.event_data.create_location(1)
Location.objects.get(slug="location-1")
Location.objects.get(slug="location-1-2")
Location.objects.get(slug="location-1-3")
def test_location_name(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.name,
"Location 1"
)
def test_location_description(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.description,
"Description for Location 1"
)
def test_location_absolute_url(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.get_absolute_url(),
"/events/location/location-1/"
)
def test_location_str(self):
location = self.event_data.create_location(1)
self.assertEqual(location.__str__(), "Location 1")
|
Add tests for events Location model
|
Add tests for events Location model
|
Python
|
mit
|
uccser/cs4teachers,uccser/cs4teachers,uccser/cs4teachers,uccser/cs4teachers
|
Add tests for events Location model
|
from tests.BaseTest import BaseTest
from events.models import Location
class LocationModelTest(BaseTest):
def test_location(self):
location = self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result,
location
)
def test_location_slug(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.slug,
"location-1"
)
def test_location_slug_unique(self):
self.event_data.create_location(1)
self.event_data.create_location(1)
self.event_data.create_location(1)
Location.objects.get(slug="location-1")
Location.objects.get(slug="location-1-2")
Location.objects.get(slug="location-1-3")
def test_location_name(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.name,
"Location 1"
)
def test_location_description(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.description,
"Description for Location 1"
)
def test_location_absolute_url(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.get_absolute_url(),
"/events/location/location-1/"
)
def test_location_str(self):
location = self.event_data.create_location(1)
self.assertEqual(location.__str__(), "Location 1")
|
<commit_before><commit_msg>Add tests for events Location model<commit_after>
|
from tests.BaseTest import BaseTest
from events.models import Location
class LocationModelTest(BaseTest):
def test_location(self):
location = self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result,
location
)
def test_location_slug(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.slug,
"location-1"
)
def test_location_slug_unique(self):
self.event_data.create_location(1)
self.event_data.create_location(1)
self.event_data.create_location(1)
Location.objects.get(slug="location-1")
Location.objects.get(slug="location-1-2")
Location.objects.get(slug="location-1-3")
def test_location_name(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.name,
"Location 1"
)
def test_location_description(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.description,
"Description for Location 1"
)
def test_location_absolute_url(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.get_absolute_url(),
"/events/location/location-1/"
)
def test_location_str(self):
location = self.event_data.create_location(1)
self.assertEqual(location.__str__(), "Location 1")
|
Add tests for events Location modelfrom tests.BaseTest import BaseTest
from events.models import Location
class LocationModelTest(BaseTest):
def test_location(self):
location = self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result,
location
)
def test_location_slug(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.slug,
"location-1"
)
def test_location_slug_unique(self):
self.event_data.create_location(1)
self.event_data.create_location(1)
self.event_data.create_location(1)
Location.objects.get(slug="location-1")
Location.objects.get(slug="location-1-2")
Location.objects.get(slug="location-1-3")
def test_location_name(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.name,
"Location 1"
)
def test_location_description(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.description,
"Description for Location 1"
)
def test_location_absolute_url(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.get_absolute_url(),
"/events/location/location-1/"
)
def test_location_str(self):
location = self.event_data.create_location(1)
self.assertEqual(location.__str__(), "Location 1")
|
<commit_before><commit_msg>Add tests for events Location model<commit_after>from tests.BaseTest import BaseTest
from events.models import Location
class LocationModelTest(BaseTest):
def test_location(self):
location = self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result,
location
)
def test_location_slug(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.slug,
"location-1"
)
def test_location_slug_unique(self):
self.event_data.create_location(1)
self.event_data.create_location(1)
self.event_data.create_location(1)
Location.objects.get(slug="location-1")
Location.objects.get(slug="location-1-2")
Location.objects.get(slug="location-1-3")
def test_location_name(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.name,
"Location 1"
)
def test_location_description(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.description,
"Description for Location 1"
)
def test_location_absolute_url(self):
self.event_data.create_location(1)
query_result = Location.objects.get(slug="location-1")
self.assertEqual(
query_result.get_absolute_url(),
"/events/location/location-1/"
)
def test_location_str(self):
location = self.event_data.create_location(1)
self.assertEqual(location.__str__(), "Location 1")
|
|
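The slug-uniqueness test above relies on the model appending -2, -3 to repeated slugs. A minimal standalone sketch of that convention follows; the project's real implementation presumably lives in the model's save logic and may differ.

def unique_slug(base, existing):
    """Return base, or base-2, base-3, ... until the slug is unused."""
    if base not in existing:
        return base
    counter = 2
    while "{}-{}".format(base, counter) in existing:
        counter += 1
    return "{}-{}".format(base, counter)

taken = set()
for _ in range(3):
    taken.add(unique_slug("location-1", taken))
print(sorted(taken))  # ['location-1', 'location-1-2', 'location-1-3']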
21fa0a82ce89e5b71bc146591aaa2dac3fa0d04a
|
sv-comp/witness-isomorphism.py
|
sv-comp/witness-isomorphism.py
|
#!/usr/bin/python3
import sys
import networkx as nx
# def witness_node_match(n1, n2):
# return True
witness_node_match = nx.algorithms.isomorphism.categorical_node_match(
["entry", "sink", "violation", "invariant", "invariant.scope"],
[False, False, False, None, None]
)
# def witness_edge_match(e1, e2):
# return True
witness_edge_match = nx.algorithms.isomorphism.categorical_multiedge_match(
["assumption", "assumption.scope", "assumption.resultfunction", "control", "startline", "endline", "startoffset", "endoffset", "enterLoopHead", "enterFunction", "returnFromFunction", "threadId", "createThread"],
[None, None, None, None, None, None, None, None, False, None, None, None, None]
)
expected = nx.read_graphml(sys.argv[1], force_multigraph=True)
actual = nx.read_graphml(sys.argv[2], force_multigraph=True)
isomorphic = nx.is_isomorphic(expected, actual, node_match=witness_node_match, edge_match=witness_edge_match)
print(isomorphic)
|
Add script for checking witness isomorphism
|
Add script for checking witness isomorphism
|
Python
|
mit
|
goblint/analyzer,goblint/analyzer,goblint/analyzer,goblint/analyzer,goblint/analyzer
|
Add script for checking witness isomorphism
|
#!/usr/bin/python3
import sys
import networkx as nx
# def witness_node_match(n1, n2):
# return True
witness_node_match = nx.algorithms.isomorphism.categorical_node_match(
["entry", "sink", "violation", "invariant", "invariant.scope"],
[False, False, False, None, None]
)
# def witness_edge_match(e1, e2):
# return True
witness_edge_match = nx.algorithms.isomorphism.categorical_multiedge_match(
["assumption", "assumption.scope", "assumption.resultfunction", "control", "startline", "endline", "startoffset", "endoffset", "enterLoopHead", "enterFunction", "returnFromFunction", "threadId", "createThread"],
[None, None, None, None, None, None, None, None, False, None, None, None, None]
)
expected = nx.read_graphml(sys.argv[1], force_multigraph=True)
actual = nx.read_graphml(sys.argv[2], force_multigraph=True)
isomorphic = nx.is_isomorphic(expected, actual, node_match=witness_node_match, edge_match=witness_edge_match)
print(isomorphic)
|
<commit_before><commit_msg>Add script for checking witness isomorphism<commit_after>
|
#!/usr/bin/python3
import sys
import networkx as nx
# def witness_node_match(n1, n2):
# return True
witness_node_match = nx.algorithms.isomorphism.categorical_node_match(
["entry", "sink", "violation", "invariant", "invariant.scope"],
[False, False, False, None, None]
)
# def witness_edge_match(e1, e2):
# return True
witness_edge_match = nx.algorithms.isomorphism.categorical_multiedge_match(
["assumption", "assumption.scope", "assumption.resultfunction", "control", "startline", "endline", "startoffset", "endoffset", "enterLoopHead", "enterFunction", "returnFromFunction", "threadId", "createThread"],
[None, None, None, None, None, None, None, None, False, None, None, None, None]
)
expected = nx.read_graphml(sys.argv[1], force_multigraph=True)
actual = nx.read_graphml(sys.argv[2], force_multigraph=True)
isomorphic = nx.is_isomorphic(expected, actual, node_match=witness_node_match, edge_match=witness_edge_match)
print(isomorphic)
|
Add script for checking witness isomorphism#!/usr/bin/python3
import sys
import networkx as nx
# def witness_node_match(n1, n2):
# return True
witness_node_match = nx.algorithms.isomorphism.categorical_node_match(
["entry", "sink", "violation", "invariant", "invariant.scope"],
[False, False, False, None, None]
)
# def witness_edge_match(e1, e2):
# return True
witness_edge_match = nx.algorithms.isomorphism.categorical_multiedge_match(
["assumption", "assumption.scope", "assumption.resultfunction", "control", "startline", "endline", "startoffset", "endoffset", "enterLoopHead", "enterFunction", "returnFromFunction", "threadId", "createThread"],
[None, None, None, None, None, None, None, None, False, None, None, None, None]
)
expected = nx.read_graphml(sys.argv[1], force_multigraph=True)
actual = nx.read_graphml(sys.argv[2], force_multigraph=True)
isomorphic = nx.is_isomorphic(expected, actual, node_match=witness_node_match, edge_match=witness_edge_match)
print(isomorphic)
|
<commit_before><commit_msg>Add script for checking witness isomorphism<commit_after>#!/usr/bin/python3
import sys
import networkx as nx
# def witness_node_match(n1, n2):
# return True
witness_node_match = nx.algorithms.isomorphism.categorical_node_match(
["entry", "sink", "violation", "invariant", "invariant.scope"],
[False, False, False, None, None]
)
# def witness_edge_match(e1, e2):
# return True
witness_edge_match = nx.algorithms.isomorphism.categorical_multiedge_match(
["assumption", "assumption.scope", "assumption.resultfunction", "control", "startline", "endline", "startoffset", "endoffset", "enterLoopHead", "enterFunction", "returnFromFunction", "threadId", "createThread"],
[None, None, None, None, None, None, None, None, False, None, None, None, None]
)
expected = nx.read_graphml(sys.argv[1], force_multigraph=True)
actual = nx.read_graphml(sys.argv[2], force_multigraph=True)
isomorphic = nx.is_isomorphic(expected, actual, node_match=witness_node_match, edge_match=witness_edge_match)
print(isomorphic)
|
|
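A tiny self-contained demo of the matching machinery the script uses: two graphs only count as isomorphic when the named node attribute agrees, with the given default applied to nodes that lack the attribute.

import networkx as nx

g1 = nx.MultiDiGraph()
g1.add_node("a", entry=True)
g1.add_node("b")  # no attribute; treated as the default (False)
g1.add_edge("a", "b")

g2 = nx.MultiDiGraph()
g2.add_node("x", entry=True)
g2.add_node("y")
g2.add_edge("x", "y")

node_match = nx.algorithms.isomorphism.categorical_node_match("entry", False)
print(nx.is_isomorphic(g1, g2, node_match=node_match))  # True

g2.nodes["y"]["entry"] = True  # now the attributes disagree with g1
print(nx.is_isomorphic(g1, g2, node_match=node_match))  # False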
757b82071bda164342acd86ebb1df26239dafa5a
|
enable/tools/drop_tool.py
|
enable/tools/drop_tool.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#------------------------------------------------------------------------------
""" Abstract base class for tools that handle drag and drop """
from __future__ import absolute_import, print_function, division
from enable.base_tool import BaseTool
class BaseDropTool(BaseTool):
""" Abstract base class for tools that handle drag and drop """
def normal_drag_over(self, event):
""" Handle dragging over the component """
try:
result = self.accepts_drop(event.obj)
self.component.window.set_drag_result(result)
except Exception:
self.component.window.set_drag_result("error")
raise
def normal_dropped_on(self, event):
if self.accepts_drop(event.obj) != "none":
self.handle_drop(event.obj)
def accepts_drop(self, urls):
""" Whether or not to accept the drag, and the type of drag
The return value is either "none", if the drag is refused for the
dragged object types, or one of "copy", "move", or "link".
Subclasses should override this method.
"""
raise NotImplementedError
def handle_drop(self, urls):
""" Handle objects being dropped on the component
Subclasses should override this method.
"""
raise NotImplementedError
|
Add base tool to facilitate drag and drop support.
|
Add base tool to facilitate drag and drop support.
|
Python
|
bsd-3-clause
|
tommy-u/enable,tommy-u/enable,tommy-u/enable,tommy-u/enable
|
Add base tool to facilitate drag and drop support.
|
#------------------------------------------------------------------------------
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#------------------------------------------------------------------------------
""" Abstract base class for tools that handle drag and drop """
from __future__ import absolute_import, print_function, division
from enable.base_tool import BaseTool
class BaseDropTool(BaseTool):
""" Abstract base class for tools that handle drag and drop """
def normal_drag_over(self, event):
""" Handle dragging over the component """
try:
result = self.accepts_drop(event.obj)
self.component.window.set_drag_result(result)
except Exception:
self.component.window.set_drag_result("error")
raise
def normal_dropped_on(self, event):
if self.accepts_drop(event.obj) != "none":
self.handle_drop(event.obj)
def accepts_drop(self, urls):
""" Whether or not to accept the drag, and the type of drag
The return value is either "none", if the drag is refused for the
dragged object types, or one of "copy", "move", or "link".
Subclasses should override this method.
"""
raise NotImplementedError
def handle_drop(self, urls):
""" Handle objects being dropped on the component
Subclasses should override this method.
"""
raise NotImplementedError
|
<commit_before><commit_msg>Add base tool to facilitate drag and drop support.<commit_after>
|
#------------------------------------------------------------------------------
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#------------------------------------------------------------------------------
""" Abstract base class for tools that handle drag and drop """
from __future__ import absolute_import, print_function, division
from enable.base_tool import BaseTool
class BaseDropTool(BaseTool):
""" Abstract base class for tools that handle drag and drop """
def normal_drag_over(self, event):
""" Handle dragging over the component """
try:
result = self.accepts_drop(event.obj)
self.component.window.set_drag_result(result)
except Exception:
self.component.window.set_drag_result("error")
raise
def normal_dropped_on(self, event):
if self.accepts_drop(event.obj) != "none":
self.handle_drop(event.obj)
def accepts_drop(self, urls):
""" Whether or not to accept the drag, and the type of drag
The return value is either "none", if the drag is refused for the
dragged object types, or one of "copy", "move", or "link".
Subclasses should override this method.
"""
raise NotImplementedError
def handle_drop(self, urls):
""" Handle objects being dropped on the component
Subclasses should override this method.
"""
raise NotImplementedError
|
Add base tool to facilitate drag and drop support.#------------------------------------------------------------------------------
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#------------------------------------------------------------------------------
""" Abstract base class for tools that handle drag and drop """
from __future__ import absolute_import, print_function, division
from enable.base_tool import BaseTool
class BaseDropTool(BaseTool):
""" Abstract base class for tools that handle drag and drop """
def normal_drag_over(self, event):
""" Handle dragging over the component """
try:
result = self.accepts_drop(event.obj)
self.component.window.set_drag_result(result)
except Exception:
self.component.window.set_drag_result("error")
raise
def normal_dropped_on(self, event):
if self.accepts_drop(event.obj) != "none":
self.handle_drop(event.obj)
def accepts_drop(self, urls):
""" Whether or not to accept the drag, and the type of drag
The return value is either "none", if the drag is refused for the
dragged object types, or one of "copy", "move", or "link".
Subclasses should override this method.
"""
raise NotImplementedError
def handle_drop(self, urls):
""" Handle objects being dropped on the component
Subclasses should override this method.
"""
raise NotImplementedError
|
<commit_before><commit_msg>Add base tool to facilitate drag and drop support.<commit_after>#------------------------------------------------------------------------------
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#------------------------------------------------------------------------------
""" Abstract base class for tools that handle drag and drop """
from __future__ import absolute_import, print_function, division
from enable.base_tool import BaseTool
class BaseDropTool(BaseTool):
""" Abstract base class for tools that handle drag and drop """
def normal_drag_over(self, event):
""" Handle dragging over the component """
try:
result = self.accepts_drop(event.obj)
self.component.window.set_drag_result(result)
except Exception:
self.component.window.set_drag_result("error")
raise
def normal_dropped_on(self, event):
if self.accepts_drop(event.obj) != "none":
self.handle_drop(event.obj)
def accepts_drop(self, urls):
""" Whether or not to accept the drag, and the type of drag
The return value is either "none", if the drag is refused for the
dragged object types, or one of "copy", "move", or "link".
Subclasses should override this method.
"""
raise NotImplementedError
def handle_drop(self, urls):
""" Handle objects being dropped on the component
Subclasses should override this method.
"""
raise NotImplementedError
|
|
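For context, a hypothetical concrete subclass might look like the sketch below. FileDropTool is not part of Enable, and the .csv-only policy is invented for the example; only the two abstract hooks need to be filled in.

from enable.tools.drop_tool import BaseDropTool

class FileDropTool(BaseDropTool):
    """Accept drops of .csv URLs and stash them on the component."""

    def accepts_drop(self, urls):
        # "copy" signals an accepted copy-style drop; "none" refuses it.
        if urls and all(url.endswith(".csv") for url in urls):
            return "copy"
        return "none"

    def handle_drop(self, urls):
        self.component.dropped_files = list(urls)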
a893223d4964f946d9413a17e62871e2660843a8
|
flexget/plugins/input_listdir.py
|
flexget/plugins/input_listdir.py
|
import logging
from flexget.plugin import *
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
#if only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
e['url'] = 'file://%s' % (os.path.join(path, name))
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
|
import logging
from flexget.plugin import register_plugin
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
filepath = os.path.join(path, name)
# Windows paths need an extra / prepended to them
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
|
Fix url of entries made by listdir on Windows.
|
Fix url of entries made by listdir on Windows.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1586 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
Python
|
mit
|
LynxyssCZ/Flexget,thalamus/Flexget,tvcsantos/Flexget,ibrahimkarahan/Flexget,patsissons/Flexget,oxc/Flexget,dsemi/Flexget,qvazzler/Flexget,poulpito/Flexget,crawln45/Flexget,Flexget/Flexget,ZefQ/Flexget,malkavi/Flexget,malkavi/Flexget,oxc/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,sean797/Flexget,jawilson/Flexget,dsemi/Flexget,spencerjanssen/Flexget,ibrahimkarahan/Flexget,qvazzler/Flexget,qvazzler/Flexget,asm0dey/Flexget,Pretagonist/Flexget,ianstalk/Flexget,drwyrm/Flexget,ZefQ/Flexget,jawilson/Flexget,antivirtel/Flexget,spencerjanssen/Flexget,ratoaq2/Flexget,camon/Flexget,malkavi/Flexget,spencerjanssen/Flexget,lildadou/Flexget,tsnoam/Flexget,xfouloux/Flexget,voriux/Flexget,ratoaq2/Flexget,grrr2/Flexget,LynxyssCZ/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,X-dark/Flexget,tobinjt/Flexget,poulpito/Flexget,tarzasai/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,dsemi/Flexget,qk4l/Flexget,OmgOhnoes/Flexget,patsissons/Flexget,tobinjt/Flexget,jacobmetrick/Flexget,ZefQ/Flexget,jacobmetrick/Flexget,Pretagonist/Flexget,JorisDeRieck/Flexget,crawln45/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,Pretagonist/Flexget,X-dark/Flexget,Danfocus/Flexget,LynxyssCZ/Flexget,thalamus/Flexget,tobinjt/Flexget,ianstalk/Flexget,vfrc2/Flexget,jawilson/Flexget,patsissons/Flexget,tvcsantos/Flexget,tsnoam/Flexget,tarzasai/Flexget,lildadou/Flexget,thalamus/Flexget,Flexget/Flexget,gazpachoking/Flexget,asm0dey/Flexget,antivirtel/Flexget,qk4l/Flexget,sean797/Flexget,Danfocus/Flexget,offbyone/Flexget,drwyrm/Flexget,asm0dey/Flexget,X-dark/Flexget,gazpachoking/Flexget,vfrc2/Flexget,ibrahimkarahan/Flexget,Flexget/Flexget,offbyone/Flexget,xfouloux/Flexget,lildadou/Flexget,grrr2/Flexget,antivirtel/Flexget,oxc/Flexget,tsnoam/Flexget,v17al/Flexget,offbyone/Flexget,drwyrm/Flexget,Danfocus/Flexget,v17al/Flexget,poulpito/Flexget,grrr2/Flexget,cvium/Flexget,cvium/Flexget,jacobmetrick/Flexget,sean797/Flexget,cvium/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,crawln45/Flexget,vfrc2/Flexget,tarzasai/Flexget,qk4l/Flexget,ratoaq2/Flexget,v17al/Flexget,camon/Flexget,voriux/Flexget,malkavi/Flexget,xfouloux/Flexget
|
import logging
from flexget.plugin import *
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
#if only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
e['url'] = 'file://%s' % (os.path.join(path, name))
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
Fix url of entries made by listdir on Windows.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1586 3942dd89-8c5d-46d7-aeed-044bccf3e60c
|
import logging
from flexget.plugin import register_plugin
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
filepath = os.path.join(path, name)
# Windows paths need an extra / prepended to them
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
|
<commit_before>import logging
from flexget.plugin import *
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
#if only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
e['url'] = 'file://%s' % (os.path.join(path, name))
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
<commit_msg>Fix url of entries made by listdir on Windows.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1586 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>
|
import logging
from flexget.plugin import register_plugin
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
filepath = os.path.join(path, name)
# Windows paths need an extra / prepended to them
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
|
import logging
from flexget.plugin import *
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
#if only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
e['url'] = 'file://%s' % (os.path.join(path, name))
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
Fix url of entries made by listdir on Windows.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1586 3942dd89-8c5d-46d7-aeed-044bccf3e60cimport logging
from flexget.plugin import register_plugin
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
filepath = os.path.join(path, name)
# Windows paths need an extra / prepended to them
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
|
<commit_before>import logging
from flexget.plugin import *
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
#if only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
e['url'] = 'file://%s' % (os.path.join(path, name))
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
<commit_msg>Fix url of entries made by listdir on Windows.
git-svn-id: ad91b9aa7ba7638d69f912c9f5d012e3326e9f74@1586 3942dd89-8c5d-46d7-aeed-044bccf3e60c<commit_after>import logging
from flexget.plugin import register_plugin
log = logging.getLogger('listdir')
class InputListdir:
"""
Uses local path content as an input.
Example:
listdir: /storage/movies/
"""
def validator(self):
from flexget import validator
root = validator.factory()
root.accept('path')
bundle = root.accept('list')
bundle.accept('path')
return root
def get_config(self, feed):
config = feed.config.get('listdir', None)
# If only a single path is passed turn it into a 1 element list
if isinstance(config, basestring):
config = [config]
return config
def on_feed_input(self, feed):
from flexget.feed import Entry
import os
config = self.get_config(feed)
for path in config:
for name in os.listdir(unicode(path)):
e = Entry()
e['title'] = name
filepath = os.path.join(path, name)
# Windows paths need an extra / prepended to them
if not filepath.startswith('/'):
filepath = '/' + filepath
e['url'] = 'file://%s' % (filepath)
e['location'] = os.path.join(path, name)
feed.entries.append(e)
register_plugin(InputListdir, 'listdir')
|
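The Windows quirk the fix above addresses is easy to see in isolation: a file:// URL needs its path component to begin with '/', which a drive-letter path lacks. A standalone sketch follows; the plugin itself only prepends the slash, and the backslash conversion here is extra normalization added for the demo.

def to_file_url(path):
    path = path.replace('\\', '/')  # normalize Windows separators for the URL
    if not path.startswith('/'):
        path = '/' + path           # drive-letter paths need a leading slash
    return 'file://%s' % path

print(to_file_url('C:\\storage\\movies\\file1.avi'))
# -> file:///C:/storage/movies/file1.avi
print(to_file_url('/storage/movies/file1.avi'))
# -> file:///storage/movies/file1.avi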
bc80e5df435c5be01db2bc1547d66788276366eb
|
test/test_script.py
|
test/test_script.py
|
# -*- coding: UTF-8 -*-
"""
Tests for pyftpsync
"""
from __future__ import print_function
import os
import sys
import unittest
from ftpsync import pyftpsync, __version__
from test.fixture_tools import _SyncTestBase, PYFTPSYNC_TEST_FOLDER
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
def run_script(*args, expect_code=0, **kw):
"""Run `pyftpsync args` and return (errcode, output)."""
pyftpsync.sys.argv = ["pyftpsync_dummy"] + list(args)
# print("S", sys.argv)
errcode = 0
out = []
try:
with Capturing() as out:
pyftpsync.run()
except SystemExit as e:
errcode = e.code
if expect_code is not None:
assert errcode == expect_code
return "\n".join(out).strip()
#===============================================================================
# ScriptTest
#===============================================================================
class ScriptTest(_SyncTestBase):
"""Test command line script interface."""
def setUp(self):
# Call self._prepare_initial_synced_fixture():
super(ScriptTest, self).setUp()
def tearDown(self):
super(ScriptTest, self).tearDown()
def test_basic(self):
out = run_script("--version")
# self.assertEqual(errcode, 0)
self.assertEqual(out, __version__)
out = run_script("--help")
assert "usage: pyftpsync" in out
out = run_script("foobar", expect_code=2)
def test_scan_list(self):
out = run_script("scan", os.path.join(PYFTPSYNC_TEST_FOLDER, "local"), "--list")
assert "file1.txt 2014-01-01 13:00:00" in out
#===============================================================================
# Main
#===============================================================================
if __name__ == "__main__":
unittest.main()
|
Add tests for command line script
|
Add tests for command line script
|
Python
|
mit
|
mar10/pyftpsync
|
Add tests for command line script
|
# -*- coding: UTF-8 -*-
"""
Tests for pyftpsync
"""
from __future__ import print_function
import os
import sys
import unittest
from ftpsync import pyftpsync, __version__
from test.fixture_tools import _SyncTestBase, PYFTPSYNC_TEST_FOLDER
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
def run_script(*args, expect_code=0, **kw):
"""Run `pyftpsync args`, assert the exit code, and return the captured output."""
pyftpsync.sys.argv = ["pyftpsync_dummy"] + list(args)
# print("S", sys.argv)
errcode = 0
out = []
try:
with Capturing() as out:
pyftpsync.run()
except SystemExit as e:
errcode = e.code
if expect_code is not None:
assert errcode == expect_code
return "\n".join(out).strip()
#===============================================================================
# ScriptTest
#===============================================================================
class ScriptTest(_SyncTestBase):
"""Test command line script interface."""
def setUp(self):
# Call self._prepare_initial_synced_fixture():
super(ScriptTest, self).setUp()
def tearDown(self):
super(ScriptTest, self).tearDown()
def test_basic(self):
out = run_script("--version")
# self.assertEqual(errcode, 0)
self.assertEqual(out, __version__)
out = run_script("--help")
assert "usage: pyftpsync" in out
out = run_script("foobar", expect_code=2)
def test_scan_list(self):
out = run_script("scan", os.path.join(PYFTPSYNC_TEST_FOLDER, "local"), "--list")
assert "file1.txt 2014-01-01 13:00:00" in out
#===============================================================================
# Main
#===============================================================================
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests for command line script<commit_after>
|
# -*- coding: UTF-8 -*-
"""
Tests for pyftpsync
"""
from __future__ import print_function
import os
import sys
import unittest
from ftpsync import pyftpsync, __version__
from test.fixture_tools import _SyncTestBase, PYFTPSYNC_TEST_FOLDER
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
def run_script(*args, expect_code=0, **kw):
"""Run `pyftpsync args`, assert the exit code, and return the captured output."""
pyftpsync.sys.argv = ["pyftpsync_dummy"] + list(args)
# print("S", sys.argv)
errcode = 0
out = []
try:
with Capturing() as out:
pyftpsync.run()
except SystemExit as e:
errcode = e.code
if expect_code is not None:
assert errcode == expect_code
return "\n".join(out).strip()
#===============================================================================
# ScriptTest
#===============================================================================
class ScriptTest(_SyncTestBase):
"""Test command line script interface."""
def setUp(self):
# Call self._prepare_initial_synced_fixture():
super(ScriptTest, self).setUp()
def tearDown(self):
super(ScriptTest, self).tearDown()
def test_basic(self):
out = run_script("--version")
# self.assertEqual(errcode, 0)
self.assertEqual(out, __version__)
out = run_script("--help")
assert "usage: pyftpsync" in out
out = run_script("foobar", expect_code=2)
def test_scan_list(self):
out = run_script("scan", os.path.join(PYFTPSYNC_TEST_FOLDER, "local"), "--list")
assert "file1.txt 2014-01-01 13:00:00" in out
#===============================================================================
# Main
#===============================================================================
if __name__ == "__main__":
unittest.main()
|
Add tests for command line script# -*- coding: UTF-8 -*-
"""
Tests for pyftpsync
"""
from __future__ import print_function
import os
import sys
import unittest
from ftpsync import pyftpsync, __version__
from test.fixture_tools import _SyncTestBase, PYFTPSYNC_TEST_FOLDER
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
def run_script(*args, expect_code=0, **kw):
"""Run `pyftpsync args`, assert the exit code, and return the captured output."""
pyftpsync.sys.argv = ["pyftpsync_dummy"] + list(args)
# print("S", sys.argv)
errcode = 0
out = []
try:
with Capturing() as out:
pyftpsync.run()
except SystemExit as e:
errcode = e.code
if expect_code is not None:
assert errcode == expect_code
return "\n".join(out).strip()
#===============================================================================
# ScriptTest
#===============================================================================
class ScriptTest(_SyncTestBase):
"""Test command line script interface."""
def setUp(self):
# Call self._prepare_initial_synced_fixture():
super(ScriptTest, self).setUp()
def tearDown(self):
super(ScriptTest, self).tearDown()
def test_basic(self):
out = run_script("--version")
# self.assertEqual(errcode, 0)
self.assertEqual(out, __version__)
out = run_script("--help")
assert "usage: pyftpsync" in out
out = run_script("foobar", expect_code=2)
def test_scan_list(self):
out = run_script("scan", os.path.join(PYFTPSYNC_TEST_FOLDER, "local"), "--list")
assert "file1.txt 2014-01-01 13:00:00" in out
#===============================================================================
# Main
#===============================================================================
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add tests for command line script<commit_after># -*- coding: UTF-8 -*-
"""
Tests for pyftpsync
"""
from __future__ import print_function
import os
import sys
import unittest
from ftpsync import pyftpsync, __version__
from test.fixture_tools import _SyncTestBase, PYFTPSYNC_TEST_FOLDER
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
class Capturing(list):
def __enter__(self):
self._stdout = sys.stdout
sys.stdout = self._stringio = StringIO()
return self
def __exit__(self, *args):
self.extend(self._stringio.getvalue().splitlines())
del self._stringio # free up some memory
sys.stdout = self._stdout
def run_script(*args, expect_code=0, **kw):
"""Run `pyftpsync args`, assert the exit code, and return the captured output."""
pyftpsync.sys.argv = ["pyftpsync_dummy"] + list(args)
# print("S", sys.argv)
errcode = 0
out = []
try:
with Capturing() as out:
pyftpsync.run()
except SystemExit as e:
errcode = e.code
if expect_code is not None:
assert errcode == expect_code
return "\n".join(out).strip()
#===============================================================================
# ScriptTest
#===============================================================================
class ScriptTest(_SyncTestBase):
"""Test command line script interface."""
def setUp(self):
# Call self._prepare_initial_synced_fixture():
super(ScriptTest, self).setUp()
def tearDown(self):
super(ScriptTest, self).tearDown()
def test_basic(self):
out = run_script("--version")
# self.assertEqual(errcode, 0)
self.assertEqual(out, __version__)
out = run_script("--help")
assert "usage: pyftpsync" in out
out = run_script("foobar", expect_code=2)
def test_scan_list(self):
out = run_script("scan", os.path.join(PYFTPSYNC_TEST_FOLDER, "local"), "--list")
assert "file1.txt 2014-01-01 13:00:00" in out
#===============================================================================
# Main
#===============================================================================
if __name__ == "__main__":
unittest.main()
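The Capturing helper above is reusable on its own; a small illustrative check of what it collects:
# Illustrative use of the Capturing context manager defined above.
with Capturing() as captured:
    print("hello")
    print("world")
assert captured == ["hello", "world"]  # one list entry per printed line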
|
|
d5fbbc55286d249c320ed5b54460b2091a023419
|
concurren-futures.py
|
concurren-futures.py
|
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
import time
def bar(i=0):
if i == 0:
raise ValueError("bar raise")
return i ** 2
def main_Thread():
thread = threading.Thread(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ThreadPool():
p = ThreadPool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorMap():
with ThreadPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorSubmit():
with ThreadPoolExecutor(4) as ex:
s = ex.submit(bar)
print s.result()
raise RuntimeError("Exception not caught")
def main_Process():
thread = multiprocessing.Process(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ProcessPool():
p = multiprocessing.Pool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorMap():
with ProcessPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorSubmit():
with ProcessPoolExecutor(4) as ex:
s = ex.submit(bar, 0)
print s.result()
raise RuntimeError("Exception not caught")
def run(fun):
ac = threading.active_count()
try:
fun()
except RuntimeError:
print fun.__name__, "[NOT raised]"
except ValueError:
print fun.__name__, "[RAISED]"
time.sleep(1)
print "Zombie thread:", threading.active_count() - ac
if __name__ == '__main__':
run(main_Thread)
run(main_ThreadPool)
run(main_ThreadPoolExecutorMap)
run(main_ThreadPoolExecutorSubmit)
run(main_Process)
run(main_ProcessPool)
run(main_ProcessPoolExecutorMap)
run(main_ProcessPoolExecutorSubmit)
|
Add concurrent futures benchmark script
|
Add concurrent futures benchmark script
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add concurrent futures benchmark script
|
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
import time
def bar(i=0):
if i == 0:
raise ValueError("bar raise")
return i ** 2
def main_Thread():
thread = threading.Thread(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ThreadPool():
p = ThreadPool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorMap():
with ThreadPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorSubmit():
with ThreadPoolExecutor(4) as ex:
s = ex.submit(bar)
print s.result()
raise RuntimeError("Exception not caught")
def main_Process():
thread = multiprocessing.Process(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ProcessPool():
p = multiprocessing.Pool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorMap():
with ProcessPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorSubmit():
with ProcessPoolExecutor(4) as ex:
s = ex.submit(bar, 0)
print s.result()
raise RuntimeError("Exception not caught")
def run(fun):
ac = threading.active_count()
try:
fun()
except RuntimeError:
print fun.__name__, "[NOT raised]"
except ValueError:
print fun.__name__, "[RAISED]"
time.sleep(1)
print "Zombie thread:", threading.active_count() - ac
if __name__ == '__main__':
run(main_Thread)
run(main_ThreadPool)
run(main_ThreadPoolExecutorMap)
run(main_ThreadPoolExecutorSubmit)
run(main_Process)
run(main_ProcessPool)
run(main_ProcessPoolExecutorMap)
run(main_ProcessPoolExecutorSubmit)
|
<commit_before><commit_msg>Add concurrent futures benchmark script<commit_after>
|
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
import time
def bar(i=0):
if i == 0:
raise ValueError("bar raise")
return i ** 2
def main_Thread():
thread = threading.Thread(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ThreadPool():
p = ThreadPool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorMap():
with ThreadPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorSubmit():
with ThreadPoolExecutor(4) as ex:
s = ex.submit(bar)
print s.result()
raise RuntimeError("Exception not caught")
def main_Process():
thread = multiprocessing.Process(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ProcessPool():
p = multiprocessing.Pool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorMap():
with ProcessPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorSubmit():
with ProcessPoolExecutor(4) as ex:
s = ex.submit(bar, 0)
print s.result()
raise RuntimeError("Exception not caught")
def run(fun):
ac = threading.active_count()
try:
fun()
except RuntimeError:
print fun.__name__, "[NOT raised]"
except ValueError:
print fun.__name__, "[RAISED]"
time.sleep(1)
print "Zombie thread:", threading.active_count() - ac
if __name__ == '__main__':
run(main_Thread)
run(main_ThreadPool)
run(main_ThreadPoolExecutorMap)
run(main_ThreadPoolExecutorSubmit)
run(main_Process)
run(main_ProcessPool)
run(main_ProcessPoolExecutorMap)
run(main_ProcessPoolExecutorSubmit)
|
Add concurrent futures benchmark scriptfrom concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
import time
def bar(i=0):
if i == 0:
raise ValueError("bar raise")
return i ** 2
def main_Thread():
thread = threading.Thread(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ThreadPool():
p = ThreadPool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorMap():
with ThreadPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorSubmit():
with ThreadPoolExecutor(4) as ex:
s = ex.submit(bar)
print s.result()
raise RuntimeError("Exception not caught")
def main_Process():
thread = multiprocessing.Process(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ProcessPool():
p = multiprocessing.Pool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorMap():
with ProcessPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorSubmit():
with ProcessPoolExecutor(4) as ex:
s = ex.submit(bar, 0)
print s.result()
raise RuntimeError("Exception not caught")
def run(fun):
ac = threading.active_count()
try:
fun()
except RuntimeError:
print fun.__name__, "[NOT raised]"
except ValueError:
print fun.__name__, "[RAISED]"
time.sleep(1)
print "Zombie thread:", threading.active_count() - ac
if __name__ == '__main__':
run(main_Thread)
run(main_ThreadPool)
run(main_ThreadPoolExecutorMap)
run(main_ThreadPoolExecutorSubmit)
run(main_Process)
run(main_ProcessPool)
run(main_ProcessPoolExecutorMap)
run(main_ProcessPoolExecutorSubmit)
|
<commit_before><commit_msg>Add concurrent futures benchmark script<commit_after>from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing
from multiprocessing.pool import ThreadPool
import threading
import time
def bar(i=0):
if i == 0:
raise ValueError("bar raise")
return i ** 2
def main_Thread():
thread = threading.Thread(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ThreadPool():
p = ThreadPool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorMap():
with ThreadPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ThreadPoolExecutorSubmit():
with ThreadPoolExecutor(4) as ex:
s = ex.submit(bar)
print s.result()
raise RuntimeError("Exception not caught")
def main_Process():
thread = multiprocessing.Process(target=bar)
thread.start()
thread.join()
raise RuntimeError("Exception not caught")
def main_ProcessPool():
p = multiprocessing.Pool(4)
for i in p.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorMap():
with ProcessPoolExecutor(4) as ex:
for i in ex.map(bar, xrange(4)):
print i
raise RuntimeError("Exception not caught")
def main_ProcessPoolExecutorSubmit():
with ProcessPoolExecutor(4) as ex:
s = ex.submit(bar, 0)
print s.result()
raise RuntimeError("Exception not caught")
def run(fun):
ac = threading.active_count()
try:
fun()
except RuntimeError:
print fun.__name__, "[NOT raised]"
except ValueError:
print fun.__name__, "[RAISED]"
time.sleep(1)
print "Zombie thread:", threading.active_count() - ac
if __name__ == '__main__':
run(main_Thread)
run(main_ThreadPool)
run(main_ThreadPoolExecutorMap)
run(main_ThreadPoolExecutorSubmit)
run(main_Process)
run(main_ProcessPool)
run(main_ProcessPoolExecutorMap)
run(main_ProcessPoolExecutorSubmit)
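For contrast, a Python 3 sketch (not part of the commit): with executors, a worker exception is stored on the Future and re-raised by result(), which is what the PoolExecutor cases above exercise:
from concurrent.futures import ThreadPoolExecutor

def boom():
    raise ValueError("boom")

with ThreadPoolExecutor(2) as ex:
    fut = ex.submit(boom)
    try:
        fut.result()  # re-raises the worker's ValueError here
    except ValueError as exc:
        print("caught:", exc)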
|
|
ee36e70c572f1ca6651b889a7fe1139c4fa7297e
|
plugins/playing_maps.py
|
plugins/playing_maps.py
|
from bs4 import BeautifulSoup
from plugin import CommandPlugin
import requests
import json
class PlayingMaps(CommandPlugin):
"""
Parses Overcast's maps page and prints currently playing maps
"""
ocn_maps_link = 'https://oc.tc/maps'
def __init__(self):
CommandPlugin.__init__(self)
self.triggers = ['playingmaps']
self.short_help = 'Prints out currently playing maps on the Overcast Network'
self.help = self.short_help
self.help_example = ['!playingmaps']
@staticmethod
def build_slack_attachment(data):
attach = {
'title': 'Overcast Network - Currently playing maps',
'text': '',
'mrkdwn_in': ['text'],
'title_link': PlayingMaps.ocn_maps_link
}
for server in sorted(data.keys()):
attach['text'] += '*%s:* ' % server
attach['text'] += ', '.join(data[server])
attach['text'] += '\n'
return attach
@staticmethod
def parse_maps_list():
r = requests.get(PlayingMaps.ocn_maps_link)
if r.status_code != requests.codes.ok:
return
soup = BeautifulSoup(r.text)
# Key: Server Name
# Value: List of currently playing maps
data = {}
maps_elements = soup.find_all('div', class_='map thumbnail')
for map in maps_elements:
map_name = map.find('h1', class_='lead').a.contents[0]
# Wrapper around server labels
servers_div = map.find('div', class_='servers')
# Labels of playing servers
playing_on = servers_div.find_all('a', class_='label label-warning')
for server in playing_on:
server_name = server.contents[0]
# Check if server is not in dictionary
if not data.get(server_name):
data[server_name] = []
data[server_name].append(map_name)
# Return slack attachment with parsed data
return PlayingMaps.build_slack_attachment(data)
def on_command(self, bot, event, response):
response.update(attachments=json.dumps([PlayingMaps.parse_maps_list()]))
bot.sc.api_call('chat.postMessage', **response)
|
Add command for listing playing OCN maps
|
Add command for listing playing OCN maps
|
Python
|
mit
|
Plastix/nimbus,itsmartin/nimbus,bcbwilla/nimbus,Brottweiler/nimbus
|
Add command for listing playing OCN maps
|
from bs4 import BeautifulSoup
from plugin import CommandPlugin
import requests
import json
class PlayingMaps(CommandPlugin):
"""
Parses Overcast's maps page and prints currently playing maps
"""
ocn_maps_link = 'https://oc.tc/maps'
def __init__(self):
CommandPlugin.__init__(self)
self.triggers = ['playingmaps']
self.short_help = 'Prints out currently playing maps on the Overcast Network'
self.help = self.short_help
self.help_example = ['!playingmaps']
@staticmethod
def build_slack_attachment(data):
attach = {
'title': 'Overcast Network - Currently playing maps',
'text': '',
'mrkdwn_in': ['text'],
'title_link': PlayingMaps.ocn_maps_link
}
for server in sorted(data.keys()):
attach['text'] += '*%s:* ' % server
attach['text'] += ', '.join(data[server])
attach['text'] += '\n'
return attach
@staticmethod
def parse_maps_list():
r = requests.get(PlayingMaps.ocn_maps_link)
if r.status_code != requests.codes.ok:
return
soup = BeautifulSoup(r.text)
# Key: Server Name
# Value: List of currently playing maps
data = {}
maps_elements = soup.find_all('div', class_='map thumbnail')
for map in maps_elements:
map_name = map.find('h1', class_='lead').a.contents[0]
# Wrapper around server labels
servers_div = map.find('div', class_='servers')
# Labels of playing servers
playing_on = servers_div.find_all('a', class_='label label-warning')
for server in playing_on:
server_name = server.contents[0]
# Check if server is not in dictionary
if not data.get(server_name):
data[server_name] = []
data[server_name].append(map_name)
# Return slack attachment with parsed data
return PlayingMaps.build_slack_attachment(data)
def on_command(self, bot, event, response):
response.update(attachments=json.dumps([PlayingMaps.parse_maps_list()]))
bot.sc.api_call('chat.postMessage', **response)
|
<commit_before><commit_msg>Add command for listing playing OCN maps<commit_after>
|
from bs4 import BeautifulSoup
from plugin import CommandPlugin
import requests
import json
class PlayingMaps(CommandPlugin):
"""
Parses Overcast's maps page and prints currently playing maps
"""
ocn_maps_link = 'https://oc.tc/maps'
def __init__(self):
CommandPlugin.__init__(self)
self.triggers = ['playingmaps']
self.short_help = 'Prints out currently playing maps on the Overcast Network'
self.help = self.short_help
self.help_example = ['!playingmaps']
@staticmethod
def build_slack_attachment(data):
attach = {
'title': 'Overcast Network - Currently playing maps',
'text': '',
'mrkdwn_in': ['text'],
'title_link': PlayingMaps.ocn_maps_link
}
for server in sorted(data.keys()):
attach['text'] += '*%s:* ' % server
attach['text'] += ', '.join(data[server])
attach['text'] += '\n'
return attach
@staticmethod
def parse_maps_list():
r = requests.get(PlayingMaps.ocn_maps_link)
if r.status_code != requests.codes.ok:
return
soup = BeautifulSoup(r.text)
# Key: Server Name
# Value: List of currently playing maps
data = {}
maps_elements = soup.find_all('div', class_='map thumbnail')
for map in maps_elements:
map_name = map.find('h1', class_='lead').a.contents[0]
# Wrapper around server labels
servers_div = map.find('div', class_='servers')
# Labels of playing servers
playing_on = servers_div.find_all('a', class_='label label-warning')
for server in playing_on:
server_name = server.contents[0]
# Check if server is not in dictionary
if not data.get(server_name):
data[server_name] = []
data[server_name].append(map_name)
# Return slack attachment with parsed data
return PlayingMaps.build_slack_attachment(data)
def on_command(self, bot, event, response):
response.update(attachments=json.dumps([PlayingMaps.parse_maps_list()]))
bot.sc.api_call('chat.postMessage', **response)
|
Add command for listing playing OCN mapsfrom bs4 import BeautifulSoup
from plugin import CommandPlugin
import requests
import json
class PlayingMaps(CommandPlugin):
"""
Parses Overcast's maps page and prints currently playing maps
"""
ocn_maps_link = 'https://oc.tc/maps'
def __init__(self):
CommandPlugin.__init__(self)
self.triggers = ['playingmaps']
self.short_help = 'Prints out currently playing maps on the Overcast Network'
self.help = self.short_help
self.help_example = ['!playingmaps']
@staticmethod
def build_slack_attachment(data):
attach = {
'title': 'Overcast Network - Currently playing maps',
'text': '',
'mrkdwn_in': ['text'],
'title_link': PlayingMaps.ocn_maps_link
}
for server in sorted(data.keys()):
attach['text'] += '*%s:* ' % server
attach['text'] += ', '.join(data[server])
attach['text'] += '\n'
return attach
@staticmethod
def parse_maps_list():
r = requests.get(PlayingMaps.ocn_maps_link)
if r.status_code != requests.codes.ok:
return
soup = BeautifulSoup(r.text)
# Key: Server Name
# Value: List of currently playing maps
data = {}
maps_elements = soup.find_all('div', class_='map thumbnail')
for map in maps_elements:
map_name = map.find('h1', class_='lead').a.contents[0]
# Wrapper around server labels
servers_div = map.find('div', class_='servers')
# Labels of playing servers
playing_on = servers_div.find_all('a', class_='label label-warning')
for server in playing_on:
server_name = server.contents[0]
# Check if server is not in dictionary
if not data.get(server_name):
data[server_name] = []
data[server_name].append(map_name)
# Return slack attachment with parsed data
return PlayingMaps.build_slack_attachment(data)
def on_command(self, bot, event, response):
response.update(attachments=json.dumps([PlayingMaps.parse_maps_list()]))
bot.sc.api_call('chat.postMessage', **response)
|
<commit_before><commit_msg>Add command for listing playing OCN maps<commit_after>from bs4 import BeautifulSoup
from plugin import CommandPlugin
import requests
import json
class PlayingMaps(CommandPlugin):
"""
Parses Overcast's maps page and prints currently playing maps
"""
ocn_maps_link = 'https://oc.tc/maps'
def __init__(self):
CommandPlugin.__init__(self)
self.triggers = ['playingmaps']
self.short_help = 'Prints out currently playing maps on the Overcast Network'
self.help = self.short_help
self.help_example = ['!playingmaps']
@staticmethod
def build_slack_attachment(data):
attach = {
'title': 'Overcast Network - Currently playing maps',
'text': '',
'mrkdwn_in': ['text'],
'title_link': PlayingMaps.ocn_maps_link
}
for server in sorted(data.keys()):
attach['text'] += '*%s:* ' % server
attach['text'] += ', '.join(data[server])
attach['text'] += '\n'
return attach
@staticmethod
def parse_maps_list():
r = requests.get(PlayingMaps.ocn_maps_link)
if r.status_code != requests.codes.ok:
return
soup = BeautifulSoup(r.text)
# Key: Server Name
# Value: List of currently playing maps
data = {}
maps_elements = soup.find_all('div', class_='map thumbnail')
for map in maps_elements:
map_name = map.find('h1', class_='lead').a.contents[0]
# Wrapper around server labels
servers_div = map.find('div', class_='servers')
# Labels of playing servers
playing_on = servers_div.find_all('a', class_='label label-warning')
for server in playing_on:
server_name = server.contents[0]
# Check if server is not in dictionary
if not data.get(server_name):
data[server_name] = []
data[server_name].append(map_name)
# Return slack attachment with parsed data
return PlayingMaps.build_slack_attachment(data)
def on_command(self, bot, event, response):
response.update(attachments=json.dumps([PlayingMaps.parse_maps_list()]))
bot.sc.api_call('chat.postMessage', **response)
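An illustrative check of build_slack_attachment with hand-made data (server and map names below are invented):
sample = {"Chaos": ["Airship Battle"], "TDM": ["Fracture", "Warlock"]}
attach = PlayingMaps.build_slack_attachment(sample)
print(attach["text"])
# *Chaos:* Airship Battle
# *TDM:* Fracture, Warlock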
|
|
5752df3cf5e77e76836376846db6c3cbcbfe2ef7
|
troposphere/ecs.py
|
troposphere/ecs.py
|
from . import AWSObject, AWSProperty
from .validators import boolean, network_port, integer
class Cluster(AWSObject):
resource_type = "AWS::ECS::Cluster"
props = {}
class LoadBalancer(AWSProperty):
props = {
'ContainerName': (basestring, False),
'ContainerPort': (network_port, False),
'LoadBalancerName': (basestring, False),
}
class Service(AWSObject):
resource_type = "AWS::ECS::Service"
props = {
'Cluster': (basestring, False),
'DesiredCount': (integer, False),
'LoadBalancers': ([LoadBalancer], False),
'Role': (basestring, False),
'TaskDefinition': (basestring, False),
}
class Environment(AWSProperty):
props = {
'Name': (basestring, True),
'Value': (basestring, True),
}
class MountPoint(AWSProperty):
props = {
'ContainerPath': (basestring, True),
'SourceVolume': (basestring, True),
'ReadOnly': (boolean, False),
}
class PortMapping(AWSProperty):
props = {
'ContainerPort': (network_port, True),
'HostPort': (network_port, False),
}
class VolumesFrom(AWSProperty):
props = {
'SourceContainer': (basestring, True),
'ReadOnly': (boolean, False),
}
class ContainerDefinition(AWSProperty):
props = {
'Command': ([basestring], False),
'Cpu': (integer, False),
'EntryPoint': ([basestring], False),
'Environment': ([Environment], False),
'Essential': (boolean, False),
'Image': (basestring, True),
'Links': ([basestring], False),
'Memory': (integer, True),
'MountPoints': ([MountPoint], False),
'Name': (basestring, True),
'PortMappings': ([PortMapping], False),
'VolumesFrom': ([VolumesFrom], False),
}
class Host(AWSProperty):
props = {
'SourcePath': (basestring, False),
}
class Volume(AWSProperty):
props = {
'Name': (basestring, True),
'Host': (Host, False),
}
class TaskDefinition(AWSObject):
resource_type = "AWS::ECS::TaskDefinition"
props = {
'ContainerDefinitions': ([ContainerDefinition], True),
'Volumes': ([Volume], True),
}
|
Add EC2 Container Service (ECS)
|
Add EC2 Container Service (ECS)
|
Python
|
bsd-2-clause
|
Yipit/troposphere,xxxVxxx/troposphere,samcrang/troposphere,ptoraskar/troposphere,horacio3/troposphere,jdc0589/troposphere,amosshapira/troposphere,7digital/troposphere,micahhausler/troposphere,LouTheBrew/troposphere,ikben/troposphere,cloudtools/troposphere,johnctitus/troposphere,cryptickp/troposphere,cloudtools/troposphere,dmm92/troposphere,inetCatapult/troposphere,johnctitus/troposphere,pas256/troposphere,yxd-hde/troposphere,ikben/troposphere,garnaat/troposphere,mannytoledo/troposphere,kid/troposphere,mhahn/troposphere,7digital/troposphere,nicolaka/troposphere,horacio3/troposphere,craigbruce/troposphere,wangqiang8511/troposphere,alonsodomin/troposphere,pas256/troposphere,ccortezb/troposphere,dmm92/troposphere,WeAreCloudar/troposphere,alonsodomin/troposphere,unravelin/troposphere
|
Add EC2 Container Service (ECS)
|
from . import AWSObject, AWSProperty
from .validators import boolean, network_port, integer
class Cluster(AWSObject):
resource_type = "AWS::ECS::Cluster"
props = {}
class LoadBalancer(AWSProperty):
props = {
'ContainerName': (basestring, False),
'ContainerPort': (network_port, False),
'LoadBalancerName': (basestring, False),
}
class Service(AWSObject):
resource_type = "AWS::ECS::Service"
props = {
'Cluster': (basestring, False),
'DesiredCount': (integer, False),
'LoadBalancers': ([LoadBalancer], False),
'Role': (basestring, False),
'TaskDefinition': (basestring, False),
}
class Environment(AWSProperty):
props = {
'Name': (basestring, True),
'Value': (basestring, True),
}
class MountPoint(AWSProperty):
props = {
'ContainerPath': (basestring, True),
'SourceVolume': (basestring, True),
'ReadOnly': (boolean, False),
}
class PortMapping(AWSProperty):
props = {
'ContainerPort': (network_port, True),
'HostPort': (network_port, False),
}
class VolumesFrom(AWSProperty):
props = {
'SourceContainer': (basestring, True),
'ReadOnly': (boolean, False),
}
class ContainerDefinition(AWSProperty):
props = {
'Command': ([basestring], False),
'Cpu': (integer, False),
'EntryPoint': ([basestring], False),
'Environment': ([Environment], False),
'Essential': (boolean, False),
'Image': (basestring, True),
'Links': ([basestring], False),
'Memory': (integer, True),
'MountPoints': ([MountPoint], False),
'Name': (basestring, True),
'PortMappings': ([PortMapping], False),
'VolumesFrom': ([VolumesFrom], False),
}
class Host(AWSProperty):
props = {
'SourcePath': (basestring, False),
}
class Volume(AWSProperty):
props = {
'Name': (basestring, True),
'Host': (Host, False),
}
class TaskDefinition(AWSObject):
resource_type = "AWS::ECS::TaskDefinition"
props = {
'ContainerDefinitions': ([ContainerDefinition], True),
'Volumes': ([Volume], True),
}
|
<commit_before><commit_msg>Add EC2 Container Service (ECS)<commit_after>
|
from . import AWSObject, AWSProperty
from .validators import boolean, network_port, integer
class Cluster(AWSObject):
resource_type = "AWS::ECS::Cluster"
props = {}
class LoadBalancer(AWSProperty):
props = {
'ContainerName': (basestring, False),
'ContainerPort': (network_port, False),
'LoadBalancerName': (basestring, False),
}
class Service(AWSObject):
resource_type = "AWS::ECS::Service"
props = {
'Cluster': (basestring, False),
'DesiredCount': (integer, False),
'LoadBalancers': ([LoadBalancer], False),
'Role': (basestring, False),
'TaskDefinition': (basestring, False),
}
class Environment(AWSProperty):
props = {
'Name': (basestring, True),
'Value': (basestring, True),
}
class MountPoint(AWSProperty):
props = {
'ContainerPath': (basestring, True),
'SourceVolume': (basestring, True),
'ReadOnly': (boolean, False),
}
class PortMapping(AWSProperty):
props = {
'ContainerPort': (network_port, True),
'HostPort': (network_port, False),
}
class VolumesFrom(AWSProperty):
props = {
'SourceContainer': (basestring, True),
'ReadOnly': (boolean, False),
}
class ContainerDefinition(AWSProperty):
props = {
'Command': ([basestring], False),
'Cpu': (integer, False),
'EntryPoint': ([basestring], False),
'Environment': ([Environment], False),
'Essential': (boolean, False),
'Image': (basestring, True),
'Links': ([basestring], False),
'Memory': (integer, True),
'MountPoints': ([MountPoint], False),
'Name': (basestring, True),
'PortMappings': ([PortMapping], False),
'VolumesFrom': ([VolumesFrom], False),
}
class Host(AWSProperty):
props = {
'SourcePath': (basestring, False),
}
class Volume(AWSProperty):
props = {
'Name': (basestring, True),
'Host': (Host, False),
}
class TaskDefinition(AWSObject):
resource_type = "AWS::ECS::TaskDefinition"
props = {
'ContainerDefinitions': ([ContainerDefinition], True),
'Volumes': ([Volume], True),
}
|
Add EC2 Container Service (ECS)from . import AWSObject, AWSProperty
from .validators import boolean, network_port, integer
class Cluster(AWSObject):
resource_type = "AWS::ECS::Cluster"
props = {}
class LoadBalancer(AWSProperty):
props = {
'ContainerName': (basestring, False),
'ContainerPort': (network_port, False),
'LoadBalancerName': (basestring, False),
}
class Service(AWSObject):
resource_type = "AWS::ECS::Service"
props = {
'Cluster': (basestring, False),
'DesiredCount': (integer, False),
'LoadBalancers': ([LoadBalancer], False),
'Role': (basestring, False),
'TaskDefinition': (basestring, False),
}
class Environment(AWSProperty):
props = {
'Name': (basestring, True),
'Value': (basestring, True),
}
class MountPoint(AWSProperty):
props = {
'ContainerPath': (basestring, True),
'SourceVolume': (basestring, True),
'ReadOnly': (boolean, False),
}
class PortMapping(AWSProperty):
props = {
'ContainerPort': (network_port, True),
'HostPort': (network_port, False),
}
class VolumesFrom(AWSProperty):
props = {
'SourceContainer': (basestring, True),
'ReadOnly': (boolean, False),
}
class ContainerDefinition(AWSProperty):
props = {
'Command': ([basestring], False),
'Cpu': (integer, False),
'EntryPoint': ([basestring], False),
'Environment': ([Environment], False),
'Essential': (boolean, False),
'Image': (basestring, True),
'Links': ([basestring], False),
'Memory': (integer, True),
'MountPoints': ([MountPoint], False),
'Name': (basestring, True),
'PortMappings': ([PortMapping], False),
'VolumesFrom': ([VolumesFrom], False),
}
class Host(AWSProperty):
props = {
'SourcePath': (basestring, False),
}
class Volume(AWSProperty):
props = {
'Name': (basestring, True),
'Host': (Host, False),
}
class TaskDefinition(AWSObject):
resource_type = "AWS::ECS::TaskDefinition"
props = {
'ContainerDefinitions': ([ContainerDefinition], True),
'Volumes': ([Volume], True),
}
|
<commit_before><commit_msg>Add EC2 Container Service (ECS)<commit_after>from . import AWSObject, AWSProperty
from .validators import boolean, network_port, integer
class Cluster(AWSObject):
resource_type = "AWS::ECS::Cluster"
props = {}
class LoadBalancer(AWSProperty):
props = {
'ContainerName': (basestring, False),
'ContainerPort': (network_port, False),
'LoadBalancerName': (basestring, False),
}
class Service(AWSObject):
resource_type = "AWS::ECS::Service"
props = {
'Cluster': (basestring, False),
'DesiredCount': (integer, False),
'LoadBalancers': ([LoadBalancer], False),
'Role': (basestring, False),
'TaskDefinition': (basestring, False),
}
class Environment(AWSProperty):
props = {
'Name': (basestring, True),
'Value': (basestring, True),
}
class MountPoint(AWSProperty):
props = {
'ContainerPath': (basestring, True),
'SourceVolume': (basestring, True),
'ReadOnly': (boolean, False),
}
class PortMapping(AWSProperty):
props = {
'ContainerPort': (network_port, True),
'HostPort': (network_port, False),
}
class VolumesFrom(AWSProperty):
props = {
'SourceContainer': (basestring, True),
'ReadOnly': (boolean, False),
}
class ContainerDefinition(AWSProperty):
props = {
'Command': ([basestring], False),
'Cpu': (integer, False),
'EntryPoint': ([basestring], False),
'Environment': ([Environment], False),
'Essential': (boolean, False),
'Image': (basestring, True),
'Links': ([basestring], False),
'Memory': (integer, True),
'MountPoints': ([MountPoint], False),
'Name': (basestring, True),
'PortMappings': ([PortMapping], False),
'VolumesFrom': ([VolumesFrom], False),
}
class Host(AWSProperty):
props = {
'SourcePath': (basestring, False),
}
class Volume(AWSProperty):
props = {
'Name': (basestring, True),
'Host': (Host, False),
}
class TaskDefinition(AWSObject):
resource_type = "AWS::ECS::TaskDefinition"
props = {
'ContainerDefinitions': ([ContainerDefinition], True),
'Volumes': ([Volume], True),
}
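A minimal usage sketch (not part of the commit) wiring the new classes into a template; resource names and the image are placeholders, and Volumes is passed because the props above mark it required:
from troposphere import Ref, Template
from troposphere.ecs import (Cluster, ContainerDefinition, PortMapping,
                             Service, TaskDefinition)

t = Template()
cluster = t.add_resource(Cluster("EcsCluster"))
task = t.add_resource(TaskDefinition(
    "WebTask",
    ContainerDefinitions=[ContainerDefinition(
        Name="web",
        Image="nginx",
        Memory=256,
        PortMappings=[PortMapping(ContainerPort=80)],
    )],
    Volumes=[],  # required by the props above, even when empty
))
t.add_resource(Service(
    "WebService",
    Cluster=Ref(cluster),
    DesiredCount=1,
    TaskDefinition=Ref(task),
))
print(t.to_json())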
|
|
a443c1bf29cc27ea8e1e58f22444d1e6b6587975
|
config.py
|
config.py
|
from tabs.bitcoin import BitcoinPrice, Bitcoind
from tabs.sysinfo import SystemStats, DiskUsage
from tabs.uptime import WebsiteUptime
import os
# Add any tabs you want to be visible here
tabs = [ # Track a running Bitcoin node
#Bitcoind({"host": "http://127.0.0.1:8332",
# "username": "bitcoinrpc",
# # Read the password from a file
# "password": "password" }),
# A Bitcoin price ticker
#BitcoinPrice(),
# Displays CPU, RAM usage and uptime
SystemStats(),
# Displays disk usage
DiskUsage(),
# Tracks website uptime
WebsiteUptime({"websites": [ {"name": "Google",
"url": "http://google.com"} ] })]
|
Fix some issues with Bitcoind and WebsiteUptime tabs
|
Fix some issues with Bitcoind and WebsiteUptime tabs
|
Python
|
unlicense
|
Matoking/SHOWtime,Matoking/SHOWtime,Matoking/SHOWtime
|
Fix some issues with Bitcoind and WebsiteUptime tabs
|
from tabs.bitcoin import BitcoinPrice, Bitcoind
from tabs.sysinfo import SystemStats, DiskUsage
from tabs.uptime import WebsiteUptime
import os
# Add any tabs you want to be visible here
tabs = [ # Track a running Bitcoin node
#Bitcoind({"host": "http://127.0.0.1:8332",
# "username": "bitcoinrpc",
# # Read the password from a file
# "password": "password" }),
# A Bitcoin price ticker
#BitcoinPrice(),
# Displays CPU, RAM usage and uptime
SystemStats(),
# Displays disk usage
DiskUsage(),
# Tracks website uptime
WebsiteUptime({"websites": [ {"name": "Google",
"url": "http://google.com"} ] })]
|
<commit_before><commit_msg>Fix some issues with Bitcoind and WebsiteUptime tabs<commit_after>
|
from tabs.bitcoin import BitcoinPrice, Bitcoind
from tabs.sysinfo import SystemStats, DiskUsage
from tabs.uptime import WebsiteUptime
import os
# Add any tabs you want to be visible here
tabs = [ # Track a running Bitcoin node
#Bitcoind({"host": "http://127.0.0.1:8332",
# "username": "bitcoinrpc",
# # Read the password from a file
# "password": "password" }),
# A Bitcoin price ticker
#BitcoinPrice(),
# Displays CPU, RAM usage and uptime
SystemStats(),
# Displays disk usage
DiskUsage(),
# Tracks website uptime
WebsiteUptime({"websites": [ {"name": "Google",
"url": "http://google.com"} ] })]
|
Fix some issues with Bitcoind and WebsiteUptime tabsfrom tabs.bitcoin import BitcoinPrice, Bitcoind
from tabs.sysinfo import SystemStats, DiskUsage
from tabs.uptime import WebsiteUptime
import os
# Add any tabs you want to be visible here
tabs = [ # Track a running Bitcoin node
#Bitcoind({"host": "http://127.0.0.1:8332",
# "username": "bitcoinrpc",
# # Read the password from a file
# "password": "password" }),
# A Bitcoin price ticker
#BitcoinPrice(),
# Displays CPU, RAM usage and uptime
SystemStats(),
# Displays disk usage
DiskUsage(),
# Tracks website uptime
WebsiteUptime({"websites": [ {"name": "Google",
"url": "http://google.com"} ] })]
|
<commit_before><commit_msg>Fix some issues with Bitcoind and WebsiteUptime tabs<commit_after>from tabs.bitcoin import BitcoinPrice, Bitcoind
from tabs.sysinfo import SystemStats, DiskUsage
from tabs.uptime import WebsiteUptime
import os
# Add any tabs you want to be visible here
tabs = [ # Track a running Bitcoin node
#Bitcoind({"host": "http://127.0.0.1:8332",
# "username": "bitcoinrpc",
# # Read the password from a file
# "password": "password" }),
# A Bitcoin price ticker
#BitcoinPrice(),
# Displays CPU, RAM usage and uptime
SystemStats(),
# Displays disk usage
DiskUsage(),
# Tracks website uptime
WebsiteUptime({"websites": [ {"name": "Google",
"url": "http://google.com"} ] })]
|
|
3debec0c40472d0eb43f94b6769ef1d8cc80383d
|
py/reverse-string-ii.py
|
py/reverse-string-ii.py
|
class Solution(object):
def reverseStr(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
ans = []
for i in xrange(0, len(s), k * 2):
ans.append(s[i:i + k][::-1] + s[i + k:i + 2 * k])
return ''.join(ans)
|
Add py solution for 541. Reverse String II
|
Add py solution for 541. Reverse String II
541. Reverse String II: https://leetcode.com/problems/reverse-string-ii/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 541. Reverse String II
541. Reverse String II: https://leetcode.com/problems/reverse-string-ii/
|
class Solution(object):
def reverseStr(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
ans = []
for i in xrange(0, len(s), k * 2):
ans.append(s[i:i + k][::-1] + s[i + k:i + 2 * k])
return ''.join(ans)
|
<commit_before><commit_msg>Add py solution for 541. Reverse String II
541. Reverse String II: https://leetcode.com/problems/reverse-string-ii/<commit_after>
|
class Solution(object):
def reverseStr(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
ans = []
for i in xrange(0, len(s), k * 2):
ans.append(s[i:i + k][::-1] + s[i + k:i + 2 * k])
return ''.join(ans)
|
Add py solution for 541. Reverse String II
541. Reverse String II: https://leetcode.com/problems/reverse-string-ii/class Solution(object):
def reverseStr(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
ans = []
for i in xrange(0, len(s), k * 2):
ans.append(s[i:i + k][::-1] + s[i + k:i + 2 * k])
return ''.join(ans)
|
<commit_before><commit_msg>Add py solution for 541. Reverse String II
541. Reverse String II: https://leetcode.com/problems/reverse-string-ii/<commit_after>class Solution(object):
def reverseStr(self, s, k):
"""
:type s: str
:type k: int
:rtype: str
"""
ans = []
for i in xrange(0, len(s), k * 2):
ans.append(s[i:i + k][::-1] + s[i + k:i + 2 * k])
return ''.join(ans)
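Quick checks against the problem's sample cases (run under Python 2, matching the solution's xrange):
assert Solution().reverseStr("abcdefg", 2) == "bacdfeg"
assert Solution().reverseStr("abcd", 2) == "bacd"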
|
|
30f7dd1c1d36fffe98147c7de76f09bb81a3a5be
|
src/split_node.py
|
src/split_node.py
|
class SplitNode:
# TODO Naming convention
"""
Type: Node
Function: Splits the dataset into 2 sets
Input port requirements: DATASET, PERCENTAGES
Output port promises: a tuple that contains the 2 new sets
"""
def __init__(self, input_port, output_port1, output_port2):
self.input_port = input_port
self.output_port1 = output_port1
self.output_port2 = output_port2
def run(self):
# TODO Define the function to get the percentages from the port
# TODO Define the function to set the output port splitsets
# TODO Agree on the output ports features
dataset = self.input_port.get()
out1_percentage, out2_percentage = self.input_port.get_percentages()
out1_end = int(out1_percentage * len(dataset))
out1 = dataset[:out1_end]
out2 = dataset[out1_end:]
self.output_port1.data = out1
self.output_port2.data = out2
|
Split Node initial commit. Class is created with its primary feature
|
Split Node initial commit. Class is created with its primary feature
|
Python
|
mit
|
sshihata/mash
|
Split Node initial commit. Class is created with its primary feature
|
class SplitNode:
# TODO Naming convention
"""
Type: Node
Function: Splits the dataset into 2 sets
Input port requirements: DATASET, PERCENTAGES
Output port promises: a tuple that contains the 2 new sets
"""
def __init__(self, input_port, output_port1, output_port2):
self.input_port = input_port
self.output_port1 = output_port1
self.output_port2 = output_port2
def run(self):
# TODO Define the function to get the percentages from the port
# TODO Define the function to set the output port splitsets
# TODO Agree on the output ports features
dataset = self.input_port.get()
out1_percentage, out2_percentage = self.input_port.get_percentages()
out1_end = int(out1_percentage * len(dataset))
out1 = dataset[:out1_end]
out2 = dataset[out1_end:]
self.output_port1.data = out1
self.output_port2.data = out2
|
<commit_before><commit_msg>Split Node initial commit. Class is created with its primary feature<commit_after>
|
class SplitNode:
# TODO Naming convention
"""
Type: Node
Function: Splits the dataset into 2 sets
Input port requirements: DATASET, PERCENTAGES
Output port promises: a tuple that contains the 2 new sets
"""
def __init__(self, input_port, output_port1, output_port2):
self.input_port = input_port
self.output_port1 = output_port1
self.output_port2 = output_port2
def run(self):
# TODO Define the function to get the percentages from the port
# TODO Define the function to set the output port splitsets
# TODO Agree on the output ports features
dataset = self.input_port.get()
out1_percentage, out2_percentage = self.input_port.get_percentages()
out1_end = int(out1_percentage * len(dataset))
out1 = dataset[:out1_end]
out2 = dataset[out1_end:]
self.output_port1.data = out1
self.output_port2.data = out2
|
Split Node initial commit. Class is created with its primary featureclass SplitNode:
# TODO Naming convention
"""
Type: Node
Function: Splits the dataset into 2 sets
Input port requirements: DATASET, PERCENTAGES
Output port promises: a tuple that contains the 2 new sets
"""
def __init__(self, input_port, output_port1, output_port2):
self.input_port = input_port
self.output_port1 = output_port1
self.output_port2 = output_port2
def run(self):
# TODO Define the function to get the percentages from the port
# TODO Define the function to set the output port splitsets
# TODO Agree on the output ports features
dataset = self.input_port.get()
out1_percentage, out2_percentage = self.input_port.get_percentages()
out1_end = int(out1_percentage * len(dataset))
out1 = dataset[:out1_end]
out2 = dataset[out1_end:]
self.output_port1.data = out1
self.output_port2.data = out2
|
<commit_before><commit_msg>Split Node initial commit. Class is created with its primary feature<commit_after>class SplitNode:
# TODO Naming convention
"""
Type: Node
Function: Splits the dataset into 2 sets
Input port requirements: DATASET, PERCENTAGES
Output port promises: a tuple that contains the 2 new sets
"""
def __init__(self, input_port, output_port1, output_port2):
self.input_port = input_port
self.output_port1 = output_port1
self.output_port2 = output_port2
def run(self):
# TODO Define the function to get the percentages from the port
# TODO Define the function to set the output port splitsets
# TODO Agree on the output ports features
dataset = self.input_port.get()
out1_percentage, out2_percentage = self.input_port.get_percentages()
out1_end = int(out1_percentage * len(dataset))
out1 = dataset[:out1_end]
out2 = dataset[out1_end:]
self.output_port1.data = out1
self.output_port2.data = out2
|
|
ad08e17bef38a1efc7f395d02938b08643756706
|
test_documents.py
|
test_documents.py
|
#!/bin/python
import MySQLdb
import argparse
import os
parser = argparse.ArgumentParser(description='Test documents of SugarCRM')
parser.add_argument('--remove', action='store_true',
help='delete documents')
args = parser.parse_args()
HOST = "localhost"
USER = "database_user"
PASSWD = "database_password"
DB = "database"
SUGARDIR = "/var/www/sugarcrm/upload"
db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DB)
cur = db.cursor()
cur.execute("SELECT id,filename FROM document_revisions")
rows = cur.fetchall()
to_delete = []
for row in rows:
if os.path.isfile(os.path.join(SUGARDIR, row[0])):
print "File %s (%s) exists" % (row[0], row[1])
else:
print "File %s (%s) doesn't exist!!" % (row[0], row[1])
to_delete.append(row[0])
if args.remove and to_delete:
print "Removing records for missing files..."
ids = ""
for revision_id in to_delete:
ids += "'%s'," % revision_id
ids = ids[0:-1]
sql = "delete from document_revisions where id in (%s)" % ids
cur.execute(sql)
# Parenthesized so both halves form a single SQL statement
sql = ("delete from documents where not exists (select id from"
" document_revisions where document_id = documents.id)")
cur.execute(sql)
db.commit()
cur.close()
|
Add script to test if documents exist
|
Add script to test if documents exist
|
Python
|
mit
|
pokoli/sugarcrm_document_uploader
|
Add script to test if documents exist
|
#!/bin/python
import MySQLdb
import argparse
import os
parser = argparse.ArgumentParser(description='Test documents of SugarCRM')
parser.add_argument('--remove', action='store_true',
help='delete documents')
args = parser.parse_args()
HOST = "localhost"
USER = "database_user"
PASSWD = "database_password"
DB = "database"
SUGARDIR = "/var/www/sugarcrm/upload"
db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DB)
cur = db.cursor()
cur.execute("SELECT id,filename FROM document_revisions")
rows = cur.fetchall()
to_delete = []
for row in rows:
if os.path.isfile(os.path.join(SUGARDIR, row[0])):
print "File %s (%s) exists" % (row[0], row[1])
else:
print "File %s (%s) doesn't exist!!" % (row[0], row[1])
to_delete.append(row[0])
if args.remove and to_delete:
print "Removing records for missing files..."
ids = ""
for revision_id in to_delete:
ids += "'%s'," % revision_id
ids = ids[0:-1]
sql = "delete from document_revisions where id in (%s)" % ids
cur.execute(sql)
# Parenthesized so both halves form a single SQL statement
sql = ("delete from documents where not exists (select id from"
" document_revisions where document_id = documents.id)")
cur.execute(sql)
db.commit()
cur.close()
|
<commit_before><commit_msg>Add script to test if documents exist<commit_after>
|
#!/bin/python
import MySQLdb
import argparse
import os
parser = argparse.ArgumentParser(description='Test documents of SugarCRM')
parser.add_argument('--remove', action='store_true',
help='delete documents')
args = parser.parse_args()
HOST = "localhost"
USER = "database_user"
PASSWD = "database_password"
DB = "database"
SUGARDIR = "/var/www/sugarcrm/upload"
db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DB)
cur = db.cursor()
cur.execute("SELECT id,filename FROM document_revisions")
rows = cur.fetchall()
to_delete = []
for row in rows:
if os.path.isfile(os.path.join(SUGARDIR, row[0])):
print "File %s (%s) exists" % (row[0], row[1])
else:
print "File %s (%s) doesn't exist!!" % (row[0], row[1])
to_delete.append(row[0])
if args.remove and to_delete:
print "Removing records for missing files..."
ids = ""
for revision_id in to_delete:
ids += "'%s'," % revision_id
ids = ids[0:-1]
sql = "delete from document_revisions where id in (%s)" % ids
cur.execute(sql)
# Parenthesized so both halves form a single SQL statement
sql = ("delete from documents where not exists (select id from"
" document_revisions where document_id = documents.id)")
cur.execute(sql)
db.commit()
cur.close()
|
Add script to test if documents exist#!/bin/python
import MySQLdb
import argparse
import os
parser = argparse.ArgumentParser(description='Test documents of SugarCRM')
parser.add_argument('--remove', action='store_true',
help='delete documents')
args = parser.parse_args()
HOST = "localhost"
USER = "database_user"
PASSWD = "database_password"
DB = "database"
SUGARDIR = "/var/www/sugarcrm/upload"
db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DB)
cur = db.cursor()
cur.execute("SELECT id,filename FROM document_revisions")
rows = cur.fetchall()
to_delete = []
for row in rows:
if os.path.isfile(os.path.join(SUGARDIR, row[0])):
print "File %s (%s) exists" % (row[0], row[1])
else:
print "File %s (%s) doesn't exist!!" % (row[0], row[1])
to_delete.append(row[0])
if args.remove and to_delete:
print "Removing records for missing files..."
ids = ""
for revision_id in to_delete:
ids += "'%s'," % revision_id
ids = ids[0:-1]
sql = "delete from document_revisions where id in (%s)" % ids
cur.execute(sql)
# Parenthesized so both halves form a single SQL statement
sql = ("delete from documents where not exists (select id from"
" document_revisions where document_id = documents.id)")
cur.execute(sql)
db.commit()
cur.close()
|
<commit_before><commit_msg>Add script to test if documents exist<commit_after>#!/bin/python
import MySQLdb
import argparse
import os
parser = argparse.ArgumentParser(description='Test documents of SugarCRM')
parser.add_argument('--remove', action='store_true',
help='delete documents')
args = parser.parse_args()
HOST = "localhost"
USER = "database_user"
PASSWD = "database_password"
DB = "database"
SUGARDIR = "/var/www/sugarcrm/upload"
db = MySQLdb.connect(host=HOST, user=USER, passwd=PASSWD, db=DB)
cur = db.cursor()
cur.execute("SELECT id,filename FROM document_revisions")
rows = cur.fetchall()
to_delete = []
for row in rows:
if os.path.isfile(os.path.join(SUGARDIR, row[0])):
print "File %s (%s) exists" % (row[0], row[1])
else:
print "File %s (%s) doesn't exist!!" % (row[0], row[1])
to_delete.append(row[0])
if args.remove and to_delete:
print "Removing records for missing files..."
ids = ""
for revision_id in to_delete:
ids += "'%s'," % revision_id
ids = ids[0:-1]
sql = "delete from document_revisions where id in (%s)" % ids
cur.execute(sql)
# Parenthesized so both halves form a single SQL statement
sql = ("delete from documents where not exists (select id from"
" document_revisions where document_id = documents.id)")
cur.execute(sql)
db.commit()
cur.close()
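Typical invocations (the --remove flag is defined near the top of the script; connection constants are whatever your deployment uses):
python test_documents.py            # report missing files only
python test_documents.py --remove   # also delete the orphaned rows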
|
|
a634d90e4ac80027466782975007de33afcb4c28
|
etc/wpt-summarize.py
|
etc/wpt-summarize.py
|
#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys
import json
full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
with open(sys.argv[1]) as f:
data = f.readlines()
thread = None
for entry in data:
entry = json.loads(entry)
if thread and "thread" in entry:
if entry["thread"] == thread:
print(json.dumps(entry))
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
if "action" in entry and \
entry["action"] == "test_start" and \
entry["test"] == sys.argv[2]:
thread = entry["thread"]
print(json.dumps(entry))
elif full_search and \
"command" in entry and \
sys.argv[2] in entry["command"]:
thread = entry["thread"]
print(json.dumps(entry))
|
Add a script to extract logs for particular test filenames from full WPT logs.
|
Add a script to extract logs for particular test filenames from full WPT logs.
|
Python
|
mpl-2.0
|
KiChjang/servo,DominoTree/servo,splav/servo,splav/servo,DominoTree/servo,KiChjang/servo,KiChjang/servo,DominoTree/servo,splav/servo,DominoTree/servo,splav/servo,splav/servo,KiChjang/servo,DominoTree/servo,splav/servo,DominoTree/servo,DominoTree/servo,splav/servo,DominoTree/servo,KiChjang/servo,splav/servo,DominoTree/servo,KiChjang/servo,splav/servo,KiChjang/servo,KiChjang/servo,DominoTree/servo,KiChjang/servo,KiChjang/servo,splav/servo
|
Add a script to extract logs for particular test filenames from full WPT logs.
|
#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys
import json
full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
with open(sys.argv[1]) as f:
data = f.readlines()
thread = None
for entry in data:
entry = json.loads(entry)
if thread and "thread" in entry:
if entry["thread"] == thread:
print(json.dumps(entry))
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
if "action" in entry and \
entry["action"] == "test_start" and \
entry["test"] == sys.argv[2]:
thread = entry["thread"]
print(json.dumps(entry))
elif full_search and \
"command" in entry and \
sys.argv[2] in entry["command"]:
thread = entry["thread"]
print(json.dumps(entry))
|
<commit_before><commit_msg>Add a script to extract logs for particular test filenames from full WPT logs.<commit_after>
|
#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys
import json
full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
with open(sys.argv[1]) as f:
data = f.readlines()
thread = None
for entry in data:
entry = json.loads(entry)
if thread and "thread" in entry:
if entry["thread"] == thread:
print(json.dumps(entry))
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
if "action" in entry and \
entry["action"] == "test_start" and \
entry["test"] == sys.argv[2]:
thread = entry["thread"]
print(json.dumps(entry))
elif full_search and \
"command" in entry and \
sys.argv[2] in entry["command"]:
thread = entry["thread"]
print(json.dumps(entry))
|
Add a script to extract logs for particular test filenames from full WPT logs.#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys
import json
full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
with open(sys.argv[1]) as f:
data = f.readlines()
thread = None
for entry in data:
entry = json.loads(entry)
if thread and "thread" in entry:
if entry["thread"] == thread:
print(json.dumps(entry))
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
if "action" in entry and \
entry["action"] == "test_start" and \
entry["test"] == sys.argv[2]:
thread = entry["thread"]
print(json.dumps(entry))
elif full_search and \
"command" in entry and \
sys.argv[2] in entry["command"]:
thread = entry["thread"]
print(json.dumps(entry))
|
<commit_before><commit_msg>Add a script to extract logs for particular test filenames from full WPT logs.<commit_after>#!/usr/bin/env python
# Copyright 2019 The Servo Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import sys
import json
full_search = len(sys.argv) > 3 and sys.argv[3] == '--full'
with open(sys.argv[1]) as f:
data = f.readlines()
thread = None
for entry in data:
entry = json.loads(entry)
if thread and "thread" in entry:
if entry["thread"] == thread:
print(json.dumps(entry))
if "action" in entry and entry["action"] == "test_end":
thread = None
else:
if "action" in entry and \
entry["action"] == "test_start" and \
entry["test"] == sys.argv[2]:
thread = entry["thread"]
print(json.dumps(entry))
elif full_search and \
"command" in entry and \
sys.argv[2] in entry["command"]:
thread = entry["thread"]
print(json.dumps(entry))
|
|
17a632d3474d66652f4fa15b3365ab14c7316870
|
Graphs/check_bst.py
|
Graphs/check_bst.py
|
"""
Write a program to check if a given binary tree is bst or not.
"""
"""
Approach 1:
1. For each node, check that node's value is greater than max_value in its left subtree and smaller than
min_value in its right subtree.
2. This approach is not efficient as each node is traversed multiple times.
Approach 2:
1. We use two variables, min and max, to track the range in which the current node's data should lie.
2. Initially, min = -INF and max = INF.
3. At each node, when we go to its left subtree, we make max = node.data and when we go to its right subtree
we make min = node.data
Approach 3:
1. We can do an in-order traversal of the tree and see if the output is sorted.
2. A space optimization is, we only keep track of previously seen node's value. If current node's value
is less than previous node's value, binary tree is not BST.
Below, Approach 2 is implemented.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def is_bst(node):
if node is None:
return True
return is_bst_helper(node, -float('inf'), float('inf'))
def is_bst_helper(node, min_val, max_val):
if node is None:
return True
if node.data < min_val or node.data > max_val:
return False
return is_bst_helper(node.left, min_val, node.data) and is_bst_helper(node.right, node.data, max_val)
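For comparison, a minimal sketch of Approach 3 (in-order traversal that keeps only the previously visited value; it assumes the same Node class and, like the range check above, accepts equal neighbouring values):
def is_bst_inorder(root):
    prev = [None]  # previously visited value; a list so the inner function can rebind it
    def visit(node):
        if node is None:
            return True
        if not visit(node.left):  # left subtree first (in-order)
            return False
        if prev[0] is not None and node.data < prev[0]:
            return False  # the in-order sequence decreased, so not a BST
        prev[0] = node.data
        return visit(node.right)
    return visit(root)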
|
Check if binary tree is a BST
|
Check if binary tree is a BST
|
Python
|
mit
|
prathamtandon/g4gproblems
|
Check if binary tree is a BST
|
"""
Write a program to check if a given binary tree is bst or not.
"""
"""
Approach 1:
1. For each node, check that node's value is greater than max_value in its left subtree and smaller than
min_value in its right subtree.
2. This approach is not efficient as each node is traversed multiple times.
Approach 2:
1. We use two variables, min and max, to track the range in which the current node's data should lie.
2. Initially, min = -INF and max = INF.
3. At each node, when we go to its left subtree, we make max = node.data and when we go to its right subtree
we make min = node.data
Approach 3:
1. We can do an in-order traversal of the tree and see if the output is sorted.
2. A space optimization is, we only keep track of previously seen node's value. If current node's value
is less than previous node's value, binary tree is not BST.
Below, Approach 2 is implemented.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def is_bst(node):
if node is None:
return True
return is_bst_helper(node, -float('inf'), float('inf'))
def is_bst_helper(node, min_val, max_val):
if node is None:
return True
if node.data < min_val or node.data > max_val:
return False
return is_bst_helper(node.left, min_val, node.data) and is_bst_helper(node.right, node.data, max_val)
|
<commit_before><commit_msg>Check if binary tree is a BST<commit_after>
|
"""
Write a program to check if a given binary tree is bst or not.
"""
"""
Approach 1:
1. For each node, check that node's value is greater than max_value in its left subtree and smaller than
min_value in its right subtree.
2. This approach is not efficient as each node is traversed multiple times.
Approach 2:
1. We use two variables, min and max, to track the range in which the current node's data should lie.
2. Initially, min = -INF and max = INF.
3. At each node, when we go to its left subtree, we make max = node.data and when we go to its right subtree
we make min = node.data
Approach 3:
1. We can do an in-order traversal of the tree and see if the output is sorted.
2. A space optimization is, we only keep track of previously seen node's value. If current node's value
is less than previous node's value, binary tree is not BST.
Below, Approach 2 is implemented.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def is_bst(node):
if node is None:
return True
return is_bst_helper(node, -float('inf'), float('inf'))
def is_bst_helper(node, min_val, max_val):
if node is None:
return True
if node.data < min_val or node.data > max_val:
return False
return is_bst_helper(node.left, min_val, node.data) and is_bst_helper(node.right, node.data, max_val)
|
Check if binary tree is a BST"""
Write a program to check if a given binary tree is bst or not.
"""
"""
Approach 1:
1. For each node, check that node's value is greater than max_value in its left subtree and smaller than
min_value in its right subtree.
2. This approach is not efficient as each node is traversed multiple times.
Approach 2:
1. We use two variables, min and max, to track the range in which the current node's data should lie.
2. Initially, min = -INF and max = INF.
3. At each node, when we go to its left subtree, we make max = node.data and when we go to its right subtree
we make min = node.data
Approach 3:
1. We can do an in-order traversal of the tree and see if the output is sorted.
2. A space optimization is, we only keep track of previously seen node's value. If current node's value
is less than previous node's value, binary tree is not BST.
Below, Approach 2 is implemented.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def is_bst(node):
if node is None:
return True
return is_bst_helper(node, -float('inf'), float('inf'))
def is_bst_helper(node, min_val, max_val):
if node is None:
return True
if node.data < min_val or node.data > max_val:
return False
return is_bst_helper(node.left, min_val, node.data) and is_bst_helper(node.right, node.data, max_val)
|
<commit_before><commit_msg>Check if binary tree is a BST<commit_after>"""
Write a program to check if a given binary tree is bst or not.
"""
"""
Approach 1:
1. For each node, check that node's value is greater than max_value in its left subtree and smaller than
min_value in its right subtree.
2. This approach is not efficient as each node is traversed multiple times.
Approach 2:
1. We use two variables, min and max, to track the range in which the current node's data should lie.
2. Initially, min = -INF and max = INF.
3. At each node, when we go to its left subtree, we make max = node.data and when we go to its right subtree
we make min = node.data
Approach 3:
1. We can do an in-order traversal of the tree and see if the output is sorted.
2. A space optimization is, we only keep track of previously seen node's value. If current node's value
is less than previous node's value, binary tree is not BST.
Below, Approach 2 is implemented.
"""
class Node:
def __init__(self, data, left=None, right=None):
self.data = data
self.left = left
self.right = right
def is_bst(node):
if node is None:
return True
return is_bst_helper(node, -float('inf'), float('inf'))
def is_bst_helper(node, min_val, max_val):
if node is None:
return True
if node.data < min_val or node.data > max_val:
return False
return is_bst_helper(node.left, min_val, node.data) and is_bst_helper(node.right, node.data, max_val)
|
|
1528b501e0125db0e054c2fd27820ec4384807a1
|
experiments/pc_hybridization.py
|
experiments/pc_hybridization.py
|
"""Solve a mixed Helmholtz problem
sigma + grad(u) = 0,
u + div(sigma) = f,
using hybridisation with SLATE performing the forward elimination and
backwards reconstructions. The corresponding finite element variational
problem is:
dot(sigma, tau)*dx - u*div(tau)*dx + lambdar*dot(tau, n)*dS = 0
div(sigma)*v*dx + u*v*dx = f*v*dx
gammar*dot(sigma, n)*dS = 0
for all tau, v, and gammar.
This is solved using broken Raviart-Thomas elements of degree k for
(sigma, tau), discontinuous Galerkin elements of degree k - 1
for (u, v), and HDiv-Trace elements of degree k - 1 for (lambdar, gammar).
The forcing function is chosen as:
(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2),
which reproduces the known analytical solution:
sin(x[0]*pi*2)*sin(x[1]*pi*2)
"""
from __future__ import absolute_import, print_function, division
from firedrake import *
# Create a mesh
mesh = UnitSquareMesh(120, 120)
# Define relevant function spaces
degree = 2
RT = FiniteElement("RT", triangle, degree)
BRT = FunctionSpace(mesh, RT)
DG = FunctionSpace(mesh, "DG", degree - 1)
W = BRT * DG
# Define the trial and test functions
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
# Define the source function
f = Function(DG)
x, y = SpatialCoordinate(mesh)
f.interpolate((1+8*pi*pi)*sin(x*pi*2)*sin(y*pi*2))
# Define finite element variational forms
a = (dot(sigma, tau) + u * v + div(sigma) * v - div(tau) * u) * dx
L = f * v * dx
w = Function(W)
parameters = {"mat_type": "matfree",
"pc_type": "python",
"pc_python_type": "firedrake.HybridizationPC",
"trace_pc_type": "lu",
"trace_ksp_type": "preonly",
"trace_ksp_monitor_true_residual": True,
"ksp_monitor_true_residual": True}
solve(a == L, w, solver_parameters=parameters)
u, p = w.split()
u.rename("Velocity")
p.rename("Pressure")
File("pchybrid.pvd").write(u, p)
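Since the analytical solution is known, a quick accuracy check could be appended (a sketch, not part of the original script):
err = assemble((p - sin(x*pi*2)*sin(y*pi*2))**2 * dx) ** 0.5
print("L2 error in the scalar variable: %g" % err)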
|
Add complete hybridization test with python PC
|
Add complete hybridization test with python PC
|
Python
|
mit
|
thomasgibson/tabula-rasa
|
Add complete hybridization test with python PC
|
"""Solve a mixed Helmholtz problem
sigma + grad(u) = 0,
u + div(sigma) = f,
using hybridisation with SLATE performing the forward elimination and
backwards reconstructions. The corresponding finite element variational
problem is:
dot(sigma, tau)*dx - u*div(tau)*dx + lambdar*dot(tau, n)*dS = 0
div(sigma)*v*dx + u*v*dx = f*v*dx
gammar*dot(sigma, n)*dS = 0
for all tau, v, and gammar.
This is solved using broken Raviart-Thomas elements of degree k for
(sigma, tau), discontinuous Galerkin elements of degree k - 1
for (u, v), and HDiv-Trace elements of degree k - 1 for (lambdar, gammar).
The forcing function is chosen as:
(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2),
which reproduces the known analytical solution:
sin(x[0]*pi*2)*sin(x[1]*pi*2)
"""
from __future__ import absolute_import, print_function, division
from firedrake import *
# Create a mesh
mesh = UnitSquareMesh(120, 120)
# Define relevant function spaces
degree = 2
RT = FiniteElement("RT", triangle, degree)
BRT = FunctionSpace(mesh, RT)
DG = FunctionSpace(mesh, "DG", degree - 1)
W = BRT * DG
# Define the trial and test functions
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
# Define the source function
f = Function(DG)
x, y = SpatialCoordinate(mesh)
f.interpolate((1+8*pi*pi)*sin(x*pi*2)*sin(y*pi*2))
# Define finite element variational forms
a = (dot(sigma, tau) + u * v + div(sigma) * v - div(tau) * u) * dx
L = f * v * dx
w = Function(W)
parameters = {"mat_type": "matfree",
"pc_type": "python",
"pc_python_type": "firedrake.HybridizationPC",
"trace_pc_type": "lu",
"trace_ksp_type": "preonly",
"trace_ksp_monitor_true_residual": True,
"ksp_monitor_true_residual": True}
solve(a == L, w, solver_parameters=parameters)
u, p = w.split()
u.rename("Velocity")
p.rename("Pressure")
File("pchybrid.pvd").write(u, p)
|
<commit_before><commit_msg>Add complete hybridization test with python PC<commit_after>
|
"""Solve a mixed Helmholtz problem
sigma + grad(u) = 0,
u + div(sigma) = f,
using hybridisation with SLATE performing the forward elimination and
backwards reconstructions. The corresponding finite element variational
problem is:
dot(sigma, tau)*dx - u*div(tau)*dx + lambdar*dot(tau, n)*dS = 0
div(sigma)*v*dx + u*v*dx = f*v*dx
gammar*dot(sigma, n)*dS = 0
for all tau, v, and gammar.
This is solved using broken Raviart-Thomas elements of degree k for
(sigma, tau), discontinuous Galerkin elements of degree k - 1
for (u, v), and HDiv-Trace elements of degree k - 1 for (lambdar, gammar).
The forcing function is chosen as:
(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2),
which reproduces the known analytical solution:
sin(x[0]*pi*2)*sin(x[1]*pi*2)
"""
from __future__ import absolute_import, print_function, division
from firedrake import *
# Create a mesh
mesh = UnitSquareMesh(120, 120)
# Define relevant function spaces
degree = 2
RT = FiniteElement("RT", triangle, degree)
BRT = FunctionSpace(mesh, RT)
DG = FunctionSpace(mesh, "DG", degree - 1)
W = BRT * DG
# Define the trial and test functions
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
# Define the source function
f = Function(DG)
x, y = SpatialCoordinate(mesh)
f.interpolate((1+8*pi*pi)*sin(x*pi*2)*sin(y*pi*2))
# Define finite element variational forms
a = (dot(sigma, tau) + u * v + div(sigma) * v - div(tau) * u) * dx
L = f * v * dx
w = Function(W)
parameters = {"mat_type": "matfree",
"pc_type": "python",
"pc_python_type": "firedrake.HybridizationPC",
"trace_pc_type": "lu",
"trace_ksp_type": "preonly",
"trace_ksp_monitor_true_residual": True,
"ksp_monitor_true_residual": True}
solve(a == L, w, solver_parameters=parameters)
u, p = w.split()
u.rename("Velocity")
p.rename("Pressure")
File("pchybrid.pvd").write(u, p)
|
Add complete hybridization test with python PC"""Solve a mixed Helmholtz problem
sigma + grad(u) = 0,
u + div(sigma) = f,
using hybridisation with SLATE performing the forward elimination and
backwards reconstructions. The corresponding finite element variational
problem is:
dot(sigma, tau)*dx - u*div(tau)*dx + lambdar*dot(tau, n)*dS = 0
div(sigma)*v*dx + u*v*dx = f*v*dx
gammar*dot(sigma, n)*dS = 0
for all tau, v, and gammar.
This is solved using broken Raviart-Thomas elements of degree k for
(sigma, tau), discontinuous Galerkin elements of degree k - 1
for (u, v), and HDiv-Trace elements of degree k - 1 for (lambdar, gammar).
The forcing function is chosen as:
(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2),
which reproduces the known analytical solution:
sin(x[0]*pi*2)*sin(x[1]*pi*2)
"""
from __future__ import absolute_import, print_function, division
from firedrake import *
# Create a mesh
mesh = UnitSquareMesh(120, 120)
# Define relevant function spaces
degree = 2
RT = FiniteElement("RT", triangle, degree)
BRT = FunctionSpace(mesh, RT)
DG = FunctionSpace(mesh, "DG", degree - 1)
W = BRT * DG
# Define the trial and test functions
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
# Define the source function
f = Function(DG)
x, y = SpatialCoordinate(mesh)
f.interpolate((1+8*pi*pi)*sin(x*pi*2)*sin(y*pi*2))
# Define finite element variational forms
a = (dot(sigma, tau) + u * v + div(sigma) * v - div(tau) * u) * dx
L = f * v * dx
w = Function(W)
parameters = {"mat_type": "matfree",
"pc_type": "python",
"pc_python_type": "firedrake.HybridizationPC",
"trace_pc_type": "lu",
"trace_ksp_type": "preonly",
"trace_ksp_monitor_true_residual": True,
"ksp_monitor_true_residual": True}
solve(a == L, w, solver_parameters=parameters)
u, p = w.split()
u.rename("Velocity")
p.rename("Pressure")
File("pchybrid.pvd").write(u, p)
|
<commit_before><commit_msg>Add complete hybridization test with python PC<commit_after>"""Solve a mixed Helmholtz problem
sigma + grad(u) = 0,
u + div(sigma) = f,
using hybridisation with SLATE performing the forward elimination and
backwards reconstructions. The corresponding finite element variational
problem is:
dot(sigma, tau)*dx - u*div(tau)*dx + lambdar*dot(tau, n)*dS = 0
div(sigma)*v*dx + u*v*dx = f*v*dx
gammar*dot(sigma, n)*dS = 0
for all tau, v, and gammar.
This is solved using broken Raviart-Thomas elements of degree k for
(sigma, tau), discontinuous Galerkin elements of degree k - 1
for (u, v), and HDiv-Trace elements of degree k - 1 for (lambdar, gammar).
The forcing function is chosen as:
(1+8*pi*pi)*sin(x[0]*pi*2)*sin(x[1]*pi*2),
which reproduces the known analytical solution:
sin(x[0]*pi*2)*sin(x[1]*pi*2)
"""
from __future__ import absolute_import, print_function, division
from firedrake import *
# Create a mesh
mesh = UnitSquareMesh(120, 120)
# Define relevant function spaces
degree = 2
RT = FiniteElement("RT", triangle, degree)
BRT = FunctionSpace(mesh, RT)
DG = FunctionSpace(mesh, "DG", degree - 1)
W = BRT * DG
# Define the trial and test functions
sigma, u = TrialFunctions(W)
tau, v = TestFunctions(W)
# Define the source function
f = Function(DG)
x, y = SpatialCoordinate(mesh)
f.interpolate((1+8*pi*pi)*sin(x*pi*2)*sin(y*pi*2))
# Define finite element variational forms
a = (dot(sigma, tau) + u * v + div(sigma) * v - div(tau) * u) * dx
L = f * v * dx
w = Function(W)
parameters = {"mat_type": "matfree",
"pc_type": "python",
"pc_python_type": "firedrake.HybridizationPC",
"trace_pc_type": "lu",
"trace_ksp_type": "preonly",
"trace_ksp_monitor_true_residual": True,
"ksp_monitor_true_residual": True}
solve(a == L, w, solver_parameters=parameters)
u, p = w.split()
u.rename("Velocity")
p.rename("Pressure")
File("pchybrid.pvd").write(u, p)
|
|
22a500afb9e03f59d33a6c9ee02a5fee6b164cba
|
SizeDocGenerator.py
|
SizeDocGenerator.py
|
import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
    print " * %s: %d bytes (%d with stack alignment)" % (sDoc, iBinSize, iBinESPSize);
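The output has this shape (the build number and byte counts are invented):
Sizes (build 123)
 * x86: 72 bytes (75 with stack alignment)
 * x64: 100 bytes (104 with stack alignment)
 * x86+x64: 196 bytes (200 with stack alignment)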
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.
|
Python
|
bsd-3-clause
|
CryptXor/win-exec-calc-shellcode,r3dbrain/win-exec-calc-shellcode,CryptXor/win-exec-calc-shellcode,r3dbrain/win-exec-calc-shellcode,Templario17/win-exec-calc-shellcode,Templario17/win-exec-calc-shellcode
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.
|
import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
    print " * %s: %d bytes (%d with stack alignment)" % (sDoc, iBinSize, iBinESPSize);
|
<commit_before><commit_msg>Add script to generate the site documentation containing the sizes of the binary shellcodes.<commit_after>
|
import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
    print " * %s: %d bytes (%d with stack alignment)" % (sDoc, iBinSize, iBinESPSize);
|
Add script to generate the site documentation containing the sizes of the binary shellcodes.import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
    print " * %s: %d bytes (%d with stack alignment)" % (sDoc, iBinSize, iBinESPSize);
|
<commit_before><commit_msg>Add script to generate the site documentation containing the sizes of the binary shellcodes.<commit_after>import os, re;
# I got the actual size of the binary code wrong on the site once - this script should help prevent that.
dsDoc_by_sArch = {"w32": "x86", "w64": "x64", "win": "x86+x64"};
with open("build_info.txt", "rb") as oFile:
iBuildNumber = int(re.search(r"build number\: (\d+)", oFile.read(), re.M).group(1));
print "Sizes (build %d)" % iBuildNumber;
for sArch in sorted(dsDoc_by_sArch.keys()):
sDoc = dsDoc_by_sArch[sArch];
iBinSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode.bin" % sArch);
iBinESPSize = os.path.getsize(r"build\bin\%s-exec-calc-shellcode-esp.bin" % sArch);
    print " * %s: %d bytes (%d with stack alignment)" % (sDoc, iBinSize, iBinESPSize);
|
|
7d26f961a4e6eff6f9dad7b42a04a8648efdefb8
|
.travis-output.py
|
.travis-output.py
|
#!/usr/bin/python3
import io
import pexpect
import string
import sys
import time
sys.stdin = io.TextIOWrapper(sys.stdin.detach(), newline='')
output_to=sys.stdout
args = list(sys.argv[1:])
logfile = open(args.pop(0), "w")
child = pexpect.spawn(' '.join(args))
def output_line(line_bits, last_skip):
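    # Print lines that look like meaningful output (indented, or whose first
    # non-blank character is a capital letter or one of '[', '=', '!', '+')
    # verbatim; collapse everything else to a single '.' to keep the log short.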
line = "".join(line_bits)
sline = line.strip()
skip = True
if line.startswith(" "):
skip = False
if len(sline) > 0:
if sline[0] in string.ascii_uppercase:
skip = False
if sline[0] in ('[', '=', '!', '+'):
skip = False
if skip != last_skip:
output_to.write('\n')
if skip:
output_to.write('.')
else:
output_to.write(line)
output_to.flush()
line_bits.clear()
return skip
def find_newline(data):
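    # Split the buffered chunks at the earliest line break, treating a bare
    # '\r' (progress-style updates) as a terminator of its own while keeping
    # a '\r\n' pair together as a single break.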
fulldata = b"".join(data)
newlinechar = fulldata.find(b'\n')
retlinechar = fulldata.find(b'\r')
if newlinechar == -1:
newlinechar = len(fulldata)+1
if retlinechar == -1:
retlinechar = len(fulldata)+1
if retlinechar+1 == newlinechar:
splitpos = newlinechar
else:
splitpos = min(newlinechar, retlinechar)
if splitpos > len(fulldata):
return
newline = fulldata[:splitpos+1]
leftover = fulldata[splitpos+1:]
data.clear()
data.append(leftover)
return newline
last_skip = False
cont = []
data = [b'']
while True:
line = None
while len(data) > 1 or len(data[0]) > 0 or child.isalive():
line = find_newline(data)
if line is not None:
break
try:
data.append(child.read_nonblocking(100))
except pexpect.TIMEOUT:
pass
except pexpect.EOF as e:
data.append(b'\n')
if not line:
break
line = line.decode('utf-8')
logfile.write(line)
logfile.flush()
if line.endswith('\r'):
cont.append(line[:-1])
last_skip = output_line(cont, last_skip)
cont.append('\r')
continue
sline = line.strip('\n\r')
cont.append(sline)
if sline.endswith('\\'):
continue
cont.append('\n')
last_skip = output_line(cont, last_skip)
|
Clean up the travis log output.
|
Clean up the travis log output.
|
Python
|
apache-2.0
|
litex-hub/litex-conda-ci,litex-hub/litex-conda-ci
|
Clean up the travis log output.
|
#!/usr/bin/python3
import io
import pexpect
import string
import sys
import time
sys.stdin = io.TextIOWrapper(sys.stdin.detach(), newline='')
output_to=sys.stdout
args = list(sys.argv[1:])
logfile = open(args.pop(0), "w")
child = pexpect.spawn(' '.join(args))
def output_line(line_bits, last_skip):
line = "".join(line_bits)
sline = line.strip()
skip = True
if line.startswith(" "):
skip = False
if len(sline) > 0:
if sline[0] in string.ascii_uppercase:
skip = False
if sline[0] in ('[', '=', '!', '+'):
skip = False
if skip != last_skip:
output_to.write('\n')
if skip:
output_to.write('.')
else:
output_to.write(line)
output_to.flush()
line_bits.clear()
return skip
def find_newline(data):
fulldata = b"".join(data)
newlinechar = fulldata.find(b'\n')
retlinechar = fulldata.find(b'\r')
if newlinechar == -1:
newlinechar = len(fulldata)+1
if retlinechar == -1:
retlinechar = len(fulldata)+1
if retlinechar+1 == newlinechar:
splitpos = newlinechar
else:
splitpos = min(newlinechar, retlinechar)
if splitpos > len(fulldata):
return
newline = fulldata[:splitpos+1]
leftover = fulldata[splitpos+1:]
data.clear()
data.append(leftover)
return newline
last_skip = False
cont = []
data = [b'']
while True:
line = None
while len(data) > 1 or len(data[0]) > 0 or child.isalive():
line = find_newline(data)
if line is not None:
break
try:
data.append(child.read_nonblocking(100))
except pexpect.TIMEOUT:
pass
except pexpect.EOF as e:
data.append(b'\n')
if not line:
break
line = line.decode('utf-8')
logfile.write(line)
logfile.flush()
if line.endswith('\r'):
cont.append(line[:-1])
last_skip = output_line(cont, last_skip)
cont.append('\r')
continue
sline = line.strip('\n\r')
cont.append(sline)
if sline.endswith('\\'):
continue
cont.append('\n')
last_skip = output_line(cont, last_skip)
|
<commit_before><commit_msg>Clean up the travis log output.<commit_after>
|
#!/usr/bin/python3
import io
import pexpect
import string
import sys
import time
sys.stdin = io.TextIOWrapper(sys.stdin.detach(), newline='')
output_to=sys.stdout
args = list(sys.argv[1:])
logfile = open(args.pop(0), "w")
child = pexpect.spawn(' '.join(args))
def output_line(line_bits, last_skip):
line = "".join(line_bits)
sline = line.strip()
skip = True
if line.startswith(" "):
skip = False
if len(sline) > 0:
if sline[0] in string.ascii_uppercase:
skip = False
if sline[0] in ('[', '=', '!', '+'):
skip = False
if skip != last_skip:
output_to.write('\n')
if skip:
output_to.write('.')
else:
output_to.write(line)
output_to.flush()
line_bits.clear()
return skip
def find_newline(data):
fulldata = b"".join(data)
newlinechar = fulldata.find(b'\n')
retlinechar = fulldata.find(b'\r')
if newlinechar == -1:
newlinechar = len(fulldata)+1
if retlinechar == -1:
retlinechar = len(fulldata)+1
if retlinechar+1 == newlinechar:
splitpos = newlinechar
else:
splitpos = min(newlinechar, retlinechar)
if splitpos > len(fulldata):
return
newline = fulldata[:splitpos+1]
leftover = fulldata[splitpos+1:]
data.clear()
data.append(leftover)
return newline
last_skip = False
cont = []
data = [b'']
while True:
line = None
while len(data) > 1 or len(data[0]) > 0 or child.isalive():
line = find_newline(data)
if line is not None:
break
try:
data.append(child.read_nonblocking(100))
except pexpect.TIMEOUT:
pass
except pexpect.EOF as e:
data.append(b'\n')
if not line:
break
line = line.decode('utf-8')
logfile.write(line)
logfile.flush()
if line.endswith('\r'):
cont.append(line[:-1])
last_skip = output_line(cont, last_skip)
cont.append('\r')
continue
sline = line.strip('\n\r')
cont.append(sline)
if sline.endswith('\\'):
continue
cont.append('\n')
last_skip = output_line(cont, last_skip)
|
Clean up the travis log output.#!/usr/bin/python3
import io
import pexpect
import string
import sys
import time
sys.stdin = io.TextIOWrapper(sys.stdin.detach(), newline='')
output_to=sys.stdout
args = list(sys.argv[1:])
logfile = open(args.pop(0), "w")
child = pexpect.spawn(' '.join(args))
def output_line(line_bits, last_skip):
line = "".join(line_bits)
sline = line.strip()
skip = True
if line.startswith(" "):
skip = False
if len(sline) > 0:
if sline[0] in string.ascii_uppercase:
skip = False
if sline[0] in ('[', '=', '!', '+'):
skip = False
if skip != last_skip:
output_to.write('\n')
if skip:
output_to.write('.')
else:
output_to.write(line)
output_to.flush()
line_bits.clear()
return skip
def find_newline(data):
fulldata = b"".join(data)
newlinechar = fulldata.find(b'\n')
retlinechar = fulldata.find(b'\r')
if newlinechar == -1:
newlinechar = len(fulldata)+1
if retlinechar == -1:
retlinechar = len(fulldata)+1
if retlinechar+1 == newlinechar:
splitpos = newlinechar
else:
splitpos = min(newlinechar, retlinechar)
if splitpos > len(fulldata):
return
newline = fulldata[:splitpos+1]
leftover = fulldata[splitpos+1:]
data.clear()
data.append(leftover)
return newline
last_skip = False
cont = []
data = [b'']
while True:
line = None
while len(data) > 1 or len(data[0]) > 0 or child.isalive():
line = find_newline(data)
if line is not None:
break
try:
data.append(child.read_nonblocking(100))
except pexpect.TIMEOUT:
pass
except pexpect.EOF as e:
data.append(b'\n')
if not line:
break
line = line.decode('utf-8')
logfile.write(line)
logfile.flush()
if line.endswith('\r'):
cont.append(line[:-1])
last_skip = output_line(cont, last_skip)
cont.append('\r')
continue
sline = line.strip('\n\r')
cont.append(sline)
if sline.endswith('\\'):
continue
cont.append('\n')
last_skip = output_line(cont, last_skip)
|
<commit_before><commit_msg>Clean up the travis log output.<commit_after>#!/usr/bin/python3
import io
import pexpect
import string
import sys
import time
sys.stdin = io.TextIOWrapper(sys.stdin.detach(), newline='')
output_to=sys.stdout
args = list(sys.argv[1:])
logfile = open(args.pop(0), "w")
child = pexpect.spawn(' '.join(args))
def output_line(line_bits, last_skip):
line = "".join(line_bits)
sline = line.strip()
skip = True
if line.startswith(" "):
skip = False
if len(sline) > 0:
if sline[0] in string.ascii_uppercase:
skip = False
if sline[0] in ('[', '=', '!', '+'):
skip = False
if skip != last_skip:
output_to.write('\n')
if skip:
output_to.write('.')
else:
output_to.write(line)
output_to.flush()
line_bits.clear()
return skip
def find_newline(data):
fulldata = b"".join(data)
newlinechar = fulldata.find(b'\n')
retlinechar = fulldata.find(b'\r')
if newlinechar == -1:
newlinechar = len(fulldata)+1
if retlinechar == -1:
retlinechar = len(fulldata)+1
if retlinechar+1 == newlinechar:
splitpos = newlinechar
else:
splitpos = min(newlinechar, retlinechar)
if splitpos > len(fulldata):
return
newline = fulldata[:splitpos+1]
leftover = fulldata[splitpos+1:]
data.clear()
data.append(leftover)
return newline
last_skip = False
cont = []
data = [b'']
while True:
line = None
while len(data) > 1 or len(data[0]) > 0 or child.isalive():
line = find_newline(data)
if line is not None:
break
try:
data.append(child.read_nonblocking(100))
except pexpect.TIMEOUT:
pass
except pexpect.EOF as e:
data.append(b'\n')
if not line:
break
line = line.decode('utf-8')
logfile.write(line)
logfile.flush()
if line.endswith('\r'):
cont.append(line[:-1])
last_skip = output_line(cont, last_skip)
cont.append('\r')
continue
sline = line.strip('\n\r')
cont.append(sline)
if sline.endswith('\\'):
continue
cont.append('\n')
last_skip = output_line(cont, last_skip)
|
|
f11f82e39d115129081c733e99b9cadff93a27ec
|
candidates/tests/test_signup.py
|
candidates/tests/test_signup.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from usersettings.shortcuts import get_current_usersettings
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from .settings import SettingsMixin
class SettingsTests(SettingsMixin, WebTest):
def test_signup_allowed(self):
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Password (again)')
self.assertNotContains(response, 'Sign Up Closed')
def test_signup_disabled(self):
user_settings = get_current_usersettings()
        user_settings.NEW_ACCOUNTS_ALLOWED = False
user_settings.save()
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Sign Up Closed')
self.assertNotContains(response, 'Password (again)')
|
Test to check switching off user signup
|
Test to check switching off user signup
|
Python
|
agpl-3.0
|
mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,mysociety/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit
|
Test to check switching off user signup
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from usersettings.shortcuts import get_current_usersettings
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from .settings import SettingsMixin
class SettingsTests(SettingsMixin, WebTest):
def test_signup_allowed(self):
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Password (again)')
self.assertNotContains(response, 'Sign Up Closed')
def test_signup_disabled(self):
user_settings = get_current_usersettings()
        user_settings.NEW_ACCOUNTS_ALLOWED = False
user_settings.save()
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Sign Up Closed')
self.assertNotContains(response, 'Password (again)')
|
<commit_before><commit_msg>Test to check switching off user signup<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from usersettings.shortcuts import get_current_usersettings
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from .settings import SettingsMixin
class SettingsTests(SettingsMixin, WebTest):
def test_signup_allowed(self):
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Password (again)')
self.assertNotContains(response, 'Sign Up Closed')
def test_signup_disabled(self):
user_settings = get_current_usersettings()
        user_settings.NEW_ACCOUNTS_ALLOWED = False
user_settings.save()
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Sign Up Closed')
self.assertNotContains(response, 'Password (again)')
|
Test to check switching off user signup# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from usersettings.shortcuts import get_current_usersettings
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from .settings import SettingsMixin
class SettingsTests(SettingsMixin, WebTest):
def test_signup_allowed(self):
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Password (again)')
self.assertNotContains(response, 'Sign Up Closed')
def test_signup_disabled(self):
user_settings = get_current_usersettings()
        user_settings.NEW_ACCOUNTS_ALLOWED = False
user_settings.save()
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Sign Up Closed')
self.assertNotContains(response, 'Password (again)')
|
<commit_before><commit_msg>Test to check switching off user signup<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from usersettings.shortcuts import get_current_usersettings
from django_webtest import WebTest
from django.core.urlresolvers import reverse
from .settings import SettingsMixin
class SettingsTests(SettingsMixin, WebTest):
def test_signup_allowed(self):
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Password (again)')
self.assertNotContains(response, 'Sign Up Closed')
def test_signup_disabled(self):
user_settings = get_current_usersettings()
        user_settings.NEW_ACCOUNTS_ALLOWED = False
user_settings.save()
settings_url = reverse(
'account_signup',
)
response = self.app.get(
settings_url,
expect_errors=True
)
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Sign Up Closed')
self.assertNotContains(response, 'Password (again)')
|
|
632fecc86b5b431e77cbb046268f17f7e2fa0b5a
|
web.py
|
web.py
|
from flask import Flask
from database_setup import User, session
app = Flask(__name__)
@app.route('/')
@app.route('/hello')
def HelloWorld():
users = session.query(User)
output = ''
for user in users:
output += user.username
        output += '<br/>'
return output
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000)
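A slightly tidier construction of the same response body (a sketch; behaviour is near-identical, only the trailing line break is dropped):
output = '<br/>'.join(user.username for user in users)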
|
Add a minimal Flask application
|
Add a minimal Flask application
|
Python
|
unknown
|
gregcowell/BAM,gregcowell/PFT,gregcowell/PFT,gregcowell/BAM
|
Add a minimal Flask application
|
from flask import Flask
from database_setup import User, session
app = Flask(__name__)
@app.route('/')
@app.route('/hello')
def HelloWorld():
users = session.query(User)
output = ''
for user in users:
output += user.username
        output += '<br/>'
return output
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
<commit_before><commit_msg>Add a minimal Flask application<commit_after>
|
from flask import Flask
from database_setup import User, session
app = Flask(__name__)
@app.route('/')
@app.route('/hello')
def HelloWorld():
users = session.query(User)
output = ''
for user in users:
output += user.username
        output += '<br/>'
return output
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
Add a minimal Flask applicationfrom flask import Flask
from database_setup import User, session
app = Flask(__name__)
@app.route('/')
@app.route('/hello')
def HelloWorld():
users = session.query(User)
output = ''
for user in users:
output += user.username
        output += '<br/>'
return output
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
<commit_before><commit_msg>Add a minimal Flask application<commit_after>from flask import Flask
from database_setup import User, session
app = Flask(__name__)
@app.route('/')
@app.route('/hello')
def HelloWorld():
users = session.query(User)
output = ''
for user in users:
output += user.username
        output += '<br/>'
return output
if __name__ == '__main__':
app.debug = True
app.run(host='0.0.0.0', port=5000)
|
|
b2415a08ca5e8688cb0e5e8ffe38d5b842171ecb
|
wrench/reftest-debugger.py
|
wrench/reftest-debugger.py
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import subprocess
with open('reftest.log', "w") as out:
try:
subprocess.check_call(['./headless.py', 'reftest'], stdout=out)
print("All tests passed.")
except subprocess.CalledProcessError as ex:
subprocess.check_call(['firefox', 'reftest-analyzer.xhtml#logurl=reftest.log'])
|
Add a script to run reftests and spawn reftest analyzer.
|
Add a script to run reftests and spawn reftest analyzer.
This probably only works on Linux, and will need some changes
to support running on Mac and Windows.
Fixes #1863.
|
Python
|
mpl-2.0
|
servo/webrender,servo/webrender,servo/webrender,servo/webrender,servo/webrender,servo/webrender
|
Add a script to run reftests and spawn reftest analyzer.
This probably only works on Linux, and will need some changes
to support running on Mac and Windows.
Fixes #1863.
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import subprocess
with open('reftest.log', "w") as out:
try:
subprocess.check_call(['./headless.py', 'reftest'], stdout=out)
print("All tests passed.")
except subprocess.CalledProcessError as ex:
subprocess.check_call(['firefox', 'reftest-analyzer.xhtml#logurl=reftest.log'])
|
<commit_before><commit_msg>Add a script to run reftests and spawn reftest analyzer.
This probably only works on Linux, and will need some changes
to support running on Mac and Windows.
Fixes #1863.<commit_after>
|
#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import subprocess
with open('reftest.log', "w") as out:
try:
subprocess.check_call(['./headless.py', 'reftest'], stdout=out)
print("All tests passed.")
except subprocess.CalledProcessError as ex:
subprocess.check_call(['firefox', 'reftest-analyzer.xhtml#logurl=reftest.log'])
|
Add a script to run reftests and spawn reftest analyzer.
This probably only works on Linux, and will need some changes
to support running on Mac and Windows.
Fixes #1863.#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import subprocess
with open('reftest.log', "w") as out:
try:
subprocess.check_call(['./headless.py', 'reftest'], stdout=out)
print("All tests passed.")
except subprocess.CalledProcessError as ex:
subprocess.check_call(['firefox', 'reftest-analyzer.xhtml#logurl=reftest.log'])
|
<commit_before><commit_msg>Add a script to run reftests and spawn reftest analyzer.
This probably only works on Linux, and will need some changes
to support running on Mac and Windows.
Fixes #1863.<commit_after>#!/usr/bin/python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import subprocess
with open('reftest.log', "w") as out:
try:
subprocess.check_call(['./headless.py', 'reftest'], stdout=out)
print("All tests passed.")
except subprocess.CalledProcessError as ex:
subprocess.check_call(['firefox', 'reftest-analyzer.xhtml#logurl=reftest.log'])
|
|
0f1b0dec702e314c4e891115b7b72adac7896402
|
src/ggrc/converters/__init__.py
|
src/ggrc/converters/__init__.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
from ggrc.utils import get_mapping_rules
def get_allowed_mappings():
""" get all mapping rules with lowercase names
import export is case insensitive so we use lower case names for all
comparisons.
"""
mapping_rules = get_mapping_rules()
for object_mappings in mapping_rules.values():
    object_mappings[:] = [rule.lower() for rule in object_mappings]  # bare map() discards its result
return mapping_rules
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
COLUMN_ORDER = (
"slug",
"title",
"description",
"test_plan",
"notes",
"owners",
"start_date",
"end_date",
"report_end_date",
"report_start_date",
"assertions",
"audit",
"categories",
"contact",
"control",
"design",
"directive_id",
"fraud_related",
"key_control",
"kind",
"link",
"means",
"network_zone",
"operationally",
"principal_assessor",
"private",
"program_id",
"secondary_assessor",
"secondary_contact",
"status",
"url",
"reference_url",
"_user_role_auditor",
"verify_frequency",
"name",
"email",
"is_enabled",
"company",
"_custom_attributes",
)
COLUMN_HANDLERS = {}
ALLOWED_MAPPINGS = get_allowed_mappings()
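A hypothetical lookup sketch showing how a sheet name from an imported CSV could be resolved against IMPORTABLE (the input string is invented):
object_name = "Control Assessment"
model = IMPORTABLE.get(object_name.strip().lower())  # -> ControlAssessment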
|
Add converters folder with init python file
|
Add converters folder with init python file
Add the folder that will contain all files related to import export. The
init file contains:
- Column order for csv files.
- Mapping rules that will be used for setting mapping columns
- List of all objects that are/should be importable.
|
Python
|
apache-2.0
|
edofic/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,VinnieJohns/ggrc-core,jmakov/ggrc-core,hasanalom/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,selahssea/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,NejcZupec/ggrc-core,AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,selahssea/ggrc-core,selahssea/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,hasanalom/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,NejcZupec/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core
|
Add converters folder with init python file
Add the folder that will contain all files related to import export. The
init file contains:
- Column order for csv files.
- Mapping rules that will be used for setting mapping columns
- List of all objects that are/should be importable.
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
from ggrc.utils import get_mapping_rules
def get_allowed_mappings():
""" get all mapping rules with lowercase names
import export is case insensitive so we use lower case names for all
comparisons.
"""
mapping_rules = get_mapping_rules()
for object_mappings in mapping_rules.values():
    object_mappings[:] = [rule.lower() for rule in object_mappings]  # bare map() discards its result
return mapping_rules
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
COLUMN_ORDER = (
"slug",
"title",
"description",
"test_plan",
"notes",
"owners",
"start_date",
"end_date",
"report_end_date",
"report_start_date",
"assertions",
"audit",
"categories",
"contact",
"control",
"design",
"directive_id",
"fraud_related",
"key_control",
"kind",
"link",
"means",
"network_zone",
"operationally",
"principal_assessor",
"private",
"program_id",
"secondary_assessor",
"secondary_contact",
"status",
"url",
"reference_url",
"_user_role_auditor",
"verify_frequency",
"name",
"email",
"is_enabled",
"company",
"_custom_attributes",
)
COLUMN_HANDLERS = {}
ALLOWED_MAPPINGS = get_allowed_mappings()
|
<commit_before><commit_msg>Add converters folder with init python file
Add the folder that will contain all files related to import export. The
init file contains:
- Column order for csv files.
- Mapping rules that will be used for setting mapping columns
- List of all objects that are/should be importable.<commit_after>
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
from ggrc.utils import get_mapping_rules
def get_allowed_mappings():
""" get all mapping rules with lowercase names
import export is case insensitive so we use lower case names for all
comparisons.
"""
mapping_rules = get_mapping_rules()
for object_mappings in mapping_rules.values():
    object_mappings[:] = [rule.lower() for rule in object_mappings]  # bare map() discards its result
return mapping_rules
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
COLUMN_ORDER = (
"slug",
"title",
"description",
"test_plan",
"notes",
"owners",
"start_date",
"end_date",
"report_end_date",
"report_start_date",
"assertions",
"audit",
"categories",
"contact",
"control",
"design",
"directive_id",
"fraud_related",
"key_control",
"kind",
"link",
"means",
"network_zone",
"operationally",
"principal_assessor",
"private",
"program_id",
"secondary_assessor",
"secondary_contact",
"status",
"url",
"reference_url",
"_user_role_auditor",
"verify_frequency",
"name",
"email",
"is_enabled",
"company",
"_custom_attributes",
)
COLUMN_HANDLERS = {}
ALLOWED_MAPPINGS = get_allowed_mappings()
|
Add converters folder with init python file
Add the folder that will contain all files related to import export. The
init file contains:
- Column order for csv files.
- Mapping rules that will be used for setting mapping columns
- List of all objects that are/should be importable.# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
from ggrc.utils import get_mapping_rules
def get_allowed_mappings():
""" get all mapping rules with lowercase names
import export is case insensitive so we use lower case names for all
comparisons.
"""
mapping_rules = get_mapping_rules()
for object_mappings in mapping_rules.values():
    object_mappings[:] = [rule.lower() for rule in object_mappings]  # bare map() discards its result
return mapping_rules
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
COLUMN_ORDER = (
"slug",
"title",
"description",
"test_plan",
"notes",
"owners",
"start_date",
"end_date",
"report_end_date",
"report_start_date",
"assertions",
"audit",
"categories",
"contact",
"control",
"design",
"directive_id",
"fraud_related",
"key_control",
"kind",
"link",
"means",
"network_zone",
"operationally",
"principal_assessor",
"private",
"program_id",
"secondary_assessor",
"secondary_contact",
"status",
"url",
"reference_url",
"_user_role_auditor",
"verify_frequency",
"name",
"email",
"is_enabled",
"company",
"_custom_attributes",
)
COLUMN_HANDLERS = {}
ALLOWED_MAPPINGS = get_allowed_mappings()
|
<commit_before><commit_msg>Add converters folder with init python file
Add the folder that will contain all files related to import export. The
init file contains:
- Column order for csv files.
- Mapping rules that will be used for setting mapping columns
- List of all objects that are/should be importable.<commit_after># Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
from ggrc.models import (
Audit, Control, ControlAssessment, DataAsset, Directive, Contract,
Policy, Regulation, Standard, Facility, Market, Objective, Option,
OrgGroup, Vendor, Person, Product, Program, Project, Request, Response,
Section, Clause, System, Process, Issue,
)
from ggrc.utils import get_mapping_rules
def get_allowed_mappings():
""" get all mapping rules with lowercase names
import export is case insensitive so we use lower case names for all
comparisons.
"""
mapping_rules = get_mapping_rules()
for key, object_mappings in mapping_rules.items():
mapping_rules[key] = [name.lower() for name in object_mappings]
return mapping_rules
IMPORTABLE = {
"audit": Audit,
"control": Control,
"control assessment": ControlAssessment,
"control_assessment": ControlAssessment,
"data asset": DataAsset,
"data_asset": DataAsset,
"directive": Directive,
"contract": Contract,
"policy": Policy,
"regulation": Regulation,
"standard": Standard,
"facility": Facility,
"market": Market,
"objective": Objective,
"option": Option,
"org group": OrgGroup,
"org_group": OrgGroup,
"vendor": Vendor,
"person": Person,
"product": Product,
"program": Program,
"project": Project,
"request": Request,
"response": Response,
"section": Section,
"clause": Clause,
"system": System,
"process": Process,
"issue": Issue,
}
COLUMN_ORDER = (
"slug",
"title",
"description",
"test_plan",
"notes",
"owners",
"start_date",
"end_date",
"report_end_date",
"report_start_date",
"assertions",
"audit",
"categories",
"contact",
"control",
"design",
"directive_id",
"fraud_related",
"key_control",
"kind",
"link",
"means",
"network_zone",
"operationally",
"principal_assessor",
"private",
"program_id",
"secondary_assessor",
"secondary_contact",
"status",
"url",
"reference_url",
"_user_role_auditor",
"verify_frequency",
"name",
"email",
"is_enabled",
"company",
"_custom_attributes",
)
COLUMN_HANDLERS = {}
ALLOWED_MAPPINGS = get_allowed_mappings()
|
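For context, the IMPORTABLE registry above is keyed by lower-cased names because import/export matches type names case-insensitively. A minimal sketch of such a lookup (the resolve_importable helper and its sample inputs are hypothetical, not part of the commit):

def resolve_importable(object_type, registry=IMPORTABLE):
    """Return the model class for a user-supplied type name, or None."""
    # Normalize the input the same way the registry keys are normalized.
    return registry.get(object_type.strip().lower())

resolve_importable("Org Group")  # -> OrgGroup
resolve_importable("Widget")     # -> None (not an importable type)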
|
9f7e1acdaa5a86c25c3d9f9bc6cd84a55b31b83f
|
speech/unit_tests/test_result.py
|
speech/unit_tests/test_result.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestResult(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.speech.result import Result
return Result
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
result = self._make_one([])
self.assertIsInstance(result, self._get_target_class())
def test_from_pb(self):
from google.cloud.proto.speech.v1beta1 import cloud_speech_pb2
confidence = 0.625
transcript = 'this is a test transcript'
alternative = cloud_speech_pb2.SpeechRecognitionAlternative(
transcript=transcript, confidence=confidence)
result_pb = cloud_speech_pb2.SpeechRecognitionResult(
alternatives=[alternative])
result = self._get_target_class().from_pb(result_pb)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
def test_from_api_repr(self):
confidence = 0.625
transcript = 'this is a test'
response = {
'alternatives': [
{
'confidence': confidence,
'transcript': transcript,
},
],
}
result = self._get_target_class().from_api_repr(response)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
|
Add test file for Result class.
|
Add test file for Result class.
|
Python
|
apache-2.0
|
jonparrott/gcloud-python,tseaver/gcloud-python,GoogleCloudPlatform/gcloud-python,calpeyser/google-cloud-python,tseaver/google-cloud-python,dhermes/google-cloud-python,dhermes/google-cloud-python,jonparrott/google-cloud-python,dhermes/gcloud-python,GoogleCloudPlatform/gcloud-python,tswast/google-cloud-python,googleapis/google-cloud-python,dhermes/gcloud-python,jonparrott/gcloud-python,daspecster/google-cloud-python,googleapis/google-cloud-python,tseaver/google-cloud-python,tseaver/google-cloud-python,tseaver/gcloud-python,tswast/google-cloud-python,daspecster/google-cloud-python,dhermes/google-cloud-python,tartavull/google-cloud-python,tartavull/google-cloud-python,tswast/google-cloud-python,calpeyser/google-cloud-python,jonparrott/google-cloud-python
|
Add test file for Result class.
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestResult(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.speech.result import Result
return Result
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
result = self._make_one([])
self.assertIsInstance(result, self._get_target_class())
def test_from_pb(self):
from google.cloud.proto.speech.v1beta1 import cloud_speech_pb2
confidence = 0.625
transcript = 'this is a test transcript'
alternative = cloud_speech_pb2.SpeechRecognitionAlternative(
transcript=transcript, confidence=confidence)
result_pb = cloud_speech_pb2.SpeechRecognitionResult(
alternatives=[alternative])
result = self._get_target_class().from_pb(result_pb)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
def test_from_api_repr(self):
confidence = 0.625
transcript = 'this is a test'
response = {
'alternatives': [
{
'confidence': confidence,
'transcript': transcript,
},
],
}
result = self._get_target_class().from_api_repr(response)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
|
<commit_before><commit_msg>Add test file for Result class.<commit_after>
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestResult(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.speech.result import Result
return Result
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
result = self._make_one([])
self.assertIsInstance(result, self._get_target_class())
def test_from_pb(self):
from google.cloud.proto.speech.v1beta1 import cloud_speech_pb2
confidence = 0.625
transcript = 'this is a test transcript'
alternative = cloud_speech_pb2.SpeechRecognitionAlternative(
transcript=transcript, confidence=confidence)
result_pb = cloud_speech_pb2.SpeechRecognitionResult(
alternatives=[alternative])
result = self._get_target_class().from_pb(result_pb)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
def test_from_api_repr(self):
confidence = 0.625
transcript = 'this is a test'
response = {
'alternatives': [
{
'confidence': confidence,
'transcript': transcript,
},
],
}
result = self._get_target_class().from_api_repr(response)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
|
Add test file for Result class.# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestResult(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.speech.result import Result
return Result
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
result = self._make_one([])
self.assertIsInstance(result, self._get_target_class())
def test_from_pb(self):
from google.cloud.proto.speech.v1beta1 import cloud_speech_pb2
confidence = 0.625
transcript = 'this is a test transcript'
alternative = cloud_speech_pb2.SpeechRecognitionAlternative(
transcript=transcript, confidence=confidence)
result_pb = cloud_speech_pb2.SpeechRecognitionResult(
alternatives=[alternative])
result = self._get_target_class().from_pb(result_pb)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
def test_from_api_repr(self):
confidence = 0.625
transcript = 'this is a test'
response = {
'alternatives': [
{
'confidence': confidence,
'transcript': transcript,
},
],
}
result = self._get_target_class().from_api_repr(response)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
|
<commit_before><commit_msg>Add test file for Result class.<commit_after># Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestResult(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.speech.result import Result
return Result
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_ctor(self):
result = self._make_one([])
self.assertIsInstance(result, self._get_target_class())
def test_from_pb(self):
from google.cloud.proto.speech.v1beta1 import cloud_speech_pb2
confidence = 0.625
transcript = 'this is a test transcript'
alternative = cloud_speech_pb2.SpeechRecognitionAlternative(
transcript=transcript, confidence=confidence)
result_pb = cloud_speech_pb2.SpeechRecognitionResult(
alternatives=[alternative])
result = self._get_target_class().from_pb(result_pb)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
def test_from_api_repr(self):
confidence = 0.625
transcript = 'this is a test'
response = {
'alternatives': [
{
'confidence': confidence,
'transcript': transcript,
},
],
}
result = self._get_target_class().from_api_repr(response)
self.assertEqual(result.confidence, confidence)
self.assertEqual(result.transcript, transcript)
|
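For context, the tests above exercise a first-alternative-wins convention when unwrapping a recognition response. A toy sketch of that pattern (_ToyResult is illustrative only; the real class lives in google.cloud.speech.result):

class _ToyResult(object):
    def __init__(self, alternatives):
        self.alternatives = alternatives

    @classmethod
    def from_api_repr(cls, response):
        # Alternatives arrive ordered by confidence; keep them all and
        # surface the best one through convenience properties.
        return cls(response['alternatives'])

    @property
    def transcript(self):
        return self.alternatives[0]['transcript']

    @property
    def confidence(self):
        return self.alternatives[0]['confidence']

result = _ToyResult.from_api_repr(
    {'alternatives': [{'confidence': 0.625, 'transcript': 'this is a test'}]})
assert result.transcript == 'this is a test'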
|
114f9c1c887a4fc6f92165edf8f62576687e314b
|
scripts/find_nested_projects.py
|
scripts/find_nested_projects.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper to get a list of all projects that are nested within another project."""
from website.project.model import Node
from modularodm import Q
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
def find_nested_projects():
return [node for node in Node.find()
if node.category == 'project'
and node.parent_node is not None]
class TestFindNestedProjects(OsfTestCase):
def test_find_nested(self):
project = ProjectFactory.build()
nested_project = ProjectFactory()
project.nodes.append(nested_project)
project.save()
result = find_nested_projects()
assert nested_project in result
assert project not in result
def test_unnested_project(self):
project = ProjectFactory()
assert project not in find_nested_projects()
def main():
result = find_nested_projects()
print('Number of nested projects: {0}'.format(len(result)))
if __name__ == '__main__':
main()
|
Add script to get nested projects
|
Add script to get nested projects
|
Python
|
apache-2.0
|
laurenrevere/osf.io,ckc6cz/osf.io,petermalcolm/osf.io,baylee-d/osf.io,zachjanicki/osf.io,chrisseto/osf.io,petermalcolm/osf.io,petermalcolm/osf.io,ckc6cz/osf.io,cwisecarver/osf.io,RomanZWang/osf.io,cslzchen/osf.io,TomHeatwole/osf.io,himanshuo/osf.io,jeffreyliu3230/osf.io,lyndsysimon/osf.io,amyshi188/osf.io,samchrisinger/osf.io,alexschiller/osf.io,HarryRybacki/osf.io,brianjgeiger/osf.io,CenterForOpenScience/osf.io,abought/osf.io,asanfilippo7/osf.io,TomHeatwole/osf.io,mfraezz/osf.io,baylee-d/osf.io,icereval/osf.io,GageGaskins/osf.io,chrisseto/osf.io,sloria/osf.io,rdhyee/osf.io,jinluyuan/osf.io,njantrania/osf.io,kch8qx/osf.io,cldershem/osf.io,kwierman/osf.io,GageGaskins/osf.io,danielneis/osf.io,revanthkolli/osf.io,acshi/osf.io,zkraime/osf.io,ticklemepierce/osf.io,HarryRybacki/osf.io,cwisecarver/osf.io,GaryKriebel/osf.io,chennan47/osf.io,Ghalko/osf.io,caseyrygt/osf.io,wearpants/osf.io,fabianvf/osf.io,jnayak1/osf.io,zachjanicki/osf.io,acshi/osf.io,jnayak1/osf.io,samanehsan/osf.io,binoculars/osf.io,leb2dg/osf.io,barbour-em/osf.io,kwierman/osf.io,caseyrygt/osf.io,jeffreyliu3230/osf.io,crcresearch/osf.io,GageGaskins/osf.io,sloria/osf.io,rdhyee/osf.io,KAsante95/osf.io,brianjgeiger/osf.io,MerlinZhang/osf.io,billyhunt/osf.io,chrisseto/osf.io,Johnetordoff/osf.io,arpitar/osf.io,Nesiehr/osf.io,haoyuchen1992/osf.io,MerlinZhang/osf.io,DanielSBrown/osf.io,samchrisinger/osf.io,aaxelb/osf.io,doublebits/osf.io,chrisseto/osf.io,TomBaxter/osf.io,jinluyuan/osf.io,reinaH/osf.io,acshi/osf.io,adlius/osf.io,kch8qx/osf.io,Ghalko/osf.io,pattisdr/osf.io,felliott/osf.io,mluke93/osf.io,barbour-em/osf.io,billyhunt/osf.io,SSJohns/osf.io,revanthkolli/osf.io,lyndsysimon/osf.io,zamattiac/osf.io,himanshuo/osf.io,lyndsysimon/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,fabianvf/osf.io,monikagrabowska/osf.io,haoyuchen1992/osf.io,crcresearch/osf.io,aaxelb/osf.io,bdyetton/prettychart,mattclark/osf.io,icereval/osf.io,kushG/osf.io,jmcarp/osf.io,amyshi188/osf.io,reinaH/osf.io,jolene-esposito/osf.io,danielneis/osf.io,mluo613/osf.io,DanielSBrown/osf.io,brandonPurvis/osf.io,caseyrollins/osf.io,acshi/osf.io,asanfilippo7/osf.io,wearpants/osf.io,dplorimer/osf,alexschiller/osf.io,mluo613/osf.io,wearpants/osf.io,Nesiehr/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,SSJohns/osf.io,Nesiehr/osf.io,emetsger/osf.io,HarryRybacki/osf.io,erinspace/osf.io,laurenrevere/osf.io,cosenal/osf.io,jnayak1/osf.io,abought/osf.io,Nesiehr/osf.io,caseyrollins/osf.io,pattisdr/osf.io,aaxelb/osf.io,barbour-em/osf.io,jolene-esposito/osf.io,adlius/osf.io,jolene-esposito/osf.io,ckc6cz/osf.io,lamdnhan/osf.io,DanielSBrown/osf.io,ZobairAlijan/osf.io,reinaH/osf.io,brandonPurvis/osf.io,caneruguz/osf.io,HalcyonChimera/osf.io,monikagrabowska/osf.io,rdhyee/osf.io,RomanZWang/osf.io,jinluyuan/osf.io,bdyetton/prettychart,mattclark/osf.io,TomBaxter/osf.io,doublebits/osf.io,HalcyonChimera/osf.io,adlius/osf.io,mfraezz/osf.io,danielneis/osf.io,bdyetton/prettychart,Johnetordoff/osf.io,Johnetordoff/osf.io,monikagrabowska/osf.io,emetsger/osf.io,alexschiller/osf.io,mluke93/osf.io,ticklemepierce/osf.io,cosenal/osf.io,zamattiac/osf.io,erinspace/osf.io,mluo613/osf.io,ZobairAlijan/osf.io,pattisdr/osf.io,SSJohns/osf.io,arpitar/osf.io,jnayak1/osf.io,hmoco/osf.io,hmoco/osf.io,zkraime/osf.io,CenterForOpenScience/osf.io,doublebits/osf.io,njantrania/osf.io,samchrisinger/osf.io,bdyetton/prettychart,MerlinZhang/osf.io,jmcarp/osf.io,ckc6cz/osf.io,brandonPurvis/osf.io,ticklemepierce/osf.io,doublebits/osf.io,danielneis/osf.io,sbt9uc/osf.io,AndrewSallans/osf.io,felliott/osf.io,zamattiac/osf.io,sam
anehsan/osf.io,HalcyonChimera/osf.io,GageGaskins/osf.io,revanthkolli/osf.io,sloria/osf.io,zkraime/osf.io,GageGaskins/osf.io,sbt9uc/osf.io,billyhunt/osf.io,GaryKriebel/osf.io,cosenal/osf.io,crcresearch/osf.io,jmcarp/osf.io,kushG/osf.io,baylee-d/osf.io,samchrisinger/osf.io,himanshuo/osf.io,zachjanicki/osf.io,MerlinZhang/osf.io,arpitar/osf.io,brandonPurvis/osf.io,abought/osf.io,TomHeatwole/osf.io,billyhunt/osf.io,emetsger/osf.io,petermalcolm/osf.io,kwierman/osf.io,brandonPurvis/osf.io,DanielSBrown/osf.io,ZobairAlijan/osf.io,jolene-esposito/osf.io,barbour-em/osf.io,sbt9uc/osf.io,hmoco/osf.io,mfraezz/osf.io,erinspace/osf.io,SSJohns/osf.io,RomanZWang/osf.io,Johnetordoff/osf.io,zachjanicki/osf.io,Ghalko/osf.io,haoyuchen1992/osf.io,acshi/osf.io,ticklemepierce/osf.io,mluo613/osf.io,kch8qx/osf.io,binoculars/osf.io,wearpants/osf.io,alexschiller/osf.io,mfraezz/osf.io,dplorimer/osf,billyhunt/osf.io,CenterForOpenScience/osf.io,Ghalko/osf.io,GaryKriebel/osf.io,jeffreyliu3230/osf.io,abought/osf.io,cldershem/osf.io,leb2dg/osf.io,samanehsan/osf.io,chennan47/osf.io,rdhyee/osf.io,TomBaxter/osf.io,lamdnhan/osf.io,caseyrygt/osf.io,amyshi188/osf.io,emetsger/osf.io,himanshuo/osf.io,cslzchen/osf.io,kch8qx/osf.io,dplorimer/osf,zamattiac/osf.io,dplorimer/osf,hmoco/osf.io,lamdnhan/osf.io,kushG/osf.io,njantrania/osf.io,ZobairAlijan/osf.io,zkraime/osf.io,chennan47/osf.io,caneruguz/osf.io,cslzchen/osf.io,caseyrollins/osf.io,saradbowman/osf.io,reinaH/osf.io,kwierman/osf.io,KAsante95/osf.io,mluo613/osf.io,kushG/osf.io,KAsante95/osf.io,sbt9uc/osf.io,cldershem/osf.io,jinluyuan/osf.io,felliott/osf.io,laurenrevere/osf.io,alexschiller/osf.io,leb2dg/osf.io,felliott/osf.io,cosenal/osf.io,cldershem/osf.io,asanfilippo7/osf.io,RomanZWang/osf.io,GaryKriebel/osf.io,samanehsan/osf.io,mluke93/osf.io,cwisecarver/osf.io,jeffreyliu3230/osf.io,icereval/osf.io,brianjgeiger/osf.io,KAsante95/osf.io,adlius/osf.io,monikagrabowska/osf.io,binoculars/osf.io,brianjgeiger/osf.io,caseyrygt/osf.io,HarryRybacki/osf.io,revanthkolli/osf.io,KAsante95/osf.io,cslzchen/osf.io,amyshi188/osf.io,mluke93/osf.io,asanfilippo7/osf.io,doublebits/osf.io,haoyuchen1992/osf.io,fabianvf/osf.io,leb2dg/osf.io,arpitar/osf.io,jmcarp/osf.io,fabianvf/osf.io,RomanZWang/osf.io,lyndsysimon/osf.io,aaxelb/osf.io,mattclark/osf.io,kch8qx/osf.io,AndrewSallans/osf.io,TomHeatwole/osf.io,cwisecarver/osf.io,lamdnhan/osf.io,saradbowman/osf.io,njantrania/osf.io,HalcyonChimera/osf.io
|
Add script to get nested projects
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper to get a list of all projects that are nested within another project."""
from website.project.model import Node
from modularodm import Q
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
def find_nested_projects():
return [node for node in Node.find()
if node.category == 'project'
and node.parent_node is not None]
class TestFindNestedProjects(OsfTestCase):
def test_find_nested(self):
project = ProjectFactory.build()
nested_project = ProjectFactory()
project.nodes.append(nested_project)
project.save()
result = find_nested_projects()
assert nested_project in result
assert project not in result
def test_unnested_project(self):
project = ProjectFactory()
assert project not in find_nested_projects()
def main():
result = find_nested_projects()
print('Number of nested projects: {0}'.format(len(result)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to get nested projects<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper to get a list of all projects that are nested within another project."""
from website.project.model import Node
from modularodm import Q
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
def find_nested_projects():
return [node for node in Node.find()
if node.category == 'project'
and node.parent_node is not None]
class TestFindNestedProjects(OsfTestCase):
def test_find_nested(self):
project = ProjectFactory.build()
nested_project = ProjectFactory()
project.nodes.append(nested_project)
project.save()
result = find_nested_projects()
assert nested_project in result
assert project not in result
def test_unnested_project(self):
project = ProjectFactory()
assert project not in find_nested_projects()
def main():
result = find_nested_projects()
print('Number of nested projects: {0}'.format(len(result)))
if __name__ == '__main__':
main()
|
Add script to get nested projects#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper to get a list of all projects that are nested within another project."""
from website.project.model import Node
from modularodm import Q
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
def find_nested_projects():
return [node for node in Node.find()
if node.category == 'project'
and node.parent_node is not None]
class TestFindNestedProjects(OsfTestCase):
def test_find_nested(self):
project = ProjectFactory.build()
nested_project = ProjectFactory()
project.nodes.append(nested_project)
project.save()
result = find_nested_projects()
assert nested_project in result
assert project not in result
def test_unnested_project(self):
project = ProjectFactory()
assert project not in find_nested_projects()
def main():
result = find_nested_projects()
print('Number of nested projects: {0}'.format(len(result)))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script to get nested projects<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Helper to get a list of all projects that are nested within another project."""
from website.project.model import Node
from modularodm import Q
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
def find_nested_projects():
return [node for node in Node.find()
if node.category == 'project'
and node.parent_node is not None]
class TestFindNestedProjects(OsfTestCase):
def test_find_nested(self):
project = ProjectFactory.build()
nested_project = ProjectFactory()
project.nodes.append(nested_project)
project.save()
result = find_nested_projects()
assert nested_project in result
assert project not in result
def test_unnested_project(self):
project = ProjectFactory()
assert project not in find_nested_projects()
def main():
result = find_nested_projects()
print('Number of nested projects: {0}'.format(len(result)))
if __name__ == '__main__':
main()
|
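For context, the nesting predicate here is independent of the ODM, so it can be checked against plain objects. A self-contained sketch (SimpleNode and the sample data are made up for illustration):

class SimpleNode(object):
    def __init__(self, category, parent_node=None):
        self.category = category
        self.parent_node = parent_node

top = SimpleNode('project')
nested = SimpleNode('project', parent_node=top)
component = SimpleNode('hypothesis', parent_node=top)

# Same predicate as find_nested_projects(): a project that has a parent.
nested_projects = [n for n in (top, nested, component)
                   if n.category == 'project' and n.parent_node is not None]
assert nested_projects == [nested]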
|
a6d3b4b8a322a2642a575fe7ab81eee4d3ac40a7
|
scripts/update_rows_from_csv.py
|
scripts/update_rows_from_csv.py
|
# The purpose of this script to update database with a CSV
import os
import sys
import psycopg2
import dotenv
# Load environment variable from a .env file
dotenv.load_dotenv('.env')
csv_path = sys.argv[1]
postgres_url = os.getenv('DATABASE_URL')[11:]
user_tokens = postgres_url.split(':')
user_name = user_tokens[0]
password_tokens = user_tokens[1].split('@')
password = password_tokens[0]
db_tokens = password_tokens[1].split('/')
db_url = db_tokens[0]
db_name = db_tokens[1].split('?')[0]
connection = None
rows = open(csv_path).read().split('\n')
id_column, status_column = rows[0].split(',')
try:
connection = psycopg2.connect(host=db_url, dbname=db_name, user=user_name, password=password)
cursor = connection.cursor()
print 'Starting DB connection...'
for current_row in rows[1:]:
if not current_row.strip():
continue  # skip the trailing blank line left by split('\n')
user_id, status_value = current_row.split(',')
sql_command = """
UPDATE users
SET {} = {}
WHERE users.id = {};
""".format(status_column, status_value.upper(), user_id)
cursor.execute(sql_command)
print 'Updating user_id={} column={} value={}'.format(user_id, status_column, status_value)
connection.commit()
except psycopg2.DatabaseError:
if connection:
print 'Rolling back...'
connection.rollback()
finally:
if connection:
print 'Closing DB connection...'
connection.close()
|
Add a script to update users with a CSV
|
Add a script to update users with a CSV
|
Python
|
agpl-3.0
|
rohitdatta/pepper,rohitdatta/pepper,rohitdatta/pepper
|
Add a script to update users with a CSV
|
# The purpose of this script to update database with a CSV
import os
import sys
import psycopg2
import dotenv
# Load environment variable from a .env file
dotenv.load_dotenv('.env')
csv_path = sys.argv[1]
postgres_url = os.getenv('DATABASE_URL')[11:]
user_tokens = postgres_url.split(':')
user_name = user_tokens[0]
password_tokens = user_tokens[1].split('@')
password = password_tokens[0]
db_tokens = password_tokens[1].split('/')
db_url = db_tokens[0]
db_name = db_tokens[1].split('?')[0]
connection = None
rows = open(csv_path).read().split('\n')
id_column, status_column = rows[0].split(',')
try:
connection = psycopg2.connect(host=db_url, dbname=db_name, user=user_name, password=password)
cursor = connection.cursor()
print 'Starting DB connection...'
for current_row in rows[1:]:
if not current_row.strip():
continue  # skip the trailing blank line left by split('\n')
user_id, status_value = current_row.split(',')
sql_command = """
UPDATE users
SET {} = {}
WHERE users.id = {};
""".format(status_column, status_value.upper(), user_id)
cursor.execute(sql_command)
print 'Updating user_id={} column={} value={}'.format(user_id, status_column, status_value)
connection.commit()
except psycopg2.DatabaseError:
if connection:
print 'Rolling back...'
connection.rollback()
finally:
if connection:
print 'Closing DB connection...'
connection.close()
|
<commit_before><commit_msg>Add a script to update users with a CSV<commit_after>
|
# The purpose of this script to update database with a CSV
import os
import sys
import psycopg2
import dotenv
# Load environment variable from a .env file
dotenv.load_dotenv('.env')
csv_path = sys.argv[1]
postgres_url = os.getenv('DATABASE_URL')[11:]
user_tokens = postgres_url.split(':')
user_name = user_tokens[0]
password_tokens = user_tokens[1].split('@')
password = password_tokens[0]
db_tokens = password_tokens[1].split('/')
db_url = db_tokens[0]
db_name = db_tokens[1].split('?')[0]
connection = None
rows = open(csv_path).read().split('\n')
id_column, status_column = rows[0].split(',')
try:
connection = psycopg2.connect(host=db_url, dbname=db_name, user=user_name, password=password)
cursor = connection.cursor()
print 'Starting DB connection...'
for current_row in rows[1:]:
if not current_row.strip():
continue  # skip the trailing blank line left by split('\n')
user_id, status_value = current_row.split(',')
sql_command = """
UPDATE users
SET {} = {}
WHERE users.id = {};
""".format(status_column, status_value.upper(), user_id)
cursor.execute(sql_command)
print 'Updating user_id={} column={} value={}'.format(user_id, status_column, status_value)
connection.commit()
except psycopg2.DatabaseError:
if connection:
print 'Rolling back...'
connection.rollback()
finally:
if connection:
print 'Closing DB connection...'
connection.close()
|
Add a script to update users with a CSV# The purpose of this script to update database with a CSV
import os
import sys
import psycopg2
import dotenv
# Load environment variable from a .env file
dotenv.load_dotenv('.env')
csv_path = sys.argv[1]
postgres_url = os.getenv('DATABASE_URL')[11:]
user_tokens = postgres_url.split(':')
user_name = user_tokens[0]
password_tokens = user_tokens[1].split('@')
password = password_tokens[0]
db_tokens = password_tokens[1].split('/')
db_url = db_tokens[0]
db_name = db_tokens[1].split('?')[0]
connection = None
rows = open(csv_path).read().split('\n')
id_column, status_column = rows[0].split(',')
try:
connection = psycopg2.connect(host=db_url, dbname=db_name, user=user_name, password=password)
cursor = connection.cursor()
print 'Starting DB connection...'
for current_row in rows[1:]:
if not current_row.strip():
continue  # skip the trailing blank line left by split('\n')
user_id, status_value = current_row.split(',')
sql_command = """
UPDATE users
SET {} = {}
WHERE users.id = {};
""".format(status_column, status_value.upper(), user_id)
cursor.execute(sql_command)
print 'Updating user_id={} column={} value={}'.format(user_id, status_column, status_value)
connection.commit()
except psycopg2.DatabaseError:
if connection:
print 'Rolling back...'
connection.rollback()
finally:
if connection:
print 'Closing DB connection...'
connection.close()
|
<commit_before><commit_msg>Add a script to update users with a CSV<commit_after># The purpose of this script to update database with a CSV
import os
import sys
import psycopg2
import dotenv
# Load environment variable from a .env file
dotenv.load_dotenv('.env')
csv_path = sys.argv[1]
postgres_url = os.getenv('DATABASE_URL')[11:]
user_tokens = postgres_url.split(':')
user_name = user_tokens[0]
password_tokens = user_tokens[1].split('@')
password = password_tokens[0]
db_tokens = password_tokens[1].split('/')
db_url = db_tokens[0]
db_name = db_tokens[1].split('?')[0]
connection = None
rows = open(csv_path).read().split('\n')
id_column, status_column = rows[0].split(',')
try:
connection = psycopg2.connect(host=db_url, dbname=db_name, user=user_name, password=password)
cursor = connection.cursor()
print 'Starting DB connection...'
for current_row in rows[1:]:
if not current_row.strip():
continue  # skip the trailing blank line left by split('\n')
user_id, status_value = current_row.split(',')
sql_command = """
UPDATE users
SET {} = {}
WHERE users.id = {};
""".format(status_column, status_value.upper(), user_id)
cursor.execute(sql_command)
print 'Updating user_id={} column={} value={}'.format(user_id, status_column, status_value)
connection.commit()
except psycopg2.DatabaseError:
if connection:
print 'Rolling back...'
connection.rollback()
finally:
if connection:
print 'Closing DB connection...'
connection.close()
|
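For context, the script above formats CSV values straight into the SQL string. A safer variant passes values as bind parameters; a sketch under the assumption that column names are validated against a whitelist, since placeholders cannot name columns (update_user_column and ALLOWED_COLUMNS are hypothetical):

ALLOWED_COLUMNS = {'status'}  # hypothetical whitelist of updatable columns

def update_user_column(cursor, column, value, user_id):
    if column not in ALLOWED_COLUMNS:
        raise ValueError('unexpected column: {}'.format(column))
    # Values travel as bind parameters, so quoting and injection are
    # handled by the psycopg2 driver instead of by string formatting.
    cursor.execute(
        'UPDATE users SET {} = %s WHERE users.id = %s'.format(column),
        (value.upper(), user_id))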
|
c4541bc0947cd6f5fd81ed1750de3d7046184d68
|
beta_plotting.py
|
beta_plotting.py
|
#!/usr/bin/env python
import numpy as np
#import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import castep_isotope_sub
def plot_beta(Ts, betas):
Tsm1 = 1E6/(Ts**2.0)
fig, ax1 = plt.subplots()
ax1.plot(Tsm1, betas, "b-")
ax1.set_ylabel("1000 * ln beta")
ax1.set_xlabel("1000000 / T^2")
x1locs, x1labels = plt.xticks()
ax2 = ax1.twiny()
#ax2.plot(Ts, betas, "b-")
ax2.set_xlabel(" T")
x2vals = []
x2locs = []
for xloc in x1locs:
thisval = np.sqrt((1.0/(xloc/1E6)))
if thisval != float("inf"):
x2vals.append("{:4.0f}".format(thisval))
x2locs.append(xloc)
plt.xticks(x2locs, x2vals)
print x1locs
print x1labels
plt.show()
if __name__ == "__main__":
import sys
seedname = sys.argv[1]
(popt, pconv) = castep_isotope_sub.fit_beta_func(seedname)
Ts = np.linspace(300.0, 4000.0, num=40)
betas = castep_isotope_sub.beta_function(Ts, popt[0], popt[1], popt[2])
print Ts
print betas
plot_beta(Ts, betas)
|
Add tool to plot beta
|
Add tool to plot beta
|
Python
|
bsd-3-clause
|
andreww/isofrac
|
Add tool to plot beta
|
#!/usr/bin/env python
import numpy as np
#import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import castep_isotope_sub
def plot_beta(Ts, betas):
Tsm1 = 1E6/(Ts**2.0)
fig, ax1 = plt.subplots()
ax1.plot(Tsm1, betas, "b-")
ax1.set_ylabel("1000 * ln beta")
ax1.set_xlabel("1000000 / T^2")
x1locs, x1labels = plt.xticks()
ax2 = ax1.twiny()
#ax2.plot(Ts, betas, "b-")
ax2.set_xlabel(" T")
x2vals = []
x2locs = []
for xloc in x1locs:
thisval = np.sqrt((1.0/(xloc/1E6)))
if thisval != float("inf"):
x2vals.append("{:4.0f}".format(thisval))
x2locs.append(xloc)
plt.xticks(x2locs, x2vals)
print x1locs
print x1labels
plt.show()
if __name__ == "__main__":
import sys
seedname = sys.argv[1]
(popt, pconv) = castep_isotope_sub.fit_beta_func(seedname)
Ts = np.linspace(300.0, 4000.0, num=40)
betas = castep_isotope_sub.beta_function(Ts, popt[0], popt[1], popt[2])
print Ts
print betas
plot_beta(Ts, betas)
|
<commit_before><commit_msg>Add tool to plot beta<commit_after>
|
#!/usr/bin/env python
import numpy as np
#import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import castep_isotope_sub
def plot_beta(Ts, betas):
Tsm1 = 1E6/(Ts**2.0)
fig, ax1 = plt.subplots()
ax1.plot(Tsm1, betas, "b-")
ax1.set_ylabel("1000 * ln beta")
ax1.set_xlabel("1000000 / T^2")
x1locs, x1labels = plt.xticks()
ax2 = ax1.twiny()
#ax2.plot(Ts, betas, "b-")
ax2.set_xlabel(" T")
x2vals = []
x2locs = []
for xloc in x1locs:
thisval = np.sqrt((1.0/(xloc/1E6)))
if thisval != float("inf"):
x2vals.append("{:4.0f}".format(thisval))
x2locs.append(xloc)
plt.xticks(x2locs, x2vals)
print x1locs
print x1labels
plt.show()
if __name__ == "__main__":
import sys
seedname = sys.argv[1]
(popt, pconv) = castep_isotope_sub.fit_beta_func(seedname)
Ts = np.linspace(300.0, 4000.0, num=40)
betas = castep_isotope_sub.beta_function(Ts, popt[0], popt[1], popt[2])
print Ts
print betas
plot_beta(Ts, betas)
|
Add tool to plot beta#!/usr/bin/env python
import numpy as np
#import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import castep_isotope_sub
def plot_beta(Ts, betas):
Tsm1 = 1E6/(Ts**2.0)
fig, ax1 = plt.subplots()
ax1.plot(Tsm1, betas, "b-")
ax1.set_ylabel("1000 * ln beta")
ax1.set_xlabel("1000000 / T^2")
x1locs, x1labels = plt.xticks()
ax2 = ax1.twiny()
#ax2.plot(Ts, betas, "b-")
ax2.set_xlabel(" T")
x2vals = []
x2locs = []
for xloc in x1locs:
thisval = np.sqrt((1.0/(xloc/1E6)))
if thisval != float("inf"):
x2vals.append("{:4.0f}".format(thisval))
x2locs.append(xloc)
plt.xticks(x2locs, x2vals)
print x1locs
print x1labels
plt.show()
if __name__ == "__main__":
import sys
seedname = sys.argv[1]
(popt, pconv) = castep_isotope_sub.fit_beta_func(seedname)
Ts = np.linspace(300.0, 4000.0, num=40)
betas = castep_isotope_sub.beta_function(Ts, popt[0], popt[1], popt[2])
print Ts
print betas
plot_beta(Ts, betas)
|
<commit_before><commit_msg>Add tool to plot beta<commit_after>#!/usr/bin/env python
import numpy as np
#import matplotlib
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import castep_isotope_sub
def plot_beta(Ts, betas):
Tsm1 = 1E6/(Ts**2.0)
fig, ax1 = plt.subplots()
ax1.plot(Tsm1, betas, "b-")
ax1.set_ylabel("1000 * ln beta")
ax1.set_xlabel("1000000 / T^2")
x1locs, x1labels = plt.xticks()
ax2 = ax1.twiny()
#ax2.plot(Ts, betas, "b-")
ax2.set_xlabel(" T")
x2vals = []
x2locs = []
for xloc in x1locs:
thisval = np.sqrt((1.0/(xloc/1E6)))
if thisval != float("inf"):
x2vals.append("{:4.0f}".format(thisval))
x2locs.append(xloc)
plt.xticks(x2locs, x2vals)
print x1locs
print x1labels
plt.show()
if __name__ == "__main__":
import sys
seedname = sys.argv[1]
(popt, pconv) = castep_isotope_sub.fit_beta_func(seedname)
Ts = np.linspace(300.0, 4000.0, num=40)
betas = castep_isotope_sub.beta_function(Ts, popt[0], popt[1], popt[2])
print Ts
print betas
plot_beta(Ts, betas)
|
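For context, the twin-axis trick above relabels the top axis by inverting x = 1e6 / T**2 back to T = sqrt(1e6 / x). A standalone check of that round trip (the sample temperatures are arbitrary):

import numpy as np

Ts = np.array([300.0, 1000.0, 4000.0])
x = 1e6 / Ts**2            # bottom-axis coordinate, as in plot_beta
T_back = np.sqrt(1e6 / x)  # label the top axis prints at the same tick
assert np.allclose(T_back, Ts)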
|
e1bb23fa54e8ec3fc022d55bbc4c6a954f5b76c3
|
dpmm.py
|
dpmm.py
|
""" Going to try algorithm 1 from Neal (2000)
"""
import numpy as np
import bisect
def pick_discrete(p):
"""Pick a discrete integer between 0 and len(p) - 1 with probability given by p array."""
c = np.cumsum(p)
u = np.random.uniform()
return bisect.bisect_left(c, u)
class DPMM(object):
"""Dirichlet Process Mixture Model.
@param conjugate_prior The conjugate_prior object for whatever model is being inferred.
@param alpha Concentration parameter.
@param D Data.
@param theta Optional initial state for sampler. Will be drawn from conjugate_prior
if not specified.
"""
def __init__(self, conjugate_prior, alpha, D, theta=None):
self.conjugate_prior = conjugate_prior
self.alpha = alpha
self.D = D # data
self.n = len(self.D)
if theta is None:
# Draw from the prior
theta = conjugate_prior.sample(size=len(D))
self.theta = theta
# Initialize r_i array
self.r_i = self.alpha * np.array([conjugate_prior.pred(x) for x in D])
def q(self, i):
# compute and return row of q_ij matrix (we only ever need one row at a time).
qs = np.array([self.conjugate_prior.like1(th_j, x=self.D[i]) for th_j in self.theta])
qs[i] = self.r_i[i] # cheat by placing r_i at q_ii.
return qs
def update_1_theta(self, i):
x = self.D[i]
qs = self.q(i)
p = qs/np.sum(qs)
picked = pick_discrete(p)
if picked == i: # This corresponds to picking r_i in Neal (2000); i.e. get a new theta
# Neal (2000) H_i is the posterior given a single observation x.
self.theta[i] = self.conjugate_prior.post(x).sample()
else: # reuse an existing theta
self.theta[i] = self.theta[picked]
def update_theta(self, n=1):
for j in xrange(n):
for i in xrange(len(self.D)):
self.update_1_theta(i)
|
Create Dirichlet Process Mixture Model class.
|
Create Dirichlet Process Mixture Model class.
|
Python
|
bsd-2-clause
|
jmeyers314/DPMM
|
Create Dirichlet Process Mixture Model class.
|
""" Going to try algorithm 1 from Neal (2000)
"""
import numpy as np
import bisect
def pick_discrete(p):
"""Pick a discrete integer between 0 and len(p) - 1 with probability given by p array."""
c = np.cumsum(p)
u = np.random.uniform()
return bisect.bisect_left(c, u)
class DPMM(object):
"""Dirichlet Process Mixture Model.
@param conjugate_prior The conjugate_prior object for whatever model is being inferred.
@param alpha Concentration parameter.
@param D Data.
@param theta Optional initial state for sampler. Will be drawn from conjugate_prior
if not specified.
"""
def __init__(self, conjugate_prior, alpha, D, theta=None):
self.conjugate_prior = conjugate_prior
self.alpha = alpha
self.D = D # data
self.n = len(self.D)
if theta is None:
# Draw from the prior
theta = conjugate_prior.sample(size=len(D))
self.theta = theta
# Initialize r_i array
self.r_i = self.alpha * np.array([conjugate_prior.pred(x) for x in D])
def q(self, i):
# compute and return row of q_ij matrix (we only ever need one row at a time).
qs = np.array([self.conjugate_prior.like1(th_j, x=self.D[i]) for th_j in self.theta])
qs[i] = self.r_i[i] # cheat by placing r_i at q_ii.
return qs
def update_1_theta(self, i):
x = self.D[i]
qs = self.q(i)
p = qs/np.sum(qs)
picked = pick_discrete(p)
if picked == i: # This corresponds to picking r_i in Neal (2000); i.e. get a new theta
# Neal (2000) H_i is the posterior given a single observation x.
self.theta[i] = self.conjugate_prior.post(x).sample()
else: # reuse an existing theta
self.theta[i] = self.theta[picked]
def update_theta(self, n=1):
for j in xrange(n):
for i in xrange(len(self.D)):
self.update_1_theta(i)
|
<commit_before><commit_msg>Create Dirichlet Process Mixture Model class.<commit_after>
|
""" Going to try algorithm 1 from Neal (2000)
"""
import numpy as np
import bisect
def pick_discrete(p):
"""Pick a discrete integer between 0 and len(p) - 1 with probability given by p array."""
c = np.cumsum(p)
u = np.random.uniform()
return bisect.bisect_left(c, u)
class DPMM(object):
"""Dirichlet Process Mixture Model.
@param conjugate_prior The conjugate_prior object for whatever model is being inferred.
@param alpha Concentration parameter.
@param D Data.
@param theta Optional initial state for sampler. Will be drawn from conjugate_prior
if not specified.
"""
def __init__(self, conjugate_prior, alpha, D, theta=None):
self.conjugate_prior = conjugate_prior
self.alpha = alpha
self.D = D # data
self.n = len(self.D)
if theta is None:
# Draw from the prior
theta = conjugate_prior.sample(size=len(D))
self.theta = theta
# Initialize r_i array
self.r_i = self.alpha * np.array([conjugate_prior.pred(x) for x in D])
def q(self, i):
# compute and return row of q_ij matrix (we only ever need one row at a time).
qs = np.array([self.conjugate_prior.like1(th_j, x=self.D[i]) for th_j in self.theta])
qs[i] = self.r_i[i] # cheat by placing r_i at q_ii.
return qs
def update_1_theta(self, i):
x = self.D[i]
qs = self.q(i)
p = qs/np.sum(qs)
picked = pick_discrete(p)
if picked == i: # This corresponds to picking r_i in Neal (2000); i.e. get a new theta
# Neal (2000) H_i is the posterior given a single observation x.
self.theta[i] = self.conjugate_prior.post(x).sample()
else: # reuse an existing theta
self.theta[i] = self.theta[picked]
def update_theta(self, n=1):
for j in xrange(n):
for i in xrange(len(self.D)):
self.update_1_theta(i)
|
Create Dirichlet Process Mixture Model class.""" Going to try algorithm 1 from Neal (2000)
"""
import numpy as np
import bisect
def pick_discrete(p):
"""Pick a discrete integer between 0 and len(p) - 1 with probability given by p array."""
c = np.cumsum(p)
u = np.random.uniform()
return bisect.bisect_left(c, u)
class DPMM(object):
"""Dirichlet Process Mixture Model.
@param conjugate_prior The conjugate_prior object for whatever model is being inferred.
@param alpha Concentration parameter.
@param D Data.
@param theta Optional initial state for sampler. Will be drawn from conjugate_prior
if not specified.
"""
def __init__(self, conjugate_prior, alpha, D, theta=None):
self.conjugate_prior = conjugate_prior
self.alpha = alpha
self.D = D # data
self.n = len(self.D)
if theta is None:
# Draw from the prior
theta = conjugate_prior.sample(size=len(D))
self.theta = theta
# Initialize r_i array
self.r_i = self.alpha * np.array([conjugate_prior.pred(x) for x in D])
def q(self, i):
# compute and return row of q_ij matrix (we only ever need one row at a time).
qs = np.array([self.conjugate_prior.like1(th_j, x=self.D[i]) for th_j in self.theta])
qs[i] = self.r_i[i] # cheat by placing r_i at q_ii.
return qs
def update_1_theta(self, i):
x = self.D[i]
qs = self.q(i)
p = qs/np.sum(qs)
picked = pick_discrete(p)
if picked == i: # This corresponds to picking r_i in Neal (2000); i.e. get a new theta
# Neal (2000) H_i is the posterior given a single observation x.
self.theta[i] = self.conjugate_prior.post(x).sample()
else: # reuse an existing theta
self.theta[i] = self.theta[picked]
def update_theta(self, n=1):
for j in xrange(n):
for i in xrange(len(self.D)):
self.update_1_theta(i)
|
<commit_before><commit_msg>Create Dirichlet Process Mixture Model class.<commit_after>""" Going to try algorithm 1 from Neal (2000)
"""
import numpy as np
import bisect
def pick_discrete(p):
"""Pick a discrete integer between 0 and len(p) - 1 with probability given by p array."""
c = np.cumsum(p)
u = np.random.uniform()
return bisect.bisect_left(c, u)
class DPMM(object):
"""Dirichlet Process Mixture Model.
@param conjugate_prior The conjugate_prior object for whatever model is being inferred.
@param alpha Concentration parameter.
@param D Data.
@param theta Optional initial state for sampler. Will be drawn from conjugate_prior
if not specified.
"""
def __init__(self, conjugate_prior, alpha, D, theta=None):
self.conjugate_prior = conjugate_prior
self.alpha = alpha
self.D = D # data
self.n = len(self.D)
if theta is None:
# Draw from the prior
theta = conjugate_prior.sample(size=len(D))
self.theta = theta
# Initialize r_i array
self.r_i = self.alpha * np.array([conjugate_prior.pred(x) for x in D])
def q(self, i):
# compute and return row of q_ij matrix (we only ever need one row at a time).
qs = np.array([self.conjugate_prior.like1(th_j, x=self.D[i]) for th_j in self.theta])
qs[i] = self.r_i[i] # cheat by placing r_i at q_ii.
return qs
def update_1_theta(self, i):
x = self.D[i]
qs = self.q(i)
p = qs/np.sum(qs)
picked = pick_discrete(p)
if picked == i: # This corresponds to picking r_i in Neal (2000); i.e. get a new theta
# Neal (2000) H_i is the posterior given a single observation x.
self.theta[i] = self.conjugate_prior.post(x).sample()
else: # reuse an existing theta
self.theta[i] = self.theta[picked]
def update_theta(self, n=1):
for j in xrange(n):
for i in xrange(len(self.D)):
self.update_1_theta(i)
|
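For context, pick_discrete above is an inverse-CDF draw: cumulative-sum the probabilities, then binary-search for where a uniform sample lands. A standalone check of that idea (the weights are arbitrary):

import bisect
import numpy as np

p = np.array([0.1, 0.2, 0.7])
c = np.cumsum(p)  # [0.1, 0.3, 1.0]
# A uniform draw of 0.25 lands in the second bucket (0.1 <= u < 0.3).
assert bisect.bisect_left(c, 0.25) == 1
# Over many draws the bucket frequencies should approach p.
draws = [bisect.bisect_left(c, u) for u in np.random.uniform(size=10000)]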
|
c0cc71c51e95fd9b254c64e2557673d302ec0b9d
|
perfscale_mass_model_destruction.py
|
perfscale_mass_model_destruction.py
|
#!/usr/bin/env python
"""Perfscale test measuring adding and destroying a large number of models.
Steps taken in this test:
- Bootstraps a provider
- Creates x amount of models and waits for them to be ready
- Delete all the models at once.
"""
import argparse
from datetime import datetime
import logging
import sys
from deploy_stack import (
BootstrapManager,
)
from generate_perfscale_results import (
DeployDetails,
TimingData,
run_perfscale_test,
)
from utility import (
add_basic_testing_arguments,
configure_logging,
)
log = logging.getLogger("perfscale_mass_model_destruction")
__metaclass__ = type
def perfscale_assess_model_destruction(client, args):
"""Create a bunch of models and then destroy them all."""
model_count = args.model_count
all_models = []
for item in xrange(0, model_count):
model_name = 'model{}'.format(item)
log.info('Creating model: {}'.format(model_name))
new_model = client.add_model(client.env.clone(model_name))
new_model.wait_for_started()
all_models.append(new_model)
destruction_start = datetime.utcnow()
for doomed in all_models:
doomed.destroy_model()
destruction_end = datetime.utcnow()
destruction_timing = TimingData(destruction_start, destruction_end)
return DeployDetails(
'Destroy {} models'.format(model_count),
{'Model Count': model_count},
destruction_timing)
def parse_args(argv):
"""Parse all arguments."""
parser = argparse.ArgumentParser(
description="Perfscale bundle deployment test.")
add_basic_testing_arguments(parser)
parser.add_argument(
'--model-count',
type=int,
help='Number of models to create.',
default=100)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
configure_logging(args.verbose)
bs_manager = BootstrapManager.from_args(args)
run_perfscale_test(perfscale_assess_model_destruction, bs_manager, args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add mass model destruction test.
|
Add mass model destruction test.
|
Python
|
agpl-3.0
|
mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju,mjs/juju
|
Add mass model destruction test.
|
#!/usr/bin/env python
"""Perfscale test measuring adding and destroying a large number of models.
Steps taken in this test:
- Bootstraps a provider
- Creates x amount of models and waits for them to be ready
- Delete all the models at once.
"""
import argparse
from datetime import datetime
import logging
import sys
from deploy_stack import (
BootstrapManager,
)
from generate_perfscale_results import (
DeployDetails,
TimingData,
run_perfscale_test,
)
from utility import (
add_basic_testing_arguments,
configure_logging,
)
log = logging.getLogger("perfscale_mass_model_destruction")
__metaclass__ = type
def perfscale_assess_model_destruction(client, args):
"""Create a bunch of models and then destroy them all."""
model_count = args.model_count
all_models = []
for item in xrange(0, model_count):
model_name = 'model{}'.format(item)
log.info('Creating model: {}'.format(model_name))
new_model = client.add_model(client.env.clone(model_name))
new_model.wait_for_started()
all_models.append(new_model)
destruction_start = datetime.utcnow()
for doomed in all_models:
doomed.destroy_model()
destruction_end = datetime.utcnow()
destruction_timing = TimingData(destruction_start, destruction_end)
return DeployDetails(
'Destroy {} models'.format(model_count),
{'Model Count': model_count},
destruction_timing)
def parse_args(argv):
"""Parse all arguments."""
parser = argparse.ArgumentParser(
description="Perfscale bundle deployment test.")
add_basic_testing_arguments(parser)
parser.add_argument(
'--model-count',
type=int,
help='Number of models to create.',
default=100)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
configure_logging(args.verbose)
bs_manager = BootstrapManager.from_args(args)
run_perfscale_test(perfscale_assess_model_destruction, bs_manager, args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add mass model destruction test.<commit_after>
|
#!/usr/bin/env python
"""Perfscale test measuring adding and destroying a large number of models.
Steps taken in this test:
- Bootstraps a provider
- Creates x amount of models and waits for them to be ready
- Delete all the models at once.
"""
import argparse
from datetime import datetime
import logging
import sys
from deploy_stack import (
BootstrapManager,
)
from generate_perfscale_results import (
DeployDetails,
TimingData,
run_perfscale_test,
)
from utility import (
add_basic_testing_arguments,
configure_logging,
)
log = logging.getLogger("perfscale_mass_model_destruction")
__metaclass__ = type
def perfscale_assess_model_destruction(client, args):
"""Create a bunch of models and then destroy them all."""
model_count = args.model_count
all_models = []
for item in xrange(0, model_count):
model_name = 'model{}'.format(item)
log.info('Creating model: {}'.format(model_name))
new_model = client.add_model(client.env.clone(model_name))
new_model.wait_for_started()
all_models.append(new_model)
destruction_start = datetime.utcnow()
for doomed in all_models:
doomed.destroy_model()
destruction_end = datetime.utcnow()
destruction_timing = TimingData(destruction_start, destruction_end)
return DeployDetails(
'Destroy {} models'.format(model_count),
{'Model Count': model_count},
destruction_timing)
def parse_args(argv):
"""Parse all arguments."""
parser = argparse.ArgumentParser(
description="Perfscale bundle deployment test.")
add_basic_testing_arguments(parser)
parser.add_argument(
'--model-count',
type=int,
help='Number of models to create.',
default=100)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
configure_logging(args.verbose)
bs_manager = BootstrapManager.from_args(args)
run_perfscale_test(perfscale_assess_model_destruction, bs_manager, args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Add mass model destruction test.#!/usr/bin/env python
"""Perfscale test measuring adding and destroying a large number of models.
Steps taken in this test:
- Bootstraps a provider
- Creates x amount of models and waits for them to be ready
- Delete all the models at once.
"""
import argparse
from datetime import datetime
import logging
import sys
from deploy_stack import (
BootstrapManager,
)
from generate_perfscale_results import (
DeployDetails,
TimingData,
run_perfscale_test,
)
from utility import (
add_basic_testing_arguments,
configure_logging,
)
log = logging.getLogger("perfscale_mass_model_destruction")
__metaclass__ = type
def perfscale_assess_model_destruction(client, args):
"""Create a bunch of models and then destroy them all."""
model_count = args.model_count
all_models = []
for item in xrange(0, model_count):
model_name = 'model{}'.format(item)
log.info('Creating model: {}'.format(model_name))
new_model = client.add_model(client.env.clone(model_name))
new_model.wait_for_started()
all_models.append(new_model)
destruction_start = datetime.utcnow()
for doomed in all_models:
doomed.destroy_model()
destruction_end = datetime.utcnow()
destruction_timing = TimingData(destruction_start, destruction_end)
return DeployDetails(
'Destroy {} models'.format(model_count),
{'Model Count': model_count},
destruction_timing)
def parse_args(argv):
"""Parse all arguments."""
parser = argparse.ArgumentParser(
description="Perfscale bundle deployment test.")
add_basic_testing_arguments(parser)
parser.add_argument(
'--model-count',
type=int,
help='Number of models to create.',
default=100)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
configure_logging(args.verbose)
bs_manager = BootstrapManager.from_args(args)
run_perfscale_test(perfscale_assess_model_destruction, bs_manager, args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add mass model destruction test.<commit_after>#!/usr/bin/env python
"""Perfscale test measuring adding and destroying a large number of models.
Steps taken in this test:
- Bootstraps a provider
- Creates x amount of models and waits for them to be ready
- Delete all the models at once.
"""
import argparse
from datetime import datetime
import logging
import sys
from deploy_stack import (
BootstrapManager,
)
from generate_perfscale_results import (
DeployDetails,
TimingData,
run_perfscale_test,
)
from utility import (
add_basic_testing_arguments,
configure_logging,
)
log = logging.getLogger("perfscale_mass_model_destruction")
__metaclass__ = type
def perfscale_assess_model_destruction(client, args):
"""Create a bunch of models and then destroy them all."""
model_count = args.model_count
all_models = []
for item in xrange(0, model_count):
model_name = 'model{}'.format(item)
log.info('Creating model: {}'.format(model_name))
new_model = client.add_model(client.env.clone(model_name))
new_model.wait_for_started()
all_models.append(new_model)
destruction_start = datetime.utcnow()
for doomed in all_models:
doomed.destroy_model()
destruction_end = datetime.utcnow()
destruction_timing = TimingData(destruction_start, destruction_end)
return DeployDetails(
'Destroy {} models'.format(model_count),
{'Model Count': model_count},
destruction_timing)
def parse_args(argv):
"""Parse all arguments."""
parser = argparse.ArgumentParser(
description="Perfscale bundle deployment test.")
add_basic_testing_arguments(parser)
parser.add_argument(
'--model-count',
type=int,
help='Number of models to create.',
default=100)
return parser.parse_args(argv)
def main(argv=None):
args = parse_args(argv)
configure_logging(args.verbose)
bs_manager = BootstrapManager.from_args(args)
run_perfscale_test(perfscale_assess_model_destruction, bs_manager, args)
return 0
if __name__ == '__main__':
sys.exit(main())
|
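For context, the destruction phase above is timed with plain datetime bookends. The same pattern as a reusable context manager (this timed_phase helper is a sketch, not part of the juju test framework):

from contextlib import contextmanager
from datetime import datetime

@contextmanager
def timed_phase(results, name):
    start = datetime.utcnow()
    try:
        yield
    finally:
        # Record (start, end) even if the phase raises partway through.
        results[name] = (start, datetime.utcnow())

timings = {}
with timed_phase(timings, 'destroy models'):
    pass  # the destroy_model() calls would go here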
|
fcaaad56c3b532e2f1b5845a69c46f8aa9845846
|
hub/tests/test_globalsettings.py
|
hub/tests/test_globalsettings.py
|
# coding: utf-8
import constance
from constance.test import override_config
from django.urls import reverse
from django.test import TestCase
class GlobalSettingsTestCase(TestCase):
fixtures = ['test_data']
@override_config(MFA_ENABLED=True)
def test_mfa_enabled(self):
self.client.login(username='someuser', password='someuser')
self.assertTrue(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = true;" in lines)
@override_config(MFA_ENABLED=False)
def test_mfa_disabled(self):
self.client.login(username='someuser', password='someuser')
self.assertFalse(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = false;" in lines)
|
Add test to validate MFA status in JS
|
Add test to validate MFA status in JS
|
Python
|
agpl-3.0
|
kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi,kobotoolbox/kpi
|
Add test to validate MFA status in JS
|
# coding: utf-8
import constance
from constance.test import override_config
from django.urls import reverse
from django.test import TestCase
class GlobalSettingsTestCase(TestCase):
fixtures = ['test_data']
@override_config(MFA_ENABLED=True)
def test_mfa_enabled(self):
self.client.login(username='someuser', password='someuser')
self.assertTrue(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = true;" in lines)
@override_config(MFA_ENABLED=False)
def test_mfa_disabled(self):
self.client.login(username='someuser', password='someuser')
self.assertFalse(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = false;" in lines)
|
<commit_before><commit_msg>Add test to validate MFA status in JS<commit_after>
|
# coding: utf-8
import constance
from constance.test import override_config
from django.urls import reverse
from django.test import TestCase
class GlobalSettingsTestCase(TestCase):
fixtures = ['test_data']
@override_config(MFA_ENABLED=True)
def test_mfa_enabled(self):
self.client.login(username='someuser', password='someuser')
self.assertTrue(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = true;" in lines)
@override_config(MFA_ENABLED=False)
def test_mfa_disabled(self):
self.client.login(username='someuser', password='someuser')
self.assertFalse(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = false;" in lines)
|
Add test to validate MFA status in JS# coding: utf-8
import constance
from constance.test import override_config
from django.urls import reverse
from django.test import TestCase
class GlobalSettingsTestCase(TestCase):
fixtures = ['test_data']
@override_config(MFA_ENABLED=True)
def test_mfa_enabled(self):
self.client.login(username='someuser', password='someuser')
self.assertTrue(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = true;" in lines)
@override_config(MFA_ENABLED=False)
def test_mfa_disabled(self):
self.client.login(username='someuser', password='someuser')
self.assertFalse(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = false;" in lines)
|
<commit_before><commit_msg>Add test to validate MFA status in JS<commit_after># coding: utf-8
import constance
from constance.test import override_config
from django.urls import reverse
from django.test import TestCase
class GlobalSettingsTestCase(TestCase):
fixtures = ['test_data']
@override_config(MFA_ENABLED=True)
def test_mfa_enabled(self):
self.client.login(username='someuser', password='someuser')
self.assertTrue(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = true;" in lines)
@override_config(MFA_ENABLED=False)
def test_mfa_disabled(self):
self.client.login(username='someuser', password='someuser')
self.assertFalse(constance.config.MFA_ENABLED)
response = self.client.get(reverse('kpi-root'))
lines = [line.strip() for line in response.content.decode().split('\n')]
self.assertTrue("window.MFAEnabled = false;" in lines)
|
|
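For context on the assertions above: the view under test must serialize the constance flag into an inline script for the frontend to read. A hypothetical sketch of such a pairing (the context-processor and template names are illustrative, not taken from the kpi codebase):
import constance
def mfa_context(request):
    # Context processor exposing the flag that the template serializes.
    return {'mfa_enabled': constance.config.MFA_ENABLED}
# In the page template, the flag becomes exactly the line the test greps for:
#   <script>
#     window.MFAEnabled = {{ mfa_enabled|yesno:"true,false" }};
#   </script>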
7b94c8390f4756aa69a0e88737b9f4f749e13acd
|
test/test_sparql_base_ref.py
|
test/test_sparql_base_ref.py
|
from rdflib import ConjunctiveGraph, Literal
from StringIO import StringIO
import unittest
test_data = """
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<http://example.org/alice> a foaf:Person;
foaf:name "Alice";
foaf:knows <http://example.org/bob> ."""
test_query = """
BASE <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { [ a :Person; :name ?name ]. }
"""
class TestSparqlJsonResults(unittest.TestCase):
def setUp(self):
self.graph = ConjunctiveGraph()
self.graph.parse(StringIO(test_data), format="n3")
def test_base_ref(self):
rt=self.graph.query(test_query).serialize("python")
self.failUnless(rt[0] == Literal("Alice"),"Expected:\n 'Alice' \nGot:\n %s" % rt)
if __name__ == "__main__":
unittest.main()
|
Test for use of BASE <..>
|
Test for use of BASE <..>
|
Python
|
bsd-3-clause
|
yingerj/rdflib,yingerj/rdflib,marma/rdflib,avorio/rdflib,marma/rdflib,avorio/rdflib,yingerj/rdflib,RDFLib/rdflib,avorio/rdflib,armandobs14/rdflib,dbs/rdflib,armandobs14/rdflib,ssssam/rdflib,armandobs14/rdflib,ssssam/rdflib,avorio/rdflib,dbs/rdflib,armandobs14/rdflib,ssssam/rdflib,marma/rdflib,RDFLib/rdflib,RDFLib/rdflib,dbs/rdflib,ssssam/rdflib,RDFLib/rdflib,dbs/rdflib,marma/rdflib,yingerj/rdflib
|
Test for use of BASE <..>
|
from rdflib import ConjunctiveGraph, Literal
from StringIO import StringIO
import unittest
test_data = """
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<http://example.org/alice> a foaf:Person;
foaf:name "Alice";
foaf:knows <http://example.org/bob> ."""
test_query = """
BASE <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { [ a :Person; :name ?name ]. }
"""
class TestSparqlJsonResults(unittest.TestCase):
def setUp(self):
self.graph = ConjunctiveGraph()
self.graph.parse(StringIO(test_data), format="n3")
def test_base_ref(self):
rt=self.graph.query(test_query).serialize("python")
self.failUnless(rt[0] == Literal("Alice"),"Expected:\n 'Alice' \nGot:\n %s" % rt)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test for use of BASE <..><commit_after>
|
from rdflib import ConjunctiveGraph, Literal
from StringIO import StringIO
import unittest
test_data = """
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<http://example.org/alice> a foaf:Person;
foaf:name "Alice";
foaf:knows <http://example.org/bob> ."""
test_query = """
BASE <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { [ a :Person; :name ?name ]. }
"""
class TestSparqlJsonResults(unittest.TestCase):
def setUp(self):
self.graph = ConjunctiveGraph()
self.graph.parse(StringIO(test_data), format="n3")
def test_base_ref(self):
rt=self.graph.query(test_query).serialize("python")
self.failUnless(rt[0] == Literal("Alice"),"Expected:\n 'Alice' \nGot:\n %s" % rt)
if __name__ == "__main__":
unittest.main()
|
Test for use of BASE <..>from rdflib import ConjunctiveGraph, Literal
from StringIO import StringIO
import unittest
test_data = """
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<http://example.org/alice> a foaf:Person;
foaf:name "Alice";
foaf:knows <http://example.org/bob> ."""
test_query = """
BASE <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { [ a :Person; :name ?name ]. }
"""
class TestSparqlJsonResults(unittest.TestCase):
def setUp(self):
self.graph = ConjunctiveGraph()
self.graph.parse(StringIO(test_data), format="n3")
def test_base_ref(self):
rt=self.graph.query(test_query).serialize("python")
self.failUnless(rt[0] == Literal("Alice"),"Expected:\n 'Alice' \nGot:\n %s" % rt)
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test for use of BASE <..><commit_after>from rdflib import ConjunctiveGraph, Literal
from StringIO import StringIO
import unittest
test_data = """
@prefix foaf: <http://xmlns.com/foaf/0.1/> .
@prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
<http://example.org/alice> a foaf:Person;
foaf:name "Alice";
foaf:knows <http://example.org/bob> ."""
test_query = """
BASE <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { [ a :Person; :name ?name ]. }
"""
class TestSparqlJsonResults(unittest.TestCase):
def setUp(self):
self.graph = ConjunctiveGraph()
self.graph.parse(StringIO(test_data), format="n3")
def test_base_ref(self):
rt=self.graph.query(test_query).serialize("python")
self.failUnless(rt[0] == Literal("Alice"),"Expected:\n 'Alice' \nGot:\n %s" % rt)
if __name__ == "__main__":
unittest.main()
|
|
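A note on the feature exercised above: in standard SPARQL, BASE resolves relative IRIs written in angle brackets, while prefixed names such as :Person are bound by PREFIX declarations, so the test appears to rely on rdflib's historical treatment of BASE as a default namespace. Under the standard reading, the equivalent query would declare an empty prefix (illustrative sketch):
equivalent_query = """
PREFIX : <http://xmlns.com/foaf/0.1/>
SELECT ?name
WHERE { [ a :Person; :name ?name ]. }
"""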
ba84af2586e5d0cc70ffd95f8899d28659c36d9f
|
mopidy/frontends/mpd/__init__.py
|
mopidy/frontends/mpd/__init__.py
|
"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
|
"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
**Limitations:**
This is a non-exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
|
Add list of unsupported MPD features
|
mpd: Add list of unsupported MPD features
|
Python
|
apache-2.0
|
woutervanwijk/mopidy,rawdlite/mopidy,diandiankan/mopidy,ZenithDK/mopidy,dbrgn/mopidy,SuperStarPL/mopidy,diandiankan/mopidy,mokieyue/mopidy,dbrgn/mopidy,abarisain/mopidy,jmarsik/mopidy,pacificIT/mopidy,abarisain/mopidy,rawdlite/mopidy,vrs01/mopidy,swak/mopidy,liamw9534/mopidy,mokieyue/mopidy,kingosticks/mopidy,glogiotatidis/mopidy,pacificIT/mopidy,quartz55/mopidy,bacontext/mopidy,pacificIT/mopidy,mopidy/mopidy,priestd09/mopidy,ZenithDK/mopidy,adamcik/mopidy,mokieyue/mopidy,bencevans/mopidy,woutervanwijk/mopidy,ali/mopidy,bacontext/mopidy,mopidy/mopidy,adamcik/mopidy,quartz55/mopidy,vrs01/mopidy,dbrgn/mopidy,hkariti/mopidy,pacificIT/mopidy,jmarsik/mopidy,ZenithDK/mopidy,priestd09/mopidy,vrs01/mopidy,quartz55/mopidy,ali/mopidy,kingosticks/mopidy,tkem/mopidy,glogiotatidis/mopidy,jodal/mopidy,dbrgn/mopidy,kingosticks/mopidy,ZenithDK/mopidy,vrs01/mopidy,SuperStarPL/mopidy,liamw9534/mopidy,quartz55/mopidy,bacontext/mopidy,ali/mopidy,jmarsik/mopidy,diandiankan/mopidy,ali/mopidy,diandiankan/mopidy,hkariti/mopidy,hkariti/mopidy,jodal/mopidy,hkariti/mopidy,bencevans/mopidy,tkem/mopidy,swak/mopidy,tkem/mopidy,glogiotatidis/mopidy,jmarsik/mopidy,bacontext/mopidy,swak/mopidy,mopidy/mopidy,tkem/mopidy,SuperStarPL/mopidy,swak/mopidy,glogiotatidis/mopidy,jcass77/mopidy,bencevans/mopidy,SuperStarPL/mopidy,jodal/mopidy,bencevans/mopidy,jcass77/mopidy,priestd09/mopidy,rawdlite/mopidy,jcass77/mopidy,rawdlite/mopidy,adamcik/mopidy,mokieyue/mopidy
|
"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
mpd: Add list of unsupported MPD features
|
"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
**Limitations:**
This is a non-exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
|
<commit_before>"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
<commit_msg>mpd: Add list of unsupported MPD features<commit_after>
|
"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
**Limitations:**
This is a non-exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
|
"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
mpd: Add list of unsupported MPD features"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
**Limitations:**
This is a non-exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
|
<commit_before>"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
<commit_msg>mpd: Add list of unsupported MPD features<commit_after>"""The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
Make sure :attr:`mopidy.settings.FRONTENDS` includes
``mopidy.frontends.mpd.MpdFrontend``. By default, the setting includes the MPD
frontend.
**Limitations:**
This is a non-exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
"""
from __future__ import unicode_literals
# flake8: noqa
from .actor import MpdFrontend
|
bec0104e8268eca321fec39a471adc241c06200e
|
workshops/test/test_debrief.py
|
workshops/test/test_debrief.py
|
import datetime
from django.core.urlresolvers import reverse
from ..models import Event, Task, Role
from .base import TestBase
class TestDebrief(TestBase):
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.yesterday = self.today - datetime.timedelta(days=1)
self.after_tomorrow = self.today + datetime.timedelta(days=2)
# set up some testing Events
self.e1 = Event.objects.create(
site=self.site_alpha,
slug="in-range",
start=self.today,
end=self.tomorrow,
)
self.e2 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range1",
start=self.yesterday,
end=self.tomorrow,
)
self.e3 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range2",
start=self.today,
end=self.after_tomorrow,
)
self.role = Role.objects.create(name='instructor')
Task.objects.create(event=self.e1, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e2, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e3, person=self.hermione,
role=self.role)
def test_debrief(self):
"Make sure proper events are returned within specific date ranges."
data = {
'begin_date': self.today,
'end_date': self.tomorrow
}
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug not in content
data['begin_date'] = self.yesterday
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug not in content
data['end_date'] = self.after_tomorrow
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug in content
data['begin_date'] = self.today
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug in content
|
Add test for debrief view
|
Add test for debrief view
|
Python
|
mit
|
swcarpentry/amy,wking/swc-amy,shapiromatron/amy,shapiromatron/amy,pbanaszkiewicz/amy,vahtras/amy,wking/swc-amy,vahtras/amy,swcarpentry/amy,shapiromatron/amy,wking/swc-amy,swcarpentry/amy,pbanaszkiewicz/amy,vahtras/amy,pbanaszkiewicz/amy,wking/swc-amy
|
Add test for debrief view
|
import datetime
from django.core.urlresolvers import reverse
from ..models import Event, Task, Role
from .base import TestBase
class TestDebrief(TestBase):
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.yesterday = self.today - datetime.timedelta(days=1)
self.after_tomorrow = self.today + datetime.timedelta(days=2)
# set up some testing Events
self.e1 = Event.objects.create(
site=self.site_alpha,
slug="in-range",
start=self.today,
end=self.tomorrow,
)
self.e2 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range1",
start=self.yesterday,
end=self.tomorrow,
)
self.e3 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range2",
start=self.today,
end=self.after_tomorrow,
)
self.role = Role.objects.create(name='instructor')
Task.objects.create(event=self.e1, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e2, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e3, person=self.hermione,
role=self.role)
def test_debrief(self):
"Make sure proper events are returned within specific date ranges."
data = {
'begin_date': self.today,
'end_date': self.tomorrow
}
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug not in content
data['begin_date'] = self.yesterday
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug not in content
data['end_date'] = self.after_tomorrow
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug in content
data['begin_date'] = self.today
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug in content
|
<commit_before><commit_msg>Add test for debrief view<commit_after>
|
import datetime
from django.core.urlresolvers import reverse
from ..models import Event, Task, Role
from .base import TestBase
class TestDebrief(TestBase):
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.yesterday = self.today - datetime.timedelta(days=1)
self.after_tomorrow = self.today + datetime.timedelta(days=2)
# set up some testing Events
self.e1 = Event.objects.create(
site=self.site_alpha,
slug="in-range",
start=self.today,
end=self.tomorrow,
)
self.e2 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range1",
start=self.yesterday,
end=self.tomorrow,
)
self.e3 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range2",
start=self.today,
end=self.after_tomorrow,
)
self.role = Role.objects.create(name='instructor')
Task.objects.create(event=self.e1, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e2, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e3, person=self.hermione,
role=self.role)
def test_debrief(self):
"Make sure proper events are returned within specific date ranges."
data = {
'begin_date': self.today,
'end_date': self.tomorrow
}
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug not in content
data['begin_date'] = self.yesterday
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug not in content
data['end_date'] = self.after_tomorrow
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug in content
data['begin_date'] = self.today
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug in content
|
Add test for debrief viewimport datetime
from django.core.urlresolvers import reverse
from ..models import Event, Task, Role
from .base import TestBase
class TestDebrief(TestBase):
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.yesterday = self.today - datetime.timedelta(days=1)
self.after_tomorrow = self.today + datetime.timedelta(days=2)
# set up some testing Events
self.e1 = Event.objects.create(
site=self.site_alpha,
slug="in-range",
start=self.today,
end=self.tomorrow,
)
self.e2 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range1",
start=self.yesterday,
end=self.tomorrow,
)
self.e3 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range2",
start=self.today,
end=self.after_tomorrow,
)
self.role = Role.objects.create(name='instructor')
Task.objects.create(event=self.e1, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e2, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e3, person=self.hermione,
role=self.role)
def test_debrief(self):
"Make sure proper events are returned within specific date ranges."
data = {
'begin_date': self.today,
'end_date': self.tomorrow
}
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug not in content
data['begin_date'] = self.yesterday
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug not in content
data['end_date'] = self.after_tomorrow
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug in content
data['begin_date'] = self.today
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug in content
|
<commit_before><commit_msg>Add test for debrief view<commit_after>import datetime
from django.core.urlresolvers import reverse
from ..models import Event, Task, Role
from .base import TestBase
class TestDebrief(TestBase):
def setUp(self):
super().setUp()
self._setUpUsersAndLogin()
self.today = datetime.date.today()
self.tomorrow = self.today + datetime.timedelta(days=1)
self.yesterday = self.today - datetime.timedelta(days=1)
self.after_tomorrow = self.today + datetime.timedelta(days=2)
# set up some testing Events
self.e1 = Event.objects.create(
site=self.site_alpha,
slug="in-range",
start=self.today,
end=self.tomorrow,
)
self.e2 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range1",
start=self.yesterday,
end=self.tomorrow,
)
self.e3 = Event.objects.create(
site=self.site_alpha,
slug="out-of-range2",
start=self.today,
end=self.after_tomorrow,
)
self.role = Role.objects.create(name='instructor')
Task.objects.create(event=self.e1, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e2, person=self.hermione,
role=self.role)
Task.objects.create(event=self.e3, person=self.hermione,
role=self.role)
def test_debrief(self):
"Make sure proper events are returned within specific date ranges."
data = {
'begin_date': self.today,
'end_date': self.tomorrow
}
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug not in content
data['begin_date'] = self.yesterday
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug not in content
data['end_date'] = self.after_tomorrow
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug in content
assert self.e3.slug in content
data['begin_date'] = self.today
rv = self.client.post(reverse('debrief'), data)
assert rv.status_code == 200
content = rv.content.decode('utf-8')
assert self.e1.slug in content
assert self.e2.slug not in content
assert self.e3.slug in content
|
|
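The four POSTs above pin down the inclusion rule: an event matches only when its entire span lies inside [begin_date, end_date]. A minimal sketch of the queryset filter the 'debrief' view presumably applies (the view itself is not part of this record):
from workshops.models import Event
def events_in_range(begin_date, end_date):
    # Both endpoints inclusive: start must not precede begin_date and
    # end must not exceed end_date.
    return Event.objects.filter(start__gte=begin_date, end__lte=end_date)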
ae131028eee40c6e187296ca9c3c748fa8afe057
|
cron/tunnelcrypt.py
|
cron/tunnelcrypt.py
|
"""This module encrypts and decrypts ping messages."""
from __future__ import division
from string import ascii_lowercase
symbols = list('0123456789')
symbols.append(' ')
for i in ascii_lowercase:
symbols.append(i)
symbols.extend(list(',.-_#@!()'))
def _convert_to_symbols(message):
"""
Convert a list of characters into a list of integers using the symbols
list.
Parameters
----------
message : list of characters
The characters present in the message to encrypt
Returns
-------
message : converted message
A list of integers which symbolically represent characters
"""
for i in range(len(message)):
message[i] = symbols.index(message[i])
return message
def encrypt(message, key):
"""
Encrypt a message using a key of string
Parameters
----------
key : string
The stored password hash of the user
message : string
Message to be encrypted
Returns
-------
new_message : list of integers
Integers obtained from `_convert_to_symbols`
"""
message = message.lower()
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
message = list(message)
message = _convert_to_symbols(message)
new_message = message[:]
for i in range(len(message)):
new_message[i] += symbols.index(key[i])
return new_message
def decrypt(message, key):
"""
Decrypt a message in the form of a list of integers
Parameters
----------
message : list of integers
This is generated from the `encrypt` function
key : string
The key which was used in encryption
Returns
-------
new_message = string
The decrypted message
"""
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
new_message = message[:]
for i in range(len(message)):
new_message[i] = symbols[message[i] - symbols.index(key[i])]
return ''.join(new_message)
|
Implement simple cryptography functions using a key
|
Implement simple cryptography functions using a key
|
Python
|
apache-2.0
|
OrkoHunter/ping-me
|
Implement simple cryptography functions using a key
|
"""This module encrypts and decrypts ping messages."""
from __future__ import division
from string import ascii_lowercase
symbols = list('0123456789')
symbols.append(' ')
for i in ascii_lowercase:
symbols.append(i)
symbols.extend(list(',.-_#@!()'))
def _convert_to_symbols(message):
"""
Convert a list of characters into a list of integers using the symbols
list.
Parameters
----------
message : list of characters
The characters present in the message to encrypt
Returns
-------
message : converted message
A list of integers which symbolically represent characters
"""
for i in range(len(message)):
message[i] = symbols.index(message[i])
return message
def encrypt(message, key):
"""
Encrypt a message using a key of string
Parameters
----------
key : string
The stored password hash of the user
message : string
Message to be encrypted
Returns
-------
new_message : list of integers
Integers obtained from `_convert_to_symbols`
"""
message = message.lower()
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
message = list(message)
message = _convert_to_symbols(message)
new_message = message[:]
for i in range(len(message)):
new_message[i] += symbols.index(key[i])
return new_message
def decrypt(message, key):
"""
Decrypt a message in the form of a list of integers
Parameters
----------
message : list of integers
This is generated from the `encrypt` function
key : string
The key which was used in encryption
Returns
-------
new_message : string
The decrypted message
"""
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
new_message = message[:]
for i in range(len(message)):
new_message[i] = symbols[message[i] - symbols.index(key[i])]
return ''.join(new_message)
|
<commit_before><commit_msg>Implement simple cryptography functions using a key<commit_after>
|
"""This module encrypts and decrypts ping messages."""
from __future__ import division
from string import ascii_lowercase
symbols = list('0123456789')
symbols.append(' ')
for i in ascii_lowercase:
symbols.append(i)
symbols.extend(list(',.-_#@!()'))
def _convert_to_symbols(message):
"""
Convert a list of characters into a list of integers using the symbols
list.
Parameters
----------
message : list of characters
The characters present in the message to encrypt
Returns
-------
message : converted message
A list of integers which symbolically represent characters
"""
for i in range(len(message)):
message[i] = symbols.index(message[i])
return message
def encrypt(message, key):
"""
Encrypt a message using a key of string
Parameters
----------
key : string
The stored password hash of the user
message : string
Message to be encrypted
Returns
-------
new_message : list of integers
Integers obtained from `_convert_to_symbols`
"""
message = message.lower()
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
message = list(message)
message = _convert_to_symbols(message)
new_message = message[:]
for i in range(len(message)):
new_message[i] += symbols.index(key[i])
return new_message
def decrypt(message, key):
"""
Decrypt a message in the form of a list of integers
Parameters
----------
message : list of integers
This is generated from the `encrypt` function
key : string
The key which was used in encryption
Returns
-------
new_message : string
The decrypted message
"""
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
new_message = message[:]
for i in range(len(message)):
new_message[i] = symbols[message[i] - symbols.index(key[i])]
return ''.join(new_message)
|
Implement simple cryptography functions using a key"""This module encrypts and decrypts ping messages."""
from __future__ import division
from string import ascii_lowercase
symbols = list('0123456789')
symbols.append(' ')
for i in ascii_lowercase:
symbols.append(i)
symbols.extend(list(',.-_#@!()'))
def _convert_to_symbols(message):
"""
Convert a list of characters into a list of integers using the symbols
list.
Parameters
----------
message : list of characters
The characters present in the message to encrypt
Returns
-------
message : converted message
A list of integers which symbolically represent characters
"""
for i in range(len(message)):
message[i] = symbols.index(message[i])
return message
def encrypt(message, key):
"""
Encrypt a message using a key of string
Parameters
----------
key : string
The stored password hash of the user
message : string
Message to be encrypted
Returns
-------
new_message : list of integers
Integers obtained from `_convert_to_symbols`
"""
message = message.lower()
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
message = list(message)
message = _convert_to_symbols(message)
new_message = message[:]
for i in range(len(message)):
new_message[i] += symbols.index(key[i])
return new_message
def decrypt(message, key):
"""
Decrypt a message in the form of a list of integers
Parameters
----------
message : list of integers
This is generated from the `encrypt` function
key : string
The key which was used in encryption
Returns
-------
new_message : string
The decrypted message
"""
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
new_message = message[:]
for i in range(len(message)):
new_message[i] = symbols[message[i] - symbols.index(key[i])]
return ''.join(new_message)
|
<commit_before><commit_msg>Implement simple cryptography functions using a key<commit_after>"""This module encrypts and decrypts ping messages."""
from __future__ import division
from string import ascii_lowercase
symbols = list('0123456789')
symbols.append(' ')
for i in ascii_lowercase:
symbols.append(i)
symbols.extend(list(',.-_#@!()'))
def _convert_to_symbols(message):
"""
Convert a list of characters into a list of integers using the symbols
list.
Parameters
----------
message : list of characters
The characters present in the message to encrypt
Returns
-------
message : converted message
A list of integers which symbolically represent characters
"""
for i in range(len(message)):
message[i] = symbols.index(message[i])
return message
def encrypt(message, key):
"""
Encrypt a message using a key of string
Parameters
----------
key : string
The stored password hash of the user
message : string
Message to be encrypted
Returns
-------
new_message : list of integers
Integers obtained from `_convert_to_symbols`
"""
message = message.lower()
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
message = list(message)
message = _convert_to_symbols(message)
new_message = message[:]
for i in range(len(message)):
new_message[i] += symbols.index(key[i])
return new_message
def decrypt(message, key):
"""
Decrypt a message in the form of a list of integers
Parameters
----------
message : list of integers
This is generated from the `encrypt` function
key : string
The key which was used in encryption
Returns
-------
new_message : string
The decrypted message
"""
if len(key) < len(message):
key = key*(len(message)//len(key) + 1)
new_message = message[:]
for i in range(len(message)):
new_message[i] = symbols[message[i] - symbols.index(key[i])]
return ''.join(new_message)
|
|
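A round-trip usage sketch for the functions above (assuming the module is importable as tunnelcrypt; note that the message may only contain characters from the symbols list, and that encrypt lowercases its input first):
from tunnelcrypt import encrypt, decrypt
cipher = encrypt('ping me at 5pm', 's3cret')  # -> list of integers
assert decrypt(cipher, 's3cret') == 'ping me at 5pm'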
670e9be8cefb176e9012c82413c2e76aa1448a83
|
scipy/integrate/_ivp/tests/test_rk_coefficients.py
|
scipy/integrate/_ivp/tests/test_rk_coefficients.py
|
import pytest
from numpy.testing import assert_allclose
import numpy as np
from scipy.integrate import RK23, RK45
@pytest.mark.parametrize("solver", [RK23, RK45])
def test_coefficient_properties(solver):
assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-15)
|
Add a test for Runge-Kutta coefficient properties
|
TST: Add a test for Runge-Kutta coefficient properties
|
Python
|
bsd-3-clause
|
mdhaber/scipy,perimosocordiae/scipy,andyfaff/scipy,rgommers/scipy,matthew-brett/scipy,WarrenWeckesser/scipy,vigna/scipy,nmayorov/scipy,jamestwebber/scipy,ilayn/scipy,Eric89GXL/scipy,WarrenWeckesser/scipy,tylerjereddy/scipy,lhilt/scipy,Stefan-Endres/scipy,person142/scipy,ilayn/scipy,endolith/scipy,ilayn/scipy,aeklant/scipy,endolith/scipy,scipy/scipy,endolith/scipy,pizzathief/scipy,gertingold/scipy,vigna/scipy,nmayorov/scipy,arokem/scipy,lhilt/scipy,anntzer/scipy,pizzathief/scipy,andyfaff/scipy,e-q/scipy,zerothi/scipy,anntzer/scipy,WarrenWeckesser/scipy,rgommers/scipy,e-q/scipy,matthew-brett/scipy,Eric89GXL/scipy,zerothi/scipy,matthew-brett/scipy,jor-/scipy,Stefan-Endres/scipy,gertingold/scipy,Eric89GXL/scipy,Eric89GXL/scipy,mdhaber/scipy,nmayorov/scipy,tylerjereddy/scipy,mdhaber/scipy,ilayn/scipy,lhilt/scipy,WarrenWeckesser/scipy,tylerjereddy/scipy,scipy/scipy,Eric89GXL/scipy,jamestwebber/scipy,Stefan-Endres/scipy,jor-/scipy,scipy/scipy,anntzer/scipy,pizzathief/scipy,aarchiba/scipy,scipy/scipy,rgommers/scipy,person142/scipy,jor-/scipy,matthew-brett/scipy,perimosocordiae/scipy,nmayorov/scipy,jamestwebber/scipy,gertingold/scipy,arokem/scipy,andyfaff/scipy,scipy/scipy,ilayn/scipy,e-q/scipy,gertingold/scipy,lhilt/scipy,tylerjereddy/scipy,grlee77/scipy,anntzer/scipy,perimosocordiae/scipy,pizzathief/scipy,endolith/scipy,jamestwebber/scipy,scipy/scipy,matthew-brett/scipy,mdhaber/scipy,aeklant/scipy,perimosocordiae/scipy,Eric89GXL/scipy,anntzer/scipy,tylerjereddy/scipy,zerothi/scipy,person142/scipy,nmayorov/scipy,aeklant/scipy,aarchiba/scipy,Stefan-Endres/scipy,ilayn/scipy,WarrenWeckesser/scipy,mdhaber/scipy,gertingold/scipy,aarchiba/scipy,andyfaff/scipy,arokem/scipy,person142/scipy,WarrenWeckesser/scipy,vigna/scipy,perimosocordiae/scipy,Stefan-Endres/scipy,grlee77/scipy,jor-/scipy,zerothi/scipy,mdhaber/scipy,rgommers/scipy,grlee77/scipy,perimosocordiae/scipy,jor-/scipy,zerothi/scipy,person142/scipy,jamestwebber/scipy,grlee77/scipy,rgommers/scipy,aeklant/scipy,aeklant/scipy,endolith/scipy,aarchiba/scipy,lhilt/scipy,arokem/scipy,vigna/scipy,e-q/scipy,zerothi/scipy,Stefan-Endres/scipy,pizzathief/scipy,e-q/scipy,endolith/scipy,andyfaff/scipy,anntzer/scipy,arokem/scipy,aarchiba/scipy,vigna/scipy,andyfaff/scipy,grlee77/scipy
|
TST: Add a test for Runge-Kutta coefficient properties
|
import pytest
from numpy.testing import assert_allclose
import numpy as np
from scipy.integrate import RK23, RK45
@pytest.mark.parametrize("solver", [RK23, RK45])
def test_coefficient_properties(solver):
assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-15)
|
<commit_before><commit_msg>TST: Add a test for Runge-Kutta coefficient properties<commit_after>
|
import pytest
from numpy.testing import assert_allclose
import numpy as np
from scipy.integrate import RK23, RK45
@pytest.mark.parametrize("solver", [RK23, RK45])
def test_coefficient_properties(solver):
assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-15)
|
TST: Add a test for Runge-Kutta coefficient propertiesimport pytest
from numpy.testing import assert_allclose
import numpy as np
from scipy.integrate import RK23, RK45
@pytest.mark.parametrize("solver", [RK23, RK45])
def test_coefficient_properties(solver):
assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-15)
|
<commit_before><commit_msg>TST: Add a test for Runge-Kutta coefficient properties<commit_after>import pytest
from numpy.testing import assert_allclose
import numpy as np
from scipy.integrate import RK23, RK45
@pytest.mark.parametrize("solver", [RK23, RK45])
def test_coefficient_properties(solver):
assert_allclose(np.sum(solver.B), 1, rtol=1e-15)
assert_allclose(np.sum(solver.A, axis=1), solver.C, rtol=1e-15)
|
|
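The two assertions encode standard Runge-Kutta consistency conditions: the weights must sum to one (first-order consistency), and each abscissa must equal the row sum of its stage coefficients. In the usual Butcher-tableau notation:
\sum_i b_i = 1, \qquad c_i = \sum_j a_{ij}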
d09361ba946d547155ef84b211d101f91ebee090
|
tools/push-to-trunk/generate_version.py
|
tools/push-to-trunk/generate_version.py
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to set v8's version file to the version given by the latest tag.
"""
import os
import re
import subprocess
import sys
CWD = os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
VERSION_CC = os.path.join(CWD, "src", "version.cc")
def main():
tag = subprocess.check_output(
"git describe --tags",
shell=True,
cwd=CWD,
).strip()
assert tag
# Check for commits not exactly matching a tag. Those are candidate builds
# for the next version. The output has the form
# <tag name>-<n commits>-<hash>.
if "-" in tag:
version = tag.split("-")[0]
candidate = "1"
else:
version = tag
candidate = "0"
version_levels = version.split(".")
# Set default patch level if none is given.
if len(version_levels) == 3:
version_levels.append("0")
assert len(version_levels) == 4
major, minor, build, patch = version_levels
# Increment build level for candidate builds.
if candidate == "1":
build = str(int(build) + 1)
patch = "0"
# Modify version.cc with the new values.
with open(VERSION_CC, "r") as f:
text = f.read()
output = []
for line in text.split("\n"):
for definition, substitute in (
("MAJOR_VERSION", major),
("MINOR_VERSION", minor),
("BUILD_NUMBER", build),
("PATCH_LEVEL", patch),
("IS_CANDIDATE_VERSION", candidate)):
if line.startswith("#define %s" % definition):
line = re.sub("\d+$", substitute, line)
output.append(line)
with open(VERSION_CC, "w") as f:
f.write("\n".join(output))
# Log what was done.
candidate_txt = " (candidate)" if candidate == "1" else ""
patch_txt = ".%s" % patch if patch != "0" else ""
version_txt = ("%s.%s.%s%s%s" %
(major, minor, build, patch_txt, candidate_txt))
print "Modified version.cc. Set V8 version to %s" % version_txt
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add script to generate the v8 version.
|
Add script to generate the v8 version.
BUG=chromium:446166
LOG=n
TBR=jkummerow@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/835903003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#25964}
|
Python
|
mit
|
UniversalFuture/moosh,UniversalFuture/moosh,UniversalFuture/moosh,UniversalFuture/moosh
|
Add script to generate the v8 version.
BUG=chromium:446166
LOG=n
TBR=jkummerow@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/835903003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#25964}
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to set v8's version file to the version given by the latest tag.
"""
import os
import re
import subprocess
import sys
CWD = os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
VERSION_CC = os.path.join(CWD, "src", "version.cc")
def main():
tag = subprocess.check_output(
"git describe --tags",
shell=True,
cwd=CWD,
).strip()
assert tag
# Check for commits not exactly matching a tag. Those are candidate builds
# for the next version. The output has the form
# <tag name>-<n commits>-<hash>.
if "-" in tag:
version = tag.split("-")[0]
candidate = "1"
else:
version = tag
candidate = "0"
version_levels = version.split(".")
# Set default patch level if none is given.
if len(version_levels) == 3:
version_levels.append("0")
assert len(version_levels) == 4
major, minor, build, patch = version_levels
# Increment build level for candidate builds.
if candidate == "1":
build = str(int(build) + 1)
patch = "0"
# Modify version.cc with the new values.
with open(VERSION_CC, "r") as f:
text = f.read()
output = []
for line in text.split("\n"):
for definition, substitute in (
("MAJOR_VERSION", major),
("MINOR_VERSION", minor),
("BUILD_NUMBER", build),
("PATCH_LEVEL", patch),
("IS_CANDIDATE_VERSION", candidate)):
if line.startswith("#define %s" % definition):
line = re.sub("\d+$", substitute, line)
output.append(line)
with open(VERSION_CC, "w") as f:
f.write("\n".join(output))
# Log what was done.
candidate_txt = " (candidate)" if candidate == "1" else ""
patch_txt = ".%s" % patch if patch != "0" else ""
version_txt = ("%s.%s.%s%s%s" %
(major, minor, build, patch_txt, candidate_txt))
print "Modified version.cc. Set V8 version to %s" % version_txt
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to generate the v8 version.
BUG=chromium:446166
LOG=n
TBR=jkummerow@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/835903003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#25964}<commit_after>
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to set v8's version file to the version given by the latest tag.
"""
import os
import re
import subprocess
import sys
CWD = os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
VERSION_CC = os.path.join(CWD, "src", "version.cc")
def main():
tag = subprocess.check_output(
"git describe --tags",
shell=True,
cwd=CWD,
).strip()
assert tag
# Check for commits not exactly matching a tag. Those are candidate builds
# for the next version. The output has the form
# <tag name>-<n commits>-<hash>.
if "-" in tag:
version = tag.split("-")[0]
candidate = "1"
else:
version = tag
candidate = "0"
version_levels = version.split(".")
# Set default patch level if none is given.
if len(version_levels) == 3:
version_levels.append("0")
assert len(version_levels) == 4
major, minor, build, patch = version_levels
# Increment build level for candidate builds.
if candidate == "1":
build = str(int(build) + 1)
patch = "0"
# Modify version.cc with the new values.
with open(VERSION_CC, "r") as f:
text = f.read()
output = []
for line in text.split("\n"):
for definition, substitute in (
("MAJOR_VERSION", major),
("MINOR_VERSION", minor),
("BUILD_NUMBER", build),
("PATCH_LEVEL", patch),
("IS_CANDIDATE_VERSION", candidate)):
if line.startswith("#define %s" % definition):
line = re.sub("\d+$", substitute, line)
output.append(line)
with open(VERSION_CC, "w") as f:
f.write("\n".join(output))
# Log what was done.
candidate_txt = " (candidate)" if candidate == "1" else ""
patch_txt = ".%s" % patch if patch != "0" else ""
version_txt = ("%s.%s.%s%s%s" %
(major, minor, build, patch_txt, candidate_txt))
print "Modified version.cc. Set V8 version to %s" % version_txt
return 0
if __name__ == "__main__":
sys.exit(main())
|
Add script to generate the v8 version.
BUG=chromium:446166
LOG=n
TBR=jkummerow@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/835903003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#25964}#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to set v8's version file to the version given by the latest tag.
"""
import os
import re
import subprocess
import sys
CWD = os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
VERSION_CC = os.path.join(CWD, "src", "version.cc")
def main():
tag = subprocess.check_output(
"git describe --tags",
shell=True,
cwd=CWD,
).strip()
assert tag
# Check for commits not exactly matching a tag. Those are candidate builds
# for the next version. The output has the form
# <tag name>-<n commits>-<hash>.
if "-" in tag:
version = tag.split("-")[0]
candidate = "1"
else:
version = tag
candidate = "0"
version_levels = version.split(".")
# Set default patch level if none is given.
if len(version_levels) == 3:
version_levels.append("0")
assert len(version_levels) == 4
major, minor, build, patch = version_levels
# Increment build level for candidate builds.
if candidate == "1":
build = str(int(build) + 1)
patch = "0"
# Modify version.cc with the new values.
with open(VERSION_CC, "r") as f:
text = f.read()
output = []
for line in text.split("\n"):
for definition, substitute in (
("MAJOR_VERSION", major),
("MINOR_VERSION", minor),
("BUILD_NUMBER", build),
("PATCH_LEVEL", patch),
("IS_CANDIDATE_VERSION", candidate)):
if line.startswith("#define %s" % definition):
line = re.sub("\d+$", substitute, line)
output.append(line)
with open(VERSION_CC, "w") as f:
f.write("\n".join(output))
# Log what was done.
candidate_txt = " (candidate)" if candidate == "1" else ""
patch_txt = ".%s" % patch if patch != "0" else ""
version_txt = ("%s.%s.%s%s%s" %
(major, minor, build, patch_txt, candidate_txt))
print "Modified version.cc. Set V8 version to %s" % version_txt
return 0
if __name__ == "__main__":
sys.exit(main())
|
<commit_before><commit_msg>Add script to generate the v8 version.
BUG=chromium:446166
LOG=n
TBR=jkummerow@chromium.org
NOTRY=true
Review URL: https://codereview.chromium.org/835903003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#25964}<commit_after>#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Script to set v8's version file to the version given by the latest tag.
"""
import os
import re
import subprocess
import sys
CWD = os.path.abspath(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
VERSION_CC = os.path.join(CWD, "src", "version.cc")
def main():
tag = subprocess.check_output(
"git describe --tags",
shell=True,
cwd=CWD,
).strip()
assert tag
# Check for commits not exactly matching a tag. Those are candidate builds
# for the next version. The output has the form
# <tag name>-<n commits>-<hash>.
if "-" in tag:
version = tag.split("-")[0]
candidate = "1"
else:
version = tag
candidate = "0"
version_levels = version.split(".")
# Set default patch level if none is given.
if len(version_levels) == 3:
version_levels.append("0")
assert len(version_levels) == 4
major, minor, build, patch = version_levels
# Increment build level for candidate builds.
if candidate == "1":
build = str(int(build) + 1)
patch = "0"
# Modify version.cc with the new values.
with open(VERSION_CC, "r") as f:
text = f.read()
output = []
for line in text.split("\n"):
for definition, substitute in (
("MAJOR_VERSION", major),
("MINOR_VERSION", minor),
("BUILD_NUMBER", build),
("PATCH_LEVEL", patch),
("IS_CANDIDATE_VERSION", candidate)):
if line.startswith("#define %s" % definition):
line = re.sub("\d+$", substitute, line)
output.append(line)
with open(VERSION_CC, "w") as f:
f.write("\n".join(output))
# Log what was done.
candidate_txt = " (candidate)" if candidate == "1" else ""
patch_txt = ".%s" % patch if patch != "0" else ""
version_txt = ("%s.%s.%s%s%s" %
(major, minor, build, patch_txt, candidate_txt))
print "Modified version.cc. Set V8 version to %s" % version_txt
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
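A worked example of the two tag shapes the script distinguishes (tag and hash values are illustrative):
# Exact tag: '3.31.0' -> major 3, minor 31, build 0, patch 0, candidate '0'.
# Past a tag: '3.31.0-7-gdeadbee' -> candidate '1'; the build level is
# incremented and the patch reset, yielding '3.31.1 (candidate)'.
tag = '3.31.0-7-gdeadbee'
version, candidate = (tag.split('-')[0], '1') if '-' in tag else (tag, '0')
assert (version, candidate) == ('3.31.0', '1')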
9db0136cef3965d20b6b138af5a0c9a5aafc36e4
|
setup.py
|
setup.py
|
#!/usr/bin/python -tt
from distutils.core import setup
setup(
name='pymajka',
version='1.0',
description='Python interface to morphological analyser majka',
author='Marek marx Grac',
author_email='grac@mail.muni.cz',
url='http://github.com/marxsk/pymajka',
py_modules=['pymajka']
)
|
Use distutils to install pymajka as standard module
|
[build] Use distutils to install pymajka as standard module
The installation of majka itself is not covered.
|
Python
|
apache-2.0
|
marxsk/pymajka
|
[build] Use distutils to install pymajka as standard module
The installation of majka itself is not covered.
|
#!/usr/bin/python -tt
from distutils.core import setup
setup(
name='pymajka',
version='1.0',
description='Python interface to morphological analyser majka',
author='Marek marx Grac',
author_email='grac@mail.muni.cz',
url='http://github.com/marxsk/pymajka',
py_modules=['pymajka']
)
|
<commit_before><commit_msg>[build] Use distutils to install pymajka as standard module
The installation of majka itself is not covered.<commit_after>
|
#!/usr/bin/python -tt
from distutils.core import setup
setup(
name='pymajka',
version='1.0',
description='Python interface to morphological analyser majka',
author='Marek marx Grac',
author_email='grac@mail.muni.cz',
url='http://github.com/marxsk/pymajka',
py_modules=['pymajka']
)
|
[build] Use distutils to install pymajka as standard module
The installation of majka itself is not covered.#!/usr/bin/python -tt
from distutils.core import setup
setup(
name='pymajka',
version='1.0',
description='Python interface to morphological analyser majka',
author='Marek marx Grac',
author_email='grac@mail.muni.cz',
url='http://github.com/marxsk/pymajka',
py_modules=['pymajka']
)
|
<commit_before><commit_msg>[build] Use distutils to install pymajka as standard module
The installation of majka itself is not covered.<commit_after>#!/usr/bin/python -tt
from distutils.core import setup
setup(
name='pymajka',
version='1.0',
description='Python interface to morphological analyser majka',
author='Marek marx Grac',
author_email='grac@mail.muni.cz',
url='http://github.com/marxsk/pymajka',
py_modules=['pymajka']
)
|
|
ba23b9845da33682f4b37eaa22b955846786d9af
|
views.py
|
views.py
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from feincms.models import Page
def handler(request, path):
page = Page.objects.page_for_path_or_404(path)
return render_to_response(page.template.path, {
'page': page,
}, context_instance=RequestContext(request))
|
Add basic cms page handler
|
Add basic cms page handler
|
Python
|
bsd-3-clause
|
hgrimelid/feincms,nickburlett/feincms,joshuajonah/feincms,matthiask/feincms2-content,nickburlett/feincms,hgrimelid/feincms,matthiask/feincms2-content,michaelkuty/feincms,matthiask/django-content-editor,joshuajonah/feincms,feincms/feincms,michaelkuty/feincms,pjdelport/feincms,michaelkuty/feincms,joshuajonah/feincms,mjl/feincms,nickburlett/feincms,matthiask/django-content-editor,feincms/feincms,matthiask/django-content-editor,mjl/feincms,pjdelport/feincms,feincms/feincms,matthiask/django-content-editor,matthiask/feincms2-content,joshuajonah/feincms,mjl/feincms,michaelkuty/feincms,pjdelport/feincms,nickburlett/feincms,hgrimelid/feincms
|
Add basic cms page handler
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from feincms.models import Page
def handler(request, path):
page = Page.objects.page_for_path_or_404(path)
return render_to_response(page.template.path, {
'page': page,
}, context_instance=RequestContext(request))
|
<commit_before><commit_msg>Add basic cms page handler<commit_after>
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from feincms.models import Page
def handler(request, path):
page = Page.objects.page_for_path_or_404(path)
return render_to_response(page.template.path, {
'page': page,
}, context_instance=RequestContext(request))
|
Add basic cms page handlerfrom django.shortcuts import render_to_response
from django.template import RequestContext
from feincms.models import Page
def handler(request, path):
page = Page.objects.page_for_path_or_404(path)
return render_to_response(page.template.path, {
'page': page,
}, context_instance=RequestContext(request))
|
<commit_before><commit_msg>Add basic cms page handler<commit_after>from django.shortcuts import render_to_response
from django.template import RequestContext
from feincms.models import Page
def handler(request, path):
page = Page.objects.page_for_path_or_404(path)
return render_to_response(page.template.path, {
'page': page,
}, context_instance=RequestContext(request))
|
|
d83f04659f429683627600a8887ca33fe0a49043
|
OnionLauncher/main.py
|
OnionLauncher/main.py
|
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
class MainWindow(QMainWindow):
def __init__(self, *args):
super(MainWindow, self).__init__(*args)
loadUi("ui_files/main.ui", self)
if __name__ == "__main__":
app = QApplication(sys.argv)
mw = MainWindow()
mw.show()
sys.exit(app.exec_())
|
Add initial code for launching UI
|
Add initial code for launching UI
|
Python
|
bsd-2-clause
|
neelchauhan/OnionLauncher
|
Add initial code for launching UI
|
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
class MainWindow(QMainWindow):
def __init__(self, *args):
super(MainWindow, self).__init__(*args)
loadUi("ui_files/main.ui", self)
if __name__ == "__main__":
app = QApplication(sys.argv)
mw = MainWindow()
mw.show()
sys.exit(app.exec_())
|
<commit_before><commit_msg>Add initial code for launching UI<commit_after>
|
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
class MainWindow(QMainWindow):
def __init__(self, *args):
super(MainWindow, self).__init__(*args)
loadUi("ui_files/main.ui", self)
if __name__ == "__main__":
app = QApplication(sys.argv)
mw = MainWindow()
mw.show()
sys.exit(app.exec_())
|
Add initial code for launching UIimport sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
class MainWindow(QMainWindow):
def __init__(self, *args):
super(MainWindow, self).__init__(*args)
loadUi("ui_files/main.ui", self)
if __name__ == "__main__":
app = QApplication(sys.argv)
mw = MainWindow()
mw.show()
sys.exit(app.exec_())
|
<commit_before><commit_msg>Add initial code for launching UI<commit_after>import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.uic import loadUi
class MainWindow(QMainWindow):
def __init__(self, *args):
super(MainWindow, self).__init__(*args)
loadUi("ui_files/main.ui", self)
if __name__ == "__main__":
app = QApplication(sys.argv)
mw = MainWindow()
mw.show()
sys.exit(app.exec_())
|
|
6f5e0338dd19d77d41400d5c2e46fb755337a727
|
render_particle.py
|
render_particle.py
|
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
def __init__(self, (x, y), radius):
self.x = x
self.y = y
self.radius = radius
self.color = (255, 0, 0)
self.thickness = 1
def display(self):
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
particle = Particle((150, 100), 20)
particle.display()
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
Add module capable of rendering a circle-shaped Particle object
|
Add module capable of rendering a circle-shaped Particle object
|
Python
|
mit
|
withtwoemms/pygame-explorations
|
Add module capable of rendering a circle-shaped Particle object
|
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
def __init__(self, (x, y), radius):
self.x = x
self.y = y
self.radius = radius
self.color = (255, 0, 0)
self.thickness = 1
def display(self):
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
particle = Particle((150, 100), 20)
particle.display()
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
<commit_before><commit_msg>Add module capable of rendering a circle-shaped Particle object<commit_after>
|
import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
def __init__(self, (x, y), radius):
self.x = x
self.y = y
self.radius = radius
self.color = (255, 0, 0)
self.thickness = 1
def display(self):
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
particle = Particle((150, 100), 20)
particle.display()
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
Add module capable of rendering a circle-shaped Particle objectimport pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
def __init__(self, (x, y), radius):
self.x = x
self.y = y
self.radius = radius
self.color = (255, 0, 0)
self.thickness = 1
def display(self):
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
particle = Particle((150, 100), 20)
particle.display()
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
<commit_before><commit_msg>Add module capable of rendering a circle-shaped Particle object<commit_after>import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
class Particle:
def __init__(self, (x, y), radius):
self.x = x
self.y = y
self.radius = radius
self.color = (255, 0, 0)
self.thickness = 1
def display(self):
pygame.draw.circle(screen, self.color, (self.x, self.y), self.radius, self.thickness)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
particle = Particle((150, 100), 20)
particle.display()
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
|
|
856c5576e13eb89c433bfe873d59c55971232555
|
microcosm_postgres/createall.py
|
microcosm_postgres/createall.py
|
"""
Create databases.
"""
from argparse import ArgumentParser
from microcosm_postgres.operations import create_all, drop_all
def parse_args(graph):
parser = ArgumentParser()
parser.add_argument("--drop", "-D", action="store_true")
return parser.parse_args()
def main(graph):
"""
Create and drop databases.
"""
args = parse_args(graph)
if args.drop:
drop_all(graph)
create_all(graph)
|
Add create all CLI entry point
|
Add create all CLI entry point
|
Python
|
apache-2.0
|
globality-corp/microcosm-postgres,globality-corp/microcosm-postgres
|
Add create all CLI entry point
|
"""
Create databases.
"""
from argparse import ArgumentParser
from microcosm_postgres.operations import create_all, drop_all
def parse_args(graph):
parser = ArgumentParser()
parser.add_argument("--drop", "-D", action="store_true")
return parser.parse_args()
def main(graph):
"""
Create and drop databases.
"""
args = parse_args(graph)
if args.drop:
drop_all(graph)
create_all(graph)
|
<commit_before><commit_msg>Add create all CLI entry point<commit_after>
|
"""
Create databases.
"""
from argparse import ArgumentParser
from microcosm_postgres.operations import create_all, drop_all
def parse_args(graph):
parser = ArgumentParser()
parser.add_argument("--drop", "-D", action="store_true")
return parser.parse_args()
def main(graph):
"""
Create and drop databases.
"""
args = parse_args(graph)
if args.drop:
drop_all(graph)
create_all(graph)
|
Add create all CLI entry point"""
Create databases.
"""
from argparse import ArgumentParser
from microcosm_postgres.operations import create_all, drop_all
def parse_args(graph):
parser = ArgumentParser()
parser.add_argument("--drop", "-D", action="store_true")
return parser.parse_args()
def main(graph):
"""
Create and drop databases.
"""
args = parse_args(graph)
if args.drop:
drop_all(graph)
create_all(graph)
|
<commit_before><commit_msg>Add create all CLI entry point<commit_after>"""
Create databases.
"""
from argparse import ArgumentParser
from microcosm_postgres.operations import create_all, drop_all
def parse_args(graph):
parser = ArgumentParser()
parser.add_argument("--drop", "-D", action="store_true")
return parser.parse_args()
def main(graph):
"""
Create and drop databases.
"""
args = parse_args(graph)
if args.drop:
drop_all(graph)
create_all(graph)
|
|
c5bfe8550c50977750561bd5759db5da8bcab48d
|
problem1/gml_read.py
|
problem1/gml_read.py
|
import networkx as nx
# from gml_read import read_gml2
# g = read_gml2("steiner-small.gml")
def read_gml2(path):
# Read file lines
f = open(path)
lines = f.readlines()
f.close()
# Split lines into symbols
syms = []
for line in lines:
line = line.strip().split(' ')
if len(line) != 0 and line[0] == '#': # skip comments
continue
for sym in line:
syms.append(sym)
n_syms = len(syms)
# Find node labeled 'graph'
has_graph = False
i_sym = 0
while not has_graph and i_sym < n_syms:
sym = syms[i_sym]
i_sym += 1
if sym == 'graph':
has_graph = True
break
if not has_graph:
print ("Couldn't find a graph")
return
G = nx.Graph()
# Recurse the graph structure
level = 0
expect_value = True
attribs = {1:G.graph}
current_node = None
current_edge = None
while level >= 0 and i_sym < n_syms:
sym = syms[i_sym]
if expect_value:
value = None
if sym == '[': # list
level += 1
elif sym[0] == '\"': # string
value = sym
while syms[i_sym][-1] != '\"':
i_sym += 1
value += ' ' + syms[i_sym]
value = value[1:-1]
elif '.' in sym:
value = float(sym)
else:
value = int(sym)
if value != None:
#if level not in attribs:
# attribs[level] = {}
attribs[level][current_key] = value
expect_value = False
else:
if sym == ']': # list end
level -= 1
if level == 1:
if current_node != None:
id = current_node['id']
current_node.pop('id', None) # don't need id in attribs
G.add_node(id, current_node)
current_node = None
elif current_edge != None:
source = current_edge['source']
target = current_edge['target']
current_edge.pop('source', None)
current_edge.pop('target', None)
G.add_edge(source, target, current_edge)
current_edge = None
else:
if level == 1:
if sym == 'node':
current_node = {}
attribs[level + 1] = current_node
elif sym == 'edge':
current_edge = {}
attribs[level + 1] = current_edge
current_key = sym
expect_value = True
i_sym += 1
return G
|
Add a fast GML reader (reads in seconds).
|
Add a fast GML reader (reads in seconds).
|
Python
|
mit
|
karulont/combopt
|
Add a fast GML reader (reads in seconds).
|
import networkx as nx
# from gml_read import read_gml2
# g = read_gml2("steiner-small.gml")
def read_gml2(path):
# Read file lines
f = open(path)
lines = f.readlines()
f.close()
# Split lines into symbols
syms = []
for line in lines:
line = line.strip().split(' ')
if len(line) != 0 and line[0] == '#': # skip comments
continue
for sym in line:
syms.append(sym)
n_syms = len(syms)
# Find node labeled 'graph'
has_graph = False
i_sym = 0
while not has_graph and i_sym < n_syms:
sym = syms[i_sym]
i_sym += 1
if sym == 'graph':
has_graph = True
break
if not has_graph:
print ("Couldn't find a graph")
return
G = nx.Graph()
# Recurse the graph structure
level = 0
expect_value = True
attribs = {1:G.graph}
current_node = None
current_edge = None
while level >= 0 and i_sym < n_syms:
sym = syms[i_sym]
if expect_value:
value = None
if sym == '[': # list
level += 1
elif sym[0] == '\"': # string
value = sym
while syms[i_sym][-1] != '\"':
i_sym += 1
value += ' ' + syms[i_sym]
value = value[1:-1]
elif '.' in sym:
value = float(sym)
else:
value = int(sym)
if value != None:
#if level not in attribs:
# attribs[level] = {}
attribs[level][current_key] = value
expect_value = False
else:
if sym == ']': # list end
level -= 1
if level == 1:
if current_node != None:
id = current_node['id']
current_node.pop('id', None) # don't need id in attribs
G.add_node(id, current_node)
current_node = None
elif current_edge != None:
source = current_edge['source']
target = current_edge['target']
current_edge.pop('source', None)
current_edge.pop('target', None)
G.add_edge(source, target, current_edge)
current_edge = None
else:
if level == 1:
if sym == 'node':
current_node = {}
attribs[level + 1] = current_node
elif sym == 'edge':
current_edge = {}
attribs[level + 1] = current_edge
current_key = sym
expect_value = True
i_sym += 1
return G
|
<commit_before><commit_msg>Add a fast GML reader (reads in seconds).<commit_after>
|
import networkx as nx
# from gml_read import read_gml2
# g = read_gml2("steiner-small.gml")
def read_gml2(path):
# Read file lines
f = open(path)
lines = f.readlines()
f.close()
# Split lines into symbols
syms = []
for line in lines:
line = line.strip().split(' ')
if len(line) != 0 and line[0] == '#': # skip comments
continue
for sym in line:
syms.append(sym)
n_syms = len(syms)
# Find node labeled 'graph'
has_graph = False
i_sym = 0
while not has_graph and i_sym < n_syms:
sym = syms[i_sym]
i_sym += 1
if sym == 'graph':
has_graph = True
break
if not has_graph:
print ("Couldn't find a graph")
return
G = nx.Graph()
# Recurse the graph structure
level = 0
expect_value = True
attribs = {1:G.graph}
current_node = None
current_edge = None
while level >= 0 and i_sym < n_syms:
sym = syms[i_sym]
if expect_value:
value = None
if sym == '[': # list
level += 1
elif sym[0] == '\"': # string
value = sym
while syms[i_sym][-1] != '\"':
i_sym += 1
value += ' ' + syms[i_sym]
value = value[1:-1]
elif '.' in sym:
value = float(sym)
else:
value = int(sym)
if value != None:
#if level not in attribs:
# attribs[level] = {}
attribs[level][current_key] = value
expect_value = False
else:
if sym == ']': # list end
level -= 1
if level == 1:
if current_node != None:
id = current_node['id']
current_node.pop('id', None) # don't need id in attribs
G.add_node(id, current_node)
current_node = None
elif current_edge != None:
source = current_edge['source']
target = current_edge['target']
current_edge.pop('source', None)
current_edge.pop('target', None)
G.add_edge(source, target, current_edge)
current_edge = None
else:
if level == 1:
if sym == 'node':
current_node = {}
attribs[level + 1] = current_node
elif sym == 'edge':
current_edge = {}
attribs[level + 1] = current_edge
current_key = sym
expect_value = True
i_sym += 1
return G
|
Add a fast GML reader (reads in seconds).import networkx as nx
# from gml_read import read_gml2
# g = read_gml2("steiner-small.gml")
def read_gml2(path):
# Read file lines
f = open(path)
lines = f.readlines()
f.close()
# Split lines into symbols
syms = []
for line in lines:
line = line.strip().split(' ')
if len(line) != 0 and line[0] == '#': # skip comments
continue
for sym in line:
syms.append(sym)
n_syms = len(syms)
# Find node labeled 'graph'
has_graph = False
i_sym = 0
while not has_graph and i_sym < n_syms:
sym = syms[i_sym]
i_sym += 1
if sym == 'graph':
has_graph = True
break
if not has_graph:
print ("Couldn't find a graph")
return
G = nx.Graph()
# Recurse the graph structure
level = 0
expect_value = True
attribs = {1:G.graph}
current_node = None
current_edge = None
while level >= 0 and i_sym < n_syms:
sym = syms[i_sym]
if expect_value:
value = None
if sym == '[': # list
level += 1
elif sym[0] == '\"': # string
value = sym
while syms[i_sym][-1] != '\"':
i_sym += 1
value += ' ' + syms[i_sym]
value = value[1:-1]
elif '.' in sym:
value = float(sym)
else:
value = int(sym)
if value != None:
#if level not in attribs:
# attribs[level] = {}
attribs[level][current_key] = value
expect_value = False
else:
if sym == ']': # list end
level -= 1
if level == 1:
if current_node != None:
id = current_node['id']
current_node.pop('id', None) # don't need id in attribs
G.add_node(id, current_node)
current_node = None
elif current_edge != None:
source = current_edge['source']
target = current_edge['target']
current_edge.pop('source', None)
current_edge.pop('target', None)
G.add_edge(source, target, current_edge)
current_edge = None
else:
if level == 1:
if sym == 'node':
current_node = {}
attribs[level + 1] = current_node
elif sym == 'edge':
current_edge = {}
attribs[level + 1] = current_edge
current_key = sym
expect_value = True
i_sym += 1
return G
|
<commit_before><commit_msg>Add a fast GML reader (reads in seconds).<commit_after>import networkx as nx
# from gml_read import read_gml2
# g = read_gml2("steiner-small.gml")
def read_gml2(path):
# Read file lines
f = open(path)
lines = f.readlines()
f.close()
# Split lines into symbols
syms = []
for line in lines:
line = line.strip().split(' ')
if len(line) != 0 and line[0] == '#': # skip comments
continue
for sym in line:
syms.append(sym)
n_syms = len(syms)
# Find node labeled 'graph'
has_graph = False
i_sym = 0
while not has_graph and i_sym < n_syms:
sym = syms[i_sym]
i_sym += 1
if sym == 'graph':
has_graph = True
break
if not has_graph:
print ("Couldn't find a graph")
return
G = nx.Graph()
# Recurse the graph structure
level = 0
expect_value = True
attribs = {1:G.graph}
current_node = None
current_edge = None
while level >= 0 and i_sym < n_syms:
sym = syms[i_sym]
if expect_value:
value = None
if sym == '[': # list
level += 1
elif sym[0] == '\"': # string
value = sym
while syms[i_sym][-1] != '\"':
i_sym += 1
value += ' ' + syms[i_sym]
value = value[1:-1]
elif '.' in sym:
value = float(sym)
else:
value = int(sym)
if value != None:
#if level not in attribs:
# attribs[level] = {}
attribs[level][current_key] = value
expect_value = False
else:
if sym == ']': # list end
level -= 1
if level == 1:
if current_node != None:
id = current_node['id']
current_node.pop('id', None) # don't need id in attribs
G.add_node(id, current_node)
current_node = None
elif current_edge != None:
source = current_edge['source']
target = current_edge['target']
current_edge.pop('source', None)
current_edge.pop('target', None)
G.add_edge(source, target, current_edge)
current_edge = None
else:
if level == 1:
if sym == 'node':
current_node = {}
attribs[level + 1] = current_node
elif sym == 'edge':
current_edge = {}
attribs[level + 1] = current_edge
current_key = sym
expect_value = True
i_sym += 1
return G
|
|
d567f2dcb5204cf8103ce9abcc983c187c6a8844
|
src/tests/python/dummyserver.py
|
src/tests/python/dummyserver.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
PORT = 50007
# Echo server program
import socket
import sys
import json
HOST = None # Symbolic name meaning all available interfaces
def bindto(host, port):
s = None
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
print 'could not open socket'
return False, False
conn, addr = s.accept()
return conn, addr
def listen(so):
left = ''
while 1:
data, left = recieve(so, left)
if not data:
break
send(so, data)
def recieve(so, before=''):
msg = before
while '\r\n' * 2 not in msg:
msg += so.recv(1024)
index = msg.find('\r\n' * 2)
m = json.loads(msg[:index])
l = msg[index + 2:] if msg[index + 2:] != '\r\n' else ""
return m, l
def send(so, j):
so.send(json.dumps(j) + '\n\n')
if __name__ == '__main__':
so, ip = bindto(HOST, PORT)
if not so:
so, ip = bindto(HOST, PORT + 1)
print PORT + 1
print 'Connected by', ip
send(so, {'hostname': 'ulb.ac.be'})
listen(so)
so.close()
|
Add a dummy python socket server
|
[src/test] Add a dummy python socket server
|
Python
|
mit
|
C4ptainCrunch/info-f-209,C4ptainCrunch/info-f-209,C4ptainCrunch/info-f-209
|
[src/test] Add a dummy python socket server
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
PORT = 50007
# Echo server program
import socket
import sys
import json
HOST = None # Symbolic name meaning all available interfaces
def bindto(host, port):
s = None
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
print 'could not open socket'
return False, False
conn, addr = s.accept()
return conn, addr
def listen(so):
left = ''
while 1:
data, left = recieve(so, left)
if not data:
break
send(so, data)
def recieve(so, before=''):
msg = before
while '\r\n' * 2 not in msg:
msg += so.recv(1024)
index = msg.find('\r\n' * 2)
m = json.loads(msg[:index])
l = msg[index + 2:] if msg[index + 2:] != '\r\n' else ""
return m, l
def send(so, j):
so.send(json.dumps(j) + '\n\n')
if __name__ == '__main__':
so, ip = bindto(HOST, PORT)
if not so:
so, ip = bindto(HOST, PORT + 1)
print PORT + 1
print 'Connected by', ip
send(so, {'hostname': 'ulb.ac.be'})
listen(so)
so.close()
|
<commit_before><commit_msg>[src/test] Add a dummy python socket server<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
PORT = 50007
# Echo server program
import socket
import sys
import json
HOST = None # Symbolic name meaning all available interfaces
def bindto(host, port):
s = None
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
print 'could not open socket'
return False, False
conn, addr = s.accept()
return conn, addr
def listen(so):
left = ''
while 1:
data, left = recieve(so, left)
if not data:
break
send(so, data)
def recieve(so, before=''):
msg = before
while '\r\n' * 2 not in msg:
msg += so.recv(1024)
index = msg.find('\r\n' * 2)
m = json.loads(msg[:index])
l = msg[index + 2:] if msg[index + 2:] != '\r\n' else ""
return m, l
def send(so, j):
so.send(json.dumps(j) + '\n\n')
if __name__ == '__main__':
so, ip = bindto(HOST, PORT)
if not so:
so, ip = bindto(HOST, PORT + 1)
print PORT + 1
print 'Connected by', ip
send(so, {'hostname': 'ulb.ac.be'})
listen(so)
so.close()
|
[src/test] Add a dummy python socket server#!/usr/bin/python
# -*- coding: utf-8 -*-
PORT = 50007
# Echo server program
import socket
import sys
import json
HOST = None # Symbolic name meaning all available interfaces
def bindto(host, port):
s = None
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
print 'could not open socket'
return False, False
conn, addr = s.accept()
return conn, addr
def listen(so):
left = ''
while 1:
data, left = recieve(so, left)
if not data:
break
send(so, data)
def recieve(so, before=''):
msg = before
while '\r\n' * 2 not in msg:
msg += so.recv(1024)
index = msg.find('\r\n' * 2)
m = json.loads(msg[:index])
l = msg[index + 2:] if msg[index + 2:] != '\r\n' else ""
return m, l
def send(so, j):
so.send(json.dumps(j) + '\n\n')
if __name__ == '__main__':
so, ip = bindto(HOST, PORT)
if not so:
so, ip = bindto(HOST, PORT + 1)
print PORT + 1
print 'Connected by', ip
send(so, {'hostname': 'ulb.ac.be'})
listen(so)
so.close()
|
<commit_before><commit_msg>[src/test] Add a dummy python socket server<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
PORT = 50007
# Echo server program
import socket
import sys
import json
HOST = None # Symbolic name meaning all available interfaces
def bindto(host, port):
s = None
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
af, socktype, proto, canonname, sa = res
try:
s = socket.socket(af, socktype, proto)
except socket.error as msg:
s = None
continue
try:
s.bind(sa)
s.listen(1)
except socket.error as msg:
s.close()
s = None
continue
break
if s is None:
print 'could not open socket'
return False, False
conn, addr = s.accept()
return conn, addr
def listen(so):
left = ''
while 1:
data, left = recieve(so, left)
if not data:
break
send(so, data)
def recieve(so, before=''):
msg = before
while '\r\n' * 2 not in msg:
msg += so.recv(1024)
index = msg.find('\r\n' * 2)
m = json.loads(msg[:index])
l = msg[index + 2:] if msg[index + 2:] != '\r\n' else ""
return m, l
def send(so, j):
so.send(json.dumps(j) + '\n\n')
if __name__ == '__main__':
so, ip = bindto(HOST, PORT)
if not so:
so, ip = bindto(HOST, PORT + 1)
print PORT + 1
print 'Connected by', ip
send(so, {'hostname': 'ulb.ac.be'})
listen(so)
so.close()
|
|
bc0ed201ee94fcd279372cd028e48f06502e03af
|
iddfs.py
|
iddfs.py
|
# -*- coding: utf-8 -*-
import draw
import grid
import util
def iddfs(g, start, goal):
for i in range(40):
path = iddfs_rec(g, start, goal, [start], i)
if path != None:
return path
return None
def iddfs_rec(g, pos, goal, path, max_depth):
if len(path) > max_depth:
return None
if pos == goal:
return path
for n in g.neighbours(*pos):
if n in path:
continue # don't follow loops
p = iddfs_rec(g, n, goal, path + [n], max_depth)
if p != None:
return p
return None
if __name__ == '__main__':
g, start, goal = util.generate_problem(16, 16, 0.2)
print('Start:', start, 'goal:', goal)
path = iddfs(g, start, goal)
print('Found length vs heuristic:', len(path), grid.dist(start, goal))
draw.draw_path(draw.draw_grid(g), path).show()
|
Add Iterative Deepening Depth-First Search
|
Add Iterative Deepening Depth-First Search
|
Python
|
mit
|
XeryusTC/search
|
Add Iterative Deepening Depth-First Search
|
# -*- coding: utf-8 -*-
import draw
import grid
import util
def iddfs(g, start, goal):
for i in range(40):
path = iddfs_rec(g, start, goal, [start], i)
if path != None:
return path
return None
def iddfs_rec(g, pos, goal, path, max_depth):
if len(path) > max_depth:
return None
if pos == goal:
return path
for n in g.neighbours(*pos):
if n in path:
continue # don't follow loops
p = iddfs_rec(g, n, goal, path + [n], max_depth)
if p != None:
return p
return None
if __name__ == '__main__':
g, start, goal = util.generate_problem(16, 16, 0.2)
print('Start:', start, 'goal:', goal)
path = iddfs(g, start, goal)
print('Found length vs heuristic:', len(path), grid.dist(start, goal))
draw.draw_path(draw.draw_grid(g), path).show()
|
<commit_before><commit_msg>Add Iterative Deepening Depth-First Search<commit_after>
|
# -*- coding: utf-8 -*-
import draw
import grid
import util
def iddfs(g, start, goal):
for i in range(40):
path = iddfs_rec(g, start, goal, [start], i)
if path != None:
return path
return None
def iddfs_rec(g, pos, goal, path, max_depth):
if len(path) > max_depth:
return None
if pos == goal:
return path
for n in g.neighbours(*pos):
if n in path:
continue # don't follow loops
p = iddfs_rec(g, n, goal, path + [n], max_depth)
if p != None:
return p
return None
if __name__ == '__main__':
g, start, goal = util.generate_problem(16, 16, 0.2)
print('Start:', start, 'goal:', goal)
path = iddfs(g, start, goal)
print('Found length vs heuristic:', len(path), grid.dist(start, goal))
draw.draw_path(draw.draw_grid(g), path).show()
|
Add Iterative Deepening Depth-First Search# -*- coding: utf-8 -*-
import draw
import grid
import util
def iddfs(g, start, goal):
for i in range(40):
path = iddfs_rec(g, start, goal, [start], i)
if path != None:
return path
return None
def iddfs_rec(g, pos, goal, path, max_depth):
if len(path) > max_depth:
return None
if pos == goal:
return path
for n in g.neighbours(*pos):
if n in path:
continue # don't follow loops
p = iddfs_rec(g, n, goal, path + [n], max_depth)
if p != None:
return p
return None
if __name__ == '__main__':
g, start, goal = util.generate_problem(16, 16, 0.2)
print('Start:', start, 'goal:', goal)
path = iddfs(g, start, goal)
print('Found length vs heuristic:', len(path), grid.dist(start, goal))
draw.draw_path(draw.draw_grid(g), path).show()
|
<commit_before><commit_msg>Add Iterative Deepening Depth-First Search<commit_after># -*- coding: utf-8 -*-
import draw
import grid
import util
def iddfs(g, start, goal):
for i in range(40):
path = iddfs_rec(g, start, goal, [start], i)
if path != None:
return path
return None
def iddfs_rec(g, pos, goal, path, max_depth):
if len(path) > max_depth:
return None
if pos == goal:
return path
for n in g.neighbours(*pos):
if n in path:
continue # don't follow loops
p = iddfs_rec(g, n, goal, path + [n], max_depth)
if p != None:
return p
return None
if __name__ == '__main__':
g, start, goal = util.generate_problem(16, 16, 0.2)
print('Start:', start, 'goal:', goal)
path = iddfs(g, start, goal)
print('Found length vs heuristic:', len(path), grid.dist(start, goal))
draw.draw_path(draw.draw_grid(g), path).show()
|
|
cd7ce7b8e76c6b756ea42c5f3fc08f923963bcbc
|
codingame/easy/mars_lander.py
|
codingame/easy/mars_lander.py
|
N = int(raw_input()) # the number of points used to draw the surface of Mars.
for i in xrange(N):
# LAND_X: X coordinate of a surface point. (0 to 6999)
# LAND_Y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
LAND_X, LAND_Y = [int(i) for i in raw_input().split()]
# Game loop
while True:
# HS: the horizontal speed (in m/s), can be negative.
# VS: the vertical speed (in m/s), can be negative.
# F: the quantity of remaining fuel in liters.
# R: the rotation angle in degrees (-90 to 90).
# P: the thrust power (0 to 4).
X, Y, HS, VS, F, R, P = [int(i) for i in raw_input().split()]
# R P. R is the desired rotation angle. P is the desired thrust power.
if VS < -40:
print "0 4"
elif VS < -30:
print "0 3"
elif VS < -20:
print "0 2"
elif VS < -10:
print "0 1"
elif VS >= -10:
print "0 0"
|
Add a naive solution for Mars Lander
|
Add a naive solution for Mars Lander
|
Python
|
mit
|
AntoineAugusti/katas,AntoineAugusti/katas,AntoineAugusti/katas
|
Add a naive solution for Mars Lander
|
N = int(raw_input()) # the number of points used to draw the surface of Mars.
for i in xrange(N):
# LAND_X: X coordinate of a surface point. (0 to 6999)
# LAND_Y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
LAND_X, LAND_Y = [int(i) for i in raw_input().split()]
# Game loop
while True:
# HS: the horizontal speed (in m/s), can be negative.
# VS: the vertical speed (in m/s), can be negative.
# F: the quantity of remaining fuel in liters.
# R: the rotation angle in degrees (-90 to 90).
# P: the thrust power (0 to 4).
X, Y, HS, VS, F, R, P = [int(i) for i in raw_input().split()]
# R P. R is the desired rotation angle. P is the desired thrust power.
if VS < -40:
print "0 4"
elif VS < -30:
print "0 3"
elif VS < -20:
print "0 2"
elif VS < -10:
print "0 1"
elif VS >= -10:
print "0 0"
|
<commit_before><commit_msg>Add a naive solution for Mars Lander<commit_after>
|
N = int(raw_input()) # the number of points used to draw the surface of Mars.
for i in xrange(N):
# LAND_X: X coordinate of a surface point. (0 to 6999)
# LAND_Y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
LAND_X, LAND_Y = [int(i) for i in raw_input().split()]
# Game loop
while True:
# HS: the horizontal speed (in m/s), can be negative.
# VS: the vertical speed (in m/s), can be negative.
# F: the quantity of remaining fuel in liters.
# R: the rotation angle in degrees (-90 to 90).
# P: the thrust power (0 to 4).
X, Y, HS, VS, F, R, P = [int(i) for i in raw_input().split()]
# R P. R is the desired rotation angle. P is the desired thrust power.
if VS < -40:
print "0 4"
elif VS < -30:
print "0 3"
elif VS < -20:
print "0 2"
elif VS < -10:
print "0 1"
elif VS >= -10:
print "0 0"
|
Add a naive solution for Mars LanderN = int(raw_input()) # the number of points used to draw the surface of Mars.
for i in xrange(N):
# LAND_X: X coordinate of a surface point. (0 to 6999)
# LAND_Y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
LAND_X, LAND_Y = [int(i) for i in raw_input().split()]
# Game loop
while True:
# HS: the horizontal speed (in m/s), can be negative.
# VS: the vertical speed (in m/s), can be negative.
# F: the quantity of remaining fuel in liters.
# R: the rotation angle in degrees (-90 to 90).
# P: the thrust power (0 to 4).
X, Y, HS, VS, F, R, P = [int(i) for i in raw_input().split()]
# R P. R is the desired rotation angle. P is the desired thrust power.
if VS < -40:
print "0 4"
elif VS < -30:
print "0 3"
elif VS < -20:
print "0 2"
elif VS < -10:
print "0 1"
elif VS >= -10:
print "0 0"
|
<commit_before><commit_msg>Add a naive solution for Mars Lander<commit_after>N = int(raw_input()) # the number of points used to draw the surface of Mars.
for i in xrange(N):
# LAND_X: X coordinate of a surface point. (0 to 6999)
# LAND_Y: Y coordinate of a surface point. By linking all the points together in a sequential fashion, you form the surface of Mars.
LAND_X, LAND_Y = [int(i) for i in raw_input().split()]
# Game loop
while True:
# HS: the horizontal speed (in m/s), can be negative.
# VS: the vertical speed (in m/s), can be negative.
# F: the quantity of remaining fuel in liters.
# R: the rotation angle in degrees (-90 to 90).
# P: the thrust power (0 to 4).
X, Y, HS, VS, F, R, P = [int(i) for i in raw_input().split()]
# R P. R is the desired rotation angle. P is the desired thrust power.
if VS < -40:
print "0 4"
elif VS < -30:
print "0 3"
elif VS < -20:
print "0 2"
elif VS < -10:
print "0 1"
elif VS >= -10:
print "0 0"
|
|
67ec8503b2fafbf9a2728b9175f222a448a6df02
|
extras/globalmod.py
|
extras/globalmod.py
|
#!/usr/bin/python
import sys
from redis import Redis
db = Redis()
session_id = sys.argv[2]
if sys.argv[1]=='add':
db.sadd('global-mods', session_id)
print 'Added to global mods list.'
for chat in db.smembers('session.'+session_id+'.chats'):
print 'Setting group in '+chat+' to globalmod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'globalmod')
elif sys.argv[1]=='remove':
print 'Removed from global mods list.'
db.srem('global-mods', session_id)
for chat in db.smembers('session.'+session_id+'.chats'):
if db.hget('session.'+session_id+'.meta.'+chat, 'counter')=='1':
print 'Setting group in '+chat+' to mod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'mod')
else:
print 'Setting group in '+chat+' to user.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'user')
|
Create script for adding/removing global mods.
|
Create script for adding/removing global mods.
|
Python
|
mit
|
MSPARP/MSPARP,MSPARP/MSPARP,MSPARP/MSPARP
|
Create script for adding/removing global mods.
|
#!/usr/bin/python
import sys
from redis import Redis
db = Redis()
session_id = sys.argv[2]
if sys.argv[1]=='add':
db.sadd('global-mods', session_id)
print 'Added to global mods list.'
for chat in db.smembers('session.'+session_id+'.chats'):
print 'Setting group in '+chat+' to globalmod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'globalmod')
elif sys.argv[1]=='remove':
print 'Removed from global mods list.'
db.srem('global-mods', session_id)
for chat in db.smembers('session.'+session_id+'.chats'):
if db.hget('session.'+session_id+'.meta.'+chat, 'counter')=='1':
print 'Setting group in '+chat+' to mod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'mod')
else:
print 'Setting group in '+chat+' to user.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'user')
|
<commit_before><commit_msg>Create script for adding/removing global mods.<commit_after>
|
#!/usr/bin/python
import sys
from redis import Redis
db = Redis()
session_id = sys.argv[2]
if sys.argv[1]=='add':
db.sadd('global-mods', session_id)
print 'Added to global mods list.'
for chat in db.smembers('session.'+session_id+'.chats'):
print 'Setting group in '+chat+' to globalmod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'globalmod')
elif sys.argv[1]=='remove':
print 'Removed from global mods list.'
db.srem('global-mods', session_id)
for chat in db.smembers('session.'+session_id+'.chats'):
if db.hget('session.'+session_id+'.meta.'+chat, 'counter')=='1':
print 'Setting group in '+chat+' to mod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'mod')
else:
print 'Setting group in '+chat+' to user.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'user')
|
Create script for adding/removing global mods.#!/usr/bin/python
import sys
from redis import Redis
db = Redis()
session_id = sys.argv[2]
if sys.argv[1]=='add':
db.sadd('global-mods', session_id)
print 'Added to global mods list.'
for chat in db.smembers('session.'+session_id+'.chats'):
print 'Setting group in '+chat+' to globalmod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'globalmod')
elif sys.argv[1]=='remove':
print 'Removed from global mods list.'
db.srem('global-mods', session_id)
for chat in db.smembers('session.'+session_id+'.chats'):
if db.hget('session.'+session_id+'.meta.'+chat, 'counter')=='1':
print 'Setting group in '+chat+' to mod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'mod')
else:
print 'Setting group in '+chat+' to user.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'user')
|
<commit_before><commit_msg>Create script for adding/removing global mods.<commit_after>#!/usr/bin/python
import sys
from redis import Redis
db = Redis()
session_id = sys.argv[2]
if sys.argv[1]=='add':
db.sadd('global-mods', session_id)
print 'Added to global mods list.'
for chat in db.smembers('session.'+session_id+'.chats'):
print 'Setting group in '+chat+' to globalmod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'globalmod')
elif sys.argv[1]=='remove':
print 'Removed from global mods list.'
db.srem('global-mods', session_id)
for chat in db.smembers('session.'+session_id+'.chats'):
if db.hget('session.'+session_id+'.meta.'+chat, 'counter')=='1':
print 'Setting group in '+chat+' to mod.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'mod')
else:
print 'Setting group in '+chat+' to user.'
db.hset('session.'+session_id+'.meta.'+chat, 'group', 'user')
|
|
6f59fe88fa00c0c05d985ae11b0373eaf8b21303
|
data/test-biochemists-zinb-ae.py
|
data/test-biochemists-zinb-ae.py
|
import numpy as np
import tensorflow as tf
from autoencoder.io import read_text, preprocess
from autoencoder.api import autoencode
import keras.backend as K
# for full reproducibility
np.random.seed(1)
tf.set_random_seed(1)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
K.set_session(sess)
x = read_text('biochemists.tsv', header='infer')
print(x.shape)
# test API
result = autoencode(x, 'test-ae', type='zinb-conddisp', hidden_size=(1,), epochs=3)
|
Add a fully reproducible autoencoder test code
|
Add a fully reproducible autoencoder test code
Former-commit-id: fef07285a8dd84dabd37eb73de7cb298b0b50944
|
Python
|
apache-2.0
|
theislab/dca,theislab/dca,theislab/dca
|
Add a fully reproducible autoencoder test code
Former-commit-id: fef07285a8dd84dabd37eb73de7cb298b0b50944
|
import numpy as np
import tensorflow as tf
from autoencoder.io import read_text, preprocess
from autoencoder.api import autoencode
import keras.backend as K
# for full reproducibility
np.random.seed(1)
tf.set_random_seed(1)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
K.set_session(sess)
x = read_text('biochemists.tsv', header='infer')
print(x.shape)
# test API
result = autoencode(x, 'test-ae', type='zinb-conddisp', hidden_size=(1,), epochs=3)
|
<commit_before><commit_msg>Add a fully reproducible autoencoder test code
Former-commit-id: fef07285a8dd84dabd37eb73de7cb298b0b50944<commit_after>
|
import numpy as np
import tensorflow as tf
from autoencoder.io import read_text, preprocess
from autoencoder.api import autoencode
import keras.backend as K
# for full reproducibility
np.random.seed(1)
tf.set_random_seed(1)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
K.set_session(sess)
x = read_text('biochemists.tsv', header='infer')
print(x.shape)
# test API
result = autoencode(x, 'test-ae', type='zinb-conddisp', hidden_size=(1,), epochs=3)
|
Add a fully reproducible autoencoder test code
Former-commit-id: fef07285a8dd84dabd37eb73de7cb298b0b50944import numpy as np
import tensorflow as tf
from autoencoder.io import read_text, preprocess
from autoencoder.api import autoencode
import keras.backend as K
# for full reproducibility
np.random.seed(1)
tf.set_random_seed(1)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
K.set_session(sess)
x = read_text('biochemists.tsv', header='infer')
print(x.shape)
# test API
result = autoencode(x, 'test-ae', type='zinb-conddisp', hidden_size=(1,), epochs=3)
|
<commit_before><commit_msg>Add a fully reproducible autoencoder test code
Former-commit-id: fef07285a8dd84dabd37eb73de7cb298b0b50944<commit_after>import numpy as np
import tensorflow as tf
from autoencoder.io import read_text, preprocess
from autoencoder.api import autoencode
import keras.backend as K
# for full reproducibility
np.random.seed(1)
tf.set_random_seed(1)
sess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1))
K.set_session(sess)
x = read_text('biochemists.tsv', header='infer')
print(x.shape)
# test API
result = autoencode(x, 'test-ae', type='zinb-conddisp', hidden_size=(1,), epochs=3)
|
|
347ab0f2635bc1e91747822e3d10588e1344b6f6
|
hstsb/directsb.py
|
hstsb/directsb.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
Module for directly estimating SB by adding up the light of stars in the Brown
catalog and transforming that total light into an SDSS magnitude.
"""
from astropy import log
import numpy as np
from sqlalchemy.orm import aliased
from starplex.database import connect_to_server, Session
from starplex.database import Catalog, Bandpass, CatalogStar, Observation
def main():
log.setLevel("INFO")
fieldname = "halo35b"
data = load_photometry(fieldname)
A = compute_area(fieldname)
sb606 = compute_sb(data['cfrac'], data['m606'], A)
sb814 = compute_sb(data['cfrac'], data['m814'], A)
log.info("mu_606: {:.6f}".format(sb606))
log.info("mu_814: {:.6f}".format(sb814))
def load_photometry(fieldname):
"""Get photometry from starplex."""
connect_to_server('marvin', echo=True)
session = Session()
mag606obs = aliased(Observation)
mag814obs = aliased(Observation)
bp606 = aliased(Bandpass)
bp814 = aliased(Bandpass)
q = session.query(CatalogStar.cfrac, mag606obs.mag, mag814obs.mag)\
.join(mag606obs, CatalogStar.observations)\
.join(mag814obs, CatalogStar.observations)\
.join(Catalog)\
.filter(Catalog.name == fieldname)\
.join(bp606, mag606obs.bandpass)\
.filter(bp606.name == "f606w")\
.join(bp814, mag814obs.bandpass)\
.filter(bp814.name == "f814w")
dt = [('cfrac', np.float), ('m606', np.float), ('m814', np.float)]
data = np.array(q.all(), dtype=np.dtype(dt))
log.info("Field {0} has {1:d} stars".format(fieldname, data.shape[0]))
session.close()
return data
def compute_area(fieldname):
"""Get the unmasked area for this field from the MSK image."""
pass
def compute_sb(cfrac, mag, A):
"""Compute a surface brightness for a single bandpass."""
pass
if __name__ == '__main__':
main()
|
Load HST photometry from Starplex catalog
|
Load HST photometry from Starplex catalog
|
Python
|
bsd-3-clause
|
jonathansick/synthsb
|
Load HST photometry from Starplex catalog
|
#!/usr/bin/env python
# encoding: utf-8
"""
Module for directly estimating SB by adding up the light of stars in the Brown
catalog and transforming that total light into an SDSS magnitude.
"""
from astropy import log
import numpy as np
from sqlalchemy.orm import aliased
from starplex.database import connect_to_server, Session
from starplex.database import Catalog, Bandpass, CatalogStar, Observation
def main():
log.setLevel("INFO")
fieldname = "halo35b"
data = load_photometry(fieldname)
A = compute_area(fieldname)
sb606 = compute_sb(data['cfrac'], data['m606'], A)
sb814 = compute_sb(data['cfrac'], data['m814'], A)
log.info("mu_606: {:.6f}".format(sb606))
log.info("mu_814: {:.6f}".format(sb814))
def load_photometry(fieldname):
"""Get photometry from starplex."""
connect_to_server('marvin', echo=True)
session = Session()
mag606obs = aliased(Observation)
mag814obs = aliased(Observation)
bp606 = aliased(Bandpass)
bp814 = aliased(Bandpass)
q = session.query(CatalogStar.cfrac, mag606obs.mag, mag814obs.mag)\
.join(mag606obs, CatalogStar.observations)\
.join(mag814obs, CatalogStar.observations)\
.join(Catalog)\
.filter(Catalog.name == fieldname)\
.join(bp606, mag606obs.bandpass)\
.filter(bp606.name == "f606w")\
.join(bp814, mag814obs.bandpass)\
.filter(bp814.name == "f814w")
dt = [('cfrac', np.float), ('m606', np.float), ('m814', np.float)]
data = np.array(q.all(), dtype=np.dtype(dt))
log.info("Field {0} has {1:d} stars".format(fieldname, data.shape[0]))
session.close()
return data
def compute_area(fieldname):
"""Get the unmasked area for this field from the MSK image."""
pass
def compute_sb(cfrac, mag, A):
"""Compute a surface brightness for a single bandpass."""
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Load HST photometry from Starplex catalog<commit_after>
|
#!/usr/bin/env python
# encoding: utf-8
"""
Module for directly estimating SB by adding up the light of stars in the Brown
catalog and transforming that total light into an SDSS magnitude.
"""
from astropy import log
import numpy as np
from sqlalchemy.orm import aliased
from starplex.database import connect_to_server, Session
from starplex.database import Catalog, Bandpass, CatalogStar, Observation
def main():
log.setLevel("INFO")
fieldname = "halo35b"
data = load_photometry(fieldname)
A = compute_area(fieldname)
sb606 = compute_sb(data['cfrac'], data['m606'], A)
sb814 = compute_sb(data['cfrac'], data['m814'], A)
log.info("mu_606: {:.6f}".format(sb606))
log.info("mu_814: {:.6f}".format(sb814))
def load_photometry(fieldname):
"""Get photometry from starplex."""
connect_to_server('marvin', echo=True)
session = Session()
mag606obs = aliased(Observation)
mag814obs = aliased(Observation)
bp606 = aliased(Bandpass)
bp814 = aliased(Bandpass)
q = session.query(CatalogStar.cfrac, mag606obs.mag, mag814obs.mag)\
.join(mag606obs, CatalogStar.observations)\
.join(mag814obs, CatalogStar.observations)\
.join(Catalog)\
.filter(Catalog.name == fieldname)\
.join(bp606, mag606obs.bandpass)\
.filter(bp606.name == "f606w")\
.join(bp814, mag814obs.bandpass)\
.filter(bp814.name == "f814w")
dt = [('cfrac', np.float), ('m606', np.float), ('m814', np.float)]
data = np.array(q.all(), dtype=np.dtype(dt))
log.info("Field {0} has {1:d} stars".format(fieldname, data.shape[0]))
session.close()
return data
def compute_area(fieldname):
"""Get the unmasked area for this field from the MSK image."""
pass
def compute_sb(cfrac, mag, A):
"""Compute a surface brightness for a single bandpass."""
pass
if __name__ == '__main__':
main()
|
Load HST photometry from Starplex catalog#!/usr/bin/env python
# encoding: utf-8
"""
Module for directly estimating SB by adding up the light of stars in the Brown
catalog and transforming that total light into an SDSS magnitude.
"""
from astropy import log
import numpy as np
from sqlalchemy.orm import aliased
from starplex.database import connect_to_server, Session
from starplex.database import Catalog, Bandpass, CatalogStar, Observation
def main():
log.setLevel("INFO")
fieldname = "halo35b"
data = load_photometry(fieldname)
A = compute_area(fieldname)
sb606 = compute_sb(data['cfrac'], data['m606'], A)
sb814 = compute_sb(data['cfrac'], data['m814'], A)
log.info("mu_606: {:.6f}".format(sb606))
log.info("mu_814: {:.6f}".format(sb814))
def load_photometry(fieldname):
"""Get photometry from starplex."""
connect_to_server('marvin', echo=True)
session = Session()
mag606obs = aliased(Observation)
mag814obs = aliased(Observation)
bp606 = aliased(Bandpass)
bp814 = aliased(Bandpass)
q = session.query(CatalogStar.cfrac, mag606obs.mag, mag814obs.mag)\
.join(mag606obs, CatalogStar.observations)\
.join(mag814obs, CatalogStar.observations)\
.join(Catalog)\
.filter(Catalog.name == fieldname)\
.join(bp606, mag606obs.bandpass)\
.filter(bp606.name == "f606w")\
.join(bp814, mag814obs.bandpass)\
.filter(bp814.name == "f814w")
dt = [('cfrac', np.float), ('m606', np.float), ('m814', np.float)]
data = np.array(q.all(), dtype=np.dtype(dt))
log.info("Field {0} has {1:d} stars".format(fieldname, data.shape[0]))
session.close()
return data
def compute_area(fieldname):
"""Get the unmasked area for this field from the MSK image."""
pass
def compute_sb(cfrac, mag, A):
"""Compute a surface brightness for a single bandpass."""
pass
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Load HST photometry from Starplex catalog<commit_after>#!/usr/bin/env python
# encoding: utf-8
"""
Module for directly estimating SB by adding up the light of stars in the Brown
catalog and transforming that total light into an SDSS magnitude.
"""
from astropy import log
import numpy as np
from sqlalchemy.orm import aliased
from starplex.database import connect_to_server, Session
from starplex.database import Catalog, Bandpass, CatalogStar, Observation
def main():
log.setLevel("INFO")
fieldname = "halo35b"
data = load_photometry(fieldname)
A = compute_area(fieldname)
sb606 = compute_sb(data['cfrac'], data['m606'], A)
sb814 = compute_sb(data['cfrac'], data['m814'], A)
log.info("mu_606: {:.6f}".format(sb606))
log.info("mu_814: {:.6f}".format(sb814))
def load_photometry(fieldname):
"""Get photometry from starplex."""
connect_to_server('marvin', echo=True)
session = Session()
mag606obs = aliased(Observation)
mag814obs = aliased(Observation)
bp606 = aliased(Bandpass)
bp814 = aliased(Bandpass)
q = session.query(CatalogStar.cfrac, mag606obs.mag, mag814obs.mag)\
.join(mag606obs, CatalogStar.observations)\
.join(mag814obs, CatalogStar.observations)\
.join(Catalog)\
.filter(Catalog.name == fieldname)\
.join(bp606, mag606obs.bandpass)\
.filter(bp606.name == "f606w")\
.join(bp814, mag814obs.bandpass)\
.filter(bp814.name == "f814w")
dt = [('cfrac', np.float), ('m606', np.float), ('m814', np.float)]
data = np.array(q.all(), dtype=np.dtype(dt))
log.info("Field {0} has {1:d} stars".format(fieldname, data.shape[0]))
session.close()
return data
def compute_area(fieldname):
"""Get the unmasked area for this field from the MSK image."""
pass
def compute_sb(cfrac, mag, A):
"""Compute a surface brightness for a single bandpass."""
pass
if __name__ == '__main__':
main()
|
|
06a8d1b387709a630b6b3d7b946d3acec81cc5fe
|
test/copies/gyptest-attribs.py
|
test/copies/gyptest-attribs.py
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
test = TestGyp.TestGyp()
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
# Doesn't pass with the android generator, see gyp bug 379.
test = TestGyp.TestGyp(formats=['!android'])
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
|
Disable new test from r1779 for the android generator.
|
Disable new test from r1779 for the android generator.
BUG=gyp:379
TBR=torne@chromium.org
Review URL: https://codereview.chromium.org/68333002
git-svn-id: e7e1075985beda50ea81ac4472467b4f6e91fc78@1782 78cadc50-ecff-11dd-a971-7dbc132099af
|
Python
|
bsd-3-clause
|
turbulenz/gyp,saghul/gyn,omasanori/gyp,chromium/gyp,luvit/gyp,pandaxcl/gyp,bnoordhuis/gyp,bulldy80/gyp_unofficial,alexcrichton/gyp,springmeyer/gyp,mapbox/gyp,LazyCodingCat/gyp,LazyCodingCat/gyp,Danath/gyp,Phuehvk/gyp,chromium/gyp,bnq4ever/gypgoogle,android-ia/platform_external_chromium_org_tools_gyp,adblockplus/gyp,clar/gyp,svn2github/gyp,pandaxcl/gyp,ryfx/gyp,geekboxzone/lollipop_external_chromium_org_tools_gyp,amoikevin/gyp,springmeyer/gyp,mistydemeo/gyp,yjhjstz/gyp,Phuehvk/gyp,sanyaade-teachings/gyp,dougbeal/gyp,luvit/gyp,yjhjstz/gyp,Danath/gyp,ryfx/gyp,xin3liang/platform_external_chromium_org_tools_gyp,sdklite/gyp,okumura/gyp,AWhetter/gyp,sdklite/gyp,enkripsi/gyp,openpeer/webrtc-gyp,cysp/gyp,pandaxcl/gyp,cchamberlain/gyp,svn2github/kgyp,AWhetter/gyp,pyokagan/gyp,svn2github/gyp,mgamer/gyp,azunite/gyp,dougbeal/gyp,AWhetter/gyp,ttyangf/pdfium_gyp,msc-/gyp,tarc/gyp,clar/gyp,chromium/gyp,okumura/gyp,saghul/gyn,duanhjlt/gyp,enkripsi/gyp,openpeer/webrtc-gyp,enkripsi/gyp,bnq4ever/gypgoogle,saghul/gyn,omasanori/gyp,svn2github/kgyp,bpsinc-native/src_tools_gyp,Chilledheart/gyp,luvit/gyp,sanyaade-teachings/gyp,carlTLR/gyp,geekboxzone/lollipop_external_chromium_org_tools_gyp,ryfx/gyp,AWhetter/gyp,Omegaphora/external_chromium_org_tools_gyp,yjhjstz/gyp,sdklite/gyp,LazyCodingCat/gyp,tarc/gyp,mgamer/gyp,ttyangf/gyp,bnq4ever/gypgoogle,yinquan529/platform-external-chromium_org-tools-gyp,clar/gyp,Jack-Q/GYP-copy,Chilledheart/gyp,mistydemeo/gyp,mistydemeo/gyp,ttyangf/pdfium_gyp,adblockplus/gyp,pyokagan/gyp,dougbeal/gyp,bnoordhuis/gyp,svn2github/kgyp,AOSPU/external_chromium_org_tools_gyp,yinquan529/platform-external-chromium_org-tools-gyp,azunite/gyp_20150930,sport-monkey/GYP,erikge/watch_gyp,tarc/gyp,Chilledheart/gyp,omasanori/gyp,alexcrichton/gyp,Omegaphora/external_chromium_org_tools_gyp,openpeer/webrtc-gyp,sport-monkey/GYP,duanhjlt/gyp,bnoordhuis/gyp,pyokagan/gyp,azunite/gyp,cysp/gyp,duanhjlt/gyp,mapbox/gyp,enkripsi/gyp,okumura/gyp,cchamberlain/gyp,ttyangf/gyp,sanyaade-teachings/gyp,carlTLR/gyp,mgamer/gyp,trafi/gyp,ttyangf/pdfium_gyp,omasanori/gyp,Phuehvk/gyp,msc-/gyp,turbulenz/gyp,xin3liang/platform_external_chromium_org_tools_gyp,android-ia/platform_external_chromium_org_tools_gyp,sanyaade-teachings/gyp,Omegaphora/external_chromium_org_tools_gyp,springmeyer/gyp,mgamer/gyp,xin3liang/platform_external_chromium_org_tools_gyp,Chilledheart/gyp,tarc/gyp,sport-monkey/GYP,pandaxcl/gyp,cysp/gyp,yinquan529/platform-external-chromium_org-tools-gyp,clar/gyp,carlTLR/gyp,Phuehvk/gyp,Phuehvk/gyp,AOSPU/external_chromium_org_tools_gyp,ryfx/gyp,duanhjlt/gyp,okumura/gyp,yjhjstz/gyp,svn2github/gyp,msc-/gyp,LazyCodingCat/gyp,mapbox/gyp,turbulenz/gyp,ttyangf/gyp,pyokagan/gyp,svn2github/kgyp,azunite/gyp,sanyaade-teachings/gyp,alexcrichton/gyp,adblockplus/gyp,carlTLR/gyp,azunite/gyp,amoikevin/gyp,openpeer/webrtc-gyp,geekboxzone/lollipop_external_chromium_org_tools_gyp,sdklite/gyp,cysp/gyp,springmeyer/gyp,mgamer/gyp,bulldy80/gyp_unofficial,svn2github/gyp,yinquan529/platform-external-chromium_org-tools-gyp,saghul/gyn,ttyangf/pdfium_gyp,bnoordhuis/gyp,sport-monkey/GYP,turbulenz/gyp,dougbeal/gyp,adblockplus/gyp,chromium/gyp,mapbox/gyp,svn2github/kgyp,Jack-Q/GYP-copy,cysp/gyp,xin3liang/platform_external_chromium_org_tools_gyp,Danath/gyp,dougbeal/gyp,bulldy80/gyp_unofficial,azunite/gyp,enkripsi/gyp,trafi/gyp,saghul/gyn,Jack-Q/GYP-copy,turbulenz/gyp,sport-monkey/GYP,carlTLR/gyp,chromium/gyp,springmeyer/gyp,trafi/gyp,android-ia/platform_external_chromium_org_tools_gyp,amoikevin/gyp,erikge/watch_gyp,tarc/gyp,azunite/gyp_20150930,pandaxcl/gyp,cchamberlain/gyp,openpeer/webrtc-gyp,bnq4ever/gypgoogle,Omegaphora/external_chromium_org_tools_gyp,mistydemeo/gyp,bpsinc-native/src_tools_gyp,bulldy80/gyp_unofficial,amoikevin/gyp,ttyangf/pdfium_gyp,azunite/gyp_20150930,pyokagan/gyp,erikge/watch_gyp,bnoordhuis/gyp,sdklite/gyp,AWhetter/gyp,trafi/gyp,duanhjlt/gyp,adblockplus/gyp,alexcrichton/gyp,geekboxzone/lollipop_external_chromium_org_tools_gyp,Chilledheart/gyp,luvit/gyp,bpsinc-native/src_tools_gyp,AOSPU/external_chromium_org_tools_gyp,trafi/gyp,ttyangf/gyp,amoikevin/gyp,android-ia/platform_external_chromium_org_tools_gyp,cchamberlain/gyp,yjhjstz/gyp,clar/gyp,msc-/gyp,bulldy80/gyp_unofficial,erikge/watch_gyp,ryfx/gyp,mapbox/gyp,Danath/gyp,Danath/gyp,azunite/gyp_20150930,bnq4ever/gypgoogle,bpsinc-native/src_tools_gyp
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
test = TestGyp.TestGyp()
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
Disable new test from r1779 for the android generator.
BUG=gyp:379
TBR=torne@chromium.org
Review URL: https://codereview.chromium.org/68333002
git-svn-id: e7e1075985beda50ea81ac4472467b4f6e91fc78@1782 78cadc50-ecff-11dd-a971-7dbc132099af
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
# Doesn't pass with the android generator, see gyp bug 379.
test = TestGyp.TestGyp(formats=['!android'])
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
|
<commit_before>#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
test = TestGyp.TestGyp()
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
<commit_msg>Disable new test from r1779 for the android generator.
BUG=gyp:379
TBR=torne@chromium.org
Review URL: https://codereview.chromium.org/68333002
git-svn-id: e7e1075985beda50ea81ac4472467b4f6e91fc78@1782 78cadc50-ecff-11dd-a971-7dbc132099af<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
# Doesn't pass with the android generator, see gyp bug 379.
test = TestGyp.TestGyp(formats=['!android'])
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
test = TestGyp.TestGyp()
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
Disable new test from r1779 for the android generator.
BUG=gyp:379
TBR=torne@chromium.org
Review URL: https://codereview.chromium.org/68333002
git-svn-id: e7e1075985beda50ea81ac4472467b4f6e91fc78@1782 78cadc50-ecff-11dd-a971-7dbc132099af#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
# Doesn't pass with the android generator, see gyp bug 379.
test = TestGyp.TestGyp(formats=['!android'])
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
|
<commit_before>#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
test = TestGyp.TestGyp()
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
<commit_msg>Disable new test from r1779 for the android generator.
BUG=gyp:379
TBR=torne@chromium.org
Review URL: https://codereview.chromium.org/68333002
git-svn-id: e7e1075985beda50ea81ac4472467b4f6e91fc78@1782 78cadc50-ecff-11dd-a971-7dbc132099af<commit_after>#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that copying files preserves file attributes.
"""
import TestGyp
import os
import stat
import sys
def check_attribs(path, expected_exec_bit):
out_path = test.built_file_path(path, chdir='src')
in_stat = os.stat(os.path.join('src', path))
out_stat = os.stat(out_path)
if out_stat.st_mode & stat.S_IXUSR != expected_exec_bit:
test.fail_test()
# Doesn't pass with the android generator, see gyp bug 379.
test = TestGyp.TestGyp(formats=['!android'])
test.run_gyp('copies-attribs.gyp', chdir='src')
test.build('copies-attribs.gyp', chdir='src')
if sys.platform != 'win32':
out_path = test.built_file_path('executable-file.sh', chdir='src')
test.must_contain(out_path,
'#!/bin/bash\n'
'\n'
'echo echo echo echo cho ho o o\n')
check_attribs('executable-file.sh', expected_exec_bit=stat.S_IXUSR)
test.pass_test()
|
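A note on the mask logic in the record above: stat.S_IXUSR isolates the user-execute bit, so st_mode & stat.S_IXUSR is non-zero exactly when the file is user-executable. A minimal, self-contained sketch of the same check (the temporary file is illustrative, not part of the gyp test):

import os
import stat
import tempfile

# Create a throwaway file and switch its user-execute bit on.
fd, path = tempfile.mkstemp()
os.close(fd)
os.chmod(path, os.stat(path).st_mode | stat.S_IXUSR)

# The same mask comparison used by check_attribs() above.
assert os.stat(path).st_mode & stat.S_IXUSR == stat.S_IXUSR
os.remove(path)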
f6b8611b18348b2015c8b17f2b951214395fa612
|
migrations/versions/0190_another_letter_org.py
|
migrations/versions/0190_another_letter_org.py
|
"""empty message
Revision ID: 0190_another_letter_org
Revises: 0189_ft_billing_data_type
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0190_another_letter_org'
down_revision = '0189_ft_billing_data_type'
from alembic import op
NEW_ORGANISATIONS = [
('506', 'Tyne and Wear Fire and Rescue Service'),
('507', 'Thames Valley Police'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logos for TWFRS and Thames Valley Police
|
Add letter logos for TWFRS and Thames Valley Police
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add letter logos for TWFRS and Thames Valley Police
|
"""empty message
Revision ID: 0190_another_letter_org
Revises: 0189_ft_billing_data_type
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0190_another_letter_org'
down_revision = '0189_ft_billing_data_type'
from alembic import op
NEW_ORGANISATIONS = [
('506', 'Tyne and Wear Fire and Rescue Service'),
('507', 'Thames Valley Police'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logos for TWFRS and Thames Valley Police<commit_after>
|
"""empty message
Revision ID: 0190_another_letter_org
Revises: 0189_ft_billing_data_type
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0190_another_letter_org'
down_revision = '0189_ft_billing_data_type'
from alembic import op
NEW_ORGANISATIONS = [
('506', 'Tyne and Wear Fire and Rescue Service'),
('507', 'Thames Valley Police'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter logos for TWFRS and Thames Valley Police"""empty message
Revision ID: 0190_another_letter_org
Revises: 0189_ft_billing_data_type
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0190_another_letter_org'
down_revision = '0189_ft_billing_data_type'
from alembic import op
NEW_ORGANISATIONS = [
('506', 'Tyne and Wear Fire and Rescue Service'),
('507', 'Thames Valley Police'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter logos for TWFRS and Thames Valley Police<commit_after>"""empty message
Revision ID: 0190_another_letter_org
Revises: 0189_ft_billing_data_type
Create Date: 2017-06-29 12:44:16.815039
"""
# revision identifiers, used by Alembic.
revision = '0190_another_letter_org'
down_revision = '0189_ft_billing_data_type'
from alembic import op
NEW_ORGANISATIONS = [
('506', 'Tyne and Wear Fire and Rescue Service'),
('507', 'Thames Valley Police'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
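The migration above interpolates values into SQL with str.format(), which is safe here because both values are trusted constants. A sketch of the same upgrade with bound parameters instead — an alternative pattern, not the project's actual code:

from alembic import op
import sqlalchemy as sa

def upgrade():
    for numeric_id, name in NEW_ORGANISATIONS:
        # Bound parameters: the driver handles quoting and escaping.
        stmt = sa.text("INSERT INTO dvla_organisation VALUES (:id, :name)")
        op.execute(stmt.bindparams(id=numeric_id, name=name))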
a769d91d20de4cefb49a32769f648c658e31bcbb
|
dedupe/simplify.py
|
dedupe/simplify.py
|
from collections import defaultdict
from glob import glob
import networkx as nx
import json
import os
from pprint import pprint
import re
import subprocess
import sys
import time
import uuid
def main():
universe = nx.read_graphml(sys.argv[1])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
for b in beings:
ns = nx.neighbors(universe,b[0])
rep = ns[0]
for n in ns[1:]:
for nn in nx.neighbors(universe,n):
universe.add_edge(rep,nn) #doesn't preserve directions or properties, yolo
universe.remove_node(n)
universe.remove_node(b[0])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
nx.write_graphml(universe,"simplified-{}.graphml".format(int(time.time())))
if __name__ == "__main__":
main()
|
Convert entity graphs to visually simpler graphs
|
Convert entity graphs to visually simpler graphs
|
Python
|
cc0-1.0
|
influence-usa/lobbying_federal_domestic
|
Convert entity graphs to visually simpler graphs
|
from collections import defaultdict
from glob import glob
import networkx as nx
import json
import os
from pprint import pprint
import re
import subprocess
import sys
import time
import uuid
def main():
universe = nx.read_graphml(sys.argv[1])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
for b in beings:
ns = nx.neighbors(universe,b[0])
rep = ns[0]
for n in ns[1:]:
for nn in nx.neighbors(universe,n):
universe.add_edge(rep,nn) #doesn't preserve directions or properties, yolo
universe.remove_node(n)
universe.remove_node(b[0])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
nx.write_graphml(universe,"simplified-{}.graphml".format(int(time.time())))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Convert entity graphs to visually simpler graphs<commit_after>
|
from collections import defaultdict
from glob import glob
import networkx as nx
import json
import os
from pprint import pprint
import re
import subprocess
import sys
import time
import uuid
def main():
universe = nx.read_graphml(sys.argv[1])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
for b in beings:
ns = nx.neighbors(universe,b[0])
rep = ns[0]
for n in ns[1:]:
for nn in nx.neighbors(universe,n):
universe.add_edge(rep,nn) #doesn't preserve directions or properties, yolo
universe.remove_node(n)
universe.remove_node(b[0])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
nx.write_graphml(universe,"simplified-{}.graphml".format(int(time.time())))
if __name__ == "__main__":
main()
|
Convert entity graphs to visually simpler graphsfrom collections import defaultdict
from glob import glob
import networkx as nx
import json
import os
from pprint import pprint
import re
import subprocess
import sys
import time
import uuid
def main():
universe = nx.read_graphml(sys.argv[1])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
for b in beings:
ns = nx.neighbors(universe,b[0])
rep = ns[0]
for n in ns[1:]:
for nn in nx.neighbors(universe,n):
universe.add_edge(rep,nn) #doesn't preserve directions or properties, yolo
universe.remove_node(n)
universe.remove_node(b[0])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
nx.write_graphml(universe,"simplified-{}.graphml".format(int(time.time())))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Convert entity graphs to visually simpler graphs<commit_after>from collections import defaultdict
from glob import glob
import networkx as nx
import json
import os
from pprint import pprint
import re
import subprocess
import sys
import time
import uuid
def main():
universe = nx.read_graphml(sys.argv[1])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
for b in beings:
ns = nx.neighbors(universe,b[0])
rep = ns[0]
for n in ns[1:]:
for nn in nx.neighbors(universe,n):
universe.add_edge(rep,nn) #doesn't preserve directions or properties, yolo
universe.remove_node(n)
universe.remove_node(b[0])
beings = filter(lambda x: x[1]["type"] == "Being", universe.nodes(data=True))
clients = filter(lambda x: x[1]["type"] == "client", universe.nodes(data=True))
firm = filter(lambda x: x[1]["type"] == "firm", universe.nodes(data=True))
print len(beings)
print len(clients)
print len(firm)
nx.write_graphml(universe,"simplified-{}.graphml".format(int(time.time())))
if __name__ == "__main__":
main()
|
|
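The merge loop above re-adds edges by hand and, as its own comment admits, drops edge direction and attributes. Recent networkx releases ship nx.contracted_nodes, which folds one node into another while keeping attribute data around; a toy sketch with made-up node names:

import networkx as nx

g = nx.Graph()
g.add_edge("b", "rep")                  # "Being" hub linked to two records
g.add_edge("b", "dup")
g.add_edge("dup", "client-1", weight=3)

# Fold "dup" into "rep"; where there is no conflicting parallel edge,
# edge data such as weight=3 travels with the moved edge.
merged = nx.contracted_nodes(g, "rep", "dup", self_loops=False)
merged.remove_node("b")
print(merged.edges(data=True))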
c08675733d7471895103a9cb4dcdecd532ddbe17
|
hashsum/run_test.py
|
hashsum/run_test.py
|
#!/usr/bin/env python
import os
import sys
import shutil
import unittest
import hashsum
print('hashsum: v%s' % hashsum.VERSION)
print('Python: %s' % sys.version)
shutil.copytree(os.path.join(os.environ['SRC_DIR'], 'tests'), 'tests')
from tests.test_hashsum import *
try:
unittest.main(verbosity=2)
finally:
shutil.rmtree('tests')
|
Enable automatic testing for hash sum
|
Enable automatic testing for hash sum
|
Python
|
mit
|
avalentino/conda-recipes
|
Enable automatic testing for hash sum
|
#!/usr/bin/env python
import os
import sys
import shutil
import unittest
import hashsum
print('hashsum: v%s' % hashsum.VERSION)
print('Python: %s' % sys.version)
shutil.copytree(os.path.join(os.environ['SRC_DIR'], 'tests'), 'tests')
from tests.test_hashsum import *
try:
unittest.main(verbosity=2)
finally:
shutil.rmtree('tests')
|
<commit_before><commit_msg>Enable automatic testing for hash sum<commit_after>
|
#!/usr/bin/env python
import os
import sys
import shutil
import unittest
import hashsum
print('hashsum: v%s' % hashsum.VERSION)
print('Python: %s' % sys.version)
shutil.copytree(os.path.join(os.environ['SRC_DIR'], 'tests'), 'tests')
from tests.test_hashsum import *
try:
unittest.main(verbosity=2)
finally:
shutil.rmtree('tests')
|
Enable automatic testing for hash sum#!/usr/bin/env python
import os
import sys
import shutil
import unittest
import hashsum
print('hashsum: v%s' % hashsum.VERSION)
print('Python: %s' % sys.version)
shutil.copytree(os.path.join(os.environ['SRC_DIR'], 'tests'), 'tests')
from tests.test_hashsum import *
try:
unittest.main(verbosity=2)
finally:
shutil.rmtree('tests')
|
<commit_before><commit_msg>Enable automatic testing for hash sum<commit_after>#!/usr/bin/env python
import os
import sys
import shutil
import unittest
import hashsum
print('hashsum: v%s' % hashsum.VERSION)
print('Python: %s' % sys.version)
shutil.copytree(os.path.join(os.environ['SRC_DIR'], 'tests'), 'tests')
from tests.test_hashsum import *
try:
unittest.main(verbosity=2)
finally:
shutil.rmtree('tests')
|
|
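The star-import above pulls every test case into this module's namespace so that unittest.main() can find them. A discovery-based alternative that avoids the wildcard import — a sketch, assuming the copied tree is importable as a package:

import unittest

suite = unittest.TestLoader().discover("tests")
result = unittest.TextTestRunner(verbosity=2).run(suite)
raise SystemExit(0 if result.wasSuccessful() else 1)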
87eb29de6ae7a7e15e1b5d2112c504835350450f
|
Sensor_prototype/PIR_skeleton.py
|
Sensor_prototype/PIR_skeleton.py
|
# Detect motion from PIR module
# SKELETON code modified from http://www.raspberrypi-spy.co.uk
# Added computation of voltage time from PIR and checks
# by AC Jan 06 2014
# Import required Python libraries
import RPi.GPIO as GPIO, time
# Use BCM GPIO references instead of physical pin numbers
GPIO.setmode(GPIO.BCM)
# Define GPIO to use on Pi
GPIO_PIR = 7
# Set pin as input, check connection to sensor
GPIO.setup(GPIO_PIR,GPIO.IN)
Current_State = 0
Previous_State = 0
# Check unique ID and datastream
#
#
# Define function to measure charge time
# allows for PROPORTIONAL motion level not just high/low
# (need breadboard w/resistor and capacitor in series)
def RCtime (PiPin):
measurement = 0
# Discharge capacitor
GPIO.setup(PiPin, GPIO.OUT)
GPIO.output(PiPin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(PiPin, GPIO.IN)
# Count loops until voltage across capacitor reads high on GPIO
while (GPIO.input(PiPin) == GPIO.LOW):
measurement += 1
return measurement
try:
print "Waiting for PIR to settle ..."
# This will run unless PIR output is zero, ensuring that it settles
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
  # Loop until user quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
# PIR is triggered
print " Motion detected!"
      # Voltage is recorded as high or low
      # Record time taken to reach high voltage for proportional motion level
      charge_time = RCtime(GPIO_PIR)  # renamed: assigning to "time" would shadow the time module
# Update previous state
Previous_State=1
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Output voltage and time to formatting function / database
#
#
# Wait for 10 milliseconds; may change with testing
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
|
Add skeleton code for PIR motion sensor
|
Add skeleton code for PIR motion sensor
|
Python
|
apache-2.0
|
ThatGeoGuy/ENGO500,Bjtrenth/ENGO500_Old
|
Add skeleton code for PIR motion sensor
|
# Detect motion from PIR module
# SKELETON code modified from http://www.raspberrypi-spy.co.uk
# Added computation of voltage time from PIR and checks
# by AC Jan 06 2014
# Import required Python libraries
import RPi.GPIO as GPIO, time
# Use BCM GPIO references instead of physical pin numbers
GPIO.setmode(GPIO.BCM)
# Define GPIO to use on Pi
GPIO_PIR = 7
# Set pin as input, check connection to sensor
GPIO.setup(GPIO_PIR,GPIO.IN)
Current_State = 0
Previous_State = 0
# Check unique ID and datastream
#
#
# Define function to measure charge time
# allows for PROPORTIONAL motion level not just high/low
# (need breadboard w/resistor and capacitor in series)
def RCtime (PiPin):
measurement = 0
# Discharge capacitor
GPIO.setup(PiPin, GPIO.OUT)
GPIO.output(PiPin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(PiPin, GPIO.IN)
# Count loops until voltage across capacitor reads high on GPIO
while (GPIO.input(PiPin) == GPIO.LOW):
measurement += 1
return measurement
try:
print "Waiting for PIR to settle ..."
# This will run unless PIR output is zero, ensuring that it settles
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
  # Loop until user quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
# PIR is triggered
print " Motion detected!"
      # Voltage is recorded as high or low
      # Record time taken to reach high voltage for proportional motion level
      charge_time = RCtime(GPIO_PIR)  # renamed: assigning to "time" would shadow the time module
# Update previous state
Previous_State=1
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Output voltage and time to formatting function / database
#
#
# Wait for 10 milliseconds; may change with testing
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
|
<commit_before><commit_msg>Add skeleton code for PIR motion sensor<commit_after>
|
# Detect motion from PIR module
# SKELETON code modified from http://www.raspberrypi-spy.co.uk
# Added computation of voltage time from PIR and checks
# by AC Jan 06 2014
# Import required Python libraries
import RPi.GPIO as GPIO, time
# Use BCM GPIO references instead of physical pin numbers
GPIO.setmode(GPIO.BCM)
# Define GPIO to use on Pi
GPIO_PIR = 7
# Set pin as input, check connection to sensor
GPIO.setup(GPIO_PIR,GPIO.IN)
Current_State = 0
Previous_State = 0
# Check unique ID and datastream
#
#
# Define function to measure charge time
# allows for PROPORTIONAL motion level not just high/low
# (need breadboard w/resistor and capacitor in series)
def RCtime (PiPin):
measurement = 0
# Discharge capacitor
GPIO.setup(PiPin, GPIO.OUT)
GPIO.output(PiPin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(PiPin, GPIO.IN)
# Count loops until voltage across capacitor reads high on GPIO
while (GPIO.input(PiPin) == GPIO.LOW):
measurement += 1
return measurement
try:
print "Waiting for PIR to settle ..."
# This will run unless PIR output is zero, ensuring that it settles
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
  # Loop until user quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
# PIR is triggered
print " Motion detected!"
      # Voltage is recorded as high or low
      # Record time taken to reach high voltage for proportional motion level
      charge_time = RCtime(GPIO_PIR)  # renamed: assigning to "time" would shadow the time module
# Update previous state
Previous_State=1
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Output voltage and time to formatting function / database
#
#
# Wait for 10 milliseconds; may change with testing
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
|
Add skeleton code for PIR motion sensor# Detect motion from PIR module
# SKELETON code modified from http://www.raspberrypi-spy.co.uk
# Added computation of voltage time from PIR and checks
# by AC Jan 06 2014
# Import required Python libraries
import RPi.GPIO as GPIO, time
# Use BCM GPIO references instead of physical pin numbers
GPIO.setmode(GPIO.BCM)
# Define GPIO to use on Pi
GPIO_PIR = 7
# Set pin as input, check connection to sensor
GPIO.setup(GPIO_PIR,GPIO.IN)
Current_State = 0
Previous_State = 0
# Check unique ID and datastream
#
#
# Define function to measure charge time
# allows for PROPORTIONAL motion level not just high/low
# (need breadboard w/resistor and capacitor in series)
def RCtime (PiPin):
measurement = 0
# Discharge capacitor
GPIO.setup(PiPin, GPIO.OUT)
GPIO.output(PiPin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(PiPin, GPIO.IN)
# Count loops until voltage across capacitor reads high on GPIO
while (GPIO.input(PiPin) == GPIO.LOW):
measurement += 1
return measurement
try:
print "Waiting for PIR to settle ..."
# This will run unless PIR output is zero, ensuring that it settles
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
  # Loop until user quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
# PIR is triggered
print " Motion detected!"
      # Voltage is recorded as high or low
      # Record time taken to reach high voltage for proportional motion level
      charge_time = RCtime(GPIO_PIR)  # renamed: assigning to "time" would shadow the time module
# Update previous state
Previous_State=1
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Output voltage and time to formatting function / database
#
#
# Wait for 10 milliseconds; may change with testing
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
|
<commit_before><commit_msg>Add skeleton code for PIR motion sensor<commit_after># Detect motion from PIR module
# SKELETON code modified from http://www.raspberrypi-spy.co.uk
# Added computation of voltage time from PIR and checks
# by AC Jan 06 2014
# Import required Python libraries
import RPi.GPIO as GPIO, time
# Use BCM GPIO references instead of physical pin numbers
GPIO.setmode(GPIO.BCM)
# Define GPIO to use on Pi
GPIO_PIR = 7
# Set pin as input, check connection to sensor
GPIO.setup(GPIO_PIR,GPIO.IN)
Current_State = 0
Previous_State = 0
# Check unique ID and datastream
#
#
# Define function to measure charge time
# allows for PROPORTIONAL motion level not just high/low
# (need breadboard w/resistor and capacitor in series)
def RCtime (PiPin):
measurement = 0
# Discharge capacitor
GPIO.setup(PiPin, GPIO.OUT)
GPIO.output(PiPin, GPIO.LOW)
time.sleep(0.1)
GPIO.setup(PiPin, GPIO.IN)
# Count loops until voltage across capacitor reads high on GPIO
while (GPIO.input(PiPin) == GPIO.LOW):
measurement += 1
return measurement
try:
print "Waiting for PIR to settle ..."
# This will run unless PIR output is zero, ensuring that it settles
while GPIO.input(GPIO_PIR)==1:
Current_State = 0
print " Ready"
  # Loop until user quits with CTRL-C
while True :
# Read PIR state
Current_State = GPIO.input(GPIO_PIR)
if Current_State==1 and Previous_State==0:
# PIR is triggered
print " Motion detected!"
      # Voltage is recorded as high or low
      # Record time taken to reach high voltage for proportional motion level
      charge_time = RCtime(GPIO_PIR)  # renamed: assigning to "time" would shadow the time module
# Update previous state
Previous_State=1
elif Current_State==0 and Previous_State==1:
# PIR has returned to ready state
print " Ready"
Previous_State=0
# Output voltage and time to formatting function / database
#
#
# Wait for 10 milliseconds; may change with testing
time.sleep(0.01)
except KeyboardInterrupt:
print " Quit"
# Reset GPIO settings
GPIO.cleanup()
|
|
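One hazard in the RCtime loop above: if the capacitor never charges (missing part, broken wiring), the while loop spins forever. A hedged variant with a bail-out counter — the threshold is a guess to be tuned on real hardware:

import RPi.GPIO as GPIO, time

def RCtime_safe(PiPin, max_loops=100000):
    measurement = 0
    # Discharge capacitor, exactly as in RCtime above.
    GPIO.setup(PiPin, GPIO.OUT)
    GPIO.output(PiPin, GPIO.LOW)
    time.sleep(0.1)
    GPIO.setup(PiPin, GPIO.IN)
    # Count loops, but give up if the pin never reads high.
    while GPIO.input(PiPin) == GPIO.LOW:
        measurement += 1
        if measurement >= max_loops:
            return -1  # sentinel: charge never completed
    return measurement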
ff4eea250d030f09c7c4deb70328bf7de1f0231c
|
tests/lib/test_bio.py
|
tests/lib/test_bio.py
|
"""Testing functions in lib/bio."""
from hypothesis import given
import hypothesis.strategies as st
import lib.bio as bio
DNA = 'ACGTUWSMKRYBDHVNXacgtuwsmkrybdhvnx'
REVERSIBLE = 'ACGTWSMKRYBDHVNXacgtwsmkrybdhvnx' # 'U's removed
PROTEIN = 'EFILPQefilpq'
HAS_PROTEIN = '[{}]*[{}]+[{}]*'.format(DNA, PROTEIN, DNA)
@given(seq=st.text(alphabet=REVERSIBLE))
def test_reverse_complement_twice(seq):
actual = bio.reverse_complement(bio.reverse_complement(seq))
assert seq == actual
@given(seq=st.text(alphabet=DNA))
def test_is_protein_no(seq):
assert not bio.is_protein(seq)
@given(seq=st.from_regex(HAS_PROTEIN))
def test_is_protein_yes(seq):
assert bio.is_protein(seq)
|
Add tests for lib/bio module
|
Add tests for lib/bio module
|
Python
|
bsd-3-clause
|
juliema/aTRAM
|
Add tests for lib/bio module
|
"""Testing functions in lib/bio."""
from hypothesis import given
import hypothesis.strategies as st
import lib.bio as bio
DNA = 'ACGTUWSMKRYBDHVNXacgtuwsmkrybdhvnx'
REVERSIBLE = 'ACGTWSMKRYBDHVNXacgtwsmkrybdhvnx' # 'U's removed
PROTEIN = 'EFILPQefilpq'
HAS_PROTEIN = '[{}]*[{}]+[{}]*'.format(DNA, PROTEIN, DNA)
@given(seq=st.text(alphabet=REVERSIBLE))
def test_reverse_complement_twice(seq):
actual = bio.reverse_complement(bio.reverse_complement(seq))
assert seq == actual
@given(seq=st.text(alphabet=DNA))
def test_is_protein_no(seq):
assert not bio.is_protein(seq)
@given(seq=st.from_regex(HAS_PROTEIN))
def test_is_protein_yes(seq):
assert bio.is_protein(seq)
|
<commit_before><commit_msg>Add tests for lib/bio module<commit_after>
|
"""Testing functions in lib/bio."""
from hypothesis import given
import hypothesis.strategies as st
import lib.bio as bio
DNA = 'ACGTUWSMKRYBDHVNXacgtuwsmkrybdhvnx'
REVERSIBLE = 'ACGTWSMKRYBDHVNXacgtwsmkrybdhvnx' # 'U's removed
PROTEIN = 'EFILPQefilpq'
HAS_PROTEIN = '[{}]*[{}]+[{}]*'.format(DNA, PROTEIN, DNA)
@given(seq=st.text(alphabet=REVERSIBLE))
def test_reverse_complement_twice(seq):
actual = bio.reverse_complement(bio.reverse_complement(seq))
assert seq == actual
@given(seq=st.text(alphabet=DNA))
def test_is_protein_no(seq):
assert not bio.is_protein(seq)
@given(seq=st.from_regex(HAS_PROTEIN))
def test_is_protein_yes(seq):
assert bio.is_protein(seq)
|
Add tests for lib/bio module"""Testing functions in lib/bio."""
from hypothesis import given
import hypothesis.strategies as st
import lib.bio as bio
DNA = 'ACGTUWSMKRYBDHVNXacgtuwsmkrybdhvnx'
REVERSIBLE = 'ACGTWSMKRYBDHVNXacgtwsmkrybdhvnx' # 'U's removed
PROTEIN = 'EFILPQefilpq'
HAS_PROTEIN = '[{}]*[{}]+[{}]*'.format(DNA, PROTEIN, DNA)
@given(seq=st.text(alphabet=REVERSIBLE))
def test_reverse_complement_twice(seq):
actual = bio.reverse_complement(bio.reverse_complement(seq))
assert seq == actual
@given(seq=st.text(alphabet=DNA))
def test_is_protein_no(seq):
assert not bio.is_protein(seq)
@given(seq=st.from_regex(HAS_PROTEIN))
def test_is_protein_yes(seq):
assert bio.is_protein(seq)
|
<commit_before><commit_msg>Add tests for lib/bio module<commit_after>"""Testing functions in lib/bio."""
from hypothesis import given
import hypothesis.strategies as st
import lib.bio as bio
DNA = 'ACGTUWSMKRYBDHVNXacgtuwsmkrybdhvnx'
REVERSIBLE = 'ACGTWSMKRYBDHVNXacgtwsmkrybdhvnx' # 'U's removed
PROTEIN = 'EFILPQefilpq'
HAS_PROTEIN = '[{}]*[{}]+[{}]*'.format(DNA, PROTEIN, DNA)
@given(seq=st.text(alphabet=REVERSIBLE))
def test_reverse_complement_twice(seq):
actual = bio.reverse_complement(bio.reverse_complement(seq))
assert seq == actual
@given(seq=st.text(alphabet=DNA))
def test_is_protein_no(seq):
assert not bio.is_protein(seq)
@given(seq=st.from_regex(HAS_PROTEIN))
def test_is_protein_yes(seq):
assert bio.is_protein(seq)
|
|
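Worth knowing about st.from_regex in the test above: without fullmatch=True it may generate strings that merely contain a match, so stray characters can appear around the HAS_PROTEIN pattern. A stricter variant, written as if added to the same test module (fullmatch needs a reasonably recent Hypothesis):

@given(seq=st.from_regex(HAS_PROTEIN, fullmatch=True))
def test_is_protein_yes_strict(seq):
    # Every character now comes from the DNA/PROTEIN alphabets.
    assert bio.is_protein(seq)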
1869dd0a450411b77f651ac528543fbb22e723e4
|
thespian/test/test_badmessage.py
|
thespian/test/test_badmessage.py
|
from thespian.actors import *
from thespian.test import *
import time
from datetime import timedelta
import sys
max_response_delay = timedelta(seconds=1.0)
class BadMessage(object):
def __init__(self, val):
self.val = val
def __str__(self):
return 'Using an invalid member: ' + str(self.this_does_not_exist)
class BadMessage2(object): # ok to str() this one
def __init__(self, val):
self.val = val
def __str__(self):
return 'BadMsg2=' + str(self.val)
class MyActor(Actor):
def __init__(self):
self.count = 0
def receiveMessage(self, msg, sender):
self.count += 1
if isinstance(msg, (BadMessage, BadMessage2)) and (self.count & 1):
raise Exception('Got a BadMessage: ' + str(msg))
if not isinstance(msg, ActorSystemMessage):
self.send(sender, str(msg))
def test01_actorWorks(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, 123, max_response_delay)
assert r == '123'
def test02_alwaysBad(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, BadMessage(135), max_response_delay)
assert isinstance(r, PoisonMessage)
assert isinstance(r.poisonMessage, BadMessage)
assert r.poisonMessage.val == 135
def test03_intermittentlyBad(asys):
mya = asys.createActor(MyActor)
# First one should be OK
r = asys.ask(mya, BadMessage2(987), max_response_delay)
assert r is not None
assert '987' in r
# Second one gets the exception the first time around, but the
# Actor should be re-instated and the message retried, and it
# should work the second time, so the failure is undetectable at
# this level.
r2 = asys.ask(mya, BadMessage2(654), max_response_delay)
assert r2 is not None
assert '654' in r2
|
Add tests for bad messages, and actor message retries.
|
Add tests for bad messages, and actor message retries.
|
Python
|
mit
|
kquick/Thespian,kquick/Thespian
|
Add tests for bad messages, and actor message retries.
|
from thespian.actors import *
from thespian.test import *
import time
from datetime import timedelta
import sys
max_response_delay = timedelta(seconds=1.0)
class BadMessage(object):
def __init__(self, val):
self.val = val
def __str__(self):
return 'Using an invalid member: ' + str(self.this_does_not_exist)
class BadMessage2(object): # ok to str() this one
def __init__(self, val):
self.val = val
def __str__(self):
return 'BadMsg2=' + str(self.val)
class MyActor(Actor):
def __init__(self):
self.count = 0
def receiveMessage(self, msg, sender):
self.count += 1
if isinstance(msg, (BadMessage, BadMessage2)) and (self.count & 1):
raise Exception('Got a BadMessage: ' + str(msg))
if not isinstance(msg, ActorSystemMessage):
self.send(sender, str(msg))
def test01_actorWorks(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, 123, max_response_delay)
assert r == '123'
def test02_alwaysBad(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, BadMessage(135), max_response_delay)
assert isinstance(r, PoisonMessage)
assert isinstance(r.poisonMessage, BadMessage)
assert r.poisonMessage.val == 135
def test03_intermittentlyBad(asys):
mya = asys.createActor(MyActor)
# First one should be OK
r = asys.ask(mya, BadMessage2(987), max_response_delay)
assert r is not None
assert '987' in r
# Second one gets the exception the first time around, but the
# Actor should be re-instated and the message retried, and it
# should work the second time, so the failure is undetectable at
# this level.
r2 = asys.ask(mya, BadMessage2(654), max_response_delay)
assert r2 is not None
assert '654' in r2
|
<commit_before><commit_msg>Add tests for bad messages, and actor message retries.<commit_after>
|
from thespian.actors import *
from thespian.test import *
import time
from datetime import timedelta
import sys
max_response_delay = timedelta(seconds=1.0)
class BadMessage(object):
def __init__(self, val):
self.val = val
def __str__(self):
return 'Using an invalid member: ' + str(self.this_does_not_exist)
class BadMessage2(object): # ok to str() this one
def __init__(self, val):
self.val = val
def __str__(self):
return 'BadMsg2=' + str(self.val)
class MyActor(Actor):
def __init__(self):
self.count = 0
def receiveMessage(self, msg, sender):
self.count += 1
if isinstance(msg, (BadMessage, BadMessage2)) and (self.count & 1):
raise Exception('Got a BadMessage: ' + str(msg))
if not isinstance(msg, ActorSystemMessage):
self.send(sender, str(msg))
def test01_actorWorks(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, 123, max_response_delay)
assert r == '123'
def test02_alwaysBad(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, BadMessage(135), max_response_delay)
assert isinstance(r, PoisonMessage)
assert isinstance(r.poisonMessage, BadMessage)
assert r.poisonMessage.val == 135
def test03_intermittentlyBad(asys):
mya = asys.createActor(MyActor)
# First one should be OK
r = asys.ask(mya, BadMessage2(987), max_response_delay)
assert r is not None
assert '987' in r
# Second one gets the exception the first time around, but the
# Actor should be re-instated and the message retried, and it
# should work the second time, so the failure is undetectable at
# this level.
r2 = asys.ask(mya, BadMessage2(654), max_response_delay)
assert r2 is not None
assert '654' in r2
|
Add tests for bad messages, and actor message retries.from thespian.actors import *
from thespian.test import *
import time
from datetime import timedelta
import sys
max_response_delay = timedelta(seconds=1.0)
class BadMessage(object):
def __init__(self, val):
self.val = val
def __str__(self):
return 'Using an invalid member: ' + str(self.this_does_not_exist)
class BadMessage2(object): # ok to str() this one
def __init__(self, val):
self.val = val
def __str__(self):
return 'BadMsg2=' + str(self.val)
class MyActor(Actor):
def __init__(self):
self.count = 0
def receiveMessage(self, msg, sender):
self.count += 1
if isinstance(msg, (BadMessage, BadMessage2)) and (self.count & 1):
raise Exception('Got a BadMessage: ' + str(msg))
if not isinstance(msg, ActorSystemMessage):
self.send(sender, str(msg))
def test01_actorWorks(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, 123, max_response_delay)
assert r == '123'
def test02_alwaysBad(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, BadMessage(135), max_response_delay)
assert isinstance(r, PoisonMessage)
assert isinstance(r.poisonMessage, BadMessage)
assert r.poisonMessage.val == 135
def test03_intermittentlyBad(asys):
mya = asys.createActor(MyActor)
# First one should be OK
r = asys.ask(mya, BadMessage2(987), max_response_delay)
assert r is not None
assert '987' in r
# Second one gets the exception the first time around, but the
# Actor should be re-instated and the message retried, and it
# should work the second time, so the failure is undetectable at
# this level.
r2 = asys.ask(mya, BadMessage2(654), max_response_delay)
assert r2 is not None
assert '654' in r2
|
<commit_before><commit_msg>Add tests for bad messages, and actor message retries.<commit_after>from thespian.actors import *
from thespian.test import *
import time
from datetime import timedelta
import sys
max_response_delay = timedelta(seconds=1.0)
class BadMessage(object):
def __init__(self, val):
self.val = val
def __str__(self):
return 'Using an invalid member: ' + str(self.this_does_not_exist)
class BadMessage2(object): # ok to str() this one
def __init__(self, val):
self.val = val
def __str__(self):
return 'BadMsg2=' + str(self.val)
class MyActor(Actor):
def __init__(self):
self.count = 0
def receiveMessage(self, msg, sender):
self.count += 1
if isinstance(msg, (BadMessage, BadMessage2)) and (self.count & 1):
raise Exception('Got a BadMessage: ' + str(msg))
if not isinstance(msg, ActorSystemMessage):
self.send(sender, str(msg))
def test01_actorWorks(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, 123, max_response_delay)
assert r == '123'
def test02_alwaysBad(asys):
mya = asys.createActor(MyActor)
r = asys.ask(mya, BadMessage(135), max_response_delay)
assert isinstance(r, PoisonMessage)
assert isinstance(r.poisonMessage, BadMessage)
assert r.poisonMessage.val == 135
def test03_intermittentlyBad(asys):
mya = asys.createActor(MyActor)
# First one should be OK
r = asys.ask(mya, BadMessage2(987), max_response_delay)
assert r is not None
assert '987' in r
# Second one gets the exception the first time around, but the
# Actor should be re-instated and the message retried, and it
# should work the second time, so the failure is undetectable at
# this level.
r2 = asys.ask(mya, BadMessage2(654), max_response_delay)
assert r2 is not None
assert '654' in r2
|
|
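The self.count & 1 trick in MyActor is what makes the third test interesting: odd-numbered deliveries raise, even-numbered ones succeed, so a single retry masks the failure. The same pattern in plain Python, independent of Thespian:

class FlakyHandler(object):
    def __init__(self):
        self.count = 0
    def handle(self, msg):
        self.count += 1
        if self.count & 1:
            raise Exception("transient failure")
        return str(msg)

h = FlakyHandler()
try:
    h.handle(654)            # first delivery fails
except Exception:
    print(h.handle(654))     # the retry succeeds and prints 654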
489ffcda5143a2ef28d4cbcf5418babd963f2b0f
|
tests/test_signals.py
|
tests/test_signals.py
|
from twisted.internet import defer
from twisted.trial import unittest
from scrapy import signals, Request, Spider
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class ItemSpider(Spider):
name = 'itemspider'
def start_requests(self):
for _ in range(10):
yield Request(self.mockserver.url('/status?n=200'),
dont_filter=True)
def parse(self, response):
return {'field': 42}
class AsyncSignalTestCase(unittest.TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.items = []
def tearDown(self):
self.mockserver.__exit__(None, None, None)
async def _on_item_scraped(self, item):
self.items.append(item)
@defer.inlineCallbacks
def test_simple_pipeline(self):
crawler = get_crawler(ItemSpider)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
self.assertEqual(len(self.items), 10)
|
Add a test for an async item_scraped handler.
|
Add a test for an async item_scraped handler.
|
Python
|
bsd-3-clause
|
scrapy/scrapy,eLRuLL/scrapy,scrapy/scrapy,elacuesta/scrapy,elacuesta/scrapy,pawelmhm/scrapy,scrapy/scrapy,eLRuLL/scrapy,elacuesta/scrapy,starrify/scrapy,dangra/scrapy,dangra/scrapy,starrify/scrapy,pablohoffman/scrapy,pawelmhm/scrapy,pablohoffman/scrapy,pablohoffman/scrapy,starrify/scrapy,dangra/scrapy,pawelmhm/scrapy,eLRuLL/scrapy
|
Add a test for an async item_scraped handler.
|
from twisted.internet import defer
from twisted.trial import unittest
from scrapy import signals, Request, Spider
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class ItemSpider(Spider):
name = 'itemspider'
def start_requests(self):
for _ in range(10):
yield Request(self.mockserver.url('/status?n=200'),
dont_filter=True)
def parse(self, response):
return {'field': 42}
class AsyncSignalTestCase(unittest.TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.items = []
def tearDown(self):
self.mockserver.__exit__(None, None, None)
async def _on_item_scraped(self, item):
self.items.append(item)
@defer.inlineCallbacks
def test_simple_pipeline(self):
crawler = get_crawler(ItemSpider)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
self.assertEqual(len(self.items), 10)
|
<commit_before><commit_msg>Add a test for an async item_scraped handler.<commit_after>
|
from twisted.internet import defer
from twisted.trial import unittest
from scrapy import signals, Request, Spider
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class ItemSpider(Spider):
name = 'itemspider'
def start_requests(self):
for _ in range(10):
yield Request(self.mockserver.url('/status?n=200'),
dont_filter=True)
def parse(self, response):
return {'field': 42}
class AsyncSignalTestCase(unittest.TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.items = []
def tearDown(self):
self.mockserver.__exit__(None, None, None)
async def _on_item_scraped(self, item):
self.items.append(item)
@defer.inlineCallbacks
def test_simple_pipeline(self):
crawler = get_crawler(ItemSpider)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
self.assertEqual(len(self.items), 10)
|
Add a test for an async item_scraped handler.from twisted.internet import defer
from twisted.trial import unittest
from scrapy import signals, Request, Spider
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class ItemSpider(Spider):
name = 'itemspider'
def start_requests(self):
for _ in range(10):
yield Request(self.mockserver.url('/status?n=200'),
dont_filter=True)
def parse(self, response):
return {'field': 42}
class AsyncSignalTestCase(unittest.TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.items = []
def tearDown(self):
self.mockserver.__exit__(None, None, None)
async def _on_item_scraped(self, item):
self.items.append(item)
@defer.inlineCallbacks
def test_simple_pipeline(self):
crawler = get_crawler(ItemSpider)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
self.assertEqual(len(self.items), 10)
|
<commit_before><commit_msg>Add a test for an async item_scraped handler.<commit_after>from twisted.internet import defer
from twisted.trial import unittest
from scrapy import signals, Request, Spider
from scrapy.utils.test import get_crawler
from tests.mockserver import MockServer
class ItemSpider(Spider):
name = 'itemspider'
def start_requests(self):
for _ in range(10):
yield Request(self.mockserver.url('/status?n=200'),
dont_filter=True)
def parse(self, response):
return {'field': 42}
class AsyncSignalTestCase(unittest.TestCase):
def setUp(self):
self.mockserver = MockServer()
self.mockserver.__enter__()
self.items = []
def tearDown(self):
self.mockserver.__exit__(None, None, None)
async def _on_item_scraped(self, item):
self.items.append(item)
@defer.inlineCallbacks
def test_simple_pipeline(self):
crawler = get_crawler(ItemSpider)
crawler.signals.connect(self._on_item_scraped, signals.item_scraped)
yield crawler.crawl(mockserver=self.mockserver)
self.assertEqual(len(self.items), 10)
|
|
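Before async def handlers, the equivalent was a plain function that could return a Deferred. A sketch of that older shape for comparison — whether a given signal actually waits on the Deferred depends on how Scrapy fires it, so treat this as an assumption to verify against your Scrapy version:

from twisted.internet import defer

def _on_item_scraped_legacy(self, item):
    self.items.append(item)
    return defer.succeed(None)

# Connecting it is identical:
# crawler.signals.connect(self._on_item_scraped_legacy, signals.item_scraped)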
1302421f613ee12f62f6610fe50ff98610d41000
|
vcirc.py
|
vcirc.py
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.VCIRC
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u, constants as c
from scipy import special
from encmass import encmass
def vcirc(r, norm, rs, alpha, beta, gamma):
"""
    Circular velocity profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
mass = encmass(r, norm, rs, alpha, beta, gamma)
vcirc = (sqrt(c.G*mass/r)).to("km/s")
return vcirc
|
Add routine to calculate circular velocity.
|
Add routine to calculate circular velocity.
|
Python
|
bsd-2-clause
|
lauralwatkins/genhernquist
|
Add routine to calculate circular velocity.
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.VCIRC
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u, constants as c
from scipy import special
from encmass import encmass
def vcirc(r, norm, rs, alpha, beta, gamma):
"""
    Circular velocity profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
mass = encmass(r, norm, rs, alpha, beta, gamma)
vcirc = (sqrt(c.G*mass/r)).to("km/s")
return vcirc
|
<commit_before><commit_msg>Add routine to calculate circular velocity.<commit_after>
|
#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.VCIRC
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u, constants as c
from scipy import special
from encmass import encmass
def vcirc(r, norm, rs, alpha, beta, gamma):
"""
    Circular velocity profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
mass = encmass(r, norm, rs, alpha, beta, gamma)
vcirc = (sqrt(c.G*mass/r)).to("km/s")
return vcirc
|
Add routine to calculate circular velocity.#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.VCIRC
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u, constants as c
from scipy import special
from encmass import encmass
def vcirc(r, norm, rs, alpha, beta, gamma):
"""
    Circular velocity profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
mass = encmass(r, norm, rs, alpha, beta, gamma)
vcirc = (sqrt(c.G*mass/r)).to("km/s")
return vcirc
|
<commit_before><commit_msg>Add routine to calculate circular velocity.<commit_after>#!/usr/bin/env python
# -----------------------------------------------------------------------------
# GENHERNQUIST.VCIRC
# Laura L Watkins [lauralwatkins@gmail.com]
# -----------------------------------------------------------------------------
from numpy import *
from astropy import units as u, constants as c
from scipy import special
from encmass import encmass
def vcirc(r, norm, rs, alpha, beta, gamma):
"""
    Circular velocity profile of a generalised Hernquist model.
INPUTS
r : radial variable (requires unit)
norm : normalisation (requires unit)
rs : scale radius of model (requires unit)
alpha : sharpness of transition between inner and outer
beta : outer logarithmic slope
gamma : inner logarithmic slope
"""
mass = encmass(r, norm, rs, alpha, beta, gamma)
vcirc = (sqrt(c.G*mass/r)).to("km/s")
return vcirc
|
|
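A quick sanity check of the v = sqrt(G M / r) relation and the km/s conversion used above, for a point mass with astropy units (the numbers are illustrative only):

import numpy as np
from astropy import units as u, constants as c

mass = 1e11 * u.Msun
r = 8.0 * u.kpc
v = np.sqrt(c.G * mass / r).to("km/s")
print(v)  # roughly 230 km/s -- a plausible galactic rotation speed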
53bca73d70db98b3657b65c071b32240e8403010
|
bin/2000/shape_msa_blockgroup.py
|
bin/2000/shape_msa_blockgroup.py
|
"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_bg = {}
with open('data/2000/crosswalks/msa_blockgroup.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
bg = rows[1]
if msa not in msa_to_bg:
msa_to_bg[msa] = []
msa_to_bg[msa].append(bg)
#
# Perform the extraction
#
for msa in msa_to_bg:
states = list(set([b[:2] for b in msa_to_bg[msa]]))
## Get all blockgroups
all_bg = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blockgroups.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_bg[f['properties']['BKGPIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_bg = {bg: all_bg[bg] for bg in msa_to_bg[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BKGPIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blockgroups.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for bg in msa_bg:
rec = {'geometry':msa_bg[bg], 'properties':{'BKGPIDFP00':bg}}
output.write(rec)
|
Add script to extract the shape of blockgroups and aggregate them per msa
|
Add script to extract the shape of blockgroups and aggregate them per msa
|
Python
|
bsd-2-clause
|
scities/2000-us-metro-atlas
|
Add script to extract the shape of blockgroups and aggregate them per msa
|
"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_bg = {}
with open('data/2000/crosswalks/msa_blockgroup.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
bg = rows[1]
if msa not in msa_to_bg:
msa_to_bg[msa] = []
msa_to_bg[msa].append(bg)
#
# Perform the extraction
#
for msa in msa_to_bg:
states = list(set([b[:2] for b in msa_to_bg[msa]]))
## Get all blockgroups
all_bg = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blockgroups.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_bg[f['properties']['BKGPIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_bg = {bg: all_bg[bg] for bg in msa_to_bg[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BKGPIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blockgroups.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for bg in msa_bg:
rec = {'geometry':msa_bg[bg], 'properties':{'BKGPIDFP00':bg}}
output.write(rec)
|
<commit_before><commit_msg>Add script to extract the shape of blockgroups and aggregate them per msa<commit_after>
|
"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_bg = {}
with open('data/2000/crosswalks/msa_blockgroup.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
bg = rows[1]
if msa not in msa_to_bg:
msa_to_bg[msa] = []
msa_to_bg[msa].append(bg)
#
# Perform the extraction
#
for msa in msa_to_bg:
states = list(set([b[:2] for b in msa_to_bg[msa]]))
## Get all blockgroups
all_bg = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blockgroups.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_bg[f['properties']['BKGPIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_bg = {bg: all_bg[bg] for bg in msa_to_bg[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BKGPIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blockgroups.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for bg in msa_bg:
rec = {'geometry':msa_bg[bg], 'properties':{'BKGPIDFP00':bg}}
output.write(rec)
|
Add script to extract the shape of blockgroups and aggregate them per msa"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_bg = {}
with open('data/2000/crosswalks/msa_blockgroup.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
bg = rows[1]
if msa not in msa_to_bg:
msa_to_bg[msa] = []
msa_to_bg[msa].append(bg)
#
# Perform the extraction
#
for msa in msa_to_bg:
states = list(set([b[:2] for b in msa_to_bg[msa]]))
## Get all blockgroups
all_bg = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blockgroups.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_bg[f['properties']['BKGPIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_bg = {bg: all_bg[bg] for bg in msa_to_bg[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BKGPIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blockgroups.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for bg in msa_bg:
rec = {'geometry':msa_bg[bg], 'properties':{'BKGPIDFP00':bg}}
output.write(rec)
|
<commit_before><commit_msg>Add script to extract the shape of blockgroups and aggregate them per msa<commit_after>"""shape_msa_blockgroup.py
Output one shapefile per MSA containing all the blockgroups it contains
"""
import os
import csv
import fiona
#
# Import MSA to blockgroup crosswalk
#
msa_to_bg = {}
with open('data/2000/crosswalks/msa_blockgroup.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa = rows[0]
bg = rows[1]
if msa not in msa_to_bg:
msa_to_bg[msa] = []
msa_to_bg[msa].append(bg)
#
# Perform the extraction
#
for msa in msa_to_bg:
states = list(set([b[:2] for b in msa_to_bg[msa]]))
## Get all blockgroups
all_bg = {}
for st in states:
with fiona.open('data/2000/shp/state/%s/blockgroups.shp'%st, 'r',
'ESRI Shapefile') as source:
source_crs = source.crs
for f in source:
all_bg[f['properties']['BKGPIDFP00']] = f['geometry']
## blockgroups within cbsa
msa_bg = {bg: all_bg[bg] for bg in msa_to_bg[msa]}
## Save
if not os.path.isdir('data/2000/shp/msa/%s'%msa):
os.mkdir('data/2000/shp/msa/%s'%msa)
schema = {'geometry': 'Polygon',
'properties': {'BKGPIDFP00': 'str'}}
with fiona.open('data/2000/shp/msa/%s/blockgroups.shp'%msa, 'w',
'ESRI Shapefile',
crs = source_crs,
schema = schema) as output:
for bg in msa_bg:
rec = {'geometry':msa_bg[bg], 'properties':{'BKGPIDFP00':bg}}
output.write(rec)
|
|
c81f577153a258176cabedb1c194fdb58d8c1d67
|
telemetry/telemetry/page/actions/action_runner_unittest.py
|
telemetry/telemetry/page/actions/action_runner_unittest.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
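The core of the test above is filtering a renderer thread's async slices down to interaction records; a standalone sketch of that step with plain objects and a stand-in predicate (not telemetry's real API):
class Event:
    def __init__(self, name):
        self.name = name
def is_interaction_record(name):
    # stand-in for tir_module.IsTimelineInteractionRecord
    return name.startswith('Interaction.')
events = [Event('Interaction.TestInteraction/is_smooth'), Event('gc')]
records = [e for e in events if is_interaction_record(e.name)]
print([e.name for e in records])  # ['Interaction.TestInteraction/is_smooth']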
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
|
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
catapult-project/catapult-csm,sahiljain/catapult,catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,catapult-project/catapult,benschmaus/catapult,benschmaus/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult-csm,sahiljain/catapult,sahiljain/catapult,catapult-project/catapult,catapult-project/catapult,benschmaus/catapult,benschmaus/catapult,sahiljain/catapult,sahiljain/catapult
|
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
<commit_before><commit_msg>Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
<commit_before><commit_msg>Add test for action_runner.BeginInteraction and action_runner.EndInteraction.
BUG=368767
Review URL: https://codereview.chromium.org/294943006
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@272549 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.core.backends.chrome import tracing_backend
from telemetry.core.timeline import model
from telemetry.page.actions import action_runner as action_runner_module
from telemetry.unittest import tab_test_case
from telemetry.web_perf import timeline_interaction_record as tir_module
class ActionRunnerTest(tab_test_case.TabTestCase):
def testIssuingInteractionRecord(self):
self.Navigate('blank.html')
action_runner = action_runner_module.ActionRunner(None, self._tab)
self._browser.StartTracing(tracing_backend.MINIMAL_TRACE_CATEGORIES)
action_runner.BeginInteraction('TestInteraction', [tir_module.IS_SMOOTH])
action_runner.EndInteraction('TestInteraction', [tir_module.IS_SMOOTH])
trace_data = self._browser.StopTracing()
timeline_model = model.TimelineModel(trace_data)
records = []
renderer_thread = timeline_model.GetRendererThreadFromTab(self._tab)
for event in renderer_thread.async_slices:
if not tir_module.IsTimelineInteractionRecord(event.name):
continue
records.append(tir_module.TimelineInteractionRecord.FromEvent(event))
self.assertEqual(1, len(records),
'Fail to issue the interaction record on tracing timeline.'
' Trace data:\n%s' % repr(trace_data.EventData()))
self.assertEqual('TestInteraction', records[0].logical_name)
self.assertTrue(records[0].is_smooth)
|
|
f06aeb490fe5c57c1924b46595be26909f579178
|
tests/cpydiff/syntax_assign_expr.py
|
tests/cpydiff/syntax_assign_expr.py
|
"""
categories: Syntax,Operators
description: MicroPython allows using := to assign to the variable of a comprehension, CPython raises a SyntaxError.
cause: MicroPython is optimised for code size and doesn't check this case.
workaround: Do not rely on this behaviour if writing CPython compatible code.
"""
print([i := -1 for i in range(4)])
|
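For contrast, a form both implementations accept: := may bind a different name inside the comprehension, and in CPython (3.8+) that name leaks to the enclosing scope:
print([y := i * 2 for i in range(4)])  # [0, 2, 4, 6]
print(y)                               # 6 -- y escapes the comprehension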
Add CPy diff test for assignment expression behaviour.
|
tests/cpydiff: Add CPy diff test for assignment expression behaviour.
|
Python
|
mit
|
pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython
|
tests/cpydiff: Add CPy diff test for assignment expression behaviour.
|
"""
categories: Syntax,Operators
description: MicroPython allows using := to assign to the variable of a comprehension, CPython raises a SyntaxError.
cause: MicroPython is optimised for code size and doesn't check this case.
workaround: Do not rely on this behaviour if writing CPython compatible code.
"""
print([i := -1 for i in range(4)])
|
<commit_before><commit_msg>tests/cpydiff: Add CPy diff test for assignment expression behaviour.<commit_after>
|
"""
categories: Syntax,Operators
description: MicroPython allows using := to assign to the variable of a comprehension, CPython raises a SyntaxError.
cause: MicroPython is optimised for code size and doesn't check this case.
workaround: Do not rely on this behaviour if writing CPython compatible code.
"""
print([i := -1 for i in range(4)])
|
tests/cpydiff: Add CPy diff test for assignment expression behaviour."""
categories: Syntax,Operators
description: MicroPython allows using := to assign to the variable of a comprehension, CPython raises a SyntaxError.
cause: MicroPython is optimised for code size and doesn't check this case.
workaround: Do not rely on this behaviour if writing CPython compatible code.
"""
print([i := -1 for i in range(4)])
|
<commit_before><commit_msg>tests/cpydiff: Add CPy diff test for assignment expression behaviour.<commit_after>"""
categories: Syntax,Operators
description: MicroPython allows using := to assign to the variable of a comprehension, CPython raises a SyntaxError.
cause: MicroPython is optimised for code size and doesn't check this case.
workaround: Do not rely on this behaviour if writing CPython compatible code.
"""
print([i := -1 for i in range(4)])
|
|
56992cc873c5837da0907a44af562646b54ed173
|
scripts/run_gunicorn_server.py
|
scripts/run_gunicorn_server.py
|
import subprocess
import argparse
import pwd
import os
def _get_user_home_dir(user):
return os.path.expanduser('~' + user)
def build_parser():
def _get_current_user():
return pwd.getpwuid(os.getuid())[0]
DEFAULTS = {
'workers': 3,
'user': _get_current_user(),
'group': _get_current_user(),
'log_file': os.path.join(
_get_user_home_dir(_get_current_user()),
'gunicorn',
'tca.log'),
'virtualenv': 'tca-dev',
}
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workers', type=int,
default=DEFAULTS['workers'])
parser.add_argument('--user', type=str,
default=DEFAULTS['user'])
parser.add_argument('--group', type=str,
default=DEFAULTS['group'])
parser.add_argument('--log-file', type=str,
default=DEFAULTS['log_file'])
parser.add_argument('--virtualenv', type=str,
default=DEFAULTS['virtualenv'])
parser.add_argument('--socket-name', type=str, required=True)
return parser
def start_server(args):
# Create the log directory, if necessary
log_dir = os.path.dirname(args.log_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Build the bind address
# Make it a local UNIX socket instead of loopback TCP port
bind_address = 'unix:/tmp/{socket_name}.socket'.format(
socket_name=args.socket_name)
# Activate the virtualenv
activate_this_file = os.path.join(
_get_user_home_dir(args.user),
'.virtualenvs',
args.virtualenv,
'bin',
'activate_this.py'
)
execfile(activate_this_file, dict(__file__=activate_this_file))
# Start gunicorn processes -- spawns them and exits
# NOTE: This makes the script dependent on the cwd.
# TODO: Make it independent of the cwd.
os.chdir('../tca')
subprocess.call([
'gunicorn',
'tca.wsgi:application',
'-b', bind_address,
'-w', str(args.workers),
'--user', args.user,
'--group', args.group,
'--log-file', args.log_file,
'--log-level', 'debug',
])
if __name__ == '__main__':
parser = build_parser()
start_server(parser.parse_args())
|
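Note that execfile() in the script above exists only in Python 2; a hedged Python 3 equivalent of the same activate_this.py trick (the path here is illustrative):
activate_this = '/home/user/.virtualenvs/tca-dev/bin/activate_this.py'
with open(activate_this) as f:
    exec(f.read(), {'__file__': activate_this})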
Add a convenience script for running gunicorn workers
|
Add a convenience script for running gunicorn workers
|
Python
|
bsd-3-clause
|
mlalic/TumCampusAppBackend,mlalic/TumCampusAppBackend
|
Add a convenience script for running gunicorn workers
|
import subprocess
import argparse
import pwd
import os
def _get_user_home_dir(user):
return os.path.expanduser('~' + user)
def build_parser():
def _get_current_user():
return pwd.getpwuid(os.getuid())[0]
DEFAULTS = {
'workers': 3,
'user': _get_current_user(),
'group': _get_current_user(),
'log_file': os.path.join(
_get_user_home_dir(_get_current_user()),
'gunicorn',
'tca.log'),
'virtualenv': 'tca-dev',
}
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workers', type=int,
default=DEFAULTS['workers'])
parser.add_argument('--user', type=str,
default=DEFAULTS['user'])
parser.add_argument('--group', type=str,
default=DEFAULTS['group'])
parser.add_argument('--log-file', type=str,
default=DEFAULTS['log_file'])
parser.add_argument('--virtualenv', type=str,
default=DEFAULTS['virtualenv'])
parser.add_argument('--socket-name', type=str, required=True)
return parser
def start_server(args):
# Create the log directory, if necessary
log_dir = os.path.dirname(args.log_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Build the bind address
# Make it a local UNIX socket instead of loopback TCP port
bind_address = 'unix:/tmp/{socket_name}.socket'.format(
socket_name=args.socket_name)
# Activate the virtualenv
activate_this_file = os.path.join(
_get_user_home_dir(args.user),
'.virtualenvs',
args.virtualenv,
'bin',
'activate_this.py'
)
execfile(activate_this_file, dict(__file__=activate_this_file))
# Start gunicorn processes -- spawns them and exits
# NOTE: This makes the script dependent on the cwd.
# TODO: Make it independent of the cwd.
os.chdir('../tca')
subprocess.call([
'gunicorn',
'tca.wsgi:application',
'-b', bind_address,
'-w', str(args.workers),
'--user', args.user,
'--group', args.group,
'--log-file', args.log_file,
'--log-level', 'debug',
])
if __name__ == '__main__':
parser = build_parser()
start_server(parser.parse_args())
|
<commit_before><commit_msg>Add a convenience script for running gunicorn workers<commit_after>
|
import subprocess
import argparse
import pwd
import os
def _get_user_home_dir(user):
return os.path.expanduser('~' + user)
def build_parser():
def _get_current_user():
return pwd.getpwuid(os.getuid())[0]
DEFAULTS = {
'workers': 3,
'user': _get_current_user(),
'group': _get_current_user(),
'log_file': os.path.join(
_get_user_home_dir(_get_current_user()),
'gunicorn',
'tca.log'),
'virtualenv': 'tca-dev',
}
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workers', type=int,
default=DEFAULTS['workers'])
parser.add_argument('--user', type=str,
default=DEFAULTS['user'])
parser.add_argument('--group', type=str,
default=DEFAULTS['group'])
parser.add_argument('--log-file', type=str,
default=DEFAULTS['log_file'])
parser.add_argument('--virtualenv', type=str,
default=DEFAULTS['virtualenv'])
parser.add_argument('--socket-name', type=str, required=True)
return parser
def start_server(args):
# Create the log directory, if necessary
log_dir = os.path.dirname(args.log_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Build the bind address
# Make it a local UNIX socket instead of loopback TCP port
bind_address = 'unix:/tmp/{socket_name}.socket'.format(
socket_name=args.socket_name)
# Activate the virtualenv
activate_this_file = os.path.join(
_get_user_home_dir(args.user),
'.virtualenvs',
args.virtualenv,
'bin',
'activate_this.py'
)
execfile(activate_this_file, dict(__file__=activate_this_file))
# Start gunicorn processes -- spawns them and exits
# NOTE: This makes the script dependent on the cwd.
# TODO: Make it independent of the cwd.
os.chdir('../tca')
subprocess.call([
'gunicorn',
'tca.wsgi:application',
'-b', bind_address,
'-w', str(args.workers),
'--user', args.user,
'--group', args.group,
'--log-file', args.log_file,
'--log-level', 'debug',
])
if __name__ == '__main__':
parser = build_parser()
start_server(parser.parse_args())
|
Add a convenience script for running gunicorn workersimport subprocess
import argparse
import pwd
import os
def _get_user_home_dir(user):
return os.path.expanduser('~' + user)
def build_parser():
def _get_current_user():
return pwd.getpwuid(os.getuid())[0]
DEFAULTS = {
'workers': 3,
'user': _get_current_user(),
'group': _get_current_user(),
'log_file': os.path.join(
_get_user_home_dir(_get_current_user()),
'gunicorn',
'tca.log'),
'virtualenv': 'tca-dev',
}
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workers', type=int,
default=DEFAULTS['workers'])
parser.add_argument('--user', type=str,
default=DEFAULTS['user'])
parser.add_argument('--group', type=str,
default=DEFAULTS['group'])
parser.add_argument('--log-file', type=str,
default=DEFAULTS['log_file'])
parser.add_argument('--virtualenv', type=str,
default=DEFAULTS['virtualenv'])
parser.add_argument('--socket-name', type=str, required=True)
return parser
def start_server(args):
# Create the log directory, if necessary
log_dir = os.path.dirname(args.log_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Build the bind address
# Make it a local UNIX socket instead of loopback TCP port
bind_address = 'unix:/tmp/{socket_name}.socket'.format(
socket_name=args.socket_name)
# Activate the virtualenv
activate_this_file = os.path.join(
_get_user_home_dir(args.user),
'.virtualenvs',
args.virtualenv,
'bin',
'activate_this.py'
)
execfile(activate_this_file, dict(__file__=activate_this_file))
# Start gunicorn processes -- spawns them and exits
# NOTE: This makes the script dependent on the cwd.
# TODO: Make it independent of the cwd.
os.chdir('../tca')
subprocess.call([
'gunicorn',
'tca.wsgi:application',
'-b', bind_address,
'-w', str(args.workers),
'--user', args.user,
'--group', args.group,
'--log-file', args.log_file,
'--log-level', 'debug',
])
if __name__ == '__main__':
parser = build_parser()
start_server(parser.parse_args())
|
<commit_before><commit_msg>Add a convenience script for running gunicorn workers<commit_after>import subprocess
import argparse
import pwd
import os
def _get_user_home_dir(user):
return os.path.expanduser('~' + user)
def build_parser():
def _get_current_user():
return pwd.getpwuid(os.getuid())[0]
DEFAULTS = {
'workers': 3,
'user': _get_current_user(),
'group': _get_current_user(),
'log_file': os.path.join(
_get_user_home_dir(_get_current_user()),
'gunicorn',
'tca.log'),
'virtualenv': 'tca-dev',
}
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--workers', type=int,
default=DEFAULTS['workers'])
parser.add_argument('--user', type=str,
default=DEFAULTS['user'])
parser.add_argument('--group', type=str,
default=DEFAULTS['group'])
parser.add_argument('--log-file', type=str,
default=DEFAULTS['log_file'])
parser.add_argument('--virtualenv', type=str,
default=DEFAULTS['virtualenv'])
parser.add_argument('--socket-name', type=str, required=True)
return parser
def start_server(args):
# Create the log directory, if necessary
log_dir = os.path.dirname(args.log_file)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
# Build the bind address
# Make it a local UNIX socket instead of loopback TCP port
bind_address = 'unix:/tmp/{socket_name}.socket'.format(
socket_name=args.socket_name)
# Activate the virtualenv
activate_this_file = os.path.join(
_get_user_home_dir(args.user),
'.virtualenvs',
args.virtualenv,
'bin',
'activate_this.py'
)
execfile(activate_this_file, dict(__file__=activate_this_file))
# Start gunicorn processes -- spawns them and exits
# NOTE: This makes the script dependent on the cwd.
# TODO: Make it independent of the cwd.
os.chdir('../tca')
subprocess.call([
'gunicorn',
'tca.wsgi:application',
'-b', bind_address,
'-w', str(args.workers),
'--user', args.user,
'--group', args.group,
'--log-file', args.log_file,
'--log-level', 'debug',
])
if __name__ == '__main__':
parser = build_parser()
start_server(parser.parse_args())
|
|
b5c02ab5789d228876ef647f35acdf287166256f
|
csl-add-updated.py
|
csl-add-updated.py
|
# Python script to add timestamp to style with empty updated field
# Author: Rintze M. Zelle
# Version: 2011-12-17
# * Requires lxml library (http://lxml.de/)
import os, glob, re
from lxml import etree
path = 'C:\Documents and Settings\zelle\My Documents\CSL\styles\dependent\\'
verbatims = {}
for independentStyle in glob.glob( os.path.join(path, '*.csl') ):
style = etree.parse(independentStyle)
styleElement = style.getroot()
updatedContent = None
updated = styleElement.find(".//{http://purl.org/net/xbiblio/csl}updated")
updatedContent = updated.text
if updatedContent == None:
updated.text = "2012-01-01T00:00:00+00:00"
style = etree.tostring(style, pretty_print=True, xml_declaration=True, encoding="utf-8")
style = style.replace("'", '"', 4)
f = open(independentStyle, 'w')
f.write ( style )
f.close()
|
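A self-contained sketch of the namespaced lookup the script relies on, run against a tiny in-memory style instead of files on disk:
from lxml import etree
CSL = '{http://purl.org/net/xbiblio/csl}'
xml = (b'<style xmlns="http://purl.org/net/xbiblio/csl">'
       b'<info><updated/></info></style>')
style = etree.fromstring(xml)
updated = style.find('.//' + CSL + 'updated')
if updated.text is None:
    updated.text = '2012-01-01T00:00:00+00:00'
print(etree.tostring(style, pretty_print=True).decode())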
Add old script to add cs:updated element to styles.
|
Add old script to add cs:updated element to styles.
|
Python
|
mit
|
citation-style-language/utilities,citation-style-language/utilities,citation-style-language/utilities,citation-style-language/utilities
|
Add old script to add cs:updated element to styles.
|
# Python script to add timestamp to style with empty updated field
# Author: Rintze M. Zelle
# Version: 2011-12-17
# * Requires lxml library (http://lxml.de/)
import os, glob, re
from lxml import etree
path = 'C:\Documents and Settings\zelle\My Documents\CSL\styles\dependent\\'
verbatims = {}
for independentStyle in glob.glob( os.path.join(path, '*.csl') ):
style = etree.parse(independentStyle)
styleElement = style.getroot()
updatedContent = None
updated = styleElement.find(".//{http://purl.org/net/xbiblio/csl}updated")
updatedContent = updated.text
if updatedContent == None:
updated.text = "2012-01-01T00:00:00+00:00"
style = etree.tostring(style, pretty_print=True, xml_declaration=True, encoding="utf-8")
style = style.replace("'", '"', 4)
f = open(independentStyle, 'w')
f.write ( style )
f.close()
|
<commit_before><commit_msg>Add old script to add cs:updated element to styles.<commit_after>
|
# Python script to add timestamp to style with empty updated field
# Author: Rintze M. Zelle
# Version: 2011-12-17
# * Requires lxml library (http://lxml.de/)
import os, glob, re
from lxml import etree
path = 'C:\Documents and Settings\zelle\My Documents\CSL\styles\dependent\\'
verbatims = {}
for independentStyle in glob.glob( os.path.join(path, '*.csl') ):
style = etree.parse(independentStyle)
styleElement = style.getroot()
updatedContent = None
updated = styleElement.find(".//{http://purl.org/net/xbiblio/csl}updated")
updatedContent = updated.text
if updatedContent == None:
updated.text = "2012-01-01T00:00:00+00:00"
style = etree.tostring(style, pretty_print=True, xml_declaration=True, encoding="utf-8")
style = style.replace("'", '"', 4)
f = open(independentStyle, 'w')
f.write ( style )
f.close()
|
Add old script to add cs:updated element to styles.# Python script to add timestamp to style with empty updated field
# Author: Rintze M. Zelle
# Version: 2011-12-17
# * Requires lxml library (http://lxml.de/)
import os, glob, re
from lxml import etree
path = 'C:\Documents and Settings\zelle\My Documents\CSL\styles\dependent\\'
verbatims = {}
for independentStyle in glob.glob( os.path.join(path, '*.csl') ):
style = etree.parse(independentStyle)
styleElement = style.getroot()
updatedContent = None
updated = styleElement.find(".//{http://purl.org/net/xbiblio/csl}updated")
updatedContent = updated.text
if updatedContent == None:
updated.text = "2012-01-01T00:00:00+00:00"
style = etree.tostring(style, pretty_print=True, xml_declaration=True, encoding="utf-8")
style = style.replace("'", '"', 4)
f = open(independentStyle, 'w')
f.write ( style )
f.close()
|
<commit_before><commit_msg>Add old script to add cs:updated element to styles.<commit_after># Python script to add timestamp to style with empty updated field
# Author: Rintze M. Zelle
# Version: 2011-12-17
# * Requires lxml library (http://lxml.de/)
import os, glob, re
from lxml import etree
path = 'C:\Documents and Settings\zelle\My Documents\CSL\styles\dependent\\'
verbatims = {}
for independentStyle in glob.glob( os.path.join(path, '*.csl') ):
style = etree.parse(independentStyle)
styleElement = style.getroot()
updatedContent = None
updated = styleElement.find(".//{http://purl.org/net/xbiblio/csl}updated")
updatedContent = updated.text
if updatedContent == None:
updated.text = "2012-01-01T00:00:00+00:00"
style = etree.tostring(style, pretty_print=True, xml_declaration=True, encoding="utf-8")
style = style.replace("'", '"', 4)
f = open(independentStyle, 'w')
f.write ( style )
f.close()
|
|
6bd463c7f2627782816896f42e92f36f7f07b3b9
|
scripts/hidefast.py
|
scripts/hidefast.py
|
from optparse import OptionParser
from queue import LifoQueue
from typing import Iterator, List
import requests
from logger import logger
BASE_URL = 'http://showfast.sc.couchbase.com'
def get_menu() -> dict:
return requests.get(url=BASE_URL + '/static/menu.json').json()
def get_benchmarks(component: str, category: str) -> List[dict]:
api = '/api/v1/benchmarks/{}/{}'.format(component, category)
return requests.get(url=BASE_URL + api).json() or []
def hide_benchmark(benchmark_id: str):
api = '/api/v1/benchmarks/{}'.format(benchmark_id)
requests.patch(url=BASE_URL + api)
def showfast_iterator(components: List[str]) -> Iterator:
for component, meta in get_menu()['components'].items():
if component in components:
for category in meta['categories']:
yield component, category['id']
def parse_release(build: str) -> str:
return build.split('-')[0]
def benchmark_iterator(components: List[str], max_builds: int) -> Iterator:
for component, category in showfast_iterator(components=components):
curr_metric, curr_release = None, None
queue = LifoQueue(maxsize=max_builds)
for benchmark in get_benchmarks(component, category):
if not benchmark['hidden']:
release = parse_release(benchmark['build'])
if curr_metric != benchmark['metric']:
curr_metric, curr_release = benchmark['metric'], release
queue.queue.clear()
if release != curr_release:
curr_release = release
queue.queue.clear()
if queue.full():
yield benchmark
else:
queue.put(benchmark)
def hide(components: List[str], max_builds: int):
for b in benchmark_iterator(components=components, max_builds=max_builds):
logger.info('Hiding: build={build}, metric={metric}'.format(**b))
hide_benchmark(b['id'])
def main():
parser = OptionParser()
parser.add_option('-c', '--components', dest='components', default=[],
type='str', help='comma separated list of components')
parser.add_option('-m', '--max-builds', dest='max_builds', default=8,
type='int', help='maximum number of builds per release')
options, args = parser.parse_args()
hide(components=options.components.split(','),
max_builds=options.max_builds)
if __name__ == '__main__':
main()
|
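The sliding "keep the newest N builds per metric and release" window is the heart of this script; a standalone sketch of the same logic with hypothetical benchmark dicts instead of REST results:
from queue import LifoQueue
def to_hide(benchmarks, max_builds=2):
    curr_key, queue = None, LifoQueue(maxsize=max_builds)
    for b in benchmarks:  # assumed newest-first, as the API returns them
        key = (b['metric'], b['build'].split('-')[0])
        if key != curr_key:
            curr_key = key
            queue.queue.clear()
        if queue.full():
            yield b          # older than the N builds being kept
        else:
            queue.put(b)
demo = [{'metric': 'm', 'build': '5.0.0-%d' % n} for n in (5, 4, 3, 2)]
print([b['build'] for b in to_hide(demo)])  # ['5.0.0-3', '5.0.0-2']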
Add a script for hiding old ShowFast results
|
Add a script for hiding old ShowFast results
The script keeps the specified number of the most recent results
per release and hides everything else. It is helpful for keeping
ShowFast clean and normalized.
Here is an example of hiding Spock results for XDCR and Views:
$ env/bin/python scripts/hideslow.py --components xdcr,views --max-builds 7
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_20M_initial_leto
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_initial_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_20M_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_100M_dgm_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_index_95th_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_basic_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_body_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_compute_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_group_by_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_multi_emits_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_range_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_thr_20M_leto
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_fe_titan_5x5
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_lww_titan_5x5
...
This script also demonstrates usage of type hints in Python 3.
Change-Id: Iccd1329248dae79a56d767520f32a22e824f0cd4
Reviewed-on: http://review.couchbase.org/75875
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
|
Python
|
apache-2.0
|
couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner,pavel-paulau/perfrunner,couchbase/perfrunner,couchbase/perfrunner
|
Add a script for hiding old ShowFast results
The script keeps the specified number of the most recent results
per release and hides everything else. It is helpful for keeping
ShowFast clean and normalized.
Here is an example of hiding Spock results for XDCR and Views:
$ env/bin/python scripts/hideslow.py --components xdcr,views --max-builds 7
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_20M_initial_leto
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_initial_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_20M_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_100M_dgm_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_index_95th_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_basic_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_body_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_compute_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_group_by_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_multi_emits_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_range_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_thr_20M_leto
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_fe_titan_5x5
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_lww_titan_5x5
...
This script also demonstrates usage of type hints in Python 3.
Change-Id: Iccd1329248dae79a56d767520f32a22e824f0cd4
Reviewed-on: http://review.couchbase.org/75875
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>
|
from optparse import OptionParser
from queue import LifoQueue
from typing import Iterator, List
import requests
from logger import logger
BASE_URL = 'http://showfast.sc.couchbase.com'
def get_menu() -> dict:
return requests.get(url=BASE_URL + '/static/menu.json').json()
def get_benchmarks(component: str, category: str) -> List[dict]:
api = '/api/v1/benchmarks/{}/{}'.format(component, category)
return requests.get(url=BASE_URL + api).json() or []
def hide_benchmark(benchmark_id: str):
api = '/api/v1/benchmarks/{}'.format(benchmark_id)
requests.patch(url=BASE_URL + api)
def showfast_iterator(components: List[str]) -> Iterator:
for component, meta in get_menu()['components'].items():
if component in components:
for category in meta['categories']:
yield component, category['id']
def parse_release(build: str) -> str:
return build.split('-')[0]
def benchmark_iterator(components: List[str], max_builds: int) -> Iterator:
for component, category in showfast_iterator(components=components):
curr_metric, curr_release = None, None
queue = LifoQueue(maxsize=max_builds)
for benchmark in get_benchmarks(component, category):
if not benchmark['hidden']:
release = parse_release(benchmark['build'])
if curr_metric != benchmark['metric']:
curr_metric, curr_release = benchmark['metric'], release
queue.queue.clear()
if release != curr_release:
curr_release = release
queue.queue.clear()
if queue.full():
yield benchmark
else:
queue.put(benchmark)
def hide(components: List[str], max_builds: int):
for b in benchmark_iterator(components=components, max_builds=max_builds):
logger.info('Hiding: build={build}, metric={metric}'.format(**b))
hide_benchmark(b['id'])
def main():
parser = OptionParser()
parser.add_option('-c', '--components', dest='components', default=[],
type='str', help='comma separated list of components')
parser.add_option('-m', '--max-builds', dest='max_builds', default=8,
type='int', help='maximum number of builds per release')
options, args = parser.parse_args()
hide(components=options.components.split(','),
max_builds=options.max_builds)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for hiding old ShowFast results
The script keeps the specified number of the most recent results
per release and hides everything else. It is helpful for keeping
ShowFast clean and normalized.
Here is an example of hiding Spock results for XDCR and Views:
$ env/bin/python scripts/hideslow.py --components xdcr,views --max-builds 7
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_20M_initial_leto
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_initial_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_20M_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_100M_dgm_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_index_95th_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_basic_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_body_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_compute_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_group_by_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_multi_emits_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_range_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_thr_20M_leto
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_fe_titan_5x5
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_lww_titan_5x5
...
This script also demonstrates usage of type hints in Python 3.
Change-Id: Iccd1329248dae79a56d767520f32a22e824f0cd4
Reviewed-on: http://review.couchbase.org/75875
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com><commit_after>
|
from optparse import OptionParser
from queue import LifoQueue
from typing import Iterator, List
import requests
from logger import logger
BASE_URL = 'http://showfast.sc.couchbase.com'
def get_menu() -> dict:
return requests.get(url=BASE_URL + '/static/menu.json').json()
def get_benchmarks(component: str, category: str) -> List[dict]:
api = '/api/v1/benchmarks/{}/{}'.format(component, category)
return requests.get(url=BASE_URL + api).json() or []
def hide_benchmark(benchmark_id: str):
api = '/api/v1/benchmarks/{}'.format(benchmark_id)
requests.patch(url=BASE_URL + api)
def showfast_iterator(components: List[str]) -> Iterator:
for component, meta in get_menu()['components'].items():
if component in components:
for category in meta['categories']:
yield component, category['id']
def parse_release(build: str) -> str:
return build.split('-')[0]
def benchmark_iterator(components: List[str], max_builds: int) -> Iterator:
for component, category in showfast_iterator(components=components):
curr_metric, curr_release = None, None
queue = LifoQueue(maxsize=max_builds)
for benchmark in get_benchmarks(component, category):
if not benchmark['hidden']:
release = parse_release(benchmark['build'])
if curr_metric != benchmark['metric']:
curr_metric, curr_release = benchmark['metric'], release
queue.queue.clear()
if release != curr_release:
curr_release = release
queue.queue.clear()
if queue.full():
yield benchmark
else:
queue.put(benchmark)
def hide(components: List[str], max_builds: int):
for b in benchmark_iterator(components=components, max_builds=max_builds):
logger.info('Hiding: build={build}, metric={metric}'.format(**b))
hide_benchmark(b['id'])
def main():
parser = OptionParser()
parser.add_option('-c', '--components', dest='components', default=[],
type='str', help='comma separated list of components')
parser.add_option('-m', '--max-builds', dest='max_builds', default=8,
type='int', help='maximum number of builds per release')
options, args = parser.parse_args()
hide(components=options.components.split(','),
max_builds=options.max_builds)
if __name__ == '__main__':
main()
|
Add a script for hiding old ShowFast results
The script keeps the specified number of the most recent results
per release and hides everything else. It is helpful for keeping
ShowFast clean and normalized.
Here is an example of hiding Spock results for XDCR and Views:
$ env/bin/python scripts/hideslow.py --components xdcr,views --max-builds 7
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_20M_initial_leto
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_initial_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_20M_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_100M_dgm_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_index_95th_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_basic_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_body_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_compute_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_group_by_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_multi_emits_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_range_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_thr_20M_leto
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_fe_titan_5x5
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_lww_titan_5x5
...
This script also demonstrates usage of type hints in Python 3.
Change-Id: Iccd1329248dae79a56d767520f32a22e824f0cd4
Reviewed-on: http://review.couchbase.org/75875
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com>from optparse import OptionParser
from queue import LifoQueue
from typing import Iterator, List
import requests
from logger import logger
BASE_URL = 'http://showfast.sc.couchbase.com'
def get_menu() -> dict:
return requests.get(url=BASE_URL + '/static/menu.json').json()
def get_benchmarks(component: str, category: str) -> List[dict]:
api = '/api/v1/benchmarks/{}/{}'.format(component, category)
return requests.get(url=BASE_URL + api).json() or []
def hide_benchmark(benchmark_id: str):
api = '/api/v1/benchmarks/{}'.format(benchmark_id)
requests.patch(url=BASE_URL + api)
def showfast_iterator(components: List[str]) -> Iterator:
for component, meta in get_menu()['components'].items():
if component in components:
for category in meta['categories']:
yield component, category['id']
def parse_release(build: str) -> str:
return build.split('-')[0]
def benchmark_iterator(components: List[str], max_builds: int) -> Iterator:
for component, category in showfast_iterator(components=components):
curr_metric, curr_release = None, None
queue = LifoQueue(maxsize=max_builds)
for benchmark in get_benchmarks(component, category):
if not benchmark['hidden']:
release = parse_release(benchmark['build'])
if curr_metric != benchmark['metric']:
curr_metric, curr_release = benchmark['metric'], release
queue.queue.clear()
if release != curr_release:
curr_release = release
queue.queue.clear()
if queue.full():
yield benchmark
else:
queue.put(benchmark)
def hide(components: List[str], max_builds: int):
for b in benchmark_iterator(components=components, max_builds=max_builds):
logger.info('Hiding: build={build}, metric={metric}'.format(**b))
hide_benchmark(b['id'])
def main():
parser = OptionParser()
parser.add_option('-c', '--components', dest='components', default=[],
type='str', help='comma separated list of components')
parser.add_option('-m', '--max-builds', dest='max_builds', default=8,
type='int', help='maximum number of builds per release')
options, args = parser.parse_args()
hide(components=options.components.split(','),
max_builds=options.max_builds)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script for hiding old ShowFast results
The script keeps the specified number of the most recent results
per release and hides everything else. It is helpful for keeping
ShowFast clean and normalized.
Here is an example of hiding Spock results for XDCR and Views:
$ env/bin/python scripts/hideslow.py --components xdcr,views --max-builds 7
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_20M_initial_leto
[28/Mar/2017 08:30:17] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_initial_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_20M_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=index_50M_dgm_incremental_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_100M_dgm_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_index_95th_leto
[28/Mar/2017 08:30:18] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_basic_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_body_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_compute_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_group_by_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_multi_emits_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_lat_20M_range_leto
[28/Mar/2017 08:30:19] INFO - Hiding: build=5.0.0-1885, metric=query_thr_20M_leto
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_fe_titan_5x5
[28/Mar/2017 08:30:20] INFO - Hiding: build=5.0.0-2110, metric=xdcr_init_1x1_unidir_100M_lww_titan_5x5
...
This script also demonstrates usage of type hints in Python 3.
Change-Id: Iccd1329248dae79a56d767520f32a22e824f0cd4
Reviewed-on: http://review.couchbase.org/75875
Tested-by: Build Bot <80754af91bfb6d1073585b046fe0a474ce868509@couchbase.com>
Reviewed-by: Pavel Paulau <dd88eded64e90046a680e3a6c0828ceb8fe8a0e7@gmail.com><commit_after>from optparse import OptionParser
from queue import LifoQueue
from typing import Iterator, List
import requests
from logger import logger
BASE_URL = 'http://showfast.sc.couchbase.com'
def get_menu() -> dict:
return requests.get(url=BASE_URL + '/static/menu.json').json()
def get_benchmarks(component: str, category: str) -> List[dict]:
api = '/api/v1/benchmarks/{}/{}'.format(component, category)
return requests.get(url=BASE_URL + api).json() or []
def hide_benchmark(benchmark_id: str):
api = '/api/v1/benchmarks/{}'.format(benchmark_id)
requests.patch(url=BASE_URL + api)
def showfast_iterator(components: List[str]) -> Iterator:
for component, meta in get_menu()['components'].items():
if component in components:
for category in meta['categories']:
yield component, category['id']
def parse_release(build: str) -> str:
return build.split('-')[0]
def benchmark_iterator(components: List[str], max_builds: int) -> Iterator:
for component, category in showfast_iterator(components=components):
curr_metric, curr_release = None, None
queue = LifoQueue(maxsize=max_builds)
for benchmark in get_benchmarks(component, category):
if not benchmark['hidden']:
release = parse_release(benchmark['build'])
if curr_metric != benchmark['metric']:
curr_metric, curr_release = benchmark['metric'], release
queue.queue.clear()
if release != curr_release:
curr_release = release
queue.queue.clear()
if queue.full():
yield benchmark
else:
queue.put(benchmark)
def hide(components: List[str], max_builds: int):
for b in benchmark_iterator(components=components, max_builds=max_builds):
logger.info('Hiding: build={build}, metric={metric}'.format(**b))
hide_benchmark(b['id'])
def main():
parser = OptionParser()
parser.add_option('-c', '--components', dest='components', default=[],
type='str', help='comma separated list of components')
parser.add_option('-m', '--max-builds', dest='max_builds', default=8,
type='int', help='maximum number of builds per release')
options, args = parser.parse_args()
hide(components=options.components.split(','),
max_builds=options.max_builds)
if __name__ == '__main__':
main()
|
|
3fe44d5aecbc80d2db3bacaf61aca69a3e36e388
|
plot_graph.py
|
plot_graph.py
|
from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
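The same three-node bathtub model can also be built without touching PATH, assuming the graphviz binaries are already on the system path; a hedged variant that writes a PNG:
from graphviz import Digraph
g = Digraph(comment='Bathtub causal model', format='png')
for tail, head, label in [('I', 'V', 'I+'), ('V', 'O', 'P+'), ('O', 'V', 'I-')]:
    g.edge(tail, head, label=label)  # nodes are created implicitly
g.render('test-output/bathtub', view=False)  # writes bathtub and bathtub.png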
Add naive code for generating and rendering causal model and state graph visualization
|
Add naive code for generating and rendering causal model and state graph visualization
|
Python
|
mit
|
Kaleidophon/puzzled-platypus
|
Add naive code for generating and rendering causal model and state graph visualization
|
from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
<commit_before><commit_msg>Add naive code for generating and rendering causal model and state graph visualization<commit_after>
|
from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
Add naive code for generating and rendering causal model and state graph visualizationfrom graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
<commit_before><commit_msg>Add naive code for generating and rendering causal model and state graph visualization<commit_after>from graphviz import Digraph
#Add the path of graphviz to render the graph
import os
os.environ["PATH"] += os.pathsep + 'C:/Program Files/graphviz-2.38/bin'
dot = Digraph(comment='The Round Table')
#add nodes
dot.node('I', 'Inflow')
dot.node('V', 'Volume')
dot.node('O', 'Outflow')
#add edges
dot.edge('I', 'V', label='I+')
dot.edge('V', 'O', label='P+')
dot.edge('O', 'V', label="I-")
#print the graph
print(dot.source)
#view graph
dot.render('test-output/round-table.gv', view=True)
|
|
9d2269dc8d7fd487f1107733efcba5426d65dd95
|
gmn/src/d1_gmn/tests/conftest.py
|
gmn/src/d1_gmn/tests/conftest.py
|
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from d1_client.mnclient_1_2 import MemberNodeClient_1_2 as mn_v1
from d1_client.mnclient_2_0 import MemberNodeClient_2_0 as mn_v2
MOCK_GMN_BASE_URL = 'http://gmn.client/node'
@pytest.fixture(scope='function', params=[mn_v1])
def gmn_client_v1(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v2])
def gmn_client_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v1, mn_v2])
def gmn_client_v1_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
|
Fix hidden unit test dependencies
|
Fix hidden unit test dependencies
|
Python
|
apache-2.0
|
DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python,DataONEorg/d1_python
|
Fix hidden unit test dependencies
|
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from d1_client.mnclient_1_2 import MemberNodeClient_1_2 as mn_v1
from d1_client.mnclient_2_0 import MemberNodeClient_2_0 as mn_v2
MOCK_GMN_BASE_URL = 'http://gmn.client/node'
@pytest.fixture(scope='function', params=[mn_v1])
def gmn_client_v1(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v2])
def gmn_client_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v1, mn_v2])
def gmn_client_v1_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
|
<commit_before><commit_msg>Fix hidden unit test dependencies<commit_after>
|
# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from d1_client.mnclient_1_2 import MemberNodeClient_1_2 as mn_v1
from d1_client.mnclient_2_0 import MemberNodeClient_2_0 as mn_v2
MOCK_GMN_BASE_URL = 'http://gmn.client/node'
@pytest.fixture(scope='function', params=[mn_v1])
def gmn_client_v1(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v2])
def gmn_client_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v1, mn_v2])
def gmn_client_v1_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
|
Fix hidden unit test dependencies# -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from d1_client.mnclient_1_2 import MemberNodeClient_1_2 as mn_v1
from d1_client.mnclient_2_0 import MemberNodeClient_2_0 as mn_v2
MOCK_GMN_BASE_URL = 'http://gmn.client/node'
@pytest.fixture(scope='function', params=[mn_v1])
def gmn_client_v1(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v2])
def gmn_client_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v1, mn_v2])
def gmn_client_v1_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
|
<commit_before><commit_msg>Fix hidden unit test dependencies<commit_after># -*- coding: utf-8 -*-
# This work was created by participants in the DataONE project, and is
# jointly copyrighted by participating institutions in DataONE. For
# more information on DataONE, see our web site at http://dataone.org.
#
# Copyright 2009-2016 DataONE
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from d1_client.mnclient_1_2 import MemberNodeClient_1_2 as mn_v1
from d1_client.mnclient_2_0 import MemberNodeClient_2_0 as mn_v2
MOCK_GMN_BASE_URL = 'http://gmn.client/node'
@pytest.fixture(scope='function', params=[mn_v1])
def gmn_client_v1(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v2])
def gmn_client_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
@pytest.fixture(scope='function', params=[mn_v1, mn_v2])
def gmn_client_v1_v2(request):
yield request.param(MOCK_GMN_BASE_URL)
|
|
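The three fixtures above rely on pytest's fixture parametrization, so each consuming test runs once per client class listed in params. A self-contained illustration of the same pattern with hypothetical stand-in classes:

import pytest

class ClientA:
    def __init__(self, base_url):
        self.base_url = base_url

class ClientB:
    def __init__(self, base_url):
        self.base_url = base_url

@pytest.fixture(scope='function', params=[ClientA, ClientB])
def client(request):
    # request.param is the class chosen for this particular test run
    yield request.param('http://example.invalid/node')

def test_base_url(client):  # collected twice, once per class
    assert client.base_url.endswith('/node')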
b17d1d72513e46b1c43664ea89dd37bc7f8d137c
|
problem_38.py
|
problem_38.py
|
from time import time
from itertools import permutations
DIGITS = 123456789
def is_pandigital_product(num):
for l in range(1, len(str(num))):
candidate_num = int(str(num)[0:l])
if candidate_num > 2 * int(str(num)[l:]):
break
product_idx = 2
products = [str(candidate_num)]
try:
while True:
product = product_idx * candidate_num
start_product = len(''.join(products))
if product != int(str(num)[start_product:start_product + len(str(product))]):
break
products.append(str(product))
product_idx += 1
except ValueError:
print 'Products:', products
return True
return False
def main():
pandigitals = sorted([int('9' + ''.join(p)) for p in permutations(str(DIGITS)[:-1])], reverse=True)
for candidate in pandigitals:
if is_pandigital_product(candidate):
break
else:
candidate -= 1
print 'Max pandigital concatenated product:', candidate
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 38, pandigital multiples
|
Add problem 38, pandigital multiples
|
Python
|
mit
|
dimkarakostas/project-euler
|
Add problem 38, pandigital multiples
|
from time import time
from itertools import permutations
DIGITS = 123456789
def is_pandigital_product(num):
for l in range(1, len(str(num))):
candidate_num = int(str(num)[0:l])
if candidate_num > 2 * int(str(num)[l:]):
break
product_idx = 2
products = [str(candidate_num)]
try:
while True:
product = product_idx * candidate_num
start_product = len(''.join(products))
if product != int(str(num)[start_product:start_product + len(str(product))]):
break
products.append(str(product))
product_idx += 1
except ValueError:
print 'Products:', products
return True
return False
def main():
pandigitals = sorted([int('9' + ''.join(p)) for p in permutations(str(DIGITS)[:-1])], reverse=True)
for candidate in pandigitals:
if is_pandigital_product(candidate):
break
else:
candidate -= 1
print 'Max pandigital concatenated product:', candidate
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 38, pandigital multiples<commit_after>
|
from time import time
from itertools import permutations
DIGITS = 123456789
def is_pandigital_product(num):
for l in range(1, len(str(num))):
candidate_num = int(str(num)[0:l])
if candidate_num > 2 * int(str(num)[l:]):
break
product_idx = 2
products = [str(candidate_num)]
try:
while True:
product = product_idx * candidate_num
start_product = len(''.join(products))
if product != int(str(num)[start_product:start_product + len(str(product))]):
break
products.append(str(product))
product_idx += 1
except ValueError:
print 'Products:', products
return True
return False
def main():
pandigitals = sorted([int('9' + ''.join(p)) for p in permutations(str(DIGITS)[:-1])], reverse=True)
for candidate in pandigitals:
if is_pandigital_product(candidate):
break
else:
candidate -= 1
print 'Max pandigital concatenated product:', candidate
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
Add problem 38, pandigital multiplesfrom time import time
from itertools import permutations
DIGITS = 123456789
def is_pandigital_product(num):
for l in range(1, len(str(num))):
candidate_num = int(str(num)[0:l])
if candidate_num > 2 * int(str(num)[l:]):
break
product_idx = 2
products = [str(candidate_num)]
try:
while True:
product = product_idx * candidate_num
start_product = len(''.join(products))
if product != int(str(num)[start_product:start_product + len(str(product))]):
break
products.append(str(product))
product_idx += 1
except ValueError:
print 'Products:', products
return True
return False
def main():
pandigitals = sorted([int('9' + ''.join(p)) for p in permutations(str(DIGITS)[:-1])], reverse=True)
for candidate in pandigitals:
if is_pandigital_product(candidate):
break
else:
candidate -= 1
print 'Max pandigital concatenated product:', candidate
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
<commit_before><commit_msg>Add problem 38, pandigital multiples<commit_after>from time import time
from itertools import permutations
DIGITS = 123456789
def is_pandigital_product(num):
for l in range(1, len(str(num))):
candidate_num = int(str(num)[0:l])
if candidate_num > 2 * int(str(num)[l:]):
break
product_idx = 2
products = [str(candidate_num)]
try:
while True:
product = product_idx * candidate_num
start_product = len(''.join(products))
if product != int(str(num)[start_product:start_product + len(str(product))]):
break
products.append(str(product))
product_idx += 1
except ValueError:
print 'Products:', products
return True
return False
def main():
pandigitals = sorted([int('9' + ''.join(p)) for p in permutations(str(DIGITS)[:-1])], reverse=True)
for candidate in pandigitals:
if is_pandigital_product(candidate):
break
else:
candidate -= 1
print 'Max pandigital concatenated product:', candidate
if __name__ == '__main__':
t = time()
main()
print 'Time:', time() - t
|
|
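A quick worked check of the concatenated-product property the script above searches for, using the problem statement's example of n = 192 with multipliers (1, 2, 3):

n = 192
concatenated = ''.join(str(n * k) for k in (1, 2, 3))
print(concatenated)                               # 192384576
print(sorted(concatenated) == list('123456789'))  # True, i.e. 1-9 pandigital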
ae556bc7878ce3da255cf3286dfad12b92324c3b
|
contract.v.py
|
contract.v.py
|
# Ethereum flashcards smart-contract v1.0
#
# This contract is used to place/execute sell and buy
# orders for flashcard categories in codingchili/flashcards-webapp.git
# maps category owners to a category and its cost.
categories: {
category: bytes32,
owner: address,
cost: wei_value,
}[bytes32]
# maps buyers to categories for successful purchases.
orders: {
buyer: address,
category: bytes32,
}[address]
# a fee in percentage 0.0-1.0 to deduct from purchases
fee: public(decimal)
# the address of the broker, for arbitrage.
broker: public(address)
@public
def __init__():
self.fee = 0.05
self.broker = msg.sender
# adds a new category and assigns it to the sending address.
@public
def submit(categoryId: bytes32, cost: wei_value):
# assert category does not exist - to prevent overwriting.
assert not (self.categories[categoryId])
self.categories[categoryId].cost = cost;
self.categories[categoryId].owner = msg.sender;
self.categories[categoryId].category = categoryId;
pass
# changes the cost of a category, cannot change the address!
@public
def update(categoryId: bytes32, cost: wei_value):
assert self.categories[categoryId]
assert self.categories[categoryId].owner == msg.sender
self.categories[categoryId].cost = cost
# attempts to purchase the specified category.
@public
@payable
def purchase(categoryId: bytes32):
# ensure the payment is appropriate.
assert msg.value == self.categories[categoryId].cost;
# ensure the sender does not already own the category.
assert not (self.orders[msg.sender])
# transfer funds to owner and the broker.
send(self.broker, as_wei_value(msg.value * self.fee, wei))
send(self.categories[categoryId].owner, as_wei_value(msg.value * (1 - self.fee), wei))
# add the order: todo: enable one-to-many mappings.
self.orders[msg.sender].category = categoryId
# returns the cost of the given category id.
@public
@constant
def cost(categoryId: bytes32) -> wei_value:
return self.categories[categoryId].cost;
# returns true if the given address has bought
@public
@constant
def allowed(buyer: address) -> bool:
return self.orders[buyer];
|
Add first draft of ethereum trading contract in Viper.
|
Add first draft of ethereum trading contract in Viper.
|
Python
|
mit
|
codingchili/flashcards-webapp,codingchili/flashcards-webapp,codingchili/flashcards-webapp,codingchili/flashcards-webapp,codingchili/flashcards-webapp
|
Add first draft of ethereum trading contract in Viper.
|
# Ethereum flashcards smart-contract v1.0
#
# This contract is used to place/execute sell and buy
# orders for flashcard categories in codingchili/flashcards-webapp.git
# maps category owners to a category and its cost.
categories: {
category: bytes32,
owner: address,
cost: wei_value,
}[bytes32]
# maps buyers to categories for successful purchases.
orders: {
buyer: address,
category: bytes32,
}[address]
# a fee in percentage 0.0-1.0 to deduct from purchases
fee: public(decimal)
# the address of the broker, for arbitrage.
broker: public(address)
@public
def __init__():
self.fee = 0.05
self.broker = msg.sender
# adds a new category and assigns it to the sending address.
@public
def submit(categoryId: bytes32, cost: wei_value):
# assert category does not exist - to prevent overwriting.
assert not (self.categories[categoryId])
self.categories[categoryId].cost = cost;
self.categories[categoryId].owner = msg.sender;
self.categories[categoryId].category = categoryId;
pass
# changes the cost of a category, cannot change the address!
@public
def update(categoryId: bytes32, cost: wei_value):
assert self.categories[categoryId]
assert self.categories[categoryId].owner == msg.sender
self.categories[categoryId].cost = cost
# attempts to purchase the specified category.
@public
@payable
def purchase(categoryId: bytes32):
# ensure the payment is appropriate.
assert msg.value == self.categories[categoryId].cost;
# ensure the sender does not already own the category.
assert not (self.orders[msg.sender])
# transfer funds to owner and the broker.
send(self.broker, as_wei_value(msg.value * self.fee, wei))
send(self.categories[categoryId].owner, as_wei_value(msg.value * (1 - self.fee), wei))
# add the order: todo: enable one-to-many mappings.
self.orders[msg.sender].category = categoryId
# returns the cost of the given category id.
@public
@constant
def cost(categoryId: bytes32) -> wei_value:
return self.categories[categoryId].cost;
# returns true if the given address has bought
@public
@constant
def allowed(buyer: address) -> bool:
return self.orders[buyer];
|
<commit_before><commit_msg>Add first draft of ethereum trading contract in Viper.<commit_after>
|
# Ethereum flashcards smart-contract v1.0
#
# This contract is used to place/execute sell and buy
# orders for flashcard categories in codingchili/flashcards-webapp.git
# maps category owners to a category and its cost.
categories: {
category: bytes32,
owner: address,
cost: wei_value,
}[bytes32]
# maps buyers to categories for successful purchases.
orders: {
buyer: address,
category: bytes32,
}[address]
# a fee in percentage 0.0-1.0 to deduct from purchases
fee: public(decimal)
# the address of the broker, for arbitrage.
broker: public(address)
@public
def __init__():
self.fee = 0.05
self.broker = msg.sender
# adds a new category and assigns it to the sending address.
@public
def submit(categoryId: bytes32, cost: wei_value):
# assert category does not exist - to prevent overwriting.
assert not (self.categories[categoryId])
self.categories[categoryId].cost = cost;
self.categories[categoryId].owner = msg.sender;
self.categories[categoryId].category = categoryId;
pass
# changes the cost of a category, cannot change the address!
@public
def update(categoryId: bytes32, cost: wei_value):
assert self.categories[categoryId]
assert self.categories[categoryId].owner == msg.sender
self.categories[categoryId].cost = cost
# attempts to purchase the specified category.
@public
@payable
def purchase(categoryId: bytes32):
# ensure the payment is appropriate.
assert msg.value == self.categories[categoryId].cost;
# ensure the sender does not already own the category.
assert not (self.orders[msg.sender])
# transfer funds to owner and the broker.
send(self.broker, as_wei_value(msg.value * self.fee, wei))
send(self.categories[categoryId].owner, as_wei_value(msg.value * (1 - self.fee), wei))
# add the order: todo: enable one-to-many mappings.
self.orders[msg.sender].category = categoryId
# returns the cost of the given category id.
@public
@constant
def cost(categoryId: bytes32) -> wei_value:
return self.categories[categoryId].cost;
# returns true if the given address has bought
@public
@constant
def allowed(buyer: address) -> bool:
return self.orders[buyer];
|
Add first draft of ethereum trading contract in Viper.# Ethereum flashcards smart-contract v1.0
#
# This contract is used to place/execute sell and buy
# orders for flashcard categories in codingchili/flashcards-webapp.git
# maps category owners to a category and its cost.
categories: {
category: bytes32,
owner: address,
cost: wei_value,
}[bytes32]
# maps buyers to categories for successful purchases.
orders: {
buyer: address,
category: bytes32,
}[address]
# a fee in percentage 0.0-1.0 to deduct from purchases
fee: public(decimal)
# the address of the broker, for arbitrage.
broker: public(address)
@public
def __init__():
self.fee = 0.05
self.broker = msg.sender
# adds a new category and assigns it to the sending address.
@public
def submit(categoryId: bytes32, cost: wei_value):
# assert category does not exist - to prevent overwriting.
assert not (self.categories[categoryId])
self.categories[categoryId].cost = cost;
self.categories[categoryId].owner = msg.sender;
self.categories[categoryId].category = categoryId;
pass
# changes the cost of a category, cannot change the address!
@public
def update(categoryId: bytes32, cost: wei_value):
assert self.categories[categoryId]
assert self.categories[categoryId].owner == msg.sender
self.categories[categoryId].cost = cost
# attempts to purchase the specified category.
@public
@payable
def purchase(categoryId: bytes32):
# ensure the payment is appropriate.
assert msg.value == self.categories[categoryId].cost;
# ensure the sender does not already own the category.
assert not (self.orders[msg.sender])
# transfer funds to owner and the broker.
send(self.broker, as_wei_value(msg.value * self.fee, wei))
send(self.categories[categoryId].owner, as_wei_value(msg.value * (1 - self.fee), wei))
# add the order: todo: enable one-to-many mappings.
self.orders[msg.sender].category = categoryId
# returns the cost of the given category id.
@public
@constant
def cost(categoryId: bytes32) -> wei_value:
return self.categories[categoryId].cost;
# returns true if the given address has bought
@public
@constant
def allowed(buyer: address) -> bool:
return self.orders[buyer];
|
<commit_before><commit_msg>Add first draft of ethereum trading contract in Viper.<commit_after># Ethereum flashcards smart-contract v1.0
#
# This contract is used to place/execute sell and buy
# orders for flashcard categories in codingchili/flashcards-webapp.git
# maps category owners to a category and its cost.
categories: {
category: bytes32,
owner: address,
cost: wei_value,
}[bytes32]
# maps buyers to categories for successful purchases.
orders: {
buyer: address,
category: bytes32,
}[address]
# a fee in percentage 0.0-1.0 to deduct from purchases
fee: public(decimal)
# the address of the broker, for arbitrage.
broker: public(address)
@public
def __init__():
self.fee = 0.05
self.broker = msg.sender
# adds a new category and assigns it to the sending address.
@public
def submit(categoryId: bytes32, cost: wei_value):
# assert category does not exist - to prevent overwriting.
assert not (self.categories[categoryId])
self.categories[categoryId].cost = cost;
self.categories[categoryId].owner = msg.sender;
self.categories[categoryId].category = categoryId;
pass
# changes the cost of a category, cannot change the address!
@public
def update(categoryId: bytes32, cost: wei_value):
assert self.categories[categoryId]
assert self.categories[categoryId].owner == msg.sender
self.categories[categoryId].cost = cost
# attempts to purchase the specified category.
@public
@payable
def purchase(categoryId: bytes32):
# ensure the payment is appropriate.
assert msg.value == self.categories[categoryId].cost;
# ensure the sender does not already own the category.
assert not (self.orders[msg.sender])
# transfer funds to owner and the broker.
send(self.broker, as_wei_value(msg.value * self.fee, wei))
send(self.categories[categoryId].owner, as_wei_value(msg.value * (1 - self.fee), wei))
# add the order: todo: enable one-to-many mappings.
self.orders[msg.sender].category = categoryId
# returns the cost of the given category id.
@public
@constant
def cost(categoryId: bytes32) -> wei_value:
return self.categories[categoryId].cost;
# returns true if the given address has bought
@public
@constant
def allowed(buyer: address) -> bool:
return self.orders[buyer];
|
|
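A plain-Python sanity check (not Viper) of the fee split performed in purchase() above, with a hypothetical price; integer arithmetic is used here because naive float expressions such as value * (1 - fee) can round a wei short:

value = 10000                   # hypothetical purchase price in wei
broker_cut = value * 5 // 100   # the 5% broker fee, in integer math
owner_cut = value - broker_cut  # remainder goes to the category owner
print(broker_cut, owner_cut, broker_cut + owner_cut)  # 500 9500 10000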
ef66a692305bfd580071095906ae42feb988f3bd
|
ha-server-details.py
|
ha-server-details.py
|
#!/usr/bin/env python
#usage: ha-server-details UUID
import sys
import pyrax
import getopt
import pprint
from common import init_pyrax
def main(argv):
if len(argv) != 1:
print "usage: ha-server-details UUID";
return
init_pyrax()
for ha in pyrax.cloud_databases.list_ha():
if ha.id == argv[0]:
ha.get()
print ha
break
if __name__ == "__main__":
main(sys.argv[1:])
|
Add a script to get server details.
|
Add a script to get server details.
|
Python
|
apache-2.0
|
ddaeschler/rackspace-ha-dbtools,ddaeschler/rackspace-ha-dbtools
|
Add a script to get server details.
|
#!/usr/bin/env python
#usage: ha-server-details UUID
import sys
import pyrax
import getopt
import pprint
from common import init_pyrax
def main(argv):
if len(argv) != 1:
print "usage: ha-server-details UUID";
return
init_pyrax()
for ha in pyrax.cloud_databases.list_ha():
if ha.id == argv[0]:
ha.get()
print ha
break
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add a script to get server details.<commit_after>
|
#!/usr/bin/env python
#usage: ha-server-details UUID
import sys
import pyrax
import getopt
import pprint
from common import init_pyrax
def main(argv):
if len(argv) != 1:
print "usage: ha-server-details UUID";
return
init_pyrax()
for ha in pyrax.cloud_databases.list_ha():
if ha.id == argv[0]:
ha.get()
print ha
break
if __name__ == "__main__":
main(sys.argv[1:])
|
Add a script to get server details.#!/usr/bin/env python
#usage: ha-server-details UUID
import sys
import pyrax
import getopt
import pprint
from common import init_pyrax
def main(argv):
if len(argv) != 1:
print "usage: ha-server-details UUID";
return
init_pyrax()
for ha in pyrax.cloud_databases.list_ha():
if ha.id == argv[0]:
ha.get()
print ha
break
if __name__ == "__main__":
main(sys.argv[1:])
|
<commit_before><commit_msg>Add a script to get server details.<commit_after>#!/usr/bin/env python
#usage: ha-server-details UUID
import sys
import pyrax
import getopt
import pprint
from common import init_pyrax
def main(argv):
if len(argv) != 1:
print "usage: ha-server-details UUID";
return
init_pyrax()
for ha in pyrax.cloud_databases.list_ha():
if ha.id == argv[0]:
ha.get()
print ha
break
if __name__ == "__main__":
main(sys.argv[1:])
|
|
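For what it's worth, the for/break scan above can also be written with next(); a sketch reusing the record's init_pyrax() helper and the same pyrax call:

import pyrax
from common import init_pyrax  # same project-local helper the record imports

def find_ha(uuid):
    init_pyrax()
    # next() returns the first matching HA group, or None if there is none
    ha = next((h for h in pyrax.cloud_databases.list_ha() if h.id == uuid), None)
    if ha is not None:
        ha.get()
    return ha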
abaccb07d3acc13a49765c5d203bc886a06b6a4e
|
python/039.py
|
python/039.py
|
'''
Integer Right Triangles
=======================
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
'''
# In a right angle triangle a^2 + b^2 = c^2. Rather than search for right angle
# triangles of perimeter exactly p for each p, instead find all right angle
# triangles with p <= 1,000 in any order and tabulate them based on their
# perimeters.
#
# To avoid duplication by symmetry in a, b, we ensure a <= b in the search. In
# a right angle triangle the hypotenuse c is the largest side, so b < c also.
# And since a + b + c <= p_max and a <= b < c, it must be then a < p_max/3 and
# b < p_max/2 or the perimeter will be > than p_max.
from math import sqrt
p_max = 1000
counts = [0] * (p_max + 1) # Extra element so can index as p instead of p - 1.
for a in range(1, int(p_max / 3)):
for b in range(a, int(p_max / 2)):
# Check c is an integer by rounding it to the closest integer and
# confirming the equality without float operations.
c = round(sqrt(a * a + b * b))
if a * a + b * b == c * c:
# Count the right triangle if perimeter is not larger than p_max.
if a + b + c <= p_max:
counts[a + b + c] += 1
max_index, _ = max(enumerate(counts), key=lambda pair: pair[1])
answer = max_index # = 840
print(answer)
|
Add python implementation for problem 39.
|
Add python implementation for problem 39.
|
Python
|
apache-2.0
|
daithiocrualaoich/euler
|
Add python implementation for problem 39.
|
'''
Integer Right Triangles
=======================
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
'''
# In a right angle triangle a^2 + b^2 = c^2. Rather than search for right angle
# triangles of perimeter exactly p for each p, instead find all right angle
# triangles with p <= 1,000 in any order and tabulate them based on their
# perimeters.
#
# To avoid duplication by symmetry in a, b, we ensure a <= b in the search. In
# a right angle triangle the hypotenuse c is the largest side, so b < c also.
# And since a + b + c <= p_max and a <= b < c, it must be then a < p_max/3 and
# b < p_max/2 or the perimeter will be > than p_max.
from math import sqrt
p_max = 1000
counts = [0] * (p_max + 1) # Extra element so can index as p instead of p - 1.
for a in range(1, int(p_max / 3)):
for b in range(a, int(p_max / 2)):
# Check c is an integer by rounding it to the closest integer and
# confirming the equality without float operations.
c = round(sqrt(a * a + b * b))
if a * a + b * b == c * c:
# Count the right triangle if perimeter is not larger than p_max.
if a + b + c <= p_max:
counts[a + b + c] += 1
max_index, _ = max(enumerate(counts), key=lambda pair: pair[1])
answer = max_index # = 840
print(answer)
|
<commit_before><commit_msg>Add python implementation for problem 39.<commit_after>
|
'''
Integer Right Triangles
=======================
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
'''
# In a right angle triangle a^2 + b^2 = c^2. Rather than search for right angle
# triangles of perimeter exactly p for each p, instead find all right angle
# triangles with p <= 1,000 in any order and tabulate them based on their
# perimeters.
#
# To avoid duplication by symmetry in a, b, we ensure a <= b in the search. In
# a right angle triangle the hypotenuse c is the largest side, so b < c also.
# And since a + b + c <= p_max and a <= b < c, it must be then a < p_max/3 and
# b < p_max/2 or the perimeter will be > than p_max.
from math import sqrt
p_max = 1000
counts = [0] * (p_max + 1) # Extra element so can index as p instead of p - 1.
for a in range(1, int(p_max / 3)):
for b in range(a, int(p_max / 2)):
# Check c is an integer by rounding it to the closest integer and
# confirming the equality without float operations.
c = round(sqrt(a * a + b * b))
if a * a + b * b == c * c:
# Count the right triangle if perimeter is not larger than p_max.
if a + b + c <= p_max:
counts[a + b + c] += 1
max_index, _ = max(enumerate(counts), key=lambda pair: pair[1])
answer = max_index # = 840
print(answer)
|
Add python implementation for problem 39.'''
Integer Right Triangles
=======================
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
'''
# In a right angle triangle a^2 + b^2 = c^2. Rather than search for right angle
# triangles of perimeter exactly p for each p, instead find all right angle
# triangles with p <= 1,000 in any order and tabulate them based on their
# perimeters.
#
# To avoid duplication by symmetry in a, b, we ensure a <= b in the search. In
# a right angle triangle the hypotenuse c is the largest side, so b < c also.
# And since a + b + c <= p_max and a <= b < c, it must be then a < p_max/3 and
# b < p_max/2 or the perimeter will be > than p_max.
from math import sqrt
p_max = 1000
counts = [0] * (p_max + 1) # Extra element so can index as p instead of p - 1.
for a in range(1, int(p_max / 3)):
for b in range(a, int(p_max / 2)):
# Check c is an integer by rounding it to the closest integer and
# confirming the equality without float operations.
c = round(sqrt(a * a + b * b))
if a * a + b * b == c * c:
# Count the right triangle if perimeter is not larger than p_max.
if a + b + c <= p_max:
counts[a + b + c] += 1
max_index, _ = max(enumerate(counts), key=lambda pair: pair[1])
answer = max_index # = 840
print(answer)
|
<commit_before><commit_msg>Add python implementation for problem 39.<commit_after>'''
Integer Right Triangles
=======================
If p is the perimeter of a right angle triangle with integral length sides,
{a,b,c}, there are exactly three solutions for p = 120.
{20,48,52}, {24,45,51}, {30,40,50}
For which value of p ≤ 1000, is the number of solutions maximised?
'''
# In a right angle triangle a^2 + b^2 = c^2. Rather than search for right angle
# triangles of perimeter exactly p for each p, instead find all right angle
# triangles with p <= 1,000 in any order and tabulate them based on their
# perimeters.
#
# To avoid duplication by symmetry in a, b, we ensure a <= b in the search. In
# a right angle triangle the hypotenuse c is the largest side, so b < c also.
# And since a + b + c <= p_max and a <= b < c, it must be then a < p_max/3 and
# b < p_max/2 or the perimeter will be > than p_max.
from math import sqrt
p_max = 1000
counts = [0] * (p_max + 1) # Extra element so can index as p instead of p - 1.
for a in range(1, int(p_max / 3)):
for b in range(a, int(p_max / 2)):
# Check c is an integer by rounding it to the closest integer and
# confirming the equality without float operations.
c = round(sqrt(a * a + b * b))
if a * a + b * b == c * c:
# Count the right triangle if perimeter is not larger than p_max.
if a + b + c <= p_max:
counts[a + b + c] += 1
max_index, _ = max(enumerate(counts), key=lambda pair: pair[1])
answer = max_index # = 840
print(answer)
|
|
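A quick brute-force confirmation, independent of the script above, that p = 120 really has exactly the three solutions quoted in the docstring:

p = 120
solutions = [(a, b, p - a - b)
             for a in range(1, p)
             for b in range(a, p - a)
             if a * a + b * b == (p - a - b) ** 2 and p - a - b > b]
print(solutions)  # [(20, 48, 52), (24, 45, 51), (30, 40, 50)]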
5f6befdd8a90de3257b9d5bca9a103d1bdb43cfa
|
onebot/plugins/psa.py
|
onebot/plugins/psa.py
|
# -*- coding: utf-8 -*-
"""
================================================
:mod:`onebot.plugins.psa` PSA
================================================
This plugin allows admins to send broadcasts
"""
from irc3 import plugin
from irc3.plugins.command import command
@plugin
class PSAPlugin(object):
"""PSA Plugin"""
requires = [
'irc3.plugins.command',
'irc3.plugins.userlist',
]
def __init__(self, bot):
self.bot = bot
@command(permission='admin', show_in_help_list=False)
def psa(self, mask, target, args):
"""Broadcast a public service announcement to all channels
%%psa <message>...
"""
for channel in self.bot.channels:
self.bot.privmsg(channel, ' '.join(args['<message>']))
|
Create PSA plugin, only accessible for admins
|
Create PSA plugin, only accessible for admins
|
Python
|
bsd-3-clause
|
thomwiggers/onebot
|
Create PSA plugin, only accessible for admins
|
# -*- coding: utf-8 -*-
"""
================================================
:mod:`onebot.plugins.psa` PSA
================================================
This plugin allows admins to send broadcasts
"""
from irc3 import plugin
from irc3.plugins.command import command
@plugin
class PSAPlugin(object):
"""PSA Plugin"""
requires = [
'irc3.plugins.command',
'irc3.plugins.userlist',
]
def __init__(self, bot):
self.bot = bot
@command(permission='admin', show_in_help_list=False)
def psa(self, mask, target, args):
"""Broadcast a public service announcement to all channels
%%psa <message>...
"""
for channel in self.bot.channels:
self.bot.privmsg(channel, ' '.join(args['<message>']))
|
<commit_before><commit_msg>Create PSA plugin, only accessible for admins<commit_after>
|
# -*- coding: utf-8 -*-
"""
================================================
:mod:`onebot.plugins.psa` PSA
================================================
This plugin allows admins to send broadcasts
"""
from irc3 import plugin
from irc3.plugins.command import command
@plugin
class PSAPlugin(object):
"""PSA Plugin"""
requires = [
'irc3.plugins.command',
'irc3.plugins.userlist',
]
def __init__(self, bot):
self.bot = bot
@command(permission='admin', show_in_help_list=False)
def psa(self, mask, target, args):
"""Broadcast a public service announcement to all channels
%%psa <message>...
"""
for channel in self.bot.channels:
self.bot.privmsg(channel, ' '.join(args['<message>']))
|
Create PSA plugin, only accessible for admins# -*- coding: utf-8 -*-
"""
================================================
:mod:`onebot.plugins.psa` PSA
================================================
This plugin allows admins to send broadcasts
"""
from irc3 import plugin
from irc3.plugins.command import command
@plugin
class PSAPlugin(object):
"""PSA Plugin"""
requires = [
'irc3.plugins.command',
'irc3.plugins.userlist',
]
def __init__(self, bot):
self.bot = bot
@command(permission='admin', show_in_help_list=False)
def psa(self, mask, target, args):
"""Broadcast a public service announcement to all channels
%%psa <message>...
"""
for channel in self.bot.channels:
self.bot.privmsg(channel, ' '.join(args['<message>']))
|
<commit_before><commit_msg>Create PSA plugin, only accessible for admins<commit_after># -*- coding: utf-8 -*-
"""
================================================
:mod:`onebot.plugins.psa` PSA
================================================
This plugin allows admins to send broadcasts
"""
from irc3 import plugin
from irc3.plugins.command import command
@plugin
class PSAPlugin(object):
"""PSA Plugin"""
requires = [
'irc3.plugins.command',
'irc3.plugins.userlist',
]
def __init__(self, bot):
self.bot = bot
@command(permission='admin', show_in_help_list=False)
def psa(self, mask, target, args):
"""Broadcast a public service announcement to all channels
%%psa <message>...
"""
for channel in self.bot.channels:
self.bot.privmsg(channel, ' '.join(args['<message>']))
|
|
5d9b4af15b60f5f597179fdfc66f0539acc48798
|
phonetics_download.py
|
phonetics_download.py
|
'''
Created on 2013-12-20
@author: bn
'''
# -*- coding: gbk -*-
import re
try:
input = raw_input
except NameError:
pass
try:
import urllib.request
#import urllib.parse
except ImportError:
import urllib
urllib.request = __import__('urllib2')
urllib.parse = __import__('urlparse')
urlopen = urllib.request.urlopen
request = urllib.request.Request
def get_content_from_url(url):
attempts = 0
content = ''
while attempts < 5:
try:
content = urlopen(url).read().decode('utf-8', 'ignore')
break
except Exception as e:
attempts += 1
print(e)
return content
def download_from_aiciba(query, path):
"""Download full size images from Bing image search.
Don't print or republish images without permission.
I used this to train a learning algorithm.
"""
BASE_URL = 'http://www.aiciba.com/'+query
content = get_content_from_url(BASE_URL)
phonetic_list = re.findall(
"\[</strong><strong lang=\"EN-US\" xml:lang=\"EN-US\">([^<>/]*)</strong><strong>\]", content, re.M | re.S)
print(phonetic_list)
file = open(path, "a")
for item in phonetic_list:
file.write(item.encode('utf-8')+'\n')
file.close()
download_from_aiciba("person", "phonetics.log")
|
Add phonetics download from aiciba; it can download more things from aiciba in future
|
Add phonetics download from aiciba; it can download more things from aiciba in future
|
Python
|
apache-2.0
|
crike/crike,crike/crike,crike/crike,crike/crike
|
Add phonetics download from aiciba; it can download more things from aiciba in future
|
'''
Created on 2013-12-20
@author: bn
'''
# -*- coding: gbk -*-
import re
try:
input = raw_input
except NameError:
pass
try:
import urllib.request
#import urllib.parse
except ImportError:
import urllib
urllib.request = __import__('urllib2')
urllib.parse = __import__('urlparse')
urlopen = urllib.request.urlopen
request = urllib.request.Request
def get_content_from_url(url):
attempts = 0
content = ''
while attempts < 5:
try:
content = urlopen(url).read().decode('utf-8', 'ignore')
break
except Exception as e:
attempts += 1
print(e)
return content
def download_from_aiciba(query, path):
"""Download full size images from Bing image search.
Don't print or republish images without permission.
I used this to train a learning algorithm.
"""
BASE_URL = 'http://www.aiciba.com/'+query
content = get_content_from_url(BASE_URL)
phonetic_list = re.findall(
"\[</strong><strong lang=\"EN-US\" xml:lang=\"EN-US\">([^<>/]*)</strong><strong>\]", content, re.M | re.S)
print(phonetic_list)
file = open(path, "a")
for item in phonetic_list:
file.write(item.encode('utf-8')+'\n')
file.close()
download_from_aiciba("person", "phonetics.log")
|
<commit_before><commit_msg>Add phonetics download from aiciba; it can download more things from aiciba in future<commit_after>
|
'''
Created on 2013-12-20
@author: bn
'''
# -*- coding: gbk -*-
import re
try:
input = raw_input
except NameError:
pass
try:
import urllib.request
#import urllib.parse
except ImportError:
import urllib
urllib.request = __import__('urllib2')
urllib.parse = __import__('urlparse')
urlopen = urllib.request.urlopen
request = urllib.request.Request
def get_content_from_url(url):
attempts = 0
content = ''
while attempts < 5:
try:
content = urlopen(url).read().decode('utf-8', 'ignore')
break
except Exception as e:
attempts += 1
print(e)
return content
def download_from_aiciba(query, path):
"""Download full size images from Bing image search.
Don't print or republish images without permission.
I used this to train a learning algorithm.
"""
BASE_URL = 'http://www.aiciba.com/'+query
content = get_content_from_url(BASE_URL)
phonetic_list = re.findall(
"\[</strong><strong lang=\"EN-US\" xml:lang=\"EN-US\">([^<>/]*)</strong><strong>\]", content, re.M | re.S)
print(phonetic_list)
file = open(path, "a")
for item in phonetic_list:
file.write(item.encode('utf-8')+'\n')
file.close()
download_from_aiciba("person", "phonetics.log")
|
Add phonetics download from aiciba; it can download more things from aiciba in future'''
Created on 2013-12-20
@author: bn
'''
# -*- coding: gbk -*-
import re
try:
input = raw_input
except NameError:
pass
try:
import urllib.request
#import urllib.parse
except ImportError:
import urllib
urllib.request = __import__('urllib2')
urllib.parse = __import__('urlparse')
urlopen = urllib.request.urlopen
request = urllib.request.Request
def get_content_from_url(url):
attempts = 0
content = ''
while attempts < 5:
try:
content = urlopen(url).read().decode('utf-8', 'ignore')
break
except Exception as e:
attempts += 1
print(e)
return content
def download_from_aiciba(query, path):
"""Download full size images from Bing image search.
Don't print or republish images without permission.
I used this to train a learning algorithm.
"""
BASE_URL = 'http://www.aiciba.com/'+query
content = get_content_from_url(BASE_URL)
phonetic_list = re.findall(
"\[</strong><strong lang=\"EN-US\" xml:lang=\"EN-US\">([^<>/]*)</strong><strong>\]", content, re.M | re.S)
print(phonetic_list)
file = open(path, "a")
for item in phonetic_list:
file.write(item.encode('utf-8')+'\n')
file.close()
download_from_aiciba("person", "phonetics.log")
|
<commit_before><commit_msg>Add phonetics download from aiciba; it can download more things from aiciba in future<commit_after>'''
Created on 2013-12-20
@author: bn
'''
# -*- coding: gbk -*-
import re
try:
input = raw_input
except NameError:
pass
try:
import urllib.request
#import urllib.parse
except ImportError:
import urllib
urllib.request = __import__('urllib2')
urllib.parse = __import__('urlparse')
urlopen = urllib.request.urlopen
request = urllib.request.Request
def get_content_from_url(url):
attempts = 0
content = ''
while attempts < 5:
try:
content = urlopen(url).read().decode('utf-8', 'ignore')
break
except Exception as e:
attempts += 1
print(e)
return content
def download_from_aiciba(query, path):
"""Download full size images from Bing image search.
Don't print or republish images without permission.
I used this to train a learning algorithm.
"""
BASE_URL = 'http://www.aiciba.com/'+query
content = get_content_from_url(BASE_URL)
phonetic_list = re.findall(
"\[</strong><strong lang=\"EN-US\" xml:lang=\"EN-US\">([^<>/]*)</strong><strong>\]", content, re.M | re.S)
print(phonetic_list)
file = open(path, "a")
for item in phonetic_list:
file.write(item.encode('utf-8')+'\n')
file.close()
download_from_aiciba("person", "phonetics.log")
|
|
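get_content_from_url() above retries up to five times back-to-back; a common variant (a sketch, not taken from the source) spaces the retries out with exponential backoff:

import time
try:
    from urllib.request import urlopen  # Python 3
except ImportError:
    from urllib2 import urlopen         # Python 2

def get_content_with_backoff(url, attempts=5, base_delay=1.0):
    for i in range(attempts):
        try:
            return urlopen(url).read().decode('utf-8', 'ignore')
        except Exception as e:
            print(e)
            time.sleep(base_delay * (2 ** i))  # wait 1s, 2s, 4s, ...
    return ''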
3cb3572b4542235cc62828ff6559546746578d68
|
import_mp3.py
|
import_mp3.py
|
import argparse
import os.path
from fore import database
parser = argparse.ArgumentParser(description="Bulk-import MP3 files into the appension database",
epilog="Story, lyrics, and comments will all be blank.")
parser.add_argument("filename", nargs="+", help="MP3 file(s) to import")
parser.add_argument("--submitter", help="Name of submitter", default="Bulk import")
parser.add_argument("--submitteremail", help="Email address of submitter", default="bulk@import.invalid") # or use a real address here
args = parser.parse_args()
# Build up a form-like dictionary for the info mapping. This is the downside of
# the breaching of encapsulation in database.create_track().
info = {"SubmitterName": [args.submitter], "Email": [args.submitteremail]}
for fn in args.filename:
print("Importing %s"%fn)
with open(fn, "rb") as f: data = f.read()
id = database.create_track(data, os.path.split(fn)[-1], info)
print("Saved as track #%d."%id)
|
Introduce a bulk importer, partly to prove how easy it is
|
Introduce a bulk importer, partly to prove how easy it is
|
Python
|
artistic-2.0
|
MikeiLL/appension,Rosuav/appension,Rosuav/appension,Rosuav/appension,MikeiLL/appension,MikeiLL/appension,MikeiLL/appension,Rosuav/appension
|
Introduce a bulk importer, partly to prove how easy it is
|
import argparse
import os.path
from fore import database
parser = argparse.ArgumentParser(description="Bulk-import MP3 files into the appension database",
epilog="Story, lyrics, and comments will all be blank.")
parser.add_argument("filename", nargs="+", help="MP3 file(s) to import")
parser.add_argument("--submitter", help="Name of submitter", default="Bulk import")
parser.add_argument("--submitteremail", help="Email address of submitter", default="bulk@import.invalid") # or use a real address here
args = parser.parse_args()
# Build up a form-like dictionary for the info mapping. This is the downside of
# the breaching of encapsulation in database.create_track().
info = {"SubmitterName": [args.submitter], "Email": [args.submitteremail]}
for fn in args.filename:
print("Importing %s"%fn)
with open(fn, "rb") as f: data = f.read()
id = database.create_track(data, os.path.split(fn)[-1], info)
print("Saved as track #%d."%id)
|
<commit_before><commit_msg>Introduce a bulk importer, partly to prove how easy it is<commit_after>
|
import argparse
import os.path
from fore import database
parser = argparse.ArgumentParser(description="Bulk-import MP3 files into the appension database",
epilog="Story, lyrics, and comments will all be blank.")
parser.add_argument("filename", nargs="+", help="MP3 file(s) to import")
parser.add_argument("--submitter", help="Name of submitter", default="Bulk import")
parser.add_argument("--submitteremail", help="Email address of submitter", default="bulk@import.invalid") # or use a real address here
args = parser.parse_args()
# Build up a form-like dictionary for the info mapping. This is the downside of
# the breaching of encapsulation in database.create_track().
info = {"SubmitterName": [args.submitter], "Email": [args.submitteremail]}
for fn in args.filename:
print("Importing %s"%fn)
with open(fn, "rb") as f: data = f.read()
id = database.create_track(data, os.path.split(fn)[-1], info)
print("Saved as track #%d."%id)
|
Introduce a bulk importer, partly to prove how easy it isimport argparse
import os.path
from fore import database
parser = argparse.ArgumentParser(description="Bulk-import MP3 files into the appension database",
epilog="Story, lyrics, and comments will all be blank.")
parser.add_argument("filename", nargs="+", help="MP3 file(s) to import")
parser.add_argument("--submitter", help="Name of submitter", default="Bulk import")
parser.add_argument("--submitteremail", help="Email address of submitter", default="bulk@import.invalid") # or use a real address here
args = parser.parse_args()
# Build up a form-like dictionary for the info mapping. This is the downside of
# the breaching of encapsulation in database.create_track().
info = {"SubmitterName": [args.submitter], "Email": [args.submitteremail]}
for fn in args.filename:
print("Importing %s"%fn)
with open(fn, "rb") as f: data = f.read()
id = database.create_track(data, os.path.split(fn)[-1], info)
print("Saved as track #%d."%id)
|
<commit_before><commit_msg>Introduce a bulk importer, partly to prove how easy it is<commit_after>import argparse
import os.path
from fore import database
parser = argparse.ArgumentParser(description="Bulk-import MP3 files into the appension database",
epilog="Story, lyrics, and comments will all be blank.")
parser.add_argument("filename", nargs="+", help="MP3 file(s) to import")
parser.add_argument("--submitter", help="Name of submitter", default="Bulk import")
parser.add_argument("--submitteremail", help="Email address of submitter", default="bulk@import.invalid") # or use a real address here
args = parser.parse_args()
# Build up a form-like dictionary for the info mapping. This is the downside of
# the breaching of encapsulation in database.create_track().
info = {"SubmitterName": [args.submitter], "Email": [args.submitteremail]}
for fn in args.filename:
print("Importing %s"%fn)
with open(fn, "rb") as f: data = f.read()
id = database.create_track(data, os.path.split(fn)[-1], info)
print("Saved as track #%d."%id)
|
|
24788a676642a77be7d8859d4835e496b348f155
|
src/scripts/combine_relnotes.py
|
src/scripts/combine_relnotes.py
|
#!/usr/bin/python
import re
import sys
def main(args = None):
if args is None:
args = sys.argv
re_version = re.compile('Version (\d+\.\d+\.\d+), ([0-9]{4}-[0-9]{2}-[0-9]{2})$')
re_nyr = re.compile('Version (\d+\.\d+\.\d+), Not Yet Released$')
version_contents = {}
version_date = {}
versions = []
versions_nyr = []
for f in args[1:]:
contents = open(f).readlines()
match = re_version.match(contents[0])
if match:
version = match.group(1)
date = match.group(2)
versions.append(version)
version_date[version] = date
else:
match = re_nyr.match(contents[0])
if not match:
raise Exception('No version match for %s' % (f))
version = match.group(1)
versions_nyr.append(version)
version_contents[version] = (''.join(contents)).strip()
def make_label(v):
return ".. _v%s:\n" % (v.replace('.', '_'))
print "Release Notes"
print "========================================"
print
date_to_version = {}
for (v,d) in version_date.items():
date_to_version.setdefault(d, []).append(v)
if len(versions_nyr) > 0:
for v in versions_nyr:
print make_label(v)
print version_contents[v], "\n"
for d in sorted(date_to_version.keys(), reverse=True):
for v in sorted(date_to_version[d]):
print make_label(v)
print version_contents[v], "\n"
if __name__ == '__main__':
sys.exit(main())
|
Add a script for combining version .rst files
|
Add a script for combining version .rst files
|
Python
|
bsd-2-clause
|
webmaster128/botan,Rohde-Schwarz-Cybersecurity/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,randombit/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,Rohde-Schwarz-Cybersecurity/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,randombit/botan,Rohde-Schwarz-Cybersecurity/botan,webmaster128/botan,webmaster128/botan
|
Add a script for combining version .rst files
|
#!/usr/bin/python
import re
import sys
def main(args = None):
if args is None:
args = sys.argv
re_version = re.compile('Version (\d+\.\d+\.\d+), ([0-9]{4}-[0-9]{2}-[0-9]{2})$')
re_nyr = re.compile('Version (\d+\.\d+\.\d+), Not Yet Released$')
version_contents = {}
version_date = {}
versions = []
versions_nyr = []
for f in args[1:]:
contents = open(f).readlines()
match = re_version.match(contents[0])
if match:
version = match.group(1)
date = match.group(2)
versions.append(version)
version_date[version] = date
else:
match = re_nyr.match(contents[0])
if not match:
raise Exception('No version match for %s' % (f))
version = match.group(1)
versions_nyr.append(version)
version_contents[version] = (''.join(contents)).strip()
def make_label(v):
return ".. _v%s:\n" % (v.replace('.', '_'))
print "Release Notes"
print "========================================"
print
date_to_version = {}
for (v,d) in version_date.items():
date_to_version.setdefault(d, []).append(v)
if len(versions_nyr) > 0:
for v in versions_nyr:
print make_label(v)
print version_contents[v], "\n"
for d in sorted(date_to_version.keys(), reverse=True):
for v in sorted(date_to_version[d]):
print make_label(v)
print version_contents[v], "\n"
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script for combining version .rst files<commit_after>
|
#!/usr/bin/python
import re
import sys
def main(args = None):
if args is None:
args = sys.argv
re_version = re.compile('Version (\d+\.\d+\.\d+), ([0-9]{4}-[0-9]{2}-[0-9]{2})$')
re_nyr = re.compile('Version (\d+\.\d+\.\d+), Not Yet Released$')
version_contents = {}
version_date = {}
versions = []
versions_nyr = []
for f in args[1:]:
contents = open(f).readlines()
match = re_version.match(contents[0])
if match:
version = match.group(1)
date = match.group(2)
versions.append(version)
version_date[version] = date
else:
match = re_nyr.match(contents[0])
if not match:
raise Exception('No version match for %s' % (f))
version = match.group(1)
versions_nyr.append(version)
version_contents[version] = (''.join(contents)).strip()
def make_label(v):
return ".. _v%s:\n" % (v.replace('.', '_'))
print "Release Notes"
print "========================================"
print
date_to_version = {}
for (v,d) in version_date.items():
date_to_version.setdefault(d, []).append(v)
if len(versions_nyr) > 0:
for v in versions_nyr:
print make_label(v)
print version_contents[v], "\n"
for d in sorted(date_to_version.keys(), reverse=True):
for v in sorted(date_to_version[d]):
print make_label(v)
print version_contents[v], "\n"
if __name__ == '__main__':
sys.exit(main())
|
Add a script for combining version .rst files#!/usr/bin/python
import re
import sys
def main(args = None):
if args is None:
args = sys.argv
re_version = re.compile('Version (\d+\.\d+\.\d+), ([0-9]{4}-[0-9]{2}-[0-9]{2})$')
re_nyr = re.compile('Version (\d+\.\d+\.\d+), Not Yet Released$')
version_contents = {}
version_date = {}
versions = []
versions_nyr = []
for f in args[1:]:
contents = open(f).readlines()
match = re_version.match(contents[0])
if match:
version = match.group(1)
date = match.group(2)
versions.append(version)
version_date[version] = date
else:
match = re_nyr.match(contents[0])
if not match:
raise Exception('No version match for %s' % (f))
version = match.group(1)
versions_nyr.append(version)
version_contents[version] = (''.join(contents)).strip()
def make_label(v):
return ".. _v%s:\n" % (v.replace('.', '_'))
print "Release Notes"
print "========================================"
print
date_to_version = {}
for (v,d) in version_date.items():
date_to_version.setdefault(d, []).append(v)
if len(versions_nyr) > 0:
for v in versions_nyr:
print make_label(v)
print version_contents[v], "\n"
for d in sorted(date_to_version.keys(), reverse=True):
for v in sorted(date_to_version[d]):
print make_label(v)
print version_contents[v], "\n"
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a script for combining version .rst files<commit_after>#!/usr/bin/python
import re
import sys
def main(args = None):
if args is None:
args = sys.argv
re_version = re.compile('Version (\d+\.\d+\.\d+), ([0-9]{4}-[0-9]{2}-[0-9]{2})$')
re_nyr = re.compile('Version (\d+\.\d+\.\d+), Not Yet Released$')
version_contents = {}
version_date = {}
versions = []
versions_nyr = []
for f in args[1:]:
contents = open(f).readlines()
match = re_version.match(contents[0])
if match:
version = match.group(1)
date = match.group(2)
versions.append(version)
version_date[version] = date
else:
match = re_nyr.match(contents[0])
if not match:
raise Exception('No version match for %s' % (f))
version = match.group(1)
versions_nyr.append(version)
version_contents[version] = (''.join(contents)).strip()
def make_label(v):
return ".. _v%s:\n" % (v.replace('.', '_'))
print "Release Notes"
print "========================================"
print
date_to_version = {}
for (v,d) in version_date.items():
date_to_version.setdefault(d, []).append(v)
if len(versions_nyr) > 0:
for v in versions_nyr:
print make_label(v)
print version_contents[v], "\n"
for d in sorted(date_to_version.keys(), reverse=True):
for v in sorted(date_to_version[d]):
print make_label(v)
print version_contents[v], "\n"
if __name__ == '__main__':
sys.exit(main())
|
|
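The date-to-version grouping above leans on dict.setdefault; in isolation, with made-up versions, the pattern looks like this:

date_to_version = {}
for version, date in [('1.11.0', '2014-01-10'), ('1.10.8', '2014-01-10')]:
    date_to_version.setdefault(date, []).append(version)
print(date_to_version)  # {'2014-01-10': ['1.11.0', '1.10.8']}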
c6566b2917adce3f94cd1233671ecba07f7ea4e0
|
problem_3/solution.py
|
problem_3/solution.py
|
def largest_prime_factor(n, h):
for i in xrange(2, n+1):
d, m = divmod(n, i)
if m == 0:
largest_prime_factor(d, i)
break
if n == 1: print h
largest_prime_factor(600851475143, 0)
|
Add python implementation for problem 3
|
Add python implementation for problem 3
|
Python
|
mit
|
mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler,mdsrosa/project_euler
|
Add python implementation for problem 3
|
def largest_prime_factor(n, h):
for i in xrange(2, n+1):
d, m = divmod(n, i)
if m == 0:
largest_prime_factor(d, i)
break
if n == 1: print h
largest_prime_factor(600851475143, 0)
|
<commit_before><commit_msg>Add python implementation for problem 3<commit_after>
|
def largest_prime_factor(n, h):
for i in xrange(2, n+1):
d, m = divmod(n, i)
if m == 0:
largest_prime_factor(d, i)
break
if n == 1: print h
largest_prime_factor(600851475143, 0)
|
Add python implementation for problem 3def largest_prime_factor(n, h):
for i in xrange(2, n+1):
d, m = divmod(n, i)
if m == 0:
largest_prime_factor(d, i)
break
if n == 1: print h
largest_prime_factor(600851475143, 0)
|
<commit_before><commit_msg>Add python implementation for problem 3<commit_after>def largest_prime_factor(n, h):
for i in xrange(2, n+1):
d, m = divmod(n, i)
if m == 0:
largest_prime_factor(d, i)
break
if n == 1: print h
largest_prime_factor(600851475143, 0)
|
|
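An iterative equivalent of the recursive search above (a sketch of the same trial-division idea, tracking the last factor divided out):

def largest_prime_factor_iter(n):
    largest = 0
    d = 2
    while n > 1:
        while n % d == 0:
            n //= d
            largest = d
        d += 1
    return largest

print(largest_prime_factor_iter(600851475143))  # 6857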
208522ad6cf627d50953e4146e5361e42d5b9e13
|
pyes/tests/errors.py
|
pyes/tests/errors.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for pyes. These require an es server with thrift plugin running on the default port (localhost:9500).
"""
import unittest
from pyes.tests import ESTestCase
import pyes.exceptions
class ErrorReportingTestCase(ESTestCase):
def setUp(self):
super(ErrorReportingTestCase, self).setUp()
def testCreateDelete(self):
"""
Test errors thrown when creating or deleting.
"""
try:
self.conn.delete_index("test-index")
except pyes.exceptions.NotFoundException:
pass
self.conn.create_index("test-index")
self.assertRaises(pyes.exceptions.AlreadyExistsException, self.conn.create_index, "test-index")
self.conn.delete_index("test-index")
self.assertRaises(pyes.exceptions.NotFoundException, self.conn.delete_index, "test-index")
if __name__ == "__main__":
unittest.main()
|
Add test of the AlreadyExistsException and the NotFoundException, when creating or deleting databases
|
Add test of the AlreadyExistsException and the NotFoundException, when creating or deleting databases
|
Python
|
bsd-3-clause
|
mavarick/pyes,rookdev/pyes,Fiedzia/pyes,haiwen/pyes,Fiedzia/pyes,mavarick/pyes,jayzeng/pyes,HackLinux/pyes,HackLinux/pyes,rookdev/pyes,jayzeng/pyes,aparo/pyes,aparo/pyes,mavarick/pyes,Fiedzia/pyes,haiwen/pyes,mouadino/pyes,haiwen/pyes,mouadino/pyes,jayzeng/pyes,HackLinux/pyes,aparo/pyes
|
Add test of the AlreadyExistsException and the NotFoundException, when creating or deleting databases
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for pyes. These require an es server with thrift plugin running on the default port (localhost:9500).
"""
import unittest
from pyes.tests import ESTestCase
import pyes.exceptions
class ErrorReportingTestCase(ESTestCase):
def setUp(self):
super(ErrorReportingTestCase, self).setUp()
def testCreateDelete(self):
"""
Test errors thrown when creating or deleting.
"""
try:
self.conn.delete_index("test-index")
except pyes.exceptions.NotFoundException:
pass
self.conn.create_index("test-index")
self.assertRaises(pyes.exceptions.AlreadyExistsException, self.conn.create_index, "test-index")
self.conn.delete_index("test-index")
self.assertRaises(pyes.exceptions.NotFoundException, self.conn.delete_index, "test-index")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test of the AlreadyExistsException and the NotFoundException, when creating or deleting databases<commit_after>
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for pyes. These require an es server with thrift plugin running on the default port (localhost:9500).
"""
import unittest
from pyes.tests import ESTestCase
import pyes.exceptions
class ErrorReportingTestCase(ESTestCase):
def setUp(self):
super(ErrorReportingTestCase, self).setUp()
def testCreateDelete(self):
"""
Test errors thrown when creating or deleting.
"""
try:
self.conn.delete_index("test-index")
except pyes.exceptions.NotFoundException:
pass
self.conn.create_index("test-index")
self.assertRaises(pyes.exceptions.AlreadyExistsException, self.conn.create_index, "test-index")
self.conn.delete_index("test-index")
self.assertRaises(pyes.exceptions.NotFoundException, self.conn.delete_index, "test-index")
if __name__ == "__main__":
unittest.main()
|
Add test of the AlreadyExistsException and the NotFoundException, when creating or deleting databases#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for pyes. These require an es server with thrift plugin running on the default port (localhost:9500).
"""
import unittest
from pyes.tests import ESTestCase
import pyes.exceptions
class ErrorReportingTestCase(ESTestCase):
def setUp(self):
super(ErrorReportingTestCase, self).setUp()
def testCreateDelete(self):
"""
Test errors thrown when creating or deleting.
"""
try:
self.conn.delete_index("test-index")
except pyes.exceptions.NotFoundException:
pass
self.conn.create_index("test-index")
self.assertRaises(pyes.exceptions.AlreadyExistsException, self.conn.create_index, "test-index")
self.conn.delete_index("test-index")
self.assertRaises(pyes.exceptions.NotFoundException, self.conn.delete_index, "test-index")
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test of the AlreadyExistsException and the NotFoundException, when creating or deleting databases<commit_after>#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Unit tests for pyes. These require an es server with thrift plugin running on the default port (localhost:9500).
"""
import unittest
from pyes.tests import ESTestCase
import pyes.exceptions
class ErrorReportingTestCase(ESTestCase):
def setUp(self):
super(ErrorReportingTestCase, self).setUp()
def testCreateDelete(self):
"""
Test errors thrown when creating or deleting.
"""
try:
self.conn.delete_index("test-index")
except pyes.exceptions.NotFoundException:
pass
self.conn.create_index("test-index")
self.assertRaises(pyes.exceptions.AlreadyExistsException, self.conn.create_index, "test-index")
self.conn.delete_index("test-index")
self.assertRaises(pyes.exceptions.NotFoundException, self.conn.delete_index, "test-index")
if __name__ == "__main__":
unittest.main()
|
|
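The core move in the test above is unittest's assertRaises(exc, callable, *args), which asserts that calling callable(*args) raises exactly that exception class. A self-contained illustration of the pattern with an invented in-memory store (no pyes or Elasticsearch involved):

import unittest

class AlreadyExistsError(Exception):
    pass

class FakeIndexStore:
    """Invented stand-in that refuses duplicate index names."""
    def __init__(self):
        self._names = set()

    def create_index(self, name):
        if name in self._names:
            raise AlreadyExistsError(name)
        self._names.add(name)

class AssertRaisesDemo(unittest.TestCase):
    def test_duplicate_create(self):
        store = FakeIndexStore()
        store.create_index('test-index')
        self.assertRaises(AlreadyExistsError, store.create_index, 'test-index')

if __name__ == '__main__':
    unittest.main()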
f8e2e57a4f31faf69137dbd986fea15255dc35cf
|
tests/test_classloader.py
|
tests/test_classloader.py
|
from jawa.cf import ClassFile
from jawa.util.classloader import ClassLoader
from jawa.transforms.simple_swap import simple_swap
from jawa.assemble import assemble
def test_load_from_class():
"""Ensure we can add ClassFile's directly to the ClassLoader."""
cl = ClassLoader()
cf = ClassFile.create('TestClass')
cl.update(cf)
assert cl.load('TestClass') is cf
def test_default_bytecode_transforms():
cl = ClassLoader(bytecode_transforms=[simple_swap])
cf = ClassFile.create('TestClass')
cl.update(cf)
test_method = cf.methods.create('test', '(V)V;', code=True)
test_method.code.max_stack = 2
test_method.code.max_locals = 0
test_method.code.assemble(assemble([
('iconst_0',),
('pop',),
('return',)
]))
# Load from the ClassLoader to bind to it.
cf = cl.load('TestClass')
# Ensure the defaults apply.
ins_iter = test_method.code.disassemble()
ins = next(ins_iter)
assert ins.mnemonic == 'bipush'
assert len(ins.operands) == 1
assert ins.operands[0].value == 0
# Ensure we can override the default.
ins_iter = test_method.code.disassemble(transforms=[])
ins = next(ins_iter)
assert ins.mnemonic == 'iconst_0'
assert len(ins.operands) == 0
|
Add a test for ClassLoader default transforms and default transform overriding.
|
Add a test for ClassLoader default transforms and default transform overriding.
|
Python
|
mit
|
TkTech/Jawa,TkTech/Jawa
|
Add a test for ClassLoader default transforms and default transform overriding.
|
from jawa.cf import ClassFile
from jawa.util.classloader import ClassLoader
from jawa.transforms.simple_swap import simple_swap
from jawa.assemble import assemble
def test_load_from_class():
"""Ensure we can add ClassFile's directly to the ClassLoader."""
cl = ClassLoader()
cf = ClassFile.create('TestClass')
cl.update(cf)
assert cl.load('TestClass') is cf
def test_default_bytecode_transforms():
cl = ClassLoader(bytecode_transforms=[simple_swap])
cf = ClassFile.create('TestClass')
cl.update(cf)
test_method = cf.methods.create('test', '(V)V;', code=True)
test_method.code.max_stack = 2
test_method.code.max_locals = 0
test_method.code.assemble(assemble([
('iconst_0',),
('pop',),
('return',)
]))
# Load from the ClassLoader to bind to it.
cf = cl.load('TestClass')
# Ensure the defaults apply.
ins_iter = test_method.code.disassemble()
ins = next(ins_iter)
assert ins.mnemonic == 'bipush'
assert len(ins.operands) == 1
assert ins.operands[0].value == 0
# Ensure we can override the default.
ins_iter = test_method.code.disassemble(transforms=[])
ins = next(ins_iter)
assert ins.mnemonic == 'iconst_0'
assert len(ins.operands) == 0
|
<commit_before><commit_msg>Add a test for ClassLoader default transforms and default transform overriding.<commit_after>
|
from jawa.cf import ClassFile
from jawa.util.classloader import ClassLoader
from jawa.transforms.simple_swap import simple_swap
from jawa.assemble import assemble
def test_load_from_class():
"""Ensure we can add ClassFile's directly to the ClassLoader."""
cl = ClassLoader()
cf = ClassFile.create('TestClass')
cl.update(cf)
assert cl.load('TestClass') is cf
def test_default_bytecode_transforms():
cl = ClassLoader(bytecode_transforms=[simple_swap])
cf = ClassFile.create('TestClass')
cl.update(cf)
test_method = cf.methods.create('test', '(V)V;', code=True)
test_method.code.max_stack = 2
test_method.code.max_locals = 0
test_method.code.assemble(assemble([
('iconst_0',),
('pop',),
('return',)
]))
# Load from the ClassLoader to bind to it.
cf = cl.load('TestClass')
# Ensure the defaults apply.
ins_iter = test_method.code.disassemble()
ins = next(ins_iter)
assert ins.mnemonic == 'bipush'
assert len(ins.operands) == 1
assert ins.operands[0].value == 0
# Ensure we can override the default.
ins_iter = test_method.code.disassemble(transforms=[])
ins = next(ins_iter)
assert ins.mnemonic == 'iconst_0'
assert len(ins.operands) == 0
|
Add a test for ClassLoader default transforms and default transform overriding.from jawa.cf import ClassFile
from jawa.util.classloader import ClassLoader
from jawa.transforms.simple_swap import simple_swap
from jawa.assemble import assemble
def test_load_from_class():
"""Ensure we can add ClassFile's directly to the ClassLoader."""
cl = ClassLoader()
cf = ClassFile.create('TestClass')
cl.update(cf)
assert cl.load('TestClass') is cf
def test_default_bytecode_transforms():
cl = ClassLoader(bytecode_transforms=[simple_swap])
cf = ClassFile.create('TestClass')
cl.update(cf)
test_method = cf.methods.create('test', '(V)V;', code=True)
test_method.code.max_stack = 2
test_method.code.max_locals = 0
test_method.code.assemble(assemble([
('iconst_0',),
('pop',),
('return',)
]))
# Load from the ClassLoader to bind to it.
cf = cl.load('TestClass')
# Ensure the defaults apply.
ins_iter = test_method.code.disassemble()
ins = next(ins_iter)
assert ins.mnemonic == 'bipush'
assert len(ins.operands) == 1
assert ins.operands[0].value == 0
# Ensure we can override the default.
ins_iter = test_method.code.disassemble(transforms=[])
ins = next(ins_iter)
assert ins.mnemonic == 'iconst_0'
assert len(ins.operands) == 0
|
<commit_before><commit_msg>Add a test for ClassLoader default transforms and default transform overriding.<commit_after>from jawa.cf import ClassFile
from jawa.util.classloader import ClassLoader
from jawa.transforms.simple_swap import simple_swap
from jawa.assemble import assemble
def test_load_from_class():
"""Ensure we can add ClassFile's directly to the ClassLoader."""
cl = ClassLoader()
cf = ClassFile.create('TestClass')
cl.update(cf)
assert cl.load('TestClass') is cf
def test_default_bytecode_transforms():
cl = ClassLoader(bytecode_transforms=[simple_swap])
cf = ClassFile.create('TestClass')
cl.update(cf)
test_method = cf.methods.create('test', '(V)V;', code=True)
test_method.code.max_stack = 2
test_method.code.max_locals = 0
test_method.code.assemble(assemble([
('iconst_0',),
('pop',),
('return',)
]))
# Load from the ClassLoader to bind to it.
cf = cl.load('TestClass')
# Ensure the defaults apply.
ins_iter = test_method.code.disassemble()
ins = next(ins_iter)
assert ins.mnemonic == 'bipush'
assert len(ins.operands) == 1
assert ins.operands[0].value == 0
# Ensure we can override the default.
ins_iter = test_method.code.disassemble(transforms=[])
ins = next(ins_iter)
assert ins.mnemonic == 'iconst_0'
assert len(ins.operands) == 0
|
|
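The subtlety this test pins down is that disassemble() distinguishes "no transforms argument" (fall back to the loader's defaults) from transforms=[] (explicitly none). A minimal stand-alone sketch of that None-versus-empty-list convention; the names here are invented, not Jawa's API.

def swap_iconst(ins):
    # Toy transform: rewrite iconst_0 into bipush 0, as simple_swap does.
    return ('bipush', 0) if ins == ('iconst_0',) else ins

class Disassembler:
    def __init__(self, default_transforms=None):
        self.default_transforms = default_transforms or []

    def disassemble(self, code, transforms=None):
        # None means "use the defaults"; an empty list means "apply nothing".
        active = self.default_transforms if transforms is None else transforms
        for ins in code:
            for transform in active:
                ins = transform(ins)
            yield ins

d = Disassembler(default_transforms=[swap_iconst])
print(list(d.disassemble([('iconst_0',)])))                 # [('bipush', 0)]
print(list(d.disassemble([('iconst_0',)], transforms=[])))  # [('iconst_0',)]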
57e3b5f4224a8cf72b8378d2198d5b627a955b67
|
website/tests/test_csp.py
|
website/tests/test_csp.py
|
# -*- coding: utf-8 -*-
##
# Copyright (C) 2015 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django import test
from django.core.urlresolvers import reverse
class CSPTestCase(test.TestCase):
def test_csp_report_view(self):
url = reverse("csp_logger")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
|
Test CSP logging view returns a 200
|
Test CSP logging view returns a 200
Touch #81
|
Python
|
agpl-3.0
|
Inboxen/Inboxen,Inboxen/Inboxen,Inboxen/Inboxen,Inboxen/Inboxen
|
Test CSP logging view returns a 200
Touch #81
|
# -*- coding: utf-8 -*-
##
# Copyright (C) 2015 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django import test
from django.core.urlresolvers import reverse
class CSPTestCase(test.TestCase):
def test_csp_report_view(self):
url = reverse("csp_logger")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Test CSP logging view returns a 200
Touch #81<commit_after>
|
# -*- coding: utf-8 -*-
##
# Copyright (C) 2015 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django import test
from django.core.urlresolvers import reverse
class CSPTestCase(test.TestCase):
def test_csp_report_view(self):
url = reverse("csp_logger")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
|
Test CSP logging view returns a 200
Touch #81# -*- coding: utf-8 -*-
##
# Copyright (C) 2015 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django import test
from django.core.urlresolvers import reverse
class CSPTestCase(test.TestCase):
def test_csp_report_view(self):
url = reverse("csp_logger")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Test CSP logging view returns a 200
Touch #81<commit_after># -*- coding: utf-8 -*-
##
# Copyright (C) 2015 Jessica Tallon & Matt Molyneaux
#
# This file is part of Inboxen.
#
# Inboxen is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Inboxen is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Inboxen. If not, see <http://www.gnu.org/licenses/>.
##
from django import test
from django.core.urlresolvers import reverse
class CSPTestCase(test.TestCase):
def test_csp_report_view(self):
url = reverse("csp_logger")
response = self.client.post(url)
self.assertEqual(response.status_code, 200)
|
|
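A CSP report arrives as a JSON body with a top-level "csp-report" object, and a logging view mostly just decodes and records it. A stdlib-only sketch of that decode step; the payload is a representative example of the report-uri convention, not captured from Inboxen.

import json

raw = b'{"csp-report": {"document-uri": "https://example.com/", "violated-directive": "script-src \'self\'"}}'
report = json.loads(raw.decode('utf-8')).get('csp-report', {})
print(report['violated-directive'], 'violated on', report['document-uri'])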
45b65a851f258f808143e674bd904599adb7a468
|
ctypeslib/test/test_toolchain.py
|
ctypeslib/test/test_toolchain.py
|
import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
Test the complete h2xml and xml2py toolchain on Windows by running it over 'windows.h'.
|
Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.
|
Python
|
mit
|
sugarmanz/ctypeslib
|
Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.
|
import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
<commit_before><commit_msg>Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.<commit_after>
|
import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
<commit_before><commit_msg>Test the complete h2xml and xml2py toolchain on Windows by running it
over 'windows.h'.<commit_after>import unittest
import sys
from ctypeslib import h2xml, xml2py
class ToolchainTest(unittest.TestCase):
if sys.platform == "win32":
def test(self):
h2xml.main(["h2xml", "-q",
"-D WIN32_LEAN_AND_MEAN",
"-D _UNICODE", "-D UNICODE",
"-c", "windows.h",
"-o", "_windows_gen.xml"])
xml2py.main(["xml2py", "_windows_gen.xml", "-w", "-o", "_winapi_gen.py"])
import _winapi_gen
if __name__ == "__main__":
import unittest
unittest.main()
|
|
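Defining the test method inside the "if sys.platform == 'win32':" block means the class silently has no tests on other platforms. The modern idiom is unittest.skipUnless, which keeps the test visible and reports it as skipped; a runnable sketch:

import sys
import unittest

class ToolchainDemo(unittest.TestCase):
    # Modern spelling: the test always exists, but is skipped off-Windows.
    @unittest.skipUnless(sys.platform == 'win32', 'requires Windows headers')
    def test_windows_only(self):
        self.assertEqual(sys.platform, 'win32')

    def test_runs_anywhere(self):
        self.assertIsInstance(sys.platform, str)

if __name__ == '__main__':
    unittest.main()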
03329897f8730702eee03114eeb4b529c5067b53
|
crmapp/urls.py
|
crmapp/urls.py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
# Contact related URLS
# Communication related URLs
)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
from accounts.views import AccountList
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
url(r'^account/list/$',
AccountList.as_view(), name='account_list'
),
# Contact related URLS
# Communication related URLs
)
|
Create the Account List > List Accounts - Create URL
|
Create the Account List > List Accounts - Create URL
|
Python
|
mit
|
deenaariff/Django,tabdon/crmeasyapp,tabdon/crmeasyapp
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
# Contact related URLS
# Communication related URLs
)Create the Account List > List Accounts - Create URL
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
from accounts.views import AccountList
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
url(r'^account/list/$',
AccountList.as_view(), name='account_list'
),
# Contact related URLS
# Communication related URLs
)
|
<commit_before>from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
# Contact related URLS
# Communication related URLs
)<commit_msg>Create the Account List > List Accounts - Create URL<commit_after>
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
from accounts.views import AccountList
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
url(r'^account/list/$',
AccountList.as_view(), name='account_list'
),
# Contact related URLS
# Communication related URLs
)
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
# Contact related URLS
# Communication related URLs
)Create the Account List > List Accounts - Create URLfrom django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
from accounts.views import AccountList
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
url(r'^account/list/$',
AccountList.as_view(), name='account_list'
),
# Contact related URLS
# Communication related URLs
)
|
<commit_before>from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
# Contact related URLS
# Communication related URLs
)<commit_msg>Create the Account List > List Accounts - Create URL<commit_after>from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from marketing.views import HomePage
from accounts.views import AccountList
urlpatterns = patterns('',
# Marketing pages
url(r'^$', HomePage.as_view(), name="home"),
# Subscriber related URLs
url(r'^signup/$',
'crmapp.subscribers.views.subscriber_new', name='sub_new'
),
# Admin URL
(r'^admin/', include(admin.site.urls)),
# Login/Logout URLs
(r'^login/$',
'django.contrib.auth.views.login', {'template_name': 'login.html'}
),
(r'^logout/$',
'django.contrib.auth.views.logout', {'next_page': '/login/'}
),
# Account related URLs
url(r'^account/list/$',
AccountList.as_view(), name='account_list'
),
# Contact related URLS
# Communication related URLs
)
|
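The patterns()/string-view style above is Django 1.x; patterns() was deprecated in 1.8 and removed in 1.10. On a modern Django the added route would read roughly as below, a sketch assuming the same accounts app layout rather than a drop-in for this repo:

from django.urls import path

from accounts.views import AccountList

urlpatterns = [
    path('account/list/', AccountList.as_view(), name='account_list'),
]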
c3e08edd09b860e3ffbc2ae56df730f794fb17e5
|
bin/oneoffs/load_external_data.py
|
bin/oneoffs/load_external_data.py
|
#!/usr/bin/env python
import bson
import copy
import datetime
import dateutil.parser
import json
from api import config
## DEFAULTS ##
USER_ID = "meganhenning@flywheel.io"
SAFE_FILE_HASH = "v0-sha384-a8d0d1bd9368e5385f31d3582db07f9bc257537d5e1f207d36a91fdd3d2f188fff56616c0874bb3535c37fdf761a446c"
PROJECT_ID = "5a26e049c6fa4a00161e4a1a"
GROUP_ID = 'scitran'
# Some day maybe this can use the SDK/API calls to get the proper test data
# For now, paste it in
SESSIONS = []
ACQUISITIONS = []
def handle_permissions(obj):
obj['permissions'] = [{
"access": "admin",
"_id": USER_ID
}]
def handle_dates(obj):
if obj.get('timestamp'):
obj['timestamp'] = dateutil.parser.parse(obj['timestamp'])
if obj.get('created'):
obj['created'] = dateutil.parser.parse(obj['created'])
if obj.get('modified'):
obj['modified'] = dateutil.parser.parse(obj['modified'])
def handle_file(f):
handle_dates(f)
f.pop('info_exists', None)
f.pop('join_origin', None)
f['hash'] = SAFE_FILE_HASH
for i, s in enumerate(SESSIONS):
print "Processing session {} of {} sessions".format(i+1, len(SESSIONS))
s.pop('join-origin', None)
s['_id'] = bson.ObjectId(s['_id'])
s['project'] = bson.ObjectId(str(PROJECT_ID))
s['group'] = GROUP_ID
handle_dates(s)
handle_permissions(s)
for f in s.get('files', []):
handle_file(f)
config.db.sessions.delete_many({'_id': s['_id']})
config.db.sessions.insert(s)
for i, a in enumerate(ACQUISITIONS):
print "Processing acquisition {} of {} acquisitions".format(i+1, len(ACQUISITIONS))
a['_id'] = bson.ObjectId(a['_id'])
a['session'] = bson.ObjectId(a['session'])
a.pop('join-origin', None)
handle_dates(a)
handle_permissions(a)
for f in a.get('files', []):
handle_file(f)
config.db.acquisitions.delete_many({'_id': a['_id']})
config.db.acquisitions.insert(a)
|
Add script for loading external data for testing
|
Add script for loading external data for testing
|
Python
|
mit
|
scitran/api,scitran/core,scitran/core,scitran/core,scitran/core,scitran/api
|
Add script for loading external data for testing
|
#!/usr/bin/env python
import bson
import copy
import datetime
import dateutil.parser
import json
from api import config
## DEFAULTS ##
USER_ID = "meganhenning@flywheel.io"
SAFE_FILE_HASH = "v0-sha384-a8d0d1bd9368e5385f31d3582db07f9bc257537d5e1f207d36a91fdd3d2f188fff56616c0874bb3535c37fdf761a446c"
PROJECT_ID = "5a26e049c6fa4a00161e4a1a"
GROUP_ID = 'scitran'
# Some day maybe this can use the SDK/API calls to get the proper test data
# For now, paste it in
SESSIONS = []
ACQUISITIONS = []
def handle_permissions(obj):
obj['permissions'] = [{
"access": "admin",
"_id": USER_ID
}]
def handle_dates(obj):
if obj.get('timestamp'):
obj['timestamp'] = dateutil.parser.parse(obj['timestamp'])
if obj.get('created'):
obj['created'] = dateutil.parser.parse(obj['created'])
if obj.get('modified'):
obj['modified'] = dateutil.parser.parse(obj['modified'])
def handle_file(f):
handle_dates(f)
f.pop('info_exists', None)
f.pop('join_origin', None)
f['hash'] = SAFE_FILE_HASH
for i, s in enumerate(SESSIONS):
print "Processing session {} of {} sessions".format(i+1, len(SESSIONS))
s.pop('join-origin', None)
s['_id'] = bson.ObjectId(s['_id'])
s['project'] = bson.ObjectId(str(PROJECT_ID))
s['group'] = GROUP_ID
handle_dates(s)
handle_permissions(s)
for f in s.get('files', []):
handle_file(f)
config.db.sessions.delete_many({'_id': s['_id']})
config.db.sessions.insert(s)
for i, a in enumerate(ACQUISITIONS):
print "Processing acquisition {} of {} acquisitions".format(i+1, len(ACQUISITIONS))
a['_id'] = bson.ObjectId(a['_id'])
a['session'] = bson.ObjectId(a['session'])
a.pop('join-origin', None)
handle_dates(a)
handle_permissions(a)
for f in a.get('files', []):
handle_file(f)
config.db.acquisitions.delete_many({'_id': a['_id']})
config.db.acquisitions.insert(a)
|
<commit_before><commit_msg>Add script for loading external data for testing<commit_after>
|
#!/usr/bin/env python
import bson
import copy
import datetime
import dateutil.parser
import json
from api import config
## DEFAULTS ##
USER_ID = "meganhenning@flywheel.io"
SAFE_FILE_HASH = "v0-sha384-a8d0d1bd9368e5385f31d3582db07f9bc257537d5e1f207d36a91fdd3d2f188fff56616c0874bb3535c37fdf761a446c"
PROJECT_ID = "5a26e049c6fa4a00161e4a1a"
GROUP_ID = 'scitran'
# Some day maybe this can use the SDK/API calls to get the proper test data
# For now, paste it in
SESSIONS = []
ACQUISITIONS = []
def handle_permissions(obj):
obj['permissions'] = [{
"access": "admin",
"_id": USER_ID
}]
def handle_dates(obj):
if obj.get('timestamp'):
obj['timestamp'] = dateutil.parser.parse(obj['timestamp'])
if obj.get('created'):
obj['created'] = dateutil.parser.parse(obj['created'])
if obj.get('modified'):
obj['modified'] = dateutil.parser.parse(obj['modified'])
def handle_file(f):
handle_dates(f)
f.pop('info_exists', None)
f.pop('join_origin', None)
f['hash'] = SAFE_FILE_HASH
for i, s in enumerate(SESSIONS):
print "Processing session {} of {} sessions".format(i+1, len(SESSIONS))
s.pop('join-origin', None)
s['_id'] = bson.ObjectId(s['_id'])
s['project'] = bson.ObjectId(str(PROJECT_ID))
s['group'] = GROUP_ID
handle_dates(s)
handle_permissions(s)
for f in s.get('files', []):
handle_file(f)
config.db.sessions.delete_many({'_id': s['_id']})
config.db.sessions.insert(s)
for i, a in enumerate(ACQUISITIONS):
print "Processing acquisition {} of {} acquisitions".format(i+1, len(ACQUISITIONS))
a['_id'] = bson.ObjectId(a['_id'])
a['session'] = bson.ObjectId(a['session'])
a.pop('join-origin', None)
handle_dates(a)
handle_permissions(a)
for f in a.get('files', []):
handle_file(f)
config.db.acquisitions.delete_many({'_id': a['_id']})
config.db.acquisitions.insert(a)
|
Add script for loading external data for testing#!/usr/bin/env python
import bson
import copy
import datetime
import dateutil.parser
import json
from api import config
## DEFAULTS ##
USER_ID = "meganhenning@flywheel.io"
SAFE_FILE_HASH = "v0-sha384-a8d0d1bd9368e5385f31d3582db07f9bc257537d5e1f207d36a91fdd3d2f188fff56616c0874bb3535c37fdf761a446c"
PROJECT_ID = "5a26e049c6fa4a00161e4a1a"
GROUP_ID = 'scitran'
# Some day maybe this can use the SDK/API calls to get the proper test data
# For now, paste it in
SESSIONS = []
ACQUISITIONS = []
def handle_permissions(obj):
obj['permissions'] = [{
"access": "admin",
"_id": USER_ID
}]
def handle_dates(obj):
if obj.get('timestamp'):
obj['timestamp'] = dateutil.parser.parse(obj['timestamp'])
if obj.get('created'):
obj['created'] = dateutil.parser.parse(obj['created'])
if obj.get('modified'):
obj['modified'] = dateutil.parser.parse(obj['modified'])
def handle_file(f):
handle_dates(f)
f.pop('info_exists', None)
f.pop('join_origin', None)
f['hash'] = SAFE_FILE_HASH
for i, s in enumerate(SESSIONS):
print "Processing session {} of {} sessions".format(i+1, len(SESSIONS))
s.pop('join-origin', None)
s['_id'] = bson.ObjectId(s['_id'])
s['project'] = bson.ObjectId(str(PROJECT_ID))
s['group'] = GROUP_ID
handle_dates(s)
handle_permissions(s)
for f in s.get('files', []):
handle_file(f)
config.db.sessions.delete_many({'_id': s['_id']})
config.db.sessions.insert(s)
for i, a in enumerate(ACQUISITIONS):
print "Processing acquisition {} of {} acquisitions".format(i+1, len(ACQUISITIONS))
a['_id'] = bson.ObjectId(a['_id'])
a['session'] = bson.ObjectId(a['session'])
a.pop('join-origin', None)
handle_dates(a)
handle_permissions(a)
for f in a.get('files', []):
handle_file(f)
config.db.acquisitions.delete_many({'_id': a['_id']})
config.db.acquisitions.insert(a)
|
<commit_before><commit_msg>Add script for loading external data for testing<commit_after>#!/usr/bin/env python
import bson
import copy
import datetime
import dateutil.parser
import json
from api import config
## DEFAULTS ##
USER_ID = "meganhenning@flywheel.io"
SAFE_FILE_HASH = "v0-sha384-a8d0d1bd9368e5385f31d3582db07f9bc257537d5e1f207d36a91fdd3d2f188fff56616c0874bb3535c37fdf761a446c"
PROJECT_ID = "5a26e049c6fa4a00161e4a1a"
GROUP_ID = 'scitran'
# Some day maybe this can use the SDK/API calls to get the proper test data
# For now, paste it in
SESSIONS = []
ACQUISITIONS = []
def handle_permissions(obj):
obj['permissions'] = [{
"access": "admin",
"_id": USER_ID
}]
def handle_dates(obj):
if obj.get('timestamp'):
obj['timestamp'] = dateutil.parser.parse(obj['timestamp'])
if obj.get('created'):
obj['created'] = dateutil.parser.parse(obj['created'])
if obj.get('modified'):
obj['modified'] = dateutil.parser.parse(obj['modified'])
def handle_file(f):
handle_dates(f)
f.pop('info_exists', None)
f.pop('join_origin', None)
f['hash'] = SAFE_FILE_HASH
for i, s in enumerate(SESSIONS):
print "Processing session {} of {} sessions".format(i+1, len(SESSIONS))
s.pop('join-origin', None)
s['_id'] = bson.ObjectId(s['_id'])
s['project'] = bson.ObjectId(str(PROJECT_ID))
s['group'] = GROUP_ID
handle_dates(s)
handle_permissions(s)
for f in s.get('files', []):
handle_file(f)
config.db.sessions.delete_many({'_id': s['_id']})
config.db.sessions.insert(s)
for i, a in enumerate(ACQUISITIONS):
print "Processing acquisition {} of {} acquisitions".format(i+1, len(ACQUISITIONS))
a['_id'] = bson.ObjectId(a['_id'])
a['session'] = bson.ObjectId(a['session'])
a.pop('join-origin', None)
handle_dates(a)
handle_permissions(a)
for f in a.get('files', []):
handle_file(f)
config.db.acquisitions.delete_many({'_id': a['_id']})
config.db.acquisitions.insert(a)
|
|
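Most of this loader is type normalization: string ids become bson.ObjectId and ISO timestamp strings become datetimes before the insert. A stdlib-only sketch of the timestamp half, with datetime.fromisoformat standing in for dateutil.parser.parse (the narrower assumption being that the inputs are ISO 8601):

from datetime import datetime

doc = {'created': '2017-12-05T10:30:00', 'modified': None, 'label': 'demo'}
for key in ('timestamp', 'created', 'modified'):
    if doc.get(key):
        doc[key] = datetime.fromisoformat(doc[key])
print(doc)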
f8a99aadc362c1b5a83fd62ff5829b6d92aba3cd
|
funbox/strings.py
|
funbox/strings.py
|
#! /usr/bin/env python
"""Tools for strings.
"""
def join(sep):
"""join(sep)(iterable) Join strings in iterable with sep.
str -> [str] -> str
>>> comma_separate = join(', ')
>>> comma_separate(['a', 'b', 'c', 'd'])
'a, b, c, d'
"""
def join_sep(iterable):
return sep.join(iterable)
return join_sep
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Add curried join from my old functoolsx module.
|
Add curried join from my old functoolsx module.
|
Python
|
mit
|
nmbooker/python-funbox,nmbooker/python-funbox
|
Add curried join from my old functoolsx module.
|
#! /usr/bin/env python
"""Tools for strings.
"""
def join(sep):
"""join(sep)(iterable) Join strings in iterable with sep.
str -> [str] -> str
>>> comma_separate = join(', ')
>>> comma_separate(['a', 'b', 'c', 'd'])
'a, b, c, d'
"""
def join_sep(iterable):
return sep.join(iterable)
return join_sep
if __name__ == "__main__":
import doctest
doctest.testmod()
|
<commit_before><commit_msg>Add curried join from my old functoolsx module.<commit_after>
|
#! /usr/bin/env python
"""Tools for strings.
"""
def join(sep):
"""join(sep)(iterable) Join strings in iterable with sep.
str -> [str] -> str
>>> comma_separate = join(', ')
>>> comma_separate(['a', 'b', 'c', 'd'])
'a, b, c, d'
"""
def join_sep(iterable):
return sep.join(iterable)
return join_sep
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Add curried join from my old functoolsx module.#! /usr/bin/env python
"""Tools for strings.
"""
def join(sep):
"""join(sep)(iterable) Join strings in iterable with sep.
str -> [str] -> str
>>> comma_separate = join(', ')
>>> comma_separate(['a', 'b', 'c', 'd'])
'a, b, c, d'
"""
def join_sep(iterable):
return sep.join(iterable)
return join_sep
if __name__ == "__main__":
import doctest
doctest.testmod()
|
<commit_before><commit_msg>Add curried join from my old functoolsx module.<commit_after>#! /usr/bin/env python
"""Tools for strings.
"""
def join(sep):
"""join(sep)(iterable) Join strings in iterable with sep.
str -> [str] -> str
>>> comma_separate = join(', ')
>>> comma_separate(['a', 'b', 'c', 'd'])
'a, b, c, d'
"""
def join_sep(iterable):
return sep.join(iterable)
return join_sep
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
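Two shorter spellings of the same curried join, for comparison: sep.join is already a bound method with the right shape, and functools.partial over the unbound str.join states the currying explicitly. Both behave like the doctest above.

from functools import partial

def join(sep):
    return partial(str.join, sep)  # str.join(sep, iterable) == sep.join(iterable)

comma_separate = join(', ')
print(comma_separate(['a', 'b', 'c', 'd']))    # a, b, c, d

# Or simply hand back the bound method:
def join_bound(sep):
    return sep.join

print(join_bound(', ')(['a', 'b', 'c', 'd']))  # a, b, c, d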
cde10eba16094920d074079bbf0fe779a42c8bf4
|
interface/net/udp/nitroshare.py
|
interface/net/udp/nitroshare.py
|
#!/usr/bin/env python
"""
Attempt to communicate with Nitroshare.
[x] listen nitroshare broadcasts and show who is available
[x] listen for UDP packets on port 40816
"""
# --- communicating.. networking.. ---
import socket
IP4 = socket.AF_INET
UDP = socket.SOCK_DGRAM
class UDPSocketStream(object):
""" Convert network socket endpoint to a readable stream object """
def __init__(self, host='0.0.0.0', port=40816):
# reading from socket blocks keyboard input, so CtrlC/CtrlBreak
# may not work until read operation completes
sock = socket.socket(IP4, UDP)
# on socket level, allow multiple programs listen in the same address
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
self.sock = sock
def read(self, size):
return self.sock.recvfrom(size)[0]
def close(self):
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.sock.close()
# --- /networking ---
def nitrolisten():
import json
s = UDPSocketStream()
while True:
data, remote = s.sock.recvfrom(1024)
#print("Got packet!")
#print data
# msg = {u'uuid': u'{eb8d3a1e-c50f-459d-9be7-6a70b91ca6bd}', u'operating_system': u'linux', u'name': u'XONiTE', u'port': u'40818'}
msg = json.loads(data)
print("Got response from %s (%s) on %s:%s " % (msg[u'name'], msg[u'operating_system'].capitalize(), remote[0], msg[u'port']))
if __name__ == '__main__':
nitrolisten()
|
Add first draft of Nitroshare UDP discovery
|
Add first draft of Nitroshare UDP discovery
|
Python
|
unlicense
|
techtonik/discovery,techtonik/discovery,techtonik/discovery
|
Add first draft of Nitroshare UDP discovery
|
#!/usr/bin/env python
"""
Attempt to communicate with Nitroshare.
[x] listen nitroshare broadcasts and show who is available
[x] listen for UDP packets on port 40816
"""
# --- communicating.. networking.. ---
import socket
IP4 = socket.AF_INET
UDP = socket.SOCK_DGRAM
class UDPSocketStream(object):
""" Convert network socket endpoint to a readable stream object """
def __init__(self, host='0.0.0.0', port=40816):
# reading from socket blocks keyboard input, so CtrlC/CtrlBreak
# may not work until read operation completes
sock = socket.socket(IP4, UDP)
# on socket level, allow multiple programs listen in the same address
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
self.sock = sock
def read(self, size):
return self.sock.recvfrom(size)[0]
def close(self):
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.sock.close()
# --- /networking ---
def nitrolisten():
import json
s = UDPSocketStream()
while True:
data, remote = s.sock.recvfrom(1024)
#print("Got packet!")
#print data
# msg = {u'uuid': u'{eb8d3a1e-c50f-459d-9be7-6a70b91ca6bd}', u'operating_system': u'linux', u'name': u'XONiTE', u'port': u'40818'}
msg = json.loads(data)
print("Got response from %s (%s) on %s:%s " % (msg[u'name'], msg[u'operating_system'].capitalize(), remote[0], msg[u'port']))
if __name__ == '__main__':
nitrolisten()
|
<commit_before><commit_msg>Add first draft of Nitroshare UDP discovery<commit_after>
|
#!/usr/bin/env python
"""
Attempt to communicate with Nitroshare.
[x] listen nitroshare broadcasts and show who is available
[x] listen for UDP packets on port 40816
"""
# --- communicating.. networking.. ---
import socket
IP4 = socket.AF_INET
UDP = socket.SOCK_DGRAM
class UDPSocketStream(object):
""" Convert network socket endpoint to a readable stream object """
def __init__(self, host='0.0.0.0', port=40816):
# reading from socket blocks keyboard input, so CtrlC/CtrlBreak
# may not work until read operation completes
sock = socket.socket(IP4, UDP)
# on socket level, allow multiple programs listen in the same address
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
self.sock = sock
def read(self, size):
return self.sock.recvfrom(size)[0]
def close(self):
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.sock.close()
# --- /networking ---
def nitrolisten():
import json
s = UDPSocketStream()
while True:
data, remote = s.sock.recvfrom(1024)
#print("Got packet!")
#print data
# msg = {u'uuid': u'{eb8d3a1e-c50f-459d-9be7-6a70b91ca6bd}', u'operating_system': u'linux', u'name': u'XONiTE', u'port': u'40818'}
msg = json.loads(data)
print("Got response from %s (%s) on %s:%s " % (msg[u'name'], msg[u'operating_system'].capitalize(), remote[0], msg[u'port']))
if __name__ == '__main__':
nitrolisten()
|
Add first draft of Nitroshare UDP discovery#!/usr/bin/env python
"""
Attempt to communicate with Nitroshare.
[x] listen nitroshare broadcasts and show who is available
[x] listen for UDP packets on port 40816
"""
# --- communicating.. networking.. ---
import socket
IP4 = socket.AF_INET
UDP = socket.SOCK_DGRAM
class UDPSocketStream(object):
""" Convert network socket endpoint to a readable stream object """
def __init__(self, host='0.0.0.0', port=40816):
# reading from socket blocks keyboard input, so CtrlC/CtrlBreak
# may not work until read operation completes
sock = socket.socket(IP4, UDP)
# on socket level, allow multiple programs listen in the same address
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
self.sock = sock
def read(self, size):
return self.sock.recvfrom(size)[0]
def close(self):
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.sock.close()
# --- /networking ---
def nitrolisten():
import json
s = UDPSocketStream()
while True:
data, remote = s.sock.recvfrom(1024)
#print("Got packet!")
#print data
# msg = {u'uuid': u'{eb8d3a1e-c50f-459d-9be7-6a70b91ca6bd}', u'operating_system': u'linux', u'name': u'XONiTE', u'port': u'40818'}
msg = json.loads(data)
print("Got response from %s (%s) on %s:%s " % (msg[u'name'], msg[u'operating_system'].capitalize(), remote[0], msg[u'port']))
if __name__ == '__main__':
nitrolisten()
|
<commit_before><commit_msg>Add first draft of Nitroshare UDP discovery<commit_after>#!/usr/bin/env python
"""
Attempt to communicate with Nitroshare.
[x] listen nitroshare broadcasts and show who is available
[x] listen for UDP packets on port 40816
"""
# --- communicating.. networking.. ---
import socket
IP4 = socket.AF_INET
UDP = socket.SOCK_DGRAM
class UDPSocketStream(object):
""" Convert network socket endpoint to a readable stream object """
def __init__(self, host='0.0.0.0', port=40816):
# reading from socket blocks keyboard input, so CtrlC/CtrlBreak
# may not work until read operation completes
sock = socket.socket(IP4, UDP)
# on socket level, allow multiple programs listen in the same address
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind((host, port))
self.sock = sock
def read(self, size):
return self.sock.recvfrom(size)[0]
def close(self):
try:
self.sock.shutdown(socket.SHUT_RDWR)
except:
pass
self.sock.close()
# --- /networking ---
def nitrolisten():
import json
s = UDPSocketStream()
while True:
data, remote = s.sock.recvfrom(1024)
#print("Got packet!")
#print data
# msg = {u'uuid': u'{eb8d3a1e-c50f-459d-9be7-6a70b91ca6bd}', u'operating_system': u'linux', u'name': u'XONiTE', u'port': u'40818'}
msg = json.loads(data)
print("Got response from %s (%s) on %s:%s " % (msg[u'name'], msg[u'operating_system'].capitalize(), remote[0], msg[u'port']))
if __name__ == '__main__':
nitrolisten()
|
|
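A companion sketch for the other direction: broadcasting the kind of JSON hello the listener decodes. The field names follow the sample message quoted in the script's comment; the values and the broadcast address are made up for the demo.

import json
import socket

hello = json.dumps({
    'uuid': '{00000000-0000-0000-0000-000000000000}',  # placeholder uuid
    'operating_system': 'linux',
    'name': 'demo-host',
    'port': '40818',
}).encode('utf-8')

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(hello, ('255.255.255.255', 40816))
sock.close()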
c1099e9410b8ad35e69d59e1d27f36903495cd67
|
scripts/migrate_categories.py
|
scripts/migrate_categories.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.project.model import Node
logger = logging.getLogger(__name__)
# legacy => new category
MIGRATE_MAP = {
'category': '',
'measure': 'methods and measures',
}
def migrate_category(node):
"""Migrate legacy, invalid category to new, valid category. Return whether
the node was changed.
"""
if node.category not in Node.CATEGORY_MAP.keys(): # invalid category
node.category = MIGRATE_MAP.get(node.category, 'other')
return True
return False
def migrate_nodes():
migrated_count = 0
for node in Node.find():
was_migrated = migrate_category(node)
if was_migrated:
node.save()
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
class TestMigratingCategories(OsfTestCase):
def test_migrate_category(self):
node = NodeFactory(category='category')
was_migrated = migrate_category(node)
assert was_migrated is True
node.save()
assert node.category == ''
def test_migrate_measure(self):
node = NodeFactory(category='measure')
migrate_category(node)
node.save()
assert node.category == 'methods and measures'
def test_everything_else_is_migrated_to_other(self):
node1 = NodeFactory(category='background')
migrate_category(node1)
node1.save()
assert node1.category == 'other'
node2 = NodeFactory(category=u'プロジェクト')
migrate_category(node2)
node2.save()
assert node2.category == 'other'
def test_valid_categories_not_migrated(self):
node1 = NodeFactory(category='project')
node2 = NodeFactory(category='hypothesis')
was_migrated1 = migrate_category(node1)
was_migrated2 = migrate_category(node2)
node1.save()
node2.save()
assert was_migrated1 is False
assert was_migrated2 is False
assert node1.category == 'project'
assert node2.category == 'hypothesis'
class TestMigrateAll(OsfTestCase):
def test_migrate_categories_all(self):
n1 = NodeFactory(category='hypothesis')
n2 = NodeFactory(category='category')
migrate_nodes()
assert n1.category == 'hypothesis'
assert n2.category == ''
if __name__ == '__main__':
    migrate_nodes()
|
Add script for migrating categories
|
Add script for migrating categories
|
Python
|
apache-2.0
|
rdhyee/osf.io,mluo613/osf.io,cldershem/osf.io,sbt9uc/osf.io,KAsante95/osf.io,zachjanicki/osf.io,danielneis/osf.io,caseyrollins/osf.io,jeffreyliu3230/osf.io,CenterForOpenScience/osf.io,danielneis/osf.io,caneruguz/osf.io,himanshuo/osf.io,Ghalko/osf.io,icereval/osf.io,ckc6cz/osf.io,arpitar/osf.io,MerlinZhang/osf.io,reinaH/osf.io,acshi/osf.io,sloria/osf.io,ckc6cz/osf.io,wearpants/osf.io,samchrisinger/osf.io,ZobairAlijan/osf.io,SSJohns/osf.io,emetsger/osf.io,felliott/osf.io,asanfilippo7/osf.io,baylee-d/osf.io,SSJohns/osf.io,sbt9uc/osf.io,mfraezz/osf.io,kushG/osf.io,kch8qx/osf.io,saradbowman/osf.io,kwierman/osf.io,adlius/osf.io,cosenal/osf.io,monikagrabowska/osf.io,KAsante95/osf.io,alexschiller/osf.io,felliott/osf.io,sbt9uc/osf.io,Nesiehr/osf.io,alexschiller/osf.io,adlius/osf.io,mluo613/osf.io,jeffreyliu3230/osf.io,brianjgeiger/osf.io,hmoco/osf.io,MerlinZhang/osf.io,cwisecarver/osf.io,crcresearch/osf.io,Johnetordoff/osf.io,mluke93/osf.io,hmoco/osf.io,fabianvf/osf.io,emetsger/osf.io,reinaH/osf.io,rdhyee/osf.io,leb2dg/osf.io,acshi/osf.io,cosenal/osf.io,himanshuo/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,petermalcolm/osf.io,binoculars/osf.io,kwierman/osf.io,doublebits/osf.io,jinluyuan/osf.io,brandonPurvis/osf.io,petermalcolm/osf.io,jeffreyliu3230/osf.io,billyhunt/osf.io,doublebits/osf.io,acshi/osf.io,jolene-esposito/osf.io,HalcyonChimera/osf.io,dplorimer/osf,GaryKriebel/osf.io,GaryKriebel/osf.io,HarryRybacki/osf.io,njantrania/osf.io,TomBaxter/osf.io,abought/osf.io,samanehsan/osf.io,chrisseto/osf.io,zkraime/osf.io,danielneis/osf.io,ZobairAlijan/osf.io,mluo613/osf.io,cosenal/osf.io,zachjanicki/osf.io,aaxelb/osf.io,jnayak1/osf.io,mluke93/osf.io,caneruguz/osf.io,brandonPurvis/osf.io,abought/osf.io,CenterForOpenScience/osf.io,njantrania/osf.io,zamattiac/osf.io,GaryKriebel/osf.io,amyshi188/osf.io,samchrisinger/osf.io,alexschiller/osf.io,zkraime/osf.io,monikagrabowska/osf.io,lyndsysimon/osf.io,billyhunt/osf.io,asanfilippo7/osf.io,Ghalko/osf.io,billyhunt/osf.io,petermalcolm/osf.io,abought/osf.io,mattclark/osf.io,leb2dg/osf.io,caseyrygt/osf.io,laurenrevere/osf.io,adlius/osf.io,GageGaskins/osf.io,aaxelb/osf.io,Ghalko/osf.io,arpitar/osf.io,njantrania/osf.io,brianjgeiger/osf.io,zkraime/osf.io,felliott/osf.io,kch8qx/osf.io,brandonPurvis/osf.io,acshi/osf.io,mfraezz/osf.io,revanthkolli/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,arpitar/osf.io,TomBaxter/osf.io,sbt9uc/osf.io,jinluyuan/osf.io,chrisseto/osf.io,fabianvf/osf.io,laurenrevere/osf.io,aaxelb/osf.io,erinspace/osf.io,lyndsysimon/osf.io,emetsger/osf.io,arpitar/osf.io,laurenrevere/osf.io,rdhyee/osf.io,alexschiller/osf.io,ZobairAlijan/osf.io,TomHeatwole/osf.io,mluke93/osf.io,ticklemepierce/osf.io,AndrewSallans/osf.io,billyhunt/osf.io,icereval/osf.io,brianjgeiger/osf.io,bdyetton/prettychart,hmoco/osf.io,adlius/osf.io,pattisdr/osf.io,TomBaxter/osf.io,cldershem/osf.io,kushG/osf.io,njantrania/osf.io,samanehsan/osf.io,RomanZWang/osf.io,jinluyuan/osf.io,ckc6cz/osf.io,RomanZWang/osf.io,fabianvf/osf.io,cldershem/osf.io,bdyetton/prettychart,GageGaskins/osf.io,Johnetordoff/osf.io,haoyuchen1992/osf.io,felliott/osf.io,SSJohns/osf.io,KAsante95/osf.io,KAsante95/osf.io,MerlinZhang/osf.io,dplorimer/osf,crcresearch/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,danielneis/osf.io,barbour-em/osf.io,HarryRybacki/osf.io,Ghalko/osf.io,baylee-d/osf.io,doublebits/osf.io,aaxelb/osf.io,himanshuo/osf.io,cwisecarver/osf.io,fabianvf/osf.io,doublebits/osf.io,lamdnhan/osf.io,dplorimer/osf,mfraezz/osf.io,caseyrygt/osf.io,mattclark/osf.io,amyshi188/osf.io,TomHeatwole/osf.io
,wearpants/osf.io,acshi/osf.io,ZobairAlijan/osf.io,zachjanicki/osf.io,cslzchen/osf.io,samchrisinger/osf.io,wearpants/osf.io,monikagrabowska/osf.io,barbour-em/osf.io,KAsante95/osf.io,GageGaskins/osf.io,caneruguz/osf.io,zamattiac/osf.io,sloria/osf.io,revanthkolli/osf.io,barbour-em/osf.io,RomanZWang/osf.io,jmcarp/osf.io,kushG/osf.io,billyhunt/osf.io,HarryRybacki/osf.io,cslzchen/osf.io,binoculars/osf.io,zachjanicki/osf.io,lyndsysimon/osf.io,himanshuo/osf.io,pattisdr/osf.io,jmcarp/osf.io,lamdnhan/osf.io,wearpants/osf.io,GageGaskins/osf.io,mluo613/osf.io,MerlinZhang/osf.io,asanfilippo7/osf.io,brandonPurvis/osf.io,samanehsan/osf.io,rdhyee/osf.io,samanehsan/osf.io,cwisecarver/osf.io,kch8qx/osf.io,haoyuchen1992/osf.io,ticklemepierce/osf.io,caseyrollins/osf.io,revanthkolli/osf.io,saradbowman/osf.io,ticklemepierce/osf.io,lyndsysimon/osf.io,jolene-esposito/osf.io,zamattiac/osf.io,chrisseto/osf.io,caseyrygt/osf.io,jnayak1/osf.io,GaryKriebel/osf.io,cldershem/osf.io,DanielSBrown/osf.io,ckc6cz/osf.io,icereval/osf.io,DanielSBrown/osf.io,barbour-em/osf.io,alexschiller/osf.io,kch8qx/osf.io,abought/osf.io,Nesiehr/osf.io,caneruguz/osf.io,mluo613/osf.io,erinspace/osf.io,amyshi188/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,GageGaskins/osf.io,caseyrygt/osf.io,kch8qx/osf.io,chennan47/osf.io,jmcarp/osf.io,jeffreyliu3230/osf.io,chennan47/osf.io,jinluyuan/osf.io,RomanZWang/osf.io,DanielSBrown/osf.io,Nesiehr/osf.io,jnayak1/osf.io,leb2dg/osf.io,kushG/osf.io,dplorimer/osf,monikagrabowska/osf.io,lamdnhan/osf.io,samchrisinger/osf.io,amyshi188/osf.io,mfraezz/osf.io,HalcyonChimera/osf.io,erinspace/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,cosenal/osf.io,brandonPurvis/osf.io,mattclark/osf.io,lamdnhan/osf.io,caseyrollins/osf.io,RomanZWang/osf.io,TomHeatwole/osf.io,emetsger/osf.io,monikagrabowska/osf.io,asanfilippo7/osf.io,bdyetton/prettychart,bdyetton/prettychart,AndrewSallans/osf.io,pattisdr/osf.io,chennan47/osf.io,doublebits/osf.io,DanielSBrown/osf.io,sloria/osf.io,Johnetordoff/osf.io,reinaH/osf.io,petermalcolm/osf.io,binoculars/osf.io,leb2dg/osf.io,chrisseto/osf.io,jolene-esposito/osf.io,haoyuchen1992/osf.io,kwierman/osf.io,mluke93/osf.io,jolene-esposito/osf.io,jmcarp/osf.io,haoyuchen1992/osf.io,HarryRybacki/osf.io,cwisecarver/osf.io,crcresearch/osf.io,hmoco/osf.io,Nesiehr/osf.io,ticklemepierce/osf.io,zamattiac/osf.io,reinaH/osf.io,kwierman/osf.io,zkraime/osf.io,brianjgeiger/osf.io,revanthkolli/osf.io,jnayak1/osf.io
|
Add script for migrating categories
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.project.model import Node
logger = logging.getLogger(__name__)
# legacy => new category
MIGRATE_MAP = {
'category': '',
'measure': 'methods and measures',
}
def migrate_category(node):
"""Migrate legacy, invalid category to new, valid category. Return whether
the node was changed.
"""
if node.category not in Node.CATEGORY_MAP.keys(): # invalid category
node.category = MIGRATE_MAP.get(node.category, 'other')
return True
return False
def migrate_nodes():
migrated_count = 0
for node in Node.find():
was_migrated = migrate_category(node)
if was_migrated:
node.save()
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
class TestMigratingCategories(OsfTestCase):
def test_migrate_category(self):
node = NodeFactory(category='category')
was_migrated = migrate_category(node)
assert was_migrated is True
node.save()
assert node.category == ''
def test_migrate_measure(self):
node = NodeFactory(category='measure')
migrate_category(node)
node.save()
assert node.category == 'methods and measures'
def test_everything_else_is_migrated_to_other(self):
node1 = NodeFactory(category='background')
migrate_category(node1)
node1.save()
assert node1.category == 'other'
node2 = NodeFactory(category=u'プロジェクト')
migrate_category(node2)
node2.save()
assert node2.category == 'other'
def test_valid_categories_not_migrated(self):
node1 = NodeFactory(category='project')
node2 = NodeFactory(category='hypothesis')
was_migrated1 = migrate_category(node1)
was_migrated2 = migrate_category(node2)
node1.save()
node2.save()
assert was_migrated1 is False
assert was_migrated2 is False
assert node1.category == 'project'
assert node2.category == 'hypothesis'
class TestMigrateAll(OsfTestCase):
def test_migrate_categories_all(self):
n1 = NodeFactory(category='hypothesis')
n2 = NodeFactory(category='category')
migrate_nodes()
assert n1.category == 'hypothesis'
assert n2.category == ''
if __name__ == '__main__':
    migrate_nodes()
|
<commit_before><commit_msg>Add script for migrating categories<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.project.model import Node
logger = logging.getLogger(__name__)
# legacy => new category
MIGRATE_MAP = {
'category': '',
'measure': 'methods and measures',
}
def migrate_category(node):
"""Migrate legacy, invalid category to new, valid category. Return whether
the node was changed.
"""
if node.category not in Node.CATEGORY_MAP.keys(): # invalid category
node.category = MIGRATE_MAP.get(node.category, 'other')
return True
return False
def migrate_nodes():
migrated_count = 0
for node in Node.find():
was_migrated = migrate_category(node)
if was_migrated:
node.save()
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
class TestMigratingCategories(OsfTestCase):
def test_migrate_category(self):
node = NodeFactory(category='category')
was_migrated = migrate_category(node)
assert was_migrated is True
node.save()
assert node.category == ''
def test_migrate_measure(self):
node = NodeFactory(category='measure')
migrate_category(node)
node.save()
assert node.category == 'methods and measures'
def test_everything_else_is_migrated_to_other(self):
node1 = NodeFactory(category='background')
migrate_category(node1)
node1.save()
assert node1.category == 'other'
node2 = NodeFactory(category=u'プロジェクト')
migrate_category(node2)
node2.save()
assert node2.category == 'other'
def test_valid_categories_not_migrated(self):
node1 = NodeFactory(category='project')
node2 = NodeFactory(category='hypothesis')
was_migrated1 = migrate_category(node1)
was_migrated2 = migrate_category(node2)
node1.save()
node2.save()
assert was_migrated1 is False
assert was_migrated2 is False
assert node1.category == 'project'
assert node2.category == 'hypothesis'
class TestMigrateAll(OsfTestCase):
def test_migrate_categories_all(self):
n1 = NodeFactory(category='hypothesis')
n2 = NodeFactory(category='category')
migrate_nodes()
assert n1.category == 'hypothesis'
assert n2.category == ''
if __name__ == '__main__':
main()
|
Add script for migrating categories#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.project.model import Node
logger = logging.getLogger(__name__)
# legacy => new category
MIGRATE_MAP = {
'category': '',
'measure': 'methods and measures',
}
def migrate_category(node):
"""Migrate legacy, invalid category to new, valid category. Return whether
the node was changed.
"""
if node.category not in Node.CATEGORY_MAP.keys(): # invalid category
node.category = MIGRATE_MAP.get(node.category, 'other')
return True
return False
def migrate_nodes():
migrated_count = 0
for node in Node.find():
was_migrated = migrate_category(node)
if was_migrated:
node.save()
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
class TestMigratingCategories(OsfTestCase):
def test_migrate_category(self):
node = NodeFactory(category='category')
was_migrated = migrate_category(node)
assert was_migrated is True
node.save()
assert node.category == ''
def test_migrate_measure(self):
node = NodeFactory(category='measure')
migrate_category(node)
node.save()
assert node.category == 'methods and measures'
def test_everything_else_is_migrated_to_other(self):
node1 = NodeFactory(category='background')
migrate_category(node1)
node1.save()
assert node1.category == 'other'
node2 = NodeFactory(category=u'プロジェクト')
migrate_category(node2)
node2.save()
assert node2.category == 'other'
def test_valid_categories_not_migrated(self):
node1 = NodeFactory(category='project')
node2 = NodeFactory(category='hypothesis')
was_migrated1 = migrate_category(node1)
was_migrated2 = migrate_category(node2)
node1.save()
node2.save()
assert was_migrated1 is False
assert was_migrated2 is False
assert node1.category == 'project'
assert node2.category == 'hypothesis'
class TestMigrateAll(OsfTestCase):
def test_migrate_categories_all(self):
n1 = NodeFactory(category='hypothesis')
n2 = NodeFactory(category='category')
migrate_nodes()
assert n1.category == 'hypothesis'
assert n2.category == ''
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for migrating categories<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
from tests.base import OsfTestCase
from tests.factories import NodeFactory
from website.project.model import Node
logger = logging.getLogger(__name__)
# legacy => new category
MIGRATE_MAP = {
'category': '',
'measure': 'methods and measures',
}
def migrate_category(node):
"""Migrate legacy, invalid category to new, valid category. Return whether
the node was changed.
"""
if node.category not in Node.CATEGORY_MAP.keys(): # invalid category
node.category = MIGRATE_MAP.get(node.category, 'other')
return True
return False
def migrate_nodes():
migrated_count = 0
for node in Node.find():
was_migrated = migrate_category(node)
if was_migrated:
node.save()
logger.info('Migrated {0}'.format(node._id))
migrated_count += 1
logger.info('Finished migrating {0} nodes.'.format(migrated_count))
class TestMigratingCategories(OsfTestCase):
def test_migrate_category(self):
node = NodeFactory(category='category')
was_migrated = migrate_category(node)
assert was_migrated is True
node.save()
assert node.category == ''
def test_migrate_measure(self):
node = NodeFactory(category='measure')
migrate_category(node)
node.save()
assert node.category == 'methods and measures'
def test_everything_else_is_migrated_to_other(self):
node1 = NodeFactory(category='background')
migrate_category(node1)
node1.save()
assert node1.category == 'other'
node2 = NodeFactory(category=u'プロジェクト')
migrate_category(node2)
node2.save()
assert node2.category == 'other'
def test_valid_categories_not_migrated(self):
node1 = NodeFactory(category='project')
node2 = NodeFactory(category='hypothesis')
was_migrated1 = migrate_category(node1)
was_migrated2 = migrate_category(node2)
node1.save()
node2.save()
assert was_migrated1 is False
assert was_migrated2 is False
assert node1.category == 'project'
assert node2.category == 'hypothesis'
class TestMigrateAll(OsfTestCase):
def test_migrate_categories_all(self):
n1 = NodeFactory(category='hypothesis')
n2 = NodeFactory(category='category')
migrate_nodes()
assert n1.category == 'hypothesis'
assert n2.category == ''
if __name__ == '__main__':
main()
|
|
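Editor's note: the migration script recorded above guards on __main__ but calls main(), which is never defined in the file; presumably migrate_nodes() was intended. A minimal sketch of the missing entry point (hypothetical; a real OSF migration would also need the application context initialized first, and it assumes migrate_nodes() from the snippet is in scope):
import sys

def main():
    migrate_nodes()   # the function defined above that does the actual walk
    return 0

if __name__ == '__main__':
    sys.exit(main())
|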
00687827e0290fd862455adf34d6e64b1fe0b9f8
|
maxwellbloch/tests/test_sigma.py
|
maxwellbloch/tests/test_sigma.py
|
"""Unit tests for the sigma module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
import numpy as np
import qutip as qu
from maxwellbloch import sigma
class TestSigma(unittest.TestCase):
""" Tests for sigma.sigma. """
def test_sigma_2_0_0(self):
""" Test |0><0| for a two-level system. """
sigma_2_0_0 = qu.Qobj([[1.,0.],[0.,0.]])
self.assertEqual(sigma.sigma(2,0,0), sigma_2_0_0)
def test_sigma_10_9_9(self):
""" Test |9><9| for a ten-level system. """
sigma_10_9_9 = np.zeros((10,10))
sigma_10_9_9[9,9] = 1.
sigma_10_9_9 = qu.Qobj(sigma_10_9_9)
self.assertEqual(sigma.sigma(10,9,9), sigma_10_9_9)
class TestSigmaN(unittest.TestCase):
""" Tests for sigma.sigma_N. """
def test_sigma_N_2_0_0_0_1(self):
""" Test that sigma_N with 1 subsystem returns same as sigma. """
self.assertEqual(sigma.sigma_N(2,0,0,0,1),sigma.sigma(2,0,0))
def test_sigma_N_2_0_1_1_2(self):
""" Test for |0><1| on the 2nd of 2 interacting two-level systems. """
sigma_2_0_1 = qu.Qobj([[0.,1.],[0.,0.]])
sigma_N_2_0_1_1_2 = qu.tensor(qu.identity(2), sigma_2_0_1)
self.assertEqual(sigma.sigma_N(2,0,1,1,2),sigma_N_2_0_1_1_2)
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
Add sigma tests from OB
|
Add sigma tests from OB
|
Python
|
mit
|
tommyogden/maxwellbloch,tommyogden/maxwellbloch
|
Add sigma tests from OB
|
"""Unit tests for the sigma module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
import numpy as np
import qutip as qu
from maxwellbloch import sigma
class TestSigma(unittest.TestCase):
""" Tests for sigma.sigma. """
def test_sigma_2_0_0(self):
""" Test |0><0| for a two-level system. """
sigma_2_0_0 = qu.Qobj([[1.,0.],[0.,0.]])
self.assertEqual(sigma.sigma(2,0,0), sigma_2_0_0)
def test_sigma_10_9_9(self):
""" Test |9><9| for a ten-level system. """
sigma_10_9_9 = np.zeros((10,10))
sigma_10_9_9[9,9] = 1.
sigma_10_9_9 = qu.Qobj(sigma_10_9_9)
self.assertEqual(sigma.sigma(10,9,9), sigma_10_9_9)
class TestSigmaN(unittest.TestCase):
""" Tests for sigma.sigma_N. """
def test_sigma_N_2_0_0_0_1(self):
""" Test that sigma_N with 1 subsystem returns same as sigma. """
self.assertEqual(sigma.sigma_N(2,0,0,0,1),sigma.sigma(2,0,0))
def test_sigma_N_2_0_1_1_2(self):
""" Test for |0><1| on the 2nd of 2 interacting two-level systems. """
sigma_2_0_1 = qu.Qobj([[0.,1.],[0.,0.]])
sigma_N_2_0_1_1_2 = qu.tensor(qu.identity(2), sigma_2_0_1)
self.assertEqual(sigma.sigma_N(2,0,1,1,2),sigma_N_2_0_1_1_2)
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add sigma tests from OB<commit_after>
|
"""Unit tests for the sigma module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
import numpy as np
import qutip as qu
from maxwellbloch import sigma
class TestSigma(unittest.TestCase):
""" Tests for sigma.sigma. """
def test_sigma_2_0_0(self):
""" Test |0><0| for a two-level system. """
sigma_2_0_0 = qu.Qobj([[1.,0.],[0.,0.]])
self.assertEqual(sigma.sigma(2,0,0), sigma_2_0_0)
def test_sigma_10_9_9(self):
""" Test |9><9| for a ten-level system. """
sigma_10_9_9 = np.zeros((10,10))
sigma_10_9_9[9,9] = 1.
sigma_10_9_9 = qu.Qobj(sigma_10_9_9)
self.assertEqual(sigma.sigma(10,9,9), sigma_10_9_9)
class TestSigmaN(unittest.TestCase):
""" Tests for sigma.sigma_N. """
def test_sigma_N_2_0_0_0_1(self):
""" Test that sigma_N with 1 subsystem returns same as sigma. """
self.assertEqual(sigma.sigma_N(2,0,0,0,1),sigma.sigma(2,0,0))
def test_sigma_N_2_0_1_1_2(self):
""" Test for |0><1| on the 2nd of 2 interacting two-level systems. """
sigma_2_0_1 = qu.Qobj([[0.,1.],[0.,0.]])
sigma_N_2_0_1_1_2 = qu.tensor(qu.identity(2), sigma_2_0_1)
self.assertEqual(sigma.sigma_N(2,0,1,1,2),sigma_N_2_0_1_1_2)
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
Add sigma tests from OB"""Unit tests for the sigma module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
import numpy as np
import qutip as qu
from maxwellbloch import sigma
class TestSigma(unittest.TestCase):
""" Tests for sigma.sigma. """
def test_sigma_2_0_0(self):
""" Test |0><0| for a two-level system. """
sigma_2_0_0 = qu.Qobj([[1.,0.],[0.,0.]])
self.assertEqual(sigma.sigma(2,0,0), sigma_2_0_0)
def test_sigma_10_9_9(self):
""" Test |9><9| for a ten-level system. """
sigma_10_9_9 = np.zeros((10,10))
sigma_10_9_9[9,9] = 1.
sigma_10_9_9 = qu.Qobj(sigma_10_9_9)
self.assertEqual(sigma.sigma(10,9,9), sigma_10_9_9)
class TestSigmaN(unittest.TestCase):
""" Tests for sigma.sigma_N. """
def test_sigma_N_2_0_0_0_1(self):
""" Test that sigma_N with 1 subsystem returns same as sigma. """
self.assertEqual(sigma.sigma_N(2,0,0,0,1),sigma.sigma(2,0,0))
def test_sigma_N_2_0_1_1_2(self):
""" Test for |0><1| on the 2nd of 2 interacting two-level systems. """
sigma_2_0_1 = qu.Qobj([[0.,1.],[0.,0.]])
sigma_N_2_0_1_1_2 = qu.tensor(qu.identity(2), sigma_2_0_1)
self.assertEqual(sigma.sigma_N(2,0,1,1,2),sigma_N_2_0_1_1_2)
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
<commit_before><commit_msg>Add sigma tests from OB<commit_after>"""Unit tests for the sigma module.
Thomas Ogden <t@ogden.eu>
"""
import sys
import unittest
import numpy as np
import qutip as qu
from maxwellbloch import sigma
class TestSigma(unittest.TestCase):
""" Tests for sigma.sigma. """
def test_sigma_2_0_0(self):
""" Test |0><0| for a two-level system. """
sigma_2_0_0 = qu.Qobj([[1.,0.],[0.,0.]])
self.assertEqual(sigma.sigma(2,0,0), sigma_2_0_0)
def test_sigma_10_9_9(self):
""" Test |9><9| for a ten-level system. """
sigma_10_9_9 = np.zeros((10,10))
sigma_10_9_9[9,9] = 1.
sigma_10_9_9 = qu.Qobj(sigma_10_9_9)
self.assertEqual(sigma.sigma(10,9,9), sigma_10_9_9)
class TestSigmaN(unittest.TestCase):
""" Tests for sigma.sigma_N. """
def test_sigma_N_2_0_0_0_1(self):
""" Test that sigma_N with 1 subsystem returns same as sigma. """
self.assertEqual(sigma.sigma_N(2,0,0,0,1),sigma.sigma(2,0,0))
def test_sigma_N_2_0_1_1_2(self):
""" Test for |0><1| on the 2nd of 2 interacting two-level systems. """
sigma_2_0_1 = qu.Qobj([[0.,1.],[0.,0.]])
sigma_N_2_0_1_1_2 = qu.tensor(qu.identity(2), sigma_2_0_1)
self.assertEqual(sigma.sigma_N(2,0,1,1,2),sigma_N_2_0_1_1_2)
def main():
unittest.main(verbosity=3)
if __name__ == "__main__":
status = main()
sys.exit(status)
|
|
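Editor's note: the tests above pin down the behaviour of maxwellbloch.sigma without showing the module itself. A plausible implementation consistent with every assertion (hypothetical; the real module may differ):
import qutip as qu

def sigma(num_levels, i, j):
    """Transition operator |i><j| for a single num_levels-level system."""
    return qu.basis(num_levels, i) * qu.basis(num_levels, j).dag()

def sigma_N(num_levels, i, j, n, N):
    """|i><j| acting on subsystem n (zero-indexed) of N coupled systems."""
    ops = [qu.identity(num_levels)] * N   # identity everywhere else
    ops[n] = sigma(num_levels, i, j)
    return qu.tensor(ops)
With these definitions, sigma_N(2, 0, 0, 0, 1) reduces to sigma(2, 0, 0) and sigma_N(2, 0, 1, 1, 2) equals qu.tensor(qu.identity(2), sigma(2, 0, 1)), matching both TestSigmaN cases.
|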
e4cfb0cdba360bc65621ebec6d8c184725b11749
|
test_aps_eventlet.py
|
test_aps_eventlet.py
|
#!/usr/bin/env python2.7
from pytz import utc
import time
import eventlet
from apscheduler.schedulers.background import BackgroundScheduler
def test_aps_eventlet():
def showMessage():
print "Show this message"
sh = BackgroundScheduler()
sh.start()
sh.add_job(showMessage, 'interval', seconds=2, timezone=utc)
time.sleep(10)
if __name__ == '__main__':
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
test_aps_eventlet()
|
Test APS 3.0 with eventlet
|
Test APS 3.0 with eventlet
|
Python
|
apache-2.0
|
lakshmi-kannan/python-scratchpad
|
Test APS 3.0 with eventlet
|
#!/usr/bin/env python2.7
from pytz import utc
import time
import eventlet
from apscheduler.schedulers.background import BackgroundScheduler
def test_aps_eventlet():
def showMessage():
print "Show this message"
sh = BackgroundScheduler()
sh.start()
sh.add_job(showMessage, 'interval', seconds=2, timezone=utc)
time.sleep(10)
if __name__ == '__main__':
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
test_aps_eventlet()
|
<commit_before><commit_msg>Test APS 3.0 with eventlet<commit_after>
|
#!/usr/bin/env python2.7
from pytz import utc
import time
import eventlet
from apscheduler.schedulers.background import BackgroundScheduler
def test_aps_eventlet():
def showMessage():
print "Show this message"
sh = BackgroundScheduler()
sh.start()
sh.add_job(showMessage, 'interval', seconds=2, timezone=utc)
time.sleep(10)
if __name__ == '__main__':
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
test_aps_eventlet()
|
Test APS 3.0 with eventlet#!/usr/bin/env python2.7
from pytz import utc
import time
import eventlet
from apscheduler.schedulers.background import BackgroundScheduler
def test_aps_eventlet():
def showMessage():
print "Show this message"
sh = BackgroundScheduler()
sh.start()
sh.add_job(showMessage, 'interval', seconds=2, timezone=utc)
time.sleep(10)
if __name__ == '__main__':
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
test_aps_eventlet()
|
<commit_before><commit_msg>Test APS 3.0 with eventlet<commit_after>#!/usr/bin/env python2.7
from pytz import utc
import time
import eventlet
from apscheduler.schedulers.background import BackgroundScheduler
def test_aps_eventlet():
def showMessage():
print "Show this message"
sh = BackgroundScheduler()
sh.start()
sh.add_job(showMessage, 'interval', seconds=2, timezone=utc)
time.sleep(10)
if __name__ == '__main__':
eventlet.monkey_patch(
os=True,
select=True,
socket=True,
thread=True,
time=True)
test_aps_eventlet()
|
|
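Editor's note: the probe above calls eventlet.monkey_patch() only at the bottom of the file, after apscheduler and the other imports have already run; the usual eventlet guidance is to patch as early as possible, before anything else grabs the real thread primitives. A Python 3 rework with that ordering (hypothetical; same behaviour intended as the recorded 2.7 script):
import eventlet
eventlet.monkey_patch()   # patch first, before apscheduler is imported

import time
from pytz import utc
from apscheduler.schedulers.background import BackgroundScheduler

def show_message():
    print("Show this message")

if __name__ == '__main__':
    scheduler = BackgroundScheduler(timezone=utc)
    scheduler.start()
    scheduler.add_job(show_message, 'interval', seconds=2)
    time.sleep(10)            # let the job fire a few times
    scheduler.shutdown()
|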
d41dec0e206f4c2904e83ab7d4934d224bea0a95
|
tests/test_anonymous_json.py
|
tests/test_anonymous_json.py
|
from __future__ import print_function, unicode_literals
from aspen import json
from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import Harness
from gittip.testing.client import TestClient
class Tests(Harness):
def hit_anonymous(self, method='GET', expected_code=200):
user, ignored = TwitterAccount('alice', {}).opt_in('alice')
client = TestClient()
response = client.get('/')
csrf_token = response.request.context['csrf_token']
if method == 'GET':
response = client.get( "/alice/anonymous.json"
, user='alice'
)
else:
assert method == 'POST'
response = client.post( "/alice/anonymous.json"
, {'csrf_token': csrf_token}
, user='alice'
)
if response.code != expected_code:
print(response.body)
return response
def test_participant_can_get_their_anonymity_setting(self):
response = self.hit_anonymous('GET')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
def test_participant_can_toggle_their_anonymity_setting(self):
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is True, actual
def test_participant_can_toggle_their_anonymity_setting_back(self):
response = self.hit_anonymous('POST')
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
|
Add a few tests for anonymous.json
|
Add a few tests for anonymous.json
|
Python
|
mit
|
gratipay/gratipay.com,mccolgst/www.gittip.com,studio666/gratipay.com,studio666/gratipay.com,studio666/gratipay.com,studio666/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,eXcomm/gratipay.com,mccolgst/www.gittip.com,mccolgst/www.gittip.com,gratipay/gratipay.com,gratipay/gratipay.com,gratipay/gratipay.com,mccolgst/www.gittip.com
|
Add a few tests for anonymous.json
|
from __future__ import print_function, unicode_literals
from aspen import json
from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import Harness
from gittip.testing.client import TestClient
class Tests(Harness):
def hit_anonymous(self, method='GET', expected_code=200):
user, ignored = TwitterAccount('alice', {}).opt_in('alice')
client = TestClient()
response = client.get('/')
csrf_token = response.request.context['csrf_token']
if method == 'GET':
response = client.get( "/alice/anonymous.json"
, user='alice'
)
else:
assert method == 'POST'
response = client.post( "/alice/anonymous.json"
, {'csrf_token': csrf_token}
, user='alice'
)
if response.code != expected_code:
print(response.body)
return response
def test_participant_can_get_their_anonymity_setting(self):
response = self.hit_anonymous('GET')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
def test_participant_can_toggle_their_anonymity_setting(self):
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is True, actual
def test_participant_can_toggle_their_anonymity_setting_back(self):
response = self.hit_anonymous('POST')
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
|
<commit_before><commit_msg>Add a few tests for anonymous.json<commit_after>
|
from __future__ import print_function, unicode_literals
from aspen import json
from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import Harness
from gittip.testing.client import TestClient
class Tests(Harness):
def hit_anonymous(self, method='GET', expected_code=200):
user, ignored = TwitterAccount('alice', {}).opt_in('alice')
client = TestClient()
response = client.get('/')
csrf_token = response.request.context['csrf_token']
if method == 'GET':
response = client.get( "/alice/anonymous.json"
, user='alice'
)
else:
assert method == 'POST'
response = client.post( "/alice/anonymous.json"
, {'csrf_token': csrf_token}
, user='alice'
)
if response.code != expected_code:
print(response.body)
return response
def test_participant_can_get_their_anonymity_setting(self):
response = self.hit_anonymous('GET')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
def test_participant_can_toggle_their_anonymity_setting(self):
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is True, actual
def test_participant_can_toggle_their_anonymity_setting_back(self):
response = self.hit_anonymous('POST')
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
|
Add a few tests for anonymous.jsonfrom __future__ import print_function, unicode_literals
from aspen import json
from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import Harness
from gittip.testing.client import TestClient
class Tests(Harness):
def hit_anonymous(self, method='GET', expected_code=200):
user, ignored = TwitterAccount('alice', {}).opt_in('alice')
client = TestClient()
response = client.get('/')
csrf_token = response.request.context['csrf_token']
if method == 'GET':
response = client.get( "/alice/anonymous.json"
, user='alice'
)
else:
assert method == 'POST'
response = client.post( "/alice/anonymous.json"
, {'csrf_token': csrf_token}
, user='alice'
)
if response.code != expected_code:
print(response.body)
return response
def test_participant_can_get_their_anonymity_setting(self):
response = self.hit_anonymous('GET')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
def test_participant_can_toggle_their_anonymity_setting(self):
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is True, actual
def test_participant_can_toggle_their_anonymity_setting_back(self):
response = self.hit_anonymous('POST')
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
|
<commit_before><commit_msg>Add a few tests for anonymous.json<commit_after>from __future__ import print_function, unicode_literals
from aspen import json
from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import Harness
from gittip.testing.client import TestClient
class Tests(Harness):
def hit_anonymous(self, method='GET', expected_code=200):
user, ignored = TwitterAccount('alice', {}).opt_in('alice')
client = TestClient()
response = client.get('/')
csrf_token = response.request.context['csrf_token']
if method == 'GET':
response = client.get( "/alice/anonymous.json"
, user='alice'
)
else:
assert method == 'POST'
response = client.post( "/alice/anonymous.json"
, {'csrf_token': csrf_token}
, user='alice'
)
if response.code != expected_code:
print(response.body)
return response
def test_participant_can_get_their_anonymity_setting(self):
response = self.hit_anonymous('GET')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
def test_participant_can_toggle_their_anonymity_setting(self):
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is True, actual
def test_participant_can_toggle_their_anonymity_setting_back(self):
response = self.hit_anonymous('POST')
response = self.hit_anonymous('POST')
actual = json.loads(response.body)['anonymous']
assert actual is False, actual
|
|
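Editor's note: the resource under test, /%username/anonymous.json, is not part of this record. A sketch of the contract the three tests encode, with a stand-in participant so it runs on its own (hypothetical; the real Aspen simplate and participant model will differ):
class FakeParticipant:
    anonymous = False
    def save(self):
        pass   # stand-in for the real persistence layer

def anonymous_endpoint(participant, method):
    """GET reports the flag; POST flips it and persists the change."""
    if method == 'POST':
        participant.anonymous = not participant.anonymous
        participant.save()
    return {'anonymous': participant.anonymous}

p = FakeParticipant()
assert anonymous_endpoint(p, 'GET') == {'anonymous': False}
assert anonymous_endpoint(p, 'POST') == {'anonymous': True}    # toggled
assert anonymous_endpoint(p, 'POST') == {'anonymous': False}   # and back
|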
ebc31e8c510559c4bc862283de284247700088ea
|
numpy/core/tests/test_dtype.py
|
numpy/core/tests/test_dtype.py
|
import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
Add some unit tests for the hashing protocol of dtype (fail currently).
|
Add some unit tests for the hashing protocol of dtype (fail currently).
|
Python
|
bsd-3-clause
|
brandon-rhodes/numpy,AustereCuriosity/numpy,nguyentu1602/numpy,BabeNovelty/numpy,MSeifert04/numpy,drasmuss/numpy,njase/numpy,ogrisel/numpy,charris/numpy,ahaldane/numpy,bringingheavendown/numpy,stefanv/numpy,MichaelAquilina/numpy,mathdd/numpy,skwbc/numpy,ChanderG/numpy,larsmans/numpy,mwiebe/numpy,ChristopherHogan/numpy,ogrisel/numpy,njase/numpy,mattip/numpy,WillieMaddox/numpy,mortada/numpy,KaelChen/numpy,embray/numpy,KaelChen/numpy,empeeu/numpy,pyparallel/numpy,leifdenby/numpy,sonnyhu/numpy,musically-ut/numpy,numpy/numpy,jakirkham/numpy,kirillzhuravlev/numpy,groutr/numpy,moreati/numpy,tynn/numpy,b-carter/numpy,rudimeier/numpy,Linkid/numpy,bmorris3/numpy,shoyer/numpy,bringingheavendown/numpy,numpy/numpy,mhvk/numpy,Dapid/numpy,kirillzhuravlev/numpy,ajdawson/numpy,dwf/numpy,jorisvandenbossche/numpy,BabeNovelty/numpy,nbeaver/numpy,pbrod/numpy,madphysicist/numpy,Anwesh43/numpy,mattip/numpy,maniteja123/numpy,ChanderG/numpy,matthew-brett/numpy,Linkid/numpy,MaPePeR/numpy,seberg/numpy,ChanderG/numpy,empeeu/numpy,Yusa95/numpy,dwillmer/numpy,has2k1/numpy,cjermain/numpy,ssanderson/numpy,rgommers/numpy,astrofrog/numpy,WarrenWeckesser/numpy,Srisai85/numpy,trankmichael/numpy,pelson/numpy,dch312/numpy,jschueller/numpy,stuarteberg/numpy,naritta/numpy,sigma-random/numpy,NextThought/pypy-numpy,astrofrog/numpy,andsor/numpy,andsor/numpy,rmcgibbo/numpy,moreati/numpy,dwillmer/numpy,mindw/numpy,ogrisel/numpy,skymanaditya1/numpy,mhvk/numpy,joferkington/numpy,Eric89GXL/numpy,githubmlai/numpy,sinhrks/numpy,MichaelAquilina/numpy,grlee77/numpy,rajathkumarmp/numpy,bmorris3/numpy,charris/numpy,Dapid/numpy,rudimeier/numpy,jakirkham/numpy,leifdenby/numpy,larsmans/numpy,seberg/numpy,Anwesh43/numpy,githubmlai/numpy,SiccarPoint/numpy,kirillzhuravlev/numpy,mathdd/numpy,leifdenby/numpy,joferkington/numpy,bmorris3/numpy,ewmoore/numpy,numpy/numpy-refactor,rherault-insa/numpy,rmcgibbo/numpy,MaPePeR/numpy,brandon-rhodes/numpy,KaelChen/numpy,pyparallel/numpy,BabeNovelty/numpy,Dapid/numpy,dimasad/numpy,rherault-insa/numpy,GaZ3ll3/numpy,has2k1/numpy,jschueller/numpy,ContinuumIO/numpy,SunghanKim/numpy,ssanderson/numpy,ESSS/numpy,rhythmsosad/numpy,argriffing/numpy,MichaelAquilina/numpy,sinhrks/numpy,stefanv/numpy,hainm/numpy,bertrand-l/numpy,seberg/numpy,WillieMaddox/numpy,simongibbons/numpy,AustereCuriosity/numpy,MaPePeR/numpy,stuarteberg/numpy,Eric89GXL/numpy,anntzer/numpy,ewmoore/numpy,ajdawson/numpy,ekalosak/numpy,rhythmsosad/numpy,mattip/numpy,pbrod/numpy,chatcannon/numpy,Srisai85/numpy,simongibbons/numpy,Srisai85/numpy,naritta/numpy,astrofrog/numpy,drasmuss/numpy,mingwpy/numpy,groutr/numpy,AustereCuriosity/numpy,grlee77/numpy,jankoslavic/numpy,gmcastil/numpy,ekalosak/numpy,cjermain/numpy,utke1/numpy,gmcastil/numpy,yiakwy/numpy,yiakwy/numpy,felipebetancur/numpy,SunghanKim/numpy,trankmichael/numpy,embray/numpy,ChristopherHogan/numpy,sinhrks/numpy,mindw/numpy,dwillmer/numpy,rajathkumarmp/numpy,cjermain/numpy,tynn/numpy,jonathanunderwood/numpy,mhvk/numpy,anntzer/numpy,mortada/numpy,immerrr/numpy,b-carter/numpy,mingwpy/numpy,matthew-brett/numpy,dch312/numpy,cowlicks/numpy,GrimDerp/numpy,felipebetancur/numpy,sonnyhu/numpy,mattip/numpy,yiakwy/numpy,behzadnouri/numpy,rhythmsosad/numpy,ekalosak/numpy,maniteja123/numpy,kiwifb/numpy,numpy/numpy,pizzathief/numpy,GaZ3ll3/numpy,shoyer/numpy,bringingheavendown/numpy,kiwifb/numpy,dato-code/numpy,matthew-brett/numpy,BMJHayward/numpy,rgommers/numpy,has2k1/numpy,skymanaditya1/numpy,stefanv/numpy,shoyer/numpy,nguyentu1602/numpy,hainm/numpy,bertrand-l/numpy,nbeaver/numpy,Anwesh43/numpy,ESSS/numpy,ddasi
lva/numpy,mortada/numpy,ddasilva/numpy,jankoslavic/numpy,matthew-brett/numpy,cowlicks/numpy,dato-code/numpy,solarjoe/numpy,madphysicist/numpy,MSeifert04/numpy,endolith/numpy,jankoslavic/numpy,pdebuyl/numpy,jakirkham/numpy,ajdawson/numpy,bmorris3/numpy,behzadnouri/numpy,njase/numpy,b-carter/numpy,BMJHayward/numpy,solarjoe/numpy,Linkid/numpy,pbrod/numpy,pizzathief/numpy,hainm/numpy,simongibbons/numpy,trankmichael/numpy,stefanv/numpy,skwbc/numpy,WarrenWeckesser/numpy,pelson/numpy,pbrod/numpy,jakirkham/numpy,mingwpy/numpy,jschueller/numpy,drasmuss/numpy,numpy/numpy,dato-code/numpy,ChristopherHogan/numpy,ewmoore/numpy,sigma-random/numpy,pelson/numpy,anntzer/numpy,endolith/numpy,rherault-insa/numpy,Yusa95/numpy,BMJHayward/numpy,jorisvandenbossche/numpy,tynn/numpy,MichaelAquilina/numpy,larsmans/numpy,ViralLeadership/numpy,chiffa/numpy,dimasad/numpy,brandon-rhodes/numpy,jakirkham/numpy,astrofrog/numpy,empeeu/numpy,grlee77/numpy,NextThought/pypy-numpy,hainm/numpy,MSeifert04/numpy,grlee77/numpy,seberg/numpy,immerrr/numpy,mindw/numpy,musically-ut/numpy,WillieMaddox/numpy,SiccarPoint/numpy,abalkin/numpy,ContinuumIO/numpy,endolith/numpy,endolith/numpy,WarrenWeckesser/numpy,ahaldane/numpy,Linkid/numpy,yiakwy/numpy,mingwpy/numpy,rudimeier/numpy,tacaswell/numpy,anntzer/numpy,solarjoe/numpy,githubmlai/numpy,nbeaver/numpy,mwiebe/numpy,ogrisel/numpy,NextThought/pypy-numpy,groutr/numpy,tdsmith/numpy,ewmoore/numpy,dwf/numpy,simongibbons/numpy,madphysicist/numpy,tdsmith/numpy,jankoslavic/numpy,kirillzhuravlev/numpy,pizzathief/numpy,ddasilva/numpy,gfyoung/numpy,kiwifb/numpy,dwf/numpy,jorisvandenbossche/numpy,dimasad/numpy,immerrr/numpy,felipebetancur/numpy,mhvk/numpy,sigma-random/numpy,ewmoore/numpy,cowlicks/numpy,CMartelLML/numpy,rmcgibbo/numpy,gmcastil/numpy,ekalosak/numpy,naritta/numpy,mhvk/numpy,ahaldane/numpy,chatcannon/numpy,dch312/numpy,GrimDerp/numpy,chiffa/numpy,Srisai85/numpy,moreati/numpy,cowlicks/numpy,larsmans/numpy,charris/numpy,CMartelLML/numpy,dwf/numpy,dwillmer/numpy,felipebetancur/numpy,madphysicist/numpy,ajdawson/numpy,charris/numpy,cjermain/numpy,sonnyhu/numpy,rgommers/numpy,embray/numpy,pyparallel/numpy,pdebuyl/numpy,ahaldane/numpy,pdebuyl/numpy,ContinuumIO/numpy,MSeifert04/numpy,brandon-rhodes/numpy,sonnyhu/numpy,Anwesh43/numpy,nguyentu1602/numpy,skymanaditya1/numpy,tdsmith/numpy,pizzathief/numpy,has2k1/numpy,GaZ3ll3/numpy,WarrenWeckesser/numpy,andsor/numpy,behzadnouri/numpy,rudimeier/numpy,rgommers/numpy,ogrisel/numpy,dch312/numpy,argriffing/numpy,sigma-random/numpy,skwbc/numpy,andsor/numpy,astrofrog/numpy,shoyer/numpy,numpy/numpy-refactor,naritta/numpy,jonathanunderwood/numpy,CMartelLML/numpy,SiccarPoint/numpy,joferkington/numpy,mindw/numpy,embray/numpy,BMJHayward/numpy,musically-ut/numpy,empeeu/numpy,dimasad/numpy,maniteja123/numpy,numpy/numpy-refactor,pdebuyl/numpy,mathdd/numpy,SunghanKim/numpy,pbrod/numpy,joferkington/numpy,sinhrks/numpy,dato-code/numpy,trankmichael/numpy,Eric89GXL/numpy,githubmlai/numpy,ESSS/numpy,grlee77/numpy,gfyoung/numpy,abalkin/numpy,rajathkumarmp/numpy,pelson/numpy,NextThought/pypy-numpy,madphysicist/numpy,shoyer/numpy,nguyentu1602/numpy,Yusa95/numpy,Eric89GXL/numpy,tacaswell/numpy,chiffa/numpy,BabeNovelty/numpy,dwf/numpy,mortada/numpy,ViralLeadership/numpy,simongibbons/numpy,utke1/numpy,chatcannon/numpy,Yusa95/numpy,tacaswell/numpy,jonathanunderwood/numpy,MSeifert04/numpy,ChanderG/numpy,MaPePeR/numpy,numpy/numpy-refactor,mathdd/numpy,rmcgibbo/numpy,ahaldane/numpy,stuarteberg/numpy,pizzathief/numpy,gfyoung/numpy,argriffing/numpy,GrimDerp/numpy,SiccarPoint/numpy,p
elson/numpy,immerrr/numpy,tdsmith/numpy,mwiebe/numpy,rajathkumarmp/numpy,rhythmsosad/numpy,GaZ3ll3/numpy,stuarteberg/numpy,jschueller/numpy,embray/numpy,KaelChen/numpy,bertrand-l/numpy,ViralLeadership/numpy,jorisvandenbossche/numpy,stefanv/numpy,skymanaditya1/numpy,ChristopherHogan/numpy,abalkin/numpy,WarrenWeckesser/numpy,numpy/numpy-refactor,jorisvandenbossche/numpy,CMartelLML/numpy,SunghanKim/numpy,GrimDerp/numpy,musically-ut/numpy,utke1/numpy,ssanderson/numpy,matthew-brett/numpy
|
Add some unit tests for the hashing protocol of dtype (fail currently).
|
import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add some unit tests for the hashing protocol of dtype (fail currently).<commit_after>
|
import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
Add some unit tests for the hashing protocol of dtype (fail currently).import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add some unit tests for the hashing protocol of dtype (fail currently).<commit_after>import numpy as np
from numpy.testing import *
class TestBuiltin(TestCase):
def test_run(self):
"""Only test hash runs at all."""
for t in [np.int, np.float, np.complex, np.int32, np.str, np.object,
np.unicode]:
dt = np.dtype(t)
hash(dt)
class TestRecord(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent record dtypes hash the same."""
a = np.dtype([('yo', np.int)])
b = np.dtype([('yo', np.int)])
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_different_names(self):
# In theory, they may hash the same (collision) ?
a = np.dtype([('yo', np.int)])
b = np.dtype([('ye', np.int)])
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
def test_different_titles(self):
# In theory, they may hash the same (collision) ?
a = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['Red pixel', 'Blue pixel']})
b = np.dtype({'names': ['r','b'], 'formats': ['u1', 'u1'],
'titles': ['RRed pixel', 'Blue pixel']})
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestSubarray(TestCase):
def test_equivalent_record(self):
"""Test whether equivalent subarray dtypes hash the same."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 3)))
self.failUnless(hash(a) == hash(b),
"two equivalent types do not hash to the same value !")
def test_nonequivalent_record(self):
"""Test whether different subarray dtypes hash differently."""
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (3, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (2, 3)))
b = np.dtype((np.int, (2, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
a = np.dtype((np.int, (1, 2, 3)))
b = np.dtype((np.int, (1, 2)))
self.failUnless(hash(a) != hash(b),
"%s and %s hash the same !" % (a, b))
class TestMonsterType(TestCase):
"""Test deeply nested subtypes."""
pass
if __name__ == "__main__":
run_module_suite()
|
|
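Editor's note: the invariant these tests enforce is the standard hash contract: dtypes that compare equal must hash equal, which is what lets them serve as dictionary keys. A quick illustration (np.int in the record is the old alias, removed in modern NumPy; np.int32 is used here instead):
import numpy as np

a = np.dtype([('yo', np.int32)])
b = np.dtype([('yo', np.int32)])
assert a == b and hash(a) == hash(b)   # the contract under test

cache = {a: 'compiled loop'}           # why it matters: dtype-keyed lookups
assert cache[b] == 'compiled loop'
|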
d8411d654e9ca1de67b44e5384eadc476c99e0b9
|
tests/test_arrays.py
|
tests/test_arrays.py
|
from thinglang.thinglang import run
def test_simple_arrays():
assert run("""
thing Program
does start
array names = ["yotam", "andrew", "john"]
Output.write(names)
""").output == """['yotam', 'andrew', 'john']"""
|
Add test for simple array initialization
|
Add test for simple array initialization
|
Python
|
mit
|
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
|
Add test for simple array initialization
|
from thinglang.thinglang import run
def test_simple_arrays():
assert run("""
thing Program
does start
array names = ["yotam", "andrew", "john"]
Output.write(names)
""").output == """['yotam', 'andrew', 'john']"""
|
<commit_before><commit_msg>Add test for simple array initialization<commit_after>
|
from thinglang.thinglang import run
def test_simple_arrays():
assert run("""
thing Program
does start
array names = ["yotam", "andrew", "john"]
Output.write(names)
""").output == """['yotam', 'andrew', 'john']"""
|
Add test for simple array initializationfrom thinglang.thinglang import run
def test_simple_arrays():
assert run("""
thing Program
does start
array names = ["yotam", "andrew", "john"]
Output.write(names)
""").output == """['yotam', 'andrew', 'john']"""
|
<commit_before><commit_msg>Add test for simple array initialization<commit_after>from thinglang.thinglang import run
def test_simple_arrays():
assert run("""
thing Program
does start
array names = ["yotam", "andrew", "john"]
Output.write(names)
""").output == """['yotam', 'andrew', 'john']"""
|
|
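Editor's note: the test leans on a run() helper that executes a thinglang source snippet and exposes its captured output for comparison against a golden string. A generic sketch of that golden-output pattern (hypothetical; the real thinglang.run is not in this record):
import io
import contextlib

def run_and_capture(interpret, source):
    """Execute `source` with `interpret` and return everything it printed."""
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        interpret(source)
    return buf.getvalue()

# e.g. with a trivial stand-in interpreter:
assert run_and_capture(lambda s: print(len(s)), "abc") == "3\n"
|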
85801ea57f2b8477ca1d73fa350b82e3def40303
|
tests/test_rtnorm.py
|
tests/test_rtnorm.py
|
# This should plot a histogram looking like a gaussian
# ... It does.
## CONFIGURATION (play with different values)
samples = int(1e6)
minimum = 0.
maximum = 15.
center = 7.
stddev = 5.
## VARIABLES FROM RANDOM TRUNCATED NORMAL DISTRIBUTION
from lib.rtnorm import rtnorm
variables = rtnorm(minimum, maximum, mu=center, sigma=stddev, size=samples)
## PLOT THEIR HISTOGRAM
import matplotlib.pyplot as plot
plot.hist(variables, bins=400)
plot.show()
|
Add a little test for rtnorm, to make sure this is not the problem. It is not, apparently.
|
Add a little test for rtnorm, to make sure this is not the problem. It is not, apparently.
|
Python
|
mit
|
irap-omp/deconv3d,irap-omp/deconv3d
|
Add a little test for rtnorm, to make sure this is not the problem. It is not, apparently.
|
# This should plot a histogram looking like a gaussian
# ... It does.
## CONFIGURATION (play with different values)
samples = int(1e6)
minimum = 0.
maximum = 15.
center = 7.
stddev = 5.
## VARIABLES FROM RANDOM TRUNCATED NORMAL DISTRIBUTION
from lib.rtnorm import rtnorm
variables = rtnorm(minimum, maximum, mu=center, sigma=stddev, size=samples)
## PLOT THEIR HISTOGRAM
import matplotlib.pyplot as plot
plot.hist(variables, bins=400)
plot.show()
|
<commit_before><commit_msg>Add a little test for rtnorm, to make sure this is not the problem. It is not, apparently.<commit_after>
|
# This should plot a histogram looking like a gaussian
# ... It does.
## CONFIGURATION (play with different values)
samples = int(1e6)
minimum = 0.
maximum = 15.
center = 7.
stddev = 5.
## VARIABLES FROM RANDOM TRUNCATED NORMAL DISTRIBUTION
from lib.rtnorm import rtnorm
variables = rtnorm(minimum, maximum, mu=center, sigma=stddev, size=samples)
## PLOT THEIR HISTOGRAM
import matplotlib.pyplot as plot
plot.hist(variables, bins=400)
plot.show()
|
Add a little test for rtnorm, to make sure this is not the problem. It is not, apparently.
# This should plot a histogram looking like a gaussian
# ... It does.
## CONFIGURATION (play with different values)
samples = int(1e6)
minimum = 0.
maximum = 15.
center = 7.
stddev = 5.
## VARIABLES FROM RANDOM TRUNCATED NORMAL DISTRIBUTION
from lib.rtnorm import rtnorm
variables = rtnorm(minimum, maximum, mu=center, sigma=stddev, size=samples)
## PLOT THEIR HISTOGRAM
import matplotlib.pyplot as plot
plot.hist(variables, bins=400)
plot.show()
|
<commit_before><commit_msg>Add a little test for rtnorm, to make sure this is not the problem. It is not, apparently.<commit_after>
# This should plot a histogram looking like a gaussian
# ... It does.
## CONFIGURATION (play with different values)
samples = int(1e6)
minimum = 0.
maximum = 15.
center = 7.
stddev = 5.
## VARIABLES FROM RANDOM TRUNCATED NORMAL DISTRIBUTION
from lib.rtnorm import rtnorm
variables = rtnorm(minimum, maximum, mu=center, sigma=stddev, size=samples)
## PLOT THEIR HISTOGRAM
import matplotlib.pyplot as plot
plot.hist(variables, bins=400)
plot.show()
|
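Editor's note: a histogram only eyeballs the result; a numeric check of the same draw is cheap. This complementary probe uses the rtnorm interface exactly as above (the bound and moment checks are the editor's addition, not part of the record):
import numpy as np
from lib.rtnorm import rtnorm

v = np.asarray(rtnorm(0., 15., mu=7., sigma=5., size=int(1e5)))
assert v.min() >= 0. and v.max() <= 15.   # truncation really holds
print(v.mean(), v.std())   # near, but not at, mu and sigma: the mean shifts
                           # slightly and the spread shrinks under truncation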