Dataset columns (all string dtype) and their value-length statistics:

| column | dtype | range |
|---|---|---|
| commit | string | lengths 40–40 |
| old_file | string | lengths 4–118 |
| new_file | string | lengths 4–118 |
| old_contents | string | lengths 0–2.94k |
| new_contents | string | lengths 1–4.43k |
| subject | string | lengths 15–444 |
| message | string | lengths 16–3.45k |
| lang | string | 1 class |
| license | string | 13 classes |
| repos | string | lengths 5–43.2k |
| prompt | string | lengths 17–4.58k |
| response | string | lengths 1–4.43k |
| prompt_tagged | string | lengths 58–4.62k |
| response_tagged | string | lengths 1–4.43k |
| text | string | lengths 132–7.29k |
| text_tagged | string | lengths 173–7.33k |
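Given this column layout, a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library is shown below. The dataset identifier `your-org/commit-dataset` is a placeholder (the real name is not given here), and the `train` split name is an assumption.

```python
# Minimal sketch: load a dataset with the schema above and look at one row.
# The identifier below is a placeholder -- substitute the actual dataset path.
from datasets import load_dataset

ds = load_dataset("your-org/commit-dataset", split="train")  # hypothetical name/split

row = ds[0]
print(row["subject"])             # short commit subject line
print(row["new_file"])            # path of the changed file
print(row["new_contents"][:200])  # first 200 characters of the post-commit file contents
```

The raw data rows follow, one cell per `|`-delimited block.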
32f69883aa5c5f2a86ddd7abf93f99628de45c0c
|
tests/test_search_grondwaterfilter.py
|
tests/test_search_grondwaterfilter.py
|
"""Module grouping tests for the search grondwaterfilter module."""
import pydov
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from pydov.types.grondwaterfilter import GrondwaterFilter
class TestGrondwaterFilterSearch(object):
"""Class grouping tests for the pydov.search.GrondwaterFilterSearch class.
"""
pass
|
Add template for filter specific search tests
|
Add template for filter specific search tests
|
Python
|
mit
|
DOV-Vlaanderen/pydov
|
Add template for filter specific search tests
|
"""Module grouping tests for the search grondwaterfilter module."""
import pydov
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from pydov.types.grondwaterfilter import GrondwaterFilter
class TestGrondwaterFilterSearch(object):
"""Class grouping tests for the pydov.search.GrondwaterFilterSearch class.
"""
pass
|
<commit_before><commit_msg>Add template for filter specific search tests<commit_after>
|
"""Module grouping tests for the search grondwaterfilter module."""
import pydov
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from pydov.types.grondwaterfilter import GrondwaterFilter
class TestGrondwaterFilterSearch(object):
"""Class grouping tests for the pydov.search.GrondwaterFilterSearch class.
"""
pass
|
Add template for filter specific search tests"""Module grouping tests for the search grondwaterfilter module."""
import pydov
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from pydov.types.grondwaterfilter import GrondwaterFilter
class TestGrondwaterFilterSearch(object):
"""Class grouping tests for the pydov.search.GrondwaterFilterSearch class.
"""
pass
|
<commit_before><commit_msg>Add template for filter specific search tests<commit_after>"""Module grouping tests for the search grondwaterfilter module."""
import pydov
from pydov.search.grondwaterfilter import GrondwaterFilterSearch
from pydov.types.grondwaterfilter import GrondwaterFilter
class TestGrondwaterFilterSearch(object):
"""Class grouping tests for the pydov.search.GrondwaterFilterSearch class.
"""
pass
|
|
3a4903bd09cd30397ed98734ba7a92ae9cc23697
|
location.py
|
location.py
|
#!/usr/local/python
import re
import subprocess
import sys
def main():
latlon = subprocess.check_output(["exiftool", "-GPSLatitude", "-GPSLongitude", sys.argv[1]])
latitude = get_latitude(latlon)
longitude = get_longitude(latlon)
print "[%f, %f]" % (longitude, latitude)
def get_latitude(latlon):
lat_regex_results = re.search('GPS Latitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lat_regex_results.group(1))
minutes = int(lat_regex_results.group(2))
seconds = float(lat_regex_results.group(3))
direction = lat_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def get_longitude(latlon):
lon_regex_results = re.search('GPS Longitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lon_regex_results.group(1))
minutes = int(lon_regex_results.group(2))
seconds = float(lon_regex_results.group(3))
direction = lon_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def convert_dms_to_decimal(degrees, minutes, seconds, direction):
value = (minutes * 60 + seconds) / 3600 + degrees
if direction == 'W' or direction == 'S':
value *= -1
return value
if __name__ == '__main__':
main()
|
Add python script to take GPS EXIF data and convert for GeoJSON use.
|
Add python script to take GPS EXIF data and convert for GeoJSON use.
|
Python
|
mit
|
Tanner/Technique-Newsstands-Map
|
Add python script to take GPS EXIF data and convert for GeoJSON use.
|
#!/usr/local/python
import re
import subprocess
import sys
def main():
latlon = subprocess.check_output(["exiftool", "-GPSLatitude", "-GPSLongitude", sys.argv[1]])
latitude = get_latitude(latlon)
longitude = get_longitude(latlon)
print "[%f, %f]" % (longitude, latitude)
def get_latitude(latlon):
lat_regex_results = re.search('GPS Latitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lat_regex_results.group(1))
minutes = int(lat_regex_results.group(2))
seconds = float(lat_regex_results.group(3))
direction = lat_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def get_longitude(latlon):
lon_regex_results = re.search('GPS Longitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lon_regex_results.group(1))
minutes = int(lon_regex_results.group(2))
seconds = float(lon_regex_results.group(3))
direction = lon_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def convert_dms_to_decimal(degrees, minutes, seconds, direction):
value = (minutes * 60 + seconds) / 3600 + degrees
if direction == 'W' or direction == 'S':
value *= -1
return value
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add python script to take GPS EXIF data and convert for GeoJSON use.<commit_after>
|
#!/usr/local/python
import re
import subprocess
import sys
def main():
latlon = subprocess.check_output(["exiftool", "-GPSLatitude", "-GPSLongitude", sys.argv[1]])
latitude = get_latitude(latlon)
longitude = get_longitude(latlon)
print "[%f, %f]" % (longitude, latitude)
def get_latitude(latlon):
lat_regex_results = re.search('GPS Latitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lat_regex_results.group(1))
minutes = int(lat_regex_results.group(2))
seconds = float(lat_regex_results.group(3))
direction = lat_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def get_longitude(latlon):
lon_regex_results = re.search('GPS Longitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lon_regex_results.group(1))
minutes = int(lon_regex_results.group(2))
seconds = float(lon_regex_results.group(3))
direction = lon_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def convert_dms_to_decimal(degrees, minutes, seconds, direction):
value = (minutes * 60 + seconds) / 3600 + degrees
if direction == 'W' or direction == 'S':
value *= -1
return value
if __name__ == '__main__':
main()
|
Add python script to take GPS EXIF data and convert for GeoJSON use.#!/usr/local/python
import re
import subprocess
import sys
def main():
latlon = subprocess.check_output(["exiftool", "-GPSLatitude", "-GPSLongitude", sys.argv[1]])
latitude = get_latitude(latlon)
longitude = get_longitude(latlon)
print "[%f, %f]" % (longitude, latitude)
def get_latitude(latlon):
lat_regex_results = re.search('GPS Latitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lat_regex_results.group(1))
minutes = int(lat_regex_results.group(2))
seconds = float(lat_regex_results.group(3))
direction = lat_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def get_longitude(latlon):
lon_regex_results = re.search('GPS Longitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lon_regex_results.group(1))
minutes = int(lon_regex_results.group(2))
seconds = float(lon_regex_results.group(3))
direction = lon_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def convert_dms_to_decimal(degrees, minutes, seconds, direction):
value = (minutes * 60 + seconds) / 3600 + degrees
if direction == 'W' or direction == 'S':
value *= -1
return value
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add python script to take GPS EXIF data and convert for GeoJSON use.<commit_after>#!/usr/local/python
import re
import subprocess
import sys
def main():
latlon = subprocess.check_output(["exiftool", "-GPSLatitude", "-GPSLongitude", sys.argv[1]])
latitude = get_latitude(latlon)
longitude = get_longitude(latlon)
print "[%f, %f]" % (longitude, latitude)
def get_latitude(latlon):
lat_regex_results = re.search('GPS Latitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lat_regex_results.group(1))
minutes = int(lat_regex_results.group(2))
seconds = float(lat_regex_results.group(3))
direction = lat_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def get_longitude(latlon):
lon_regex_results = re.search('GPS Longitude.*: (\d+) deg (\d+)\' ([\d.]+)\" ([NSEW])', latlon)
degrees = int(lon_regex_results.group(1))
minutes = int(lon_regex_results.group(2))
seconds = float(lon_regex_results.group(3))
direction = lon_regex_results.group(4)
return convert_dms_to_decimal(degrees, minutes, seconds, direction)
def convert_dms_to_decimal(degrees, minutes, seconds, direction):
value = (minutes * 60 + seconds) / 3600 + degrees
if direction == 'W' or direction == 'S':
value *= -1
return value
if __name__ == '__main__':
main()
|
|
f0f22d018d5dae1a362512da1f8e5290ac755795
|
examples/python/forum/router_spin.py
|
examples/python/forum/router_spin.py
|
import random
import json
import tornado.ioloop
import tornado.httpclient
http_client = tornado.httpclient.AsyncHTTPClient()
schema_json = json.load(open('example_forum_sharded.json'))
schema_json['name'] = 'example_forum'
# set the datastore id
schema_json['datastores'][0]['datastore']['_id'] = 54
@tornado.gen.coroutine
def ensure_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='POST',
body=json.dumps(schema_json),
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'add database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
@tornado.gen.coroutine
def remove_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='DELETE',
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'remove database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
funcs = [ensure_database, remove_database]
def spawn_callback():
ioloop.spawn_callback(random.choice(funcs))
def main():
for x in xrange(10):
spawn_callback()
if __name__ == '__main__':
ioloop = tornado.ioloop.IOLoop.current()
ioloop.spawn_callback(main)
ioloop.start()
|
Add database provision test script
|
Add database provision test script
This just constantly adds/removes (at random) the database to ensure it
doesn't cause problems
|
Python
|
mit
|
jacksontj/dataman
|
Add database provision test script
This just constantly adds/removes (at random) the database to ensure it
doesn't cause problems
|
import random
import json
import tornado.ioloop
import tornado.httpclient
http_client = tornado.httpclient.AsyncHTTPClient()
schema_json = json.load(open('example_forum_sharded.json'))
schema_json['name'] = 'example_forum'
# set the datastore id
schema_json['datastores'][0]['datastore']['_id'] = 54
@tornado.gen.coroutine
def ensure_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='POST',
body=json.dumps(schema_json),
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'add database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
@tornado.gen.coroutine
def remove_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='DELETE',
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'remove database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
funcs = [ensure_database, remove_database]
def spawn_callback():
ioloop.spawn_callback(random.choice(funcs))
def main():
for x in xrange(10):
spawn_callback()
if __name__ == '__main__':
ioloop = tornado.ioloop.IOLoop.current()
ioloop.spawn_callback(main)
ioloop.start()
|
<commit_before><commit_msg>Add database provision test script
This just constantly adds/removes (at random) the database to ensure it
doesn't cause problems<commit_after>
|
import random
import json
import tornado.ioloop
import tornado.httpclient
http_client = tornado.httpclient.AsyncHTTPClient()
schema_json = json.load(open('example_forum_sharded.json'))
schema_json['name'] = 'example_forum'
# set the datastore id
schema_json['datastores'][0]['datastore']['_id'] = 54
@tornado.gen.coroutine
def ensure_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='POST',
body=json.dumps(schema_json),
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'add database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
@tornado.gen.coroutine
def remove_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='DELETE',
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'remove database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
funcs = [ensure_database, remove_database]
def spawn_callback():
ioloop.spawn_callback(random.choice(funcs))
def main():
for x in xrange(10):
spawn_callback()
if __name__ == '__main__':
ioloop = tornado.ioloop.IOLoop.current()
ioloop.spawn_callback(main)
ioloop.start()
|
Add database provision test script
This just constantly adds/removes (at random) the database to ensure it
doesn't cause problemsimport random
import json
import tornado.ioloop
import tornado.httpclient
http_client = tornado.httpclient.AsyncHTTPClient()
schema_json = json.load(open('example_forum_sharded.json'))
schema_json['name'] = 'example_forum'
# set the datastore id
schema_json['datastores'][0]['datastore']['_id'] = 54
@tornado.gen.coroutine
def ensure_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='POST',
body=json.dumps(schema_json),
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'add database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
@tornado.gen.coroutine
def remove_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='DELETE',
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'remove database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
funcs = [ensure_database, remove_database]
def spawn_callback():
ioloop.spawn_callback(random.choice(funcs))
def main():
for x in xrange(10):
spawn_callback()
if __name__ == '__main__':
ioloop = tornado.ioloop.IOLoop.current()
ioloop.spawn_callback(main)
ioloop.start()
|
<commit_before><commit_msg>Add database provision test script
This just constantly adds/removes (at random) the database to ensure it
doesn't cause problems<commit_after>import random
import json
import tornado.ioloop
import tornado.httpclient
http_client = tornado.httpclient.AsyncHTTPClient()
schema_json = json.load(open('example_forum_sharded.json'))
schema_json['name'] = 'example_forum'
# set the datastore id
schema_json['datastores'][0]['datastore']['_id'] = 54
@tornado.gen.coroutine
def ensure_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='POST',
body=json.dumps(schema_json),
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'add database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
@tornado.gen.coroutine
def remove_database():
request = tornado.httpclient.HTTPRequest(
'http://127.0.0.1:8080/v1/database/example_forum',
method='DELETE',
connect_timeout=9999999,
request_timeout=9999999,
)
try:
ret = yield http_client.fetch(request)
print 'remove database (', ret.request.method, ret.request.url, ')'
print ret.request_time
finally:
spawn_callback()
funcs = [ensure_database, remove_database]
def spawn_callback():
ioloop.spawn_callback(random.choice(funcs))
def main():
for x in xrange(10):
spawn_callback()
if __name__ == '__main__':
ioloop = tornado.ioloop.IOLoop.current()
ioloop.spawn_callback(main)
ioloop.start()
|
|
c95d472bf5ef9746521cf54c83bb61333c3eafcd
|
tests/test_eb/test_eb.py
|
tests/test_eb/test_eb.py
|
import boto3
from moto import mock_eb
@mock_eb
def test_application():
# Create Elastic Beanstalk Application
eb_client = boto3.client('elasticbeanstalk', region_name='us-east-1')
eb_client.create_application(
ApplicationName="myapp",
)
eb_apps = eb_client.describe_applications()
eb_apps['Applications'][0]['ApplicationName'].should.equal("myapp")
|
Add (failing) test for ElasticBeanstalk
|
Add (failing) test for ElasticBeanstalk
|
Python
|
apache-2.0
|
william-richard/moto,spulec/moto,william-richard/moto,spulec/moto,william-richard/moto,spulec/moto,spulec/moto,william-richard/moto,william-richard/moto,spulec/moto,william-richard/moto,spulec/moto
|
Add (failing) test for ElasticBeanstalk
|
import boto3
from moto import mock_eb
@mock_eb
def test_application():
# Create Elastic Beanstalk Application
eb_client = boto3.client('elasticbeanstalk', region_name='us-east-1')
eb_client.create_application(
ApplicationName="myapp",
)
eb_apps = eb_client.describe_applications()
eb_apps['Applications'][0]['ApplicationName'].should.equal("myapp")
|
<commit_before><commit_msg>Add (failing) test for ElasticBeanstalk<commit_after>
|
import boto3
from moto import mock_eb
@mock_eb
def test_application():
# Create Elastic Beanstalk Application
eb_client = boto3.client('elasticbeanstalk', region_name='us-east-1')
eb_client.create_application(
ApplicationName="myapp",
)
eb_apps = eb_client.describe_applications()
eb_apps['Applications'][0]['ApplicationName'].should.equal("myapp")
|
Add (failing) test for ElasticBeanstalkimport boto3
from moto import mock_eb
@mock_eb
def test_application():
# Create Elastic Beanstalk Application
eb_client = boto3.client('elasticbeanstalk', region_name='us-east-1')
eb_client.create_application(
ApplicationName="myapp",
)
eb_apps = eb_client.describe_applications()
eb_apps['Applications'][0]['ApplicationName'].should.equal("myapp")
|
<commit_before><commit_msg>Add (failing) test for ElasticBeanstalk<commit_after>import boto3
from moto import mock_eb
@mock_eb
def test_application():
# Create Elastic Beanstalk Application
eb_client = boto3.client('elasticbeanstalk', region_name='us-east-1')
eb_client.create_application(
ApplicationName="myapp",
)
eb_apps = eb_client.describe_applications()
eb_apps['Applications'][0]['ApplicationName'].should.equal("myapp")
|
|
45db81042b58c72da04193cdcdc6d400b5370c18
|
convert_quotes_json_to_sqlite.py
|
convert_quotes_json_to_sqlite.py
|
#!/usr/bin/env python3
# Convert legacy JSON quotes format to SQLite3 format
# It non-destructively reads in "quotes.json"
# and writes out a new file "quotes.sqlite3".
# It will prompt if "quotes.sqlite3" exists
# and will ask if you want to merge the quotes.
# "quotes.sqlite3" modifications will not be committed until the process is finished,
# so don't open it in the meantime and expect new data.
import json
from pathlib import Path
import sqlite3
import sys
print("Converting quotes.json to quotes.sqlite3")
with open("quotes.json") as f:
quotes = json.load(f)
if Path("quotes.sqlite3").exists():
print("'quotes.sqlite3' exists. It could be from a previous run, and may have quotes in it.")
print("You may want to check the existing file for data, or at least back it up.")
print()
print("Do you want to continue and attempt to merge the existing and new quotes?")
answer = input('If you wish to merge them, type "yes" without quotes: ')
if answer != "yes":
print("Aborting conversion. Move, delete, or prepare to merge 'quotes.sqlite3' and rerun this script.")
sys.exit("'quotes.sqlite3' exists")
# Should create a new file if it doesn't exist
quotes_db_connection = sqlite3.connect("quotes.sqlite3")
quotes_db_cursor = quotes_db_connection.cursor()
quotes_db_cursor.execute("CREATE TABLE IF NOT EXISTS quotes "
"(author text NOT NULL, quote text NOT NULL)")
for author in quotes:
for quote in quotes[author]:
quotes_db_cursor.execute("INSERT INTO quotes VALUES "
"(?,?)", (author, quote))
quotes_db_connection.commit()
quotes_db_connection.close()
print("quotes.sqlite3 written. Should be good to go.")
|
Add conversion script for quotes database
|
Add conversion script for quotes database
Figured I'd keep this around for a while just in case. Maybe remove it
in some future cleanup and declare the old format dead.
|
Python
|
mit
|
TAOTheCrab/CrabBot
|
Add conversion script for quotes database
Figured I'd keep this around for a while just in case. Maybe remove it
in some future cleanup and declare the old format dead.
|
#!/usr/bin/env python3
# Convert legacy JSON quotes format to SQLite3 format
# It non-destructively reads in "quotes.json"
# and writes out a new file "quotes.sqlite3".
# It will prompt if "quotes.sqlite3" exists
# and will ask if you want to merge the quotes.
# "quotes.sqlite3" modifications will not be committed until the process is finished,
# so don't open it in the meantime and expect new data.
import json
from pathlib import Path
import sqlite3
import sys
print("Converting quotes.json to quotes.sqlite3")
with open("quotes.json") as f:
quotes = json.load(f)
if Path("quotes.sqlite3").exists():
print("'quotes.sqlite3' exists. It could be from a previous run, and may have quotes in it.")
print("You may want to check the existing file for data, or at least back it up.")
print()
print("Do you want to continue and attempt to merge the existing and new quotes?")
answer = input('If you wish to merge them, type "yes" without quotes: ')
if answer != "yes":
print("Aborting conversion. Move, delete, or prepare to merge 'quotes.sqlite3' and rerun this script.")
sys.exit("'quotes.sqlite3' exists")
# Should create a new file if it doesn't exist
quotes_db_connection = sqlite3.connect("quotes.sqlite3")
quotes_db_cursor = quotes_db_connection.cursor()
quotes_db_cursor.execute("CREATE TABLE IF NOT EXISTS quotes "
"(author text NOT NULL, quote text NOT NULL)")
for author in quotes:
for quote in quotes[author]:
quotes_db_cursor.execute("INSERT INTO quotes VALUES "
"(?,?)", (author, quote))
quotes_db_connection.commit()
quotes_db_connection.close()
print("quotes.sqlite3 written. Should be good to go.")
|
<commit_before><commit_msg>Add conversion script for quotes database
Figured I'd keep this around for a while just in case. Maybe remove it
in some future cleanup and declare the old format dead.<commit_after>
|
#!/usr/bin/env python3
# Convert legacy JSON quotes format to SQLite3 format
# It non-destructively reads in "quotes.json"
# and writes out a new file "quotes.sqlite3".
# It will prompt if "quotes.sqlite3" exists
# and will ask if you want to merge the quotes.
# "quotes.sqlite3" modifications will not be committed until the process is finished,
# so don't open it in the meantime and expect new data.
import json
from pathlib import Path
import sqlite3
import sys
print("Converting quotes.json to quotes.sqlite3")
with open("quotes.json") as f:
quotes = json.load(f)
if Path("quotes.sqlite3").exists():
print("'quotes.sqlite3' exists. It could be from a previous run, and may have quotes in it.")
print("You may want to check the existing file for data, or at least back it up.")
print()
print("Do you want to continue and attempt to merge the existing and new quotes?")
answer = input('If you wish to merge them, type "yes" without quotes: ')
if answer != "yes":
print("Aborting conversion. Move, delete, or prepare to merge 'quotes.sqlite3' and rerun this script.")
sys.exit("'quotes.sqlite3' exists")
# Should create a new file if it doesn't exist
quotes_db_connection = sqlite3.connect("quotes.sqlite3")
quotes_db_cursor = quotes_db_connection.cursor()
quotes_db_cursor.execute("CREATE TABLE IF NOT EXISTS quotes "
"(author text NOT NULL, quote text NOT NULL)")
for author in quotes:
for quote in quotes[author]:
quotes_db_cursor.execute("INSERT INTO quotes VALUES "
"(?,?)", (author, quote))
quotes_db_connection.commit()
quotes_db_connection.close()
print("quotes.sqlite3 written. Should be good to go.")
|
Add conversion script for quotes database
Figured I'd keep this around for a while just in case. Maybe remove it
in some future cleanup and declare the old format dead.#!/usr/bin/env python3
# Convert legacy JSON quotes format to SQLite3 format
# It non-destructively reads in "quotes.json"
# and writes out a new file "quotes.sqlite3".
# It will prompt if "quotes.sqlite3" exists
# and will ask if you want to merge the quotes.
# "quotes.sqlite3" modifications will not be committed until the process is finished,
# so don't open it in the meantime and expect new data.
import json
from pathlib import Path
import sqlite3
import sys
print("Converting quotes.json to quotes.sqlite3")
with open("quotes.json") as f:
quotes = json.load(f)
if Path("quotes.sqlite3").exists():
print("'quotes.sqlite3' exists. It could be from a previous run, and may have quotes in it.")
print("You may want to check the existing file for data, or at least back it up.")
print()
print("Do you want to continue and attempt to merge the existing and new quotes?")
answer = input('If you wish to merge them, type "yes" without quotes: ')
if answer != "yes":
print("Aborting conversion. Move, delete, or prepare to merge 'quotes.sqlite3' and rerun this script.")
sys.exit("'quotes.sqlite3' exists")
# Should create a new file if it doesn't exist
quotes_db_connection = sqlite3.connect("quotes.sqlite3")
quotes_db_cursor = quotes_db_connection.cursor()
quotes_db_cursor.execute("CREATE TABLE IF NOT EXISTS quotes "
"(author text NOT NULL, quote text NOT NULL)")
for author in quotes:
for quote in quotes[author]:
quotes_db_cursor.execute("INSERT INTO quotes VALUES "
"(?,?)", (author, quote))
quotes_db_connection.commit()
quotes_db_connection.close()
print("quotes.sqlite3 written. Should be good to go.")
|
<commit_before><commit_msg>Add conversion script for quotes database
Figured I'd keep this around for a while just in case. Maybe remove it
in some future cleanup and declare the old format dead.<commit_after>#!/usr/bin/env python3
# Convert legacy JSON quotes format to SQLite3 format
# It non-destructively reads in "quotes.json"
# and writes out a new file "quotes.sqlite3".
# It will prompt if "quotes.sqlite3" exists
# and will ask if you want to merge the quotes.
# "quotes.sqlite3" modifications will not be committed until the process is finished,
# so don't open it in the meantime and expect new data.
import json
from pathlib import Path
import sqlite3
import sys
print("Converting quotes.json to quotes.sqlite3")
with open("quotes.json") as f:
quotes = json.load(f)
if Path("quotes.sqlite3").exists():
print("'quotes.sqlite3' exists. It could be from a previous run, and may have quotes in it.")
print("You may want to check the existing file for data, or at least back it up.")
print()
print("Do you want to continue and attempt to merge the existing and new quotes?")
answer = input('If you wish to merge them, type "yes" without quotes: ')
if answer != "yes":
print("Aborting conversion. Move, delete, or prepare to merge 'quotes.sqlite3' and rerun this script.")
sys.exit("'quotes.sqlite3' exists")
# Should create a new file if it doesn't exist
quotes_db_connection = sqlite3.connect("quotes.sqlite3")
quotes_db_cursor = quotes_db_connection.cursor()
quotes_db_cursor.execute("CREATE TABLE IF NOT EXISTS quotes "
"(author text NOT NULL, quote text NOT NULL)")
for author in quotes:
for quote in quotes[author]:
quotes_db_cursor.execute("INSERT INTO quotes VALUES "
"(?,?)", (author, quote))
quotes_db_connection.commit()
quotes_db_connection.close()
print("quotes.sqlite3 written. Should be good to go.")
|
|
e0d1c88b8bc37160b93f8a99ee89702a153e803f
|
testtest/test_allgood.py
|
testtest/test_allgood.py
|
# -*- encoding: utf-8 -*-
"""
"""
import unittest
import time
DELAY = 0.25
class Case1(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case2(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case3(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
|
Add more example in testtest/
|
Add more example in testtest/
|
Python
|
bsd-2-clause
|
nicolasdespres/hunittest
|
Add more example in testtest/
|
# -*- encoding: utf-8 -*-
"""
"""
import unittest
import time
DELAY = 0.25
class Case1(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case2(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case3(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
|
<commit_before><commit_msg>Add more example in testtest/<commit_after>
|
# -*- encoding: utf-8 -*-
"""
"""
import unittest
import time
DELAY = 0.25
class Case1(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case2(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case3(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
|
Add more example in testtest/# -*- encoding: utf-8 -*-
"""
"""
import unittest
import time
DELAY = 0.25
class Case1(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case2(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case3(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
|
<commit_before><commit_msg>Add more example in testtest/<commit_after># -*- encoding: utf-8 -*-
"""
"""
import unittest
import time
DELAY = 0.25
class Case1(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case2(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
class Case3(unittest.TestCase):
def test_success(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success1(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success2(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success3(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success4(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
def test_success5(self):
time.sleep(DELAY)
self.assertEqual(1, 1)
|
|
a8129cfb7544c5b2f021f8258462ec3b7933b38f
|
resources/middlewares/body_checker.py
|
resources/middlewares/body_checker.py
|
import json
import sys
import falcon
def body_checker(required_params=(), documentation_link=None):
def hook(req, resp, resource, params):
if req.content_length in (None, 0, ):
raise falcon.HTTPBadRequest('Bad request',
'В запросе деолжны быть параметры, дружок.',
href=documentation_link)
#todo: https://github.com/falconry/falcon/pull/748
try:
body = json.loads(req.stream.read(sys.maxsize).decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPBadRequest('Bad request',
'Ты прислал плохой json, няша, попробуй прислать другой.',
href=documentation_link)
params = {}
description = "Ты забыл параметр '%s', няша."
for key in required_params:
if key not in body:
raise falcon.HTTPBadRequest('Bad request', description % key, href=documentation_link)
params[key] = body[key]
req.context['parsed_body'] = params
return hook
|
Add util for parsing required params from body.
|
Add util for parsing required params from body.
|
Python
|
cc0-1.0
|
sketchturnerr/WaifuSim-backend,sketchturnerr/WaifuSim-backend
|
Add util for parsing required params from body.
|
import json
import sys
import falcon
def body_checker(required_params=(), documentation_link=None):
def hook(req, resp, resource, params):
if req.content_length in (None, 0, ):
raise falcon.HTTPBadRequest('Bad request',
'В запросе деолжны быть параметры, дружок.',
href=documentation_link)
#todo: https://github.com/falconry/falcon/pull/748
try:
body = json.loads(req.stream.read(sys.maxsize).decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPBadRequest('Bad request',
'Ты прислал плохой json, няша, попробуй прислать другой.',
href=documentation_link)
params = {}
description = "Ты забыл параметр '%s', няша."
for key in required_params:
if key not in body:
raise falcon.HTTPBadRequest('Bad request', description % key, href=documentation_link)
params[key] = body[key]
req.context['parsed_body'] = params
return hook
|
<commit_before><commit_msg>Add util for parsing required params from body.<commit_after>
|
import json
import sys
import falcon
def body_checker(required_params=(), documentation_link=None):
def hook(req, resp, resource, params):
if req.content_length in (None, 0, ):
raise falcon.HTTPBadRequest('Bad request',
'В запросе деолжны быть параметры, дружок.',
href=documentation_link)
#todo: https://github.com/falconry/falcon/pull/748
try:
body = json.loads(req.stream.read(sys.maxsize).decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPBadRequest('Bad request',
'Ты прислал плохой json, няша, попробуй прислать другой.',
href=documentation_link)
params = {}
description = "Ты забыл параметр '%s', няша."
for key in required_params:
if key not in body:
raise falcon.HTTPBadRequest('Bad request', description % key, href=documentation_link)
params[key] = body[key]
req.context['parsed_body'] = params
return hook
|
Add util for parsing required params from body.import json
import sys
import falcon
def body_checker(required_params=(), documentation_link=None):
def hook(req, resp, resource, params):
if req.content_length in (None, 0, ):
raise falcon.HTTPBadRequest('Bad request',
'В запросе деолжны быть параметры, дружок.',
href=documentation_link)
#todo: https://github.com/falconry/falcon/pull/748
try:
body = json.loads(req.stream.read(sys.maxsize).decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPBadRequest('Bad request',
'Ты прислал плохой json, няша, попробуй прислать другой.',
href=documentation_link)
params = {}
description = "Ты забыл параметр '%s', няша."
for key in required_params:
if key not in body:
raise falcon.HTTPBadRequest('Bad request', description % key, href=documentation_link)
params[key] = body[key]
req.context['parsed_body'] = params
return hook
|
<commit_before><commit_msg>Add util for parsing required params from body.<commit_after>import json
import sys
import falcon
def body_checker(required_params=(), documentation_link=None):
def hook(req, resp, resource, params):
if req.content_length in (None, 0, ):
raise falcon.HTTPBadRequest('Bad request',
'В запросе деолжны быть параметры, дружок.',
href=documentation_link)
#todo: https://github.com/falconry/falcon/pull/748
try:
body = json.loads(req.stream.read(sys.maxsize).decode('utf-8'))
except (ValueError, UnicodeDecodeError):
raise falcon.HTTPBadRequest('Bad request',
'Ты прислал плохой json, няша, попробуй прислать другой.',
href=documentation_link)
params = {}
description = "Ты забыл параметр '%s', няша."
for key in required_params:
if key not in body:
raise falcon.HTTPBadRequest('Bad request', description % key, href=documentation_link)
params[key] = body[key]
req.context['parsed_body'] = params
return hook
|
|
9f0e850f30e9b9c11526054de00a460f0da1b359
|
website/tests/helpers/test_filters.py
|
website/tests/helpers/test_filters.py
|
import pytest
import helpers.filters as filters
class Model:
def __init__(self, value):
self.value = value
def test_select_filter():
test_objects = [
Model('a'),
Model('a'),
Model('b'),
Model('b'),
Model('c'),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='select',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 2
test_values = (
('b', 2),
('c', 1),
('d', 0)
)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
def test_multiselect_filter():
test_objects = [
Model(['a']),
Model(['a']),
Model(['b']),
Model(['b']),
Model(['c']),
Model(['a', 'c']),
Model(['b', 'a']),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='multiselect',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
multiple='any'
)
test_values = (
('b', 3),
('c', 2),
('d', 0),
(['a', 'b', 'c'], 7),
(['a', 'c'], 5),
(['b', 'c'], 5)
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 4
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
|
Add basic tests for filters
|
Add basic tests for filters
|
Python
|
lgpl-2.1
|
reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations
|
Add basic tests for filters
|
import pytest
import helpers.filters as filters
class Model:
def __init__(self, value):
self.value = value
def test_select_filter():
test_objects = [
Model('a'),
Model('a'),
Model('b'),
Model('b'),
Model('c'),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='select',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 2
test_values = (
('b', 2),
('c', 1),
('d', 0)
)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
def test_multiselect_filter():
test_objects = [
Model(['a']),
Model(['a']),
Model(['b']),
Model(['b']),
Model(['c']),
Model(['a', 'c']),
Model(['b', 'a']),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='multiselect',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
multiple='any'
)
test_values = (
('b', 3),
('c', 2),
('d', 0),
(['a', 'b', 'c'], 7),
(['a', 'c'], 5),
(['b', 'c'], 5)
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 4
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
|
<commit_before><commit_msg>Add basic tests for filters<commit_after>
|
import pytest
import helpers.filters as filters
class Model:
def __init__(self, value):
self.value = value
def test_select_filter():
test_objects = [
Model('a'),
Model('a'),
Model('b'),
Model('b'),
Model('c'),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='select',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 2
test_values = (
('b', 2),
('c', 1),
('d', 0)
)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
def test_multiselect_filter():
test_objects = [
Model(['a']),
Model(['a']),
Model(['b']),
Model(['b']),
Model(['c']),
Model(['a', 'c']),
Model(['b', 'a']),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='multiselect',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
multiple='any'
)
test_values = (
('b', 3),
('c', 2),
('d', 0),
(['a', 'b', 'c'], 7),
(['a', 'c'], 5),
(['b', 'c'], 5)
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 4
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
|
Add basic tests for filtersimport pytest
import helpers.filters as filters
class Model:
def __init__(self, value):
self.value = value
def test_select_filter():
test_objects = [
Model('a'),
Model('a'),
Model('b'),
Model('b'),
Model('c'),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='select',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 2
test_values = (
('b', 2),
('c', 1),
('d', 0)
)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
def test_multiselect_filter():
test_objects = [
Model(['a']),
Model(['a']),
Model(['b']),
Model(['b']),
Model(['c']),
Model(['a', 'c']),
Model(['b', 'a']),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='multiselect',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
multiple='any'
)
test_values = (
('b', 3),
('c', 2),
('d', 0),
(['a', 'b', 'c'], 7),
(['a', 'c'], 5),
(['b', 'c'], 5)
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 4
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
|
<commit_before><commit_msg>Add basic tests for filters<commit_after>import pytest
import helpers.filters as filters
class Model:
def __init__(self, value):
self.value = value
def test_select_filter():
test_objects = [
Model('a'),
Model('a'),
Model('b'),
Model('b'),
Model('c'),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='select',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 2
test_values = (
('b', 2),
('c', 1),
('d', 0)
)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
def test_multiselect_filter():
test_objects = [
Model(['a']),
Model(['a']),
Model(['b']),
Model(['b']),
Model(['c']),
Model(['a', 'c']),
Model(['b', 'a']),
]
tested_filter = filters.Filter(
'Source', Model, 'value', widget='multiselect',
comparators=['in'], default_comparator='in',
choices=['a', 'b', 'c', 'd'],
default='a', nullable=False,
multiple='any'
)
test_values = (
('b', 3),
('c', 2),
('d', 0),
(['a', 'b', 'c'], 7),
(['a', 'c'], 5),
(['b', 'c'], 5)
)
# test the default value
assert len(tested_filter.apply(test_objects)) == 4
with pytest.raises(Exception):
tested_filter.update('e')
tested_filter.apply(test_objects)
for value, length in test_values:
tested_filter.update(value)
assert len(tested_filter.apply(test_objects)) == length
|
|
7576d66b34ea01b336daa7750a1b45a077e15bb2
|
faker/providers/internet/uk_UA/__init__.py
|
faker/providers/internet/uk_UA/__init__.py
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = [
'email.ua', 'gmail.com', 'gov.ua', 'i.ua', 'meta.ua', 'ukr.net'
]
tlds = ['com', 'info', 'net', 'org', 'ua', 'укр']
|
Add a Ukrainian Internet provider
|
Add a Ukrainian Internet provider
|
Python
|
mit
|
joke2k/faker,trtd/faker,danhuss/faker,joke2k/faker
|
Add a Ukrainian Internet provider
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = [
'email.ua', 'gmail.com', 'gov.ua', 'i.ua', 'meta.ua', 'ukr.net'
]
tlds = ['com', 'info', 'net', 'org', 'ua', 'укр']
|
<commit_before><commit_msg>Add a Ukrainian Internet provider<commit_after>
|
# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = [
'email.ua', 'gmail.com', 'gov.ua', 'i.ua', 'meta.ua', 'ukr.net'
]
tlds = ['com', 'info', 'net', 'org', 'ua', 'укр']
|
Add a Ukrainian Internet provider# coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = [
'email.ua', 'gmail.com', 'gov.ua', 'i.ua', 'meta.ua', 'ukr.net'
]
tlds = ['com', 'info', 'net', 'org', 'ua', 'укр']
|
<commit_before><commit_msg>Add a Ukrainian Internet provider<commit_after># coding=utf-8
from __future__ import unicode_literals
from .. import Provider as InternetProvider
class Provider(InternetProvider):
free_email_domains = [
'email.ua', 'gmail.com', 'gov.ua', 'i.ua', 'meta.ua', 'ukr.net'
]
tlds = ['com', 'info', 'net', 'org', 'ua', 'укр']
|
|
5d665d1511dbef99bbfca6ce735c0db927b0a8b4
|
randomize-music.py
|
randomize-music.py
|
#!/usr/bin/env python
import os
import subprocess
import sys
import uuid
if __name__ == '__main__':
dir_name = sys.argv[1]
for file_name in os.listdir(dir_name):
rand_name = uuid.uuid4().hex
src = os.path.join(dir_name, file_name)
subprocess.check_call(['eyeD3', '--artist', rand_name, '--album', rand_name, src])
os.rename(src, os.path.join(dir_name, '{} {}'.format(rand_name, file_name)))
|
Add script that randomly shuffles a list of songs
|
Add script that randomly shuffles a list of songs
|
Python
|
mit
|
cataliniacob/misc,cataliniacob/misc
|
Add script that randomly shuffles a list of songs
|
#!/usr/bin/env python
import os
import subprocess
import sys
import uuid
if __name__ == '__main__':
dir_name = sys.argv[1]
for file_name in os.listdir(dir_name):
rand_name = uuid.uuid4().hex
src = os.path.join(dir_name, file_name)
subprocess.check_call(['eyeD3', '--artist', rand_name, '--album', rand_name, src])
os.rename(src, os.path.join(dir_name, '{} {}'.format(rand_name, file_name)))
|
<commit_before><commit_msg>Add script that randomly shuffles a list of songs<commit_after>
|
#!/usr/bin/env python
import os
import subprocess
import sys
import uuid
if __name__ == '__main__':
dir_name = sys.argv[1]
for file_name in os.listdir(dir_name):
rand_name = uuid.uuid4().hex
src = os.path.join(dir_name, file_name)
subprocess.check_call(['eyeD3', '--artist', rand_name, '--album', rand_name, src])
os.rename(src, os.path.join(dir_name, '{} {}'.format(rand_name, file_name)))
|
Add script that randomly shuffles a list of songs#!/usr/bin/env python
import os
import subprocess
import sys
import uuid
if __name__ == '__main__':
dir_name = sys.argv[1]
for file_name in os.listdir(dir_name):
rand_name = uuid.uuid4().hex
src = os.path.join(dir_name, file_name)
subprocess.check_call(['eyeD3', '--artist', rand_name, '--album', rand_name, src])
os.rename(src, os.path.join(dir_name, '{} {}'.format(rand_name, file_name)))
|
<commit_before><commit_msg>Add script that randomly shuffles a list of songs<commit_after>#!/usr/bin/env python
import os
import subprocess
import sys
import uuid
if __name__ == '__main__':
dir_name = sys.argv[1]
for file_name in os.listdir(dir_name):
rand_name = uuid.uuid4().hex
src = os.path.join(dir_name, file_name)
subprocess.check_call(['eyeD3', '--artist', rand_name, '--album', rand_name, src])
os.rename(src, os.path.join(dir_name, '{} {}'.format(rand_name, file_name)))
|
|
e3e8515de67dd2971385b98fa9276dc54c1e7f4b
|
perftest.py
|
perftest.py
|
"""
Simple peformance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool.
|
Add a very simple performance testing tool.
|
Python
|
bsd-3-clause
|
zielmicha/couchdb-python
|
Add a very simple performance testing tool.
|
"""
Simple peformance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>
|
"""
Simple peformance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
Add a very simple performance testing tool."""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a very simple performance testing tool.<commit_after>"""
Simple performance tests.
"""
import sys
import time
import couchdb
def main():
print 'sys.version : %r' % (sys.version,)
print 'sys.platform : %r' % (sys.platform,)
tests = [create_doc, create_bulk_docs]
if len(sys.argv) > 1:
tests = [test for test in tests if test.__name__ in sys.argv[1:]]
server = couchdb.Server()
for test in tests:
_run(server, test)
def _run(server, func):
"""Run a test in a clean db and log its execution time."""
sys.stdout.write("* [%s] %s ... " % (func.__name__, func.__doc__.strip()))
sys.stdout.flush()
db_name = 'couchdb-python/perftest'
db = server.create(db_name)
try:
start = time.time()
func(db)
stop = time.time()
sys.stdout.write("%0.2fs\n" % (stop-start,))
sys.stdout.flush()
except Exception, e:
sys.stdout.write("FAILED - %r\n" % (unicode(e),))
sys.stdout.flush()
finally:
server.delete(db_name)
def create_doc(db):
"""Create lots of docs, one at a time"""
for i in range(1000):
db.save({'_id': unicode(i)})
def create_bulk_docs(db):
"""Create lots of docs, lots at a time"""
batch_size = 100
num_batches = 1000
for i in range(num_batches):
db.update([{'_id': unicode((i * batch_size) + j)} for j in range(batch_size)])
if __name__ == '__main__':
main()
|
|
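The harness above times each scenario against a freshly created database. A Python 3 sketch of the same measure-in-a-clean-environment pattern, with time.perf_counter() and an in-memory list standing in for the CouchDB server so it runs on its own:
import time

def _run(func):
    db = []                      # stand-in for a freshly created database
    start = time.perf_counter()
    func(db)
    stop = time.perf_counter()
    print("* [%s] %s ... %0.2fs" % (func.__name__, func.__doc__.strip(), stop - start))

def create_one_at_a_time(db):
    """Insert documents individually"""
    for i in range(100000):
        db.append({"_id": str(i)})

def create_in_batches(db):
    """Insert documents in batches of 100"""
    for i in range(1000):
        db.extend({"_id": str(i * 100 + j)} for j in range(100))

if __name__ == "__main__":
    for test in (create_one_at_a_time, create_in_batches):
        _run(test)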
5b94b839d7aa4e27581874a92d01618ab2c24a79
|
manoseimas/mps_v2/migrations/0009_auto_20150606_1619.py
|
manoseimas/mps_v2/migrations/0009_auto_20150606_1619.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mps_v2', '0008_groupranking_mpranking'),
]
operations = [
migrations.AlterField(
model_name='groupmembership',
name='member',
field=models.ForeignKey(related_name='memberships', to='mps_v2.ParliamentMember'),
),
]
|
Add a redundant migration that changes related_name for GroupMembership.
|
Add a redundant migration that changes related_name for GroupMembership.
|
Python
|
agpl-3.0
|
ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt,ManoSeimas/manoseimas.lt
|
Add a redundant migration that changes related_name for GroupMembership.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mps_v2', '0008_groupranking_mpranking'),
]
operations = [
migrations.AlterField(
model_name='groupmembership',
name='member',
field=models.ForeignKey(related_name='memberships', to='mps_v2.ParliamentMember'),
),
]
|
<commit_before><commit_msg>Add a redundant migration that changes related_name for GroupMembership.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mps_v2', '0008_groupranking_mpranking'),
]
operations = [
migrations.AlterField(
model_name='groupmembership',
name='member',
field=models.ForeignKey(related_name='memberships', to='mps_v2.ParliamentMember'),
),
]
|
Add a redundant migration that changes related_name for GroupMembership.# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mps_v2', '0008_groupranking_mpranking'),
]
operations = [
migrations.AlterField(
model_name='groupmembership',
name='member',
field=models.ForeignKey(related_name='memberships', to='mps_v2.ParliamentMember'),
),
]
|
<commit_before><commit_msg>Add a redundant migration that changes related_name for GroupMembership.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mps_v2', '0008_groupranking_mpranking'),
]
operations = [
migrations.AlterField(
model_name='groupmembership',
name='member',
field=models.ForeignKey(related_name='memberships', to='mps_v2.ParliamentMember'),
),
]
|
|
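The only substantive change in that migration is the related_name on the foreign key. A sketch of what it enables, assuming a configured Django project; every field other than related_name is illustrative:
from django.db import models

class ParliamentMember(models.Model):
    name = models.CharField(max_length=255)

class GroupMembership(models.Model):
    member = models.ForeignKey(
        ParliamentMember,
        related_name='memberships',        # matches the migration above
        on_delete=models.CASCADE,          # required on modern Django versions
    )

# With related_name set, the reverse accessor reads naturally:
#   member.memberships.all()
# instead of the default:
#   member.groupmembership_set.all()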
10169d66c91360f562367a00002bac23ec036719
|
src/sidecar/connection.py
|
src/sidecar/connection.py
|
# -*- coding: utf-8 -*-
import json
import logging
import os
from sockjs.tornado import SockJSRouter, SockJSConnection
from tornado.web import RequestHandler, StaticFileHandler
from tornado.web import Application
from tornado.ioloop import IOLoop
from sidecar.utils import log
class WebHandler(RequestHandler):
def initialize(self, page, **kwargs):
self.page = page
self.kwargs = kwargs
def get(self):
self.render(self.page, **self.kwargs)
class TextHandler(RequestHandler):
def initialize(self, content):
self.content = content
def get(self):
self.finish(self.content)
class FileHandler(StaticFileHandler):
def initialize(self, path):
if path is None:
self.absolute_path = None
else:
path = os.path.join(os.path.dirname(__file__), path)
self.absolute_path = os.path.abspath(os.path.expanduser(path))
self.root, self.filename = os.path.split(self.absolute_path)
def get(self, path=None, include_body=True):
if self.absolute_path is not None:
return super(FileHandler, self).get(self.filename, include_body)
self.finish('')
class Connection(SockJSConnection):
def send_json(self, kind, data=None):
log.debug()
self.send(json.dumps({'kind': kind, 'data': data or {}}))
def on_open(self, info):
log.debug()
self.send_json('ready')
def on_message(self, msg):
msg = json.loads(msg)
log.debug(msg)
def on_close(self):
log.debug()
@classmethod
def tornado_app(cls, ui, title, debug=False):
root = os.path.dirname(__file__)
router = SockJSRouter(cls, '/api')
settings = {
'static_path': os.path.join(root, 'static'),
'template_path': os.path.join(root, 'static'),
'debug': debug
}
handlers = [
('/', WebHandler, {'page': 'index.html', 'title': title}),
('/ui.json', TextHandler, {'content': ui})
]
handlers.extend(router.urls)
return Application(handlers, **settings)
@classmethod
def start(cls, ui, title, debug=False, port=9999):
if debug:
logging.basicConfig(level=logging.DEBUG)
log.debug()
app = cls.tornado_app(ui, title, debug=debug)
app.listen(port)
IOLoop.instance().start()
|
Add basic SockJS server implementation
|
Add basic SockJS server implementation
|
Python
|
apache-2.0
|
aldanor/sidecar,aldanor/sidecar,aldanor/sidecar
|
Add basic SockJS server implementation
|
# -*- coding: utf-8 -*-
import json
import logging
import os
from sockjs.tornado import SockJSRouter, SockJSConnection
from tornado.web import RequestHandler, StaticFileHandler
from tornado.web import Application
from tornado.ioloop import IOLoop
from sidecar.utils import log
class WebHandler(RequestHandler):
def initialize(self, page, **kwargs):
self.page = page
self.kwargs = kwargs
def get(self):
self.render(self.page, **self.kwargs)
class TextHandler(RequestHandler):
def initialize(self, content):
self.content = content
def get(self):
self.finish(self.content)
class FileHandler(StaticFileHandler):
def initialize(self, path):
if path is None:
self.absolute_path = None
else:
path = os.path.join(os.path.dirname(__file__), path)
self.absolute_path = os.path.abspath(os.path.expanduser(path))
self.root, self.filename = os.path.split(self.absolute_path)
def get(self, path=None, include_body=True):
if self.absolute_path is not None:
return super(FileHandler, self).get(self.filename, include_body)
self.finish('')
class Connection(SockJSConnection):
def send_json(self, kind, data=None):
log.debug()
self.send(json.dumps({'kind': kind, 'data': data or {}}))
def on_open(self, info):
log.debug()
self.send_json('ready')
def on_message(self, msg):
msg = json.loads(msg)
log.debug(msg)
def on_close(self):
log.debug()
@classmethod
def tornado_app(cls, ui, title, debug=False):
root = os.path.dirname(__file__)
router = SockJSRouter(cls, '/api')
settings = {
'static_path': os.path.join(root, 'static'),
'template_path': os.path.join(root, 'static'),
'debug': debug
}
handlers = [
('/', WebHandler, {'page': 'index.html', 'title': title}),
('/ui.json', TextHandler, {'content': ui})
]
handlers.extend(router.urls)
return Application(handlers, **settings)
@classmethod
def start(cls, ui, title, debug=False, port=9999):
if debug:
logging.basicConfig(level=logging.DEBUG)
log.debug()
app = cls.tornado_app(ui, title, debug=debug)
app.listen(port)
IOLoop.instance().start()
|
<commit_before><commit_msg>Add basic SockJS server implementation<commit_after>
|
# -*- coding: utf-8 -*-
import json
import logging
import os
from sockjs.tornado import SockJSRouter, SockJSConnection
from tornado.web import RequestHandler, StaticFileHandler
from tornado.web import Application
from tornado.ioloop import IOLoop
from sidecar.utils import log
class WebHandler(RequestHandler):
def initialize(self, page, **kwargs):
self.page = page
self.kwargs = kwargs
def get(self):
self.render(self.page, **self.kwargs)
class TextHandler(RequestHandler):
def initialize(self, content):
self.content = content
def get(self):
self.finish(self.content)
class FileHandler(StaticFileHandler):
def initialize(self, path):
if path is None:
self.absolute_path = None
else:
path = os.path.join(os.path.dirname(__file__), path)
self.absolute_path = os.path.abspath(os.path.expanduser(path))
self.root, self.filename = os.path.split(self.absolute_path)
def get(self, path=None, include_body=True):
if self.absolute_path is not None:
return super(FileHandler, self).get(self.filename, include_body)
self.finish('')
class Connection(SockJSConnection):
def send_json(self, kind, data=None):
log.debug()
self.send(json.dumps({'kind': kind, 'data': data or {}}))
def on_open(self, info):
log.debug()
self.send_json('ready')
def on_message(self, msg):
msg = json.loads(msg)
log.debug(msg)
def on_close(self):
log.debug()
@classmethod
def tornado_app(cls, ui, title, debug=False):
root = os.path.dirname(__file__)
router = SockJSRouter(cls, '/api')
settings = {
'static_path': os.path.join(root, 'static'),
'template_path': os.path.join(root, 'static'),
'debug': debug
}
handlers = [
('/', WebHandler, {'page': 'index.html', 'title': title}),
('/ui.json', TextHandler, {'content': ui})
]
handlers.extend(router.urls)
return Application(handlers, **settings)
@classmethod
def start(cls, ui, title, debug=False, port=9999):
if debug:
logging.basicConfig(level=logging.DEBUG)
log.debug()
app = cls.tornado_app(ui, title, debug=debug)
app.listen(port)
IOLoop.instance().start()
|
Add basic SockJS server implementation# -*- coding: utf-8 -*-
import json
import logging
import os
from sockjs.tornado import SockJSRouter, SockJSConnection
from tornado.web import RequestHandler, StaticFileHandler
from tornado.web import Application
from tornado.ioloop import IOLoop
from sidecar.utils import log
class WebHandler(RequestHandler):
def initialize(self, page, **kwargs):
self.page = page
self.kwargs = kwargs
def get(self):
self.render(self.page, **self.kwargs)
class TextHandler(RequestHandler):
def initialize(self, content):
self.content = content
def get(self):
self.finish(self.content)
class FileHandler(StaticFileHandler):
def initialize(self, path):
if path is None:
self.absolute_path = None
else:
path = os.path.join(os.path.dirname(__file__), path)
self.absolute_path = os.path.abspath(os.path.expanduser(path))
self.root, self.filename = os.path.split(self.absolute_path)
def get(self, path=None, include_body=True):
if self.absolute_path is not None:
return super(FileHandler, self).get(self.filename, include_body)
self.finish('')
class Connection(SockJSConnection):
def send_json(self, kind, data=None):
log.debug()
self.send(json.dumps({'kind': kind, 'data': data or {}}))
def on_open(self, info):
log.debug()
self.send_json('ready')
def on_message(self, msg):
msg = json.loads(msg)
log.debug(msg)
def on_close(self):
log.debug()
@classmethod
def tornado_app(cls, ui, title, debug=False):
root = os.path.dirname(__file__)
router = SockJSRouter(cls, '/api')
settings = {
'static_path': os.path.join(root, 'static'),
'template_path': os.path.join(root, 'static'),
'debug': debug
}
handlers = [
('/', WebHandler, {'page': 'index.html', 'title': title}),
('/ui.json', TextHandler, {'content': ui})
]
handlers.extend(router.urls)
return Application(handlers, **settings)
@classmethod
def start(cls, ui, title, debug=False, port=9999):
if debug:
logging.basicConfig(level=logging.DEBUG)
log.debug()
app = cls.tornado_app(ui, title, debug=debug)
app.listen(port)
IOLoop.instance().start()
|
<commit_before><commit_msg>Add basic SockJS server implementation<commit_after># -*- coding: utf-8 -*-
import json
import logging
import os
from sockjs.tornado import SockJSRouter, SockJSConnection
from tornado.web import RequestHandler, StaticFileHandler
from tornado.web import Application
from tornado.ioloop import IOLoop
from sidecar.utils import log
class WebHandler(RequestHandler):
def initialize(self, page, **kwargs):
self.page = page
self.kwargs = kwargs
def get(self):
self.render(self.page, **self.kwargs)
class TextHandler(RequestHandler):
def initialize(self, content):
self.content = content
def get(self):
self.finish(self.content)
class FileHandler(StaticFileHandler):
def initialize(self, path):
if path is None:
self.absolute_path = None
else:
path = os.path.join(os.path.dirname(__file__), path)
self.absolute_path = os.path.abspath(os.path.expanduser(path))
self.root, self.filename = os.path.split(self.absolute_path)
def get(self, path=None, include_body=True):
if self.absolute_path is not None:
return super(FileHandler, self).get(self.filename, include_body)
self.finish('')
class Connection(SockJSConnection):
def send_json(self, kind, data=None):
log.debug()
self.send(json.dumps({'kind': kind, 'data': data or {}}))
def on_open(self, info):
log.debug()
self.send_json('ready')
def on_message(self, msg):
msg = json.loads(msg)
log.debug(msg)
def on_close(self):
log.debug()
@classmethod
def tornado_app(cls, ui, title, debug=False):
root = os.path.dirname(__file__)
router = SockJSRouter(cls, '/api')
settings = {
'static_path': os.path.join(root, 'static'),
'template_path': os.path.join(root, 'static'),
'debug': debug
}
handlers = [
('/', WebHandler, {'page': 'index.html', 'title': title}),
('/ui.json', TextHandler, {'content': ui})
]
handlers.extend(router.urls)
return Application(handlers, **settings)
@classmethod
def start(cls, ui, title, debug=False, port=9999):
if debug:
logging.basicConfig(level=logging.DEBUG)
log.debug()
app = cls.tornado_app(ui, title, debug=debug)
app.listen(port)
IOLoop.instance().start()
|
|
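A usage sketch for the Connection class defined above; it assumes the module imports as sidecar.connection, the UI JSON string is a placeholder, and the process serves until interrupted:
from sidecar.connection import Connection

ui_spec = '{"widgets": []}'    # placeholder UI description served at /ui.json
# Serves index.html at /, the UI spec at /ui.json, and SockJS at /api.
Connection.start(ui_spec, title='Sidecar demo', debug=True, port=9999)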
3b32c08772f06046723352731def8cbb5a59263f
|
scripts/wav2raw.py
|
scripts/wav2raw.py
|
#!/usr/bin/env python
from array import array
import wave
import argparse
import struct
import os
def main():
parser = argparse.ArgumentParser(
        description='Converts an input WAV file to raw sound data.')
    parser.add_argument('-f', '--force', action='store_true',
                        help='If output files exist, the tool will overwrite them.')
    parser.add_argument('input', metavar='INPUT', type=str,
                        help='Input WAV filename.')
parser.add_argument('output', metavar='OUTPUT', type=str,
help='Output files basename (without extension).')
args = parser.parse_args()
inputPath = os.path.abspath(args.input)
outputPath = os.path.abspath(args.output)
if not os.path.isfile(inputPath):
        raise SystemExit('Input file does not exist!')
try:
sound = wave.open(inputPath)
except IOError as ex:
raise SystemExit('Error: %s.' % ex)
bps = sound.getsampwidth() * 8
stereo = (sound.getnchannels() == 2)
frameRate = sound.getframerate()
frames = sound.getnframes()
info = ["%.1f s" % (float(frames) / frameRate),
"stereo" if stereo else "mono",
"%d bps" % bps,
"%d Hz" % frameRate]
print "%s:" % inputPath, ", ".join(info)
rawSoundFile = outputPath + '.snd'
if os.path.isfile(rawSoundFile) and not args.force:
raise SystemExit('Will not overwrite output file!')
with open(rawSoundFile, 'w') as soundFile:
fmt = 'b' if bps <= 8 else 'h'
samples = array(fmt)
if stereo:
fmt = fmt * 2
samples.fromstring(sound.readframes(frames))
samples.byteswap()
soundFile.write(struct.pack('>BBHI', bps, stereo, frameRate, frames))
soundFile.write(samples.tostring())
if __name__ == '__main__':
main()
|
Convert WAV files to format digestible by Amiga.
|
Convert WAV files to format digestible by Amiga.
|
Python
|
artistic-2.0
|
cahirwpz/demoscene,cahirwpz/demoscene,cahirwpz/demoscene,cahirwpz/demoscene
|
Convert WAV files to format digestible by Amiga.
|
#!/usr/bin/env python
from array import array
import wave
import argparse
import struct
import os
def main():
parser = argparse.ArgumentParser(
        description='Converts an input WAV file to raw sound data.')
    parser.add_argument('-f', '--force', action='store_true',
                        help='If output files exist, the tool will overwrite them.')
    parser.add_argument('input', metavar='INPUT', type=str,
                        help='Input WAV filename.')
parser.add_argument('output', metavar='OUTPUT', type=str,
help='Output files basename (without extension).')
args = parser.parse_args()
inputPath = os.path.abspath(args.input)
outputPath = os.path.abspath(args.output)
if not os.path.isfile(inputPath):
        raise SystemExit('Input file does not exist!')
try:
sound = wave.open(inputPath)
except IOError as ex:
raise SystemExit('Error: %s.' % ex)
bps = sound.getsampwidth() * 8
stereo = (sound.getnchannels() == 2)
frameRate = sound.getframerate()
frames = sound.getnframes()
info = ["%.1f s" % (float(frames) / frameRate),
"stereo" if stereo else "mono",
"%d bps" % bps,
"%d Hz" % frameRate]
print "%s:" % inputPath, ", ".join(info)
rawSoundFile = outputPath + '.snd'
if os.path.isfile(rawSoundFile) and not args.force:
raise SystemExit('Will not overwrite output file!')
with open(rawSoundFile, 'w') as soundFile:
fmt = 'b' if bps <= 8 else 'h'
samples = array(fmt)
if stereo:
fmt = fmt * 2
samples.fromstring(sound.readframes(frames))
samples.byteswap()
soundFile.write(struct.pack('>BBHI', bps, stereo, frameRate, frames))
soundFile.write(samples.tostring())
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Convert WAV files to format digestible by Amiga.<commit_after>
|
#!/usr/bin/env python
from array import array
import wave
import argparse
import struct
import os
def main():
parser = argparse.ArgumentParser(
        description='Converts an input WAV file to raw sound data.')
    parser.add_argument('-f', '--force', action='store_true',
                        help='If output files exist, the tool will overwrite them.')
    parser.add_argument('input', metavar='INPUT', type=str,
                        help='Input WAV filename.')
parser.add_argument('output', metavar='OUTPUT', type=str,
help='Output files basename (without extension).')
args = parser.parse_args()
inputPath = os.path.abspath(args.input)
outputPath = os.path.abspath(args.output)
if not os.path.isfile(inputPath):
        raise SystemExit('Input file does not exist!')
try:
sound = wave.open(inputPath)
except IOError as ex:
raise SystemExit('Error: %s.' % ex)
bps = sound.getsampwidth() * 8
stereo = (sound.getnchannels() == 2)
frameRate = sound.getframerate()
frames = sound.getnframes()
info = ["%.1f s" % (float(frames) / frameRate),
"stereo" if stereo else "mono",
"%d bps" % bps,
"%d Hz" % frameRate]
print "%s:" % inputPath, ", ".join(info)
rawSoundFile = outputPath + '.snd'
if os.path.isfile(rawSoundFile) and not args.force:
raise SystemExit('Will not overwrite output file!')
with open(rawSoundFile, 'w') as soundFile:
fmt = 'b' if bps <= 8 else 'h'
samples = array(fmt)
if stereo:
fmt = fmt * 2
samples.fromstring(sound.readframes(frames))
samples.byteswap()
soundFile.write(struct.pack('>BBHI', bps, stereo, frameRate, frames))
soundFile.write(samples.tostring())
if __name__ == '__main__':
main()
|
Convert WAV files to format digestible by Amiga.#!/usr/bin/env python
from array import array
import wave
import argparse
import struct
import os
def main():
parser = argparse.ArgumentParser(
        description='Converts an input WAV file to raw sound data.')
    parser.add_argument('-f', '--force', action='store_true',
                        help='If output files exist, the tool will overwrite them.')
    parser.add_argument('input', metavar='INPUT', type=str,
                        help='Input WAV filename.')
parser.add_argument('output', metavar='OUTPUT', type=str,
help='Output files basename (without extension).')
args = parser.parse_args()
inputPath = os.path.abspath(args.input)
outputPath = os.path.abspath(args.output)
if not os.path.isfile(inputPath):
        raise SystemExit('Input file does not exist!')
try:
sound = wave.open(inputPath)
except IOError as ex:
raise SystemExit('Error: %s.' % ex)
bps = sound.getsampwidth() * 8
stereo = (sound.getnchannels() == 2)
frameRate = sound.getframerate()
frames = sound.getnframes()
info = ["%.1f s" % (float(frames) / frameRate),
"stereo" if stereo else "mono",
"%d bps" % bps,
"%d Hz" % frameRate]
print "%s:" % inputPath, ", ".join(info)
rawSoundFile = outputPath + '.snd'
if os.path.isfile(rawSoundFile) and not args.force:
raise SystemExit('Will not overwrite output file!')
with open(rawSoundFile, 'w') as soundFile:
fmt = 'b' if bps <= 8 else 'h'
samples = array(fmt)
if stereo:
fmt = fmt * 2
samples.fromstring(sound.readframes(frames))
samples.byteswap()
soundFile.write(struct.pack('>BBHI', bps, stereo, frameRate, frames))
soundFile.write(samples.tostring())
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Convert WAV files to format digestible by Amiga.<commit_after>#!/usr/bin/env python
from array import array
import wave
import argparse
import struct
import os
def main():
parser = argparse.ArgumentParser(
        description='Converts an input WAV file to raw sound data.')
    parser.add_argument('-f', '--force', action='store_true',
                        help='If output files exist, the tool will overwrite them.')
    parser.add_argument('input', metavar='INPUT', type=str,
                        help='Input WAV filename.')
parser.add_argument('output', metavar='OUTPUT', type=str,
help='Output files basename (without extension).')
args = parser.parse_args()
inputPath = os.path.abspath(args.input)
outputPath = os.path.abspath(args.output)
if not os.path.isfile(inputPath):
        raise SystemExit('Input file does not exist!')
try:
sound = wave.open(inputPath)
except IOError as ex:
raise SystemExit('Error: %s.' % ex)
bps = sound.getsampwidth() * 8
stereo = (sound.getnchannels() == 2)
frameRate = sound.getframerate()
frames = sound.getnframes()
info = ["%.1f s" % (float(frames) / frameRate),
"stereo" if stereo else "mono",
"%d bps" % bps,
"%d Hz" % frameRate]
print "%s:" % inputPath, ", ".join(info)
rawSoundFile = outputPath + '.snd'
if os.path.isfile(rawSoundFile) and not args.force:
raise SystemExit('Will not overwrite output file!')
with open(rawSoundFile, 'w') as soundFile:
fmt = 'b' if bps <= 8 else 'h'
samples = array(fmt)
if stereo:
fmt = fmt * 2
samples.fromstring(sound.readframes(frames))
samples.byteswap()
soundFile.write(struct.pack('>BBHI', bps, stereo, frameRate, frames))
soundFile.write(samples.tostring())
if __name__ == '__main__':
main()
|
|
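The writer above emits a big-endian header packed as '>BBHI' (bits per sample, stereo flag, frame rate, frame count) followed by the byte-swapped samples. A Python 3 reader sketch for that layout; the file name is illustrative and, like the writer, it assumes a little-endian host:
import struct
from array import array

def read_snd(path):
    with open(path, 'rb') as f:
        bps, stereo, frame_rate, frames = struct.unpack('>BBHI', f.read(8))
        samples = array('b' if bps <= 8 else 'h')
        samples.frombytes(f.read())
        samples.byteswap()       # samples were stored big-endian
        return bps, bool(stereo), frame_rate, frames, samples

if __name__ == '__main__':
    print(read_snd('music.snd')[:4])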
a79273430fd3f3a4629cfdba6a5db119a3b2c3ba
|
lintcode/Medium/106_Convert_Sorted_List_to_Balanced_BST.py
|
lintcode/Medium/106_Convert_Sorted_List_to_Balanced_BST.py
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
def sortedListToBST(self, head):
# write your code here
# Solution 1
# arr = []
# while (head):
# arr.append(head.val)
# head = head.next
# def toBST(l):
# if (len(l) < 1):
# return None
# if (len(l) == 1):
# return TreeNode(l[0])
# mid = len(l) / 2
# res = TreeNode(l[mid])
# res.left = toBST(l[:mid])
# res.right = toBST(l[mid + 1:])
# return res
# return toBST(arr)
# Solution 2
if (head is None):
return None
dummy = ListNode(0)
dummy.next = head
slow = dummy
fast = dummy
while (fast and fast.next and fast.next.next and fast.next.next.next):
fast = fast.next.next
slow = slow.next
res = TreeNode(slow.next.val)
right = slow.next.next
slow.next = None
left = dummy.next
res.left = self.sortedListToBST(left)
res.right = self.sortedListToBST(right)
return res
|
Add solution to lintcode question 106
|
Add solution to lintcode question 106
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 106
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
def sortedListToBST(self, head):
# write your code here
# Solution 1
# arr = []
# while (head):
# arr.append(head.val)
# head = head.next
# def toBST(l):
# if (len(l) < 1):
# return None
# if (len(l) == 1):
# return TreeNode(l[0])
# mid = len(l) / 2
# res = TreeNode(l[mid])
# res.left = toBST(l[:mid])
# res.right = toBST(l[mid + 1:])
# return res
# return toBST(arr)
# Solution 2
if (head is None):
return None
dummy = ListNode(0)
dummy.next = head
slow = dummy
fast = dummy
while (fast and fast.next and fast.next.next and fast.next.next.next):
fast = fast.next.next
slow = slow.next
res = TreeNode(slow.next.val)
right = slow.next.next
slow.next = None
left = dummy.next
res.left = self.sortedListToBST(left)
res.right = self.sortedListToBST(right)
return res
|
<commit_before><commit_msg>Add solution to lintcode question 106<commit_after>
|
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
def sortedListToBST(self, head):
# write your code here
# Solution 1
# arr = []
# while (head):
# arr.append(head.val)
# head = head.next
# def toBST(l):
# if (len(l) < 1):
# return None
# if (len(l) == 1):
# return TreeNode(l[0])
# mid = len(l) / 2
# res = TreeNode(l[mid])
# res.left = toBST(l[:mid])
# res.right = toBST(l[mid + 1:])
# return res
# return toBST(arr)
# Solution 2
if (head is None):
return None
dummy = ListNode(0)
dummy.next = head
slow = dummy
fast = dummy
while (fast and fast.next and fast.next.next and fast.next.next.next):
fast = fast.next.next
slow = slow.next
res = TreeNode(slow.next.val)
right = slow.next.next
slow.next = None
left = dummy.next
res.left = self.sortedListToBST(left)
res.right = self.sortedListToBST(right)
return res
|
Add solution to lintcode question 106"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
def sortedListToBST(self, head):
# write your code here
# Solution 1
# arr = []
# while (head):
# arr.append(head.val)
# head = head.next
# def toBST(l):
# if (len(l) < 1):
# return None
# if (len(l) == 1):
# return TreeNode(l[0])
# mid = len(l) / 2
# res = TreeNode(l[mid])
# res.left = toBST(l[:mid])
# res.right = toBST(l[mid + 1:])
# return res
# return toBST(arr)
# Solution 2
if (head is None):
return None
dummy = ListNode(0)
dummy.next = head
slow = dummy
fast = dummy
while (fast and fast.next and fast.next.next and fast.next.next.next):
fast = fast.next.next
slow = slow.next
res = TreeNode(slow.next.val)
right = slow.next.next
slow.next = None
left = dummy.next
res.left = self.sortedListToBST(left)
res.right = self.sortedListToBST(right)
return res
|
<commit_before><commit_msg>Add solution to lintcode question 106<commit_after>"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param head: The first node of linked list.
@return: a tree node
"""
def sortedListToBST(self, head):
# write your code here
# Solution 1
# arr = []
# while (head):
# arr.append(head.val)
# head = head.next
# def toBST(l):
# if (len(l) < 1):
# return None
# if (len(l) == 1):
# return TreeNode(l[0])
# mid = len(l) / 2
# res = TreeNode(l[mid])
# res.left = toBST(l[:mid])
# res.right = toBST(l[mid + 1:])
# return res
# return toBST(arr)
# Solution 2
if (head is None):
return None
dummy = ListNode(0)
dummy.next = head
slow = dummy
fast = dummy
while (fast and fast.next and fast.next.next and fast.next.next.next):
fast = fast.next.next
slow = slow.next
res = TreeNode(slow.next.val)
right = slow.next.next
slow.next = None
left = dummy.next
res.left = self.sortedListToBST(left)
res.right = self.sortedListToBST(right)
return res
|
|
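A small driver sketch for the slow/fast-pointer solution above. ListNode and TreeNode follow the definitions quoted in the docstring, and Solution is assumed to be the class from the record, defined in the same file:
class ListNode(object):
    def __init__(self, val, next=None):
        self.val = val
        self.next = next

class TreeNode(object):
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None

def build_list(values):
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

def inorder(node):
    return inorder(node.left) + [node.val] + inorder(node.right) if node else []

root = Solution().sortedListToBST(build_list([1, 2, 3, 4, 5, 6, 7]))
print(inorder(root))    # expected: [1, 2, 3, 4, 5, 6, 7]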
d290665b9ca99a9748e0935645afa3a56f76fea4
|
python/getmonotime.py
|
python/getmonotime.py
|
import getopt, sys
if __name__ == '__main__':
sippy_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], 's:S:i:o:b')
except getopt.GetoptError:
usage()
for o, a in opts:
if o == '-S':
sippy_path = a.strip()
continue
if sippy_path != None:
sys.path.insert(0, sippy_path)
from sippy.Time.clock_dtime import clock_getdtime, CLOCK_MONOTONIC
print clock_getdtime(CLOCK_MONOTONIC)
|
Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters to aid debugging.
|
Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters
to aid debugging.
|
Python
|
bsd-2-clause
|
sippy/b2bua,sippy/b2bua
|
Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters
to aid debugging.
|
import getopt, sys
if __name__ == '__main__':
sippy_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], 's:S:i:o:b')
except getopt.GetoptError:
usage()
for o, a in opts:
if o == '-S':
sippy_path = a.strip()
continue
if sippy_path != None:
sys.path.insert(0, sippy_path)
from sippy.Time.clock_dtime import clock_getdtime, CLOCK_MONOTONIC
print clock_getdtime(CLOCK_MONOTONIC)
|
<commit_before><commit_msg>Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters
to aid debugging.<commit_after>
|
import getopt, sys
if __name__ == '__main__':
sippy_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], 's:S:i:o:b')
except getopt.GetoptError:
usage()
for o, a in opts:
if o == '-S':
sippy_path = a.strip()
continue
if sippy_path != None:
sys.path.insert(0, sippy_path)
from sippy.Time.clock_dtime import clock_getdtime, CLOCK_MONOTONIC
print clock_getdtime(CLOCK_MONOTONIC)
|
Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters
to aid debugging.import getopt, sys
if __name__ == '__main__':
sippy_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], 's:S:i:o:b')
except getopt.GetoptError:
usage()
for o, a in opts:
if o == '-S':
sippy_path = a.strip()
continue
if sippy_path != None:
sys.path.insert(0, sippy_path)
from sippy.Time.clock_dtime import clock_getdtime, CLOCK_MONOTONIC
print clock_getdtime(CLOCK_MONOTONIC)
|
<commit_before><commit_msg>Implement RTPP_LOG_TSTART and RTPP_LOG_TFORM="rel" env parameters
to aid debugging.<commit_after>import getopt, sys
if __name__ == '__main__':
sippy_path = None
try:
opts, args = getopt.getopt(sys.argv[1:], 's:S:i:o:b')
except getopt.GetoptError:
usage()
for o, a in opts:
if o == '-S':
sippy_path = a.strip()
continue
if sippy_path != None:
sys.path.insert(0, sippy_path)
from sippy.Time.clock_dtime import clock_getdtime, CLOCK_MONOTONIC
print clock_getdtime(CLOCK_MONOTONIC)
|
|
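A dependency-free Python 3 sketch of the same idea: capture one monotonic start timestamp and report offsets relative to it, which is what a 'rel' log time format needs:
import time

T_START = time.monotonic()

def rel_now():
    """Seconds elapsed since T_START was captured."""
    return time.monotonic() - T_START

if __name__ == '__main__':
    print('%.6f' % rel_now())
    time.sleep(0.1)
    print('%.6f' % rel_now())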
41c99eb2c186082869396c19e0ada2b9f3d0cee2
|
scripts/util/afos2flatfile.py
|
scripts/util/afos2flatfile.py
|
"""
Dump what I have stored in the AFOS database to flat files
"""
import psycopg2
pgconn = psycopg2.connect(database='afos', host='iemdb', user='nobody')
cursor = pgconn.cursor()
import datetime
import subprocess
pils = "LSR|FWW|CFW|TCV|RFW|FFA|SVR|TOR|SVS|SMW|MWS|NPW|WCN|WSW|EWW|FLS|FLW|SPS|SEL|SWO|FFW"
def sanitize(data):
""" Make sure we have the right control characters """
if data.find("\001") == -1:
data = "\001"+data
if data.find("\003") == -1:
data = data+"\003"
return data
def do(date):
""" Process a given UTC date """
table = "products_%s_%s" % (date.year, "0712" if date.month > 6 else '0106')
for pil in pils.split("|"):
cursor.execute("""SELECT data from """+table+""" WHERE
entered >= '%s 00:00+00' and entered < '%s 00:00+00' and
substr(pil,1,3) = '%s' ORDER by entered ASC""" % (
date.strftime("%Y-%m-%d"),
(date + datetime.timedelta(hours=36)).strftime("%Y-%m-%d"),
pil))
if cursor.rowcount == 0:
continue
print('%s %s %s' % (date, pil, cursor.rowcount))
o = open('/tmp/afos.tmp', 'w')
for row in cursor:
o.write(sanitize(row[0]))
o.close()
cmd = "data a %s0000 bogus text/noaaport/%s_%s.txt txt" % (
date.strftime("%Y%m%d"), pil, date.strftime("%Y%m%d"))
cmd = "/home/ldm/bin/pqinsert -p '%s' /tmp/afos.tmp" % (cmd,)
subprocess.call(cmd, shell=True)
def main():
sts = datetime.datetime(2000,1,1)
ets = datetime.datetime(2006,8,4)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
do(now)
now += interval
if __name__ == '__main__':
# go
main()
|
Add util to dump out archived noaaport files
|
Add util to dump out archived noaaport files
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add util to dump out archived noaaport files
|
"""
Dump what I have stored in the AFOS database to flat files
"""
import psycopg2
pgconn = psycopg2.connect(database='afos', host='iemdb', user='nobody')
cursor = pgconn.cursor()
import datetime
import subprocess
pils = "LSR|FWW|CFW|TCV|RFW|FFA|SVR|TOR|SVS|SMW|MWS|NPW|WCN|WSW|EWW|FLS|FLW|SPS|SEL|SWO|FFW"
def sanitize(data):
""" Make sure we have the right control characters """
if data.find("\001") == -1:
data = "\001"+data
if data.find("\003") == -1:
data = data+"\003"
return data
def do(date):
""" Process a given UTC date """
table = "products_%s_%s" % (date.year, "0712" if date.month > 6 else '0106')
for pil in pils.split("|"):
cursor.execute("""SELECT data from """+table+""" WHERE
entered >= '%s 00:00+00' and entered < '%s 00:00+00' and
substr(pil,1,3) = '%s' ORDER by entered ASC""" % (
date.strftime("%Y-%m-%d"),
(date + datetime.timedelta(hours=36)).strftime("%Y-%m-%d"),
pil))
if cursor.rowcount == 0:
continue
print('%s %s %s' % (date, pil, cursor.rowcount))
o = open('/tmp/afos.tmp', 'w')
for row in cursor:
o.write(sanitize(row[0]))
o.close()
cmd = "data a %s0000 bogus text/noaaport/%s_%s.txt txt" % (
date.strftime("%Y%m%d"), pil, date.strftime("%Y%m%d"))
cmd = "/home/ldm/bin/pqinsert -p '%s' /tmp/afos.tmp" % (cmd,)
subprocess.call(cmd, shell=True)
def main():
sts = datetime.datetime(2000,1,1)
ets = datetime.datetime(2006,8,4)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
do(now)
now += interval
if __name__ == '__main__':
# go
main()
|
<commit_before><commit_msg>Add util to dump out archived noaaport files<commit_after>
|
"""
Dump what I have stored in the AFOS database to flat files
"""
import psycopg2
pgconn = psycopg2.connect(database='afos', host='iemdb', user='nobody')
cursor = pgconn.cursor()
import datetime
import subprocess
pils = "LSR|FWW|CFW|TCV|RFW|FFA|SVR|TOR|SVS|SMW|MWS|NPW|WCN|WSW|EWW|FLS|FLW|SPS|SEL|SWO|FFW"
def sanitize(data):
""" Make sure we have the right control characters """
if data.find("\001") == -1:
data = "\001"+data
if data.find("\003") == -1:
data = data+"\003"
return data
def do(date):
""" Process a given UTC date """
table = "products_%s_%s" % (date.year, "0712" if date.month > 6 else '0106')
for pil in pils.split("|"):
cursor.execute("""SELECT data from """+table+""" WHERE
entered >= '%s 00:00+00' and entered < '%s 00:00+00' and
substr(pil,1,3) = '%s' ORDER by entered ASC""" % (
date.strftime("%Y-%m-%d"),
(date + datetime.timedelta(hours=36)).strftime("%Y-%m-%d"),
pil))
if cursor.rowcount == 0:
continue
print('%s %s %s' % (date, pil, cursor.rowcount))
o = open('/tmp/afos.tmp', 'w')
for row in cursor:
o.write(sanitize(row[0]))
o.close()
cmd = "data a %s0000 bogus text/noaaport/%s_%s.txt txt" % (
date.strftime("%Y%m%d"), pil, date.strftime("%Y%m%d"))
cmd = "/home/ldm/bin/pqinsert -p '%s' /tmp/afos.tmp" % (cmd,)
subprocess.call(cmd, shell=True)
def main():
sts = datetime.datetime(2000,1,1)
ets = datetime.datetime(2006,8,4)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
do(now)
now += interval
if __name__ == '__main__':
# go
main()
|
Add util to dump out archived noaaport files"""
Dump what I have stored in the AFOS database to flat files
"""
import psycopg2
pgconn = psycopg2.connect(database='afos', host='iemdb', user='nobody')
cursor = pgconn.cursor()
import datetime
import subprocess
pils = "LSR|FWW|CFW|TCV|RFW|FFA|SVR|TOR|SVS|SMW|MWS|NPW|WCN|WSW|EWW|FLS|FLW|SPS|SEL|SWO|FFW"
def sanitize(data):
""" Make sure we have the right control characters """
if data.find("\001") == -1:
data = "\001"+data
if data.find("\003") == -1:
data = data+"\003"
return data
def do(date):
""" Process a given UTC date """
table = "products_%s_%s" % (date.year, "0712" if date.month > 6 else '0106')
for pil in pils.split("|"):
cursor.execute("""SELECT data from """+table+""" WHERE
entered >= '%s 00:00+00' and entered < '%s 00:00+00' and
substr(pil,1,3) = '%s' ORDER by entered ASC""" % (
date.strftime("%Y-%m-%d"),
(date + datetime.timedelta(hours=36)).strftime("%Y-%m-%d"),
pil))
if cursor.rowcount == 0:
continue
print('%s %s %s' % (date, pil, cursor.rowcount))
o = open('/tmp/afos.tmp', 'w')
for row in cursor:
o.write(sanitize(row[0]))
o.close()
cmd = "data a %s0000 bogus text/noaaport/%s_%s.txt txt" % (
date.strftime("%Y%m%d"), pil, date.strftime("%Y%m%d"))
cmd = "/home/ldm/bin/pqinsert -p '%s' /tmp/afos.tmp" % (cmd,)
subprocess.call(cmd, shell=True)
def main():
sts = datetime.datetime(2000,1,1)
ets = datetime.datetime(2006,8,4)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
do(now)
now += interval
if __name__ == '__main__':
# go
main()
|
<commit_before><commit_msg>Add util to dump out archived noaaport files<commit_after>"""
Dump what I have stored in the AFOS database to flat files
"""
import psycopg2
pgconn = psycopg2.connect(database='afos', host='iemdb', user='nobody')
cursor = pgconn.cursor()
import datetime
import subprocess
pils = "LSR|FWW|CFW|TCV|RFW|FFA|SVR|TOR|SVS|SMW|MWS|NPW|WCN|WSW|EWW|FLS|FLW|SPS|SEL|SWO|FFW"
def sanitize(data):
""" Make sure we have the right control characters """
if data.find("\001") == -1:
data = "\001"+data
if data.find("\003") == -1:
data = data+"\003"
return data
def do(date):
""" Process a given UTC date """
table = "products_%s_%s" % (date.year, "0712" if date.month > 6 else '0106')
for pil in pils.split("|"):
cursor.execute("""SELECT data from """+table+""" WHERE
entered >= '%s 00:00+00' and entered < '%s 00:00+00' and
substr(pil,1,3) = '%s' ORDER by entered ASC""" % (
date.strftime("%Y-%m-%d"),
(date + datetime.timedelta(hours=36)).strftime("%Y-%m-%d"),
pil))
if cursor.rowcount == 0:
continue
print('%s %s %s' % (date, pil, cursor.rowcount))
o = open('/tmp/afos.tmp', 'w')
for row in cursor:
o.write(sanitize(row[0]))
o.close()
cmd = "data a %s0000 bogus text/noaaport/%s_%s.txt txt" % (
date.strftime("%Y%m%d"), pil, date.strftime("%Y%m%d"))
cmd = "/home/ldm/bin/pqinsert -p '%s' /tmp/afos.tmp" % (cmd,)
subprocess.call(cmd, shell=True)
def main():
sts = datetime.datetime(2000,1,1)
ets = datetime.datetime(2006,8,4)
interval = datetime.timedelta(days=1)
now = sts
while now < ets:
do(now)
now += interval
if __name__ == '__main__':
# go
main()
|
|
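The framing rule enforced by sanitize() above is that each product starts with SOH (\x01) and ends with ETX (\x03). A standalone copy with two quick checks, for illustration only:
def sanitize(data):
    """Add the SOH/ETX control characters if either is missing."""
    if data.find("\001") == -1:
        data = "\001" + data
    if data.find("\003") == -1:
        data = data + "\003"
    return data

assert sanitize("FFW TEXT") == "\x01FFW TEXT\x03"
assert sanitize("\x01FFW TEXT\x03") == "\x01FFW TEXT\x03"
print("sanitize framing OK")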
c0d98d5fe5b095dcbe6f01050f7e0addc2b950e8
|
senlin/tests/tempest/api/clusters/test_cluster_delete_negative.py
|
senlin/tests/tempest/api/clusters/test_cluster_delete_negative.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestClusterDeleteNegative(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestClusterDeleteNegative, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(
constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'], 0)
# Create policy and attach to cluster
cls.policy = cls.create_test_policy()
cls.attach_policy(cls.cluster['id'], cls.policy['id'])
@classmethod
def resource_cleanup(cls):
# Detach policy from cluster and delete it
cls.detach_policy(cls.cluster['id'], cls.policy['id'])
cls.client.delete_obj('policies', cls.policy['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestClusterDeleteNegative, cls).resource_cleanup()
@test.attr(type=['negative'])
@decorators.idempotent_id('0de81427-2b2f-4821-9462-c893d35fb212')
def test_cluster_delete_conflict(self):
# Verify conflict exception(409) is raised.
self.assertRaises(exceptions.Conflict,
self.client.delete_obj,
'clusters',
self.cluster['id'])
@test.attr(type=['negative'])
@decorators.idempotent_id('8a583b8e-eeaa-4920-a6f5-2880b070624f')
def test_cluster_delete_not_found(self):
# Verify notfound exception(404) is raised.
self.assertRaises(exceptions.NotFound,
self.client.delete_obj,
'clusters',
'8a583b8e-eeaa-4920-a6f5-2880b070624f')
|
Add negative tempest API test for cluster_delete
|
Add negative tempest API test for cluster_delete
This patch adds negative tempest API test for cluster_delete for
failure cases of 404(NotFound) and 409(Conflict).
Change-Id: I5e0d917499fe897d0c8b46a8b323c9606db545e5
|
Python
|
apache-2.0
|
stackforge/senlin,openstack/senlin,openstack/senlin,openstack/senlin,stackforge/senlin
|
Add negative tempest API test for cluster_delete
This patch adds negative tempest API test for cluster_delete for
failure cases of 404(NotFound) and 409(Conflict).
Change-Id: I5e0d917499fe897d0c8b46a8b323c9606db545e5
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestClusterDeleteNegative(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestClusterDeleteNegative, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(
constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'], 0)
# Create policy and attach to cluster
cls.policy = cls.create_test_policy()
cls.attach_policy(cls.cluster['id'], cls.policy['id'])
@classmethod
def resource_cleanup(cls):
# Detach policy from cluster and delete it
cls.detach_policy(cls.cluster['id'], cls.policy['id'])
cls.client.delete_obj('policies', cls.policy['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestClusterDeleteNegative, cls).resource_cleanup()
@test.attr(type=['negative'])
@decorators.idempotent_id('0de81427-2b2f-4821-9462-c893d35fb212')
def test_cluster_delete_conflict(self):
# Verify conflict exception(409) is raised.
self.assertRaises(exceptions.Conflict,
self.client.delete_obj,
'clusters',
self.cluster['id'])
@test.attr(type=['negative'])
@decorators.idempotent_id('8a583b8e-eeaa-4920-a6f5-2880b070624f')
def test_cluster_delete_not_found(self):
# Verify notfound exception(404) is raised.
self.assertRaises(exceptions.NotFound,
self.client.delete_obj,
'clusters',
'8a583b8e-eeaa-4920-a6f5-2880b070624f')
|
<commit_before><commit_msg>Add negative tempest API test for cluster_delete
This patch adds negative tempest API test for cluster_delete for
failure cases of 404(NotFound) and 409(Conflict).
Change-Id: I5e0d917499fe897d0c8b46a8b323c9606db545e5<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestClusterDeleteNegative(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestClusterDeleteNegative, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(
constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'], 0)
# Create policy and attach to cluster
cls.policy = cls.create_test_policy()
cls.attach_policy(cls.cluster['id'], cls.policy['id'])
@classmethod
def resource_cleanup(cls):
# Detach policy from cluster and delete it
cls.detach_policy(cls.cluster['id'], cls.policy['id'])
cls.client.delete_obj('policies', cls.policy['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestClusterDeleteNegative, cls).resource_cleanup()
@test.attr(type=['negative'])
@decorators.idempotent_id('0de81427-2b2f-4821-9462-c893d35fb212')
def test_cluster_delete_conflict(self):
# Verify conflict exception(409) is raised.
self.assertRaises(exceptions.Conflict,
self.client.delete_obj,
'clusters',
self.cluster['id'])
@test.attr(type=['negative'])
@decorators.idempotent_id('8a583b8e-eeaa-4920-a6f5-2880b070624f')
def test_cluster_delete_not_found(self):
# Verify notfound exception(404) is raised.
self.assertRaises(exceptions.NotFound,
self.client.delete_obj,
'clusters',
'8a583b8e-eeaa-4920-a6f5-2880b070624f')
|
Add negative tempest API test for cluster_delete
This patch adds negative tempest API test for cluster_delete for
failure cases of 404(NotFound) and 409(Conflict).
Change-Id: I5e0d917499fe897d0c8b46a8b323c9606db545e5# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestClusterDeleteNegative(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestClusterDeleteNegative, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(
constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'], 0)
# Create policy and attach to cluster
cls.policy = cls.create_test_policy()
cls.attach_policy(cls.cluster['id'], cls.policy['id'])
@classmethod
def resource_cleanup(cls):
# Detach policy from cluster and delete it
cls.detach_policy(cls.cluster['id'], cls.policy['id'])
cls.client.delete_obj('policies', cls.policy['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestClusterDeleteNegative, cls).resource_cleanup()
@test.attr(type=['negative'])
@decorators.idempotent_id('0de81427-2b2f-4821-9462-c893d35fb212')
def test_cluster_delete_conflict(self):
# Verify conflict exception(409) is raised.
self.assertRaises(exceptions.Conflict,
self.client.delete_obj,
'clusters',
self.cluster['id'])
@test.attr(type=['negative'])
@decorators.idempotent_id('8a583b8e-eeaa-4920-a6f5-2880b070624f')
def test_cluster_delete_not_found(self):
# Verify notfound exception(404) is raised.
self.assertRaises(exceptions.NotFound,
self.client.delete_obj,
'clusters',
'8a583b8e-eeaa-4920-a6f5-2880b070624f')
|
<commit_before><commit_msg>Add negative tempest API test for cluster_delete
This patch adds negative tempest API test for cluster_delete for
failure cases of 404(NotFound) and 409(Conflict).
Change-Id: I5e0d917499fe897d0c8b46a8b323c9606db545e5<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from tempest.lib import exceptions
from tempest import test
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestClusterDeleteNegative(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestClusterDeleteNegative, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(
constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'], 0)
# Create policy and attach to cluster
cls.policy = cls.create_test_policy()
cls.attach_policy(cls.cluster['id'], cls.policy['id'])
@classmethod
def resource_cleanup(cls):
# Detach policy from cluster and delete it
cls.detach_policy(cls.cluster['id'], cls.policy['id'])
cls.client.delete_obj('policies', cls.policy['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestClusterDeleteNegative, cls).resource_cleanup()
@test.attr(type=['negative'])
@decorators.idempotent_id('0de81427-2b2f-4821-9462-c893d35fb212')
def test_cluster_delete_conflict(self):
# Verify conflict exception(409) is raised.
self.assertRaises(exceptions.Conflict,
self.client.delete_obj,
'clusters',
self.cluster['id'])
@test.attr(type=['negative'])
@decorators.idempotent_id('8a583b8e-eeaa-4920-a6f5-2880b070624f')
def test_cluster_delete_not_found(self):
# Verify notfound exception(404) is raised.
self.assertRaises(exceptions.NotFound,
self.client.delete_obj,
'clusters',
'8a583b8e-eeaa-4920-a6f5-2880b070624f')
|
|
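The negative tests above are the standard assertRaises pattern. A framework-independent sketch with plain unittest and a stand-in client; the names and exception classes are illustrative, not senlin's:
import unittest

class FakeClient(object):
    def delete_obj(self, kind, obj_id):
        if obj_id == 'missing':
            raise LookupError('404 not found')
        raise RuntimeError('409 conflict: object still in use')

class TestDeleteNegative(unittest.TestCase):
    def setUp(self):
        self.client = FakeClient()

    def test_delete_conflict(self):
        self.assertRaises(RuntimeError, self.client.delete_obj, 'clusters', 'busy')

    def test_delete_not_found(self):
        self.assertRaises(LookupError, self.client.delete_obj, 'clusters', 'missing')

if __name__ == '__main__':
    unittest.main()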
22b897c20ac63a6aea4383f4a4378bca1c3688b2
|
scripts/spectral-graph-display.py
|
scripts/spectral-graph-display.py
|
import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-.5)  # diagonal of D^(-1/2)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
Add script to generate spectral graph
|
Add script to generate spectral graph
This script assumes several data files adapted from
http://www.ifp.illinois.edu/~varshney/elegans/
It uses dense matrices and so is unsuitable for large datasets.
|
Python
|
mit
|
jni/spectral-graphs
|
Add script to generate spectral graph
This script assumes several data files adapted from
http://www.ifp.illinois.edu/~varshney/elegans/
It uses dense matrices and so is unsuitable for large datasets.
|
import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-.5)  # diagonal of D^(-1/2)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
<commit_before><commit_msg>Add script to generate spectral graph
This script assumes several data files adapted from
http://www.ifp.illinois.edu/~varshney/elegans/
It uses dense matrices and so is unsuitable for large datasets.<commit_after>
|
import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-.5)  # diagonal of D^(-1/2)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
Add script to generate spectral graph
This script assumes several data files adapted from
http://www.ifp.illinois.edu/~varshney/elegans/
It uses dense matrices and so is unsuitable for large datasets.import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
|
<commit_before><commit_msg>Add script to generate spectral graph
This script assumes several data files adapted from
http://www.ifp.illinois.edu/~varshney/elegans/
It uses dense matrices and so is unsuitable for large datasets.<commit_after>import numpy as np
from scipy import io, sparse, linalg
# run this from elegant scipy chapter
chem = np.load('chem-network.npy')
gap = np.load('gap-network.npy')
neuron_types = np.load('neuron-types.npy')
neuron_ids = np.load('neurons.npy')
A = chem + gap
n = A.shape[0]
c = (A + A.T) / 2
d = sparse.diags([np.sum(c, axis=0)], [0])
d = d.toarray()
L = np.array(d - c)
b = np.sum(c * np.sign(A - A.T), axis=1)
z = np.linalg.pinv(L) @ b
# IPython log file
dinv2 = np.copy(d)
diag = (np.arange(n), np.arange(n))
dinv2[diag] = d[diag] ** (-.5)
q = dinv2 @ L @ dinv2
eigvals, vec = linalg.eig(q)
x = dinv2 @ vec[:, 1]
x.shape
from matplotlib import pyplot as plt
from matplotlib import colors
ii, jj = np.nonzero(c)
plt.scatter(x, z, c=neuron_types, cmap=colors.ListedColormap(((1, 0, 0), (0, 1, 0), (0, 0, 1))), zorder=1)
for src, dst in zip(ii, jj):
plt.plot(x[[src, dst]], z[[src, dst]], c=(0.85, 0.85, 0.85), lw=0.2, alpha=0.5, zorder=0)
for x0, z0, neuron_id in zip(x, z, neuron_ids):
plt.text(x0, z0, ' ' + neuron_id,
horizontalalignment='left', verticalalignment='center',
fontsize=4, zorder=2)
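Editorial aside on the record above, not part of the commit: the message already flags the dense-matrix limitation. A minimal sketch of the same x/z layout computed with sparse solvers is below; it assumes the same .npy adjacency files and that every neuron has non-zero degree.
import numpy as np
from scipy import sparse
from scipy.sparse import linalg as splinalg
A = np.load('chem-network.npy') + np.load('gap-network.npy')
C = sparse.csr_matrix((A + A.T) / 2)                 # symmetrized connectivity
degrees = np.asarray(C.sum(axis=0)).ravel()
L = (sparse.diags(degrees) - C).tocsr()              # graph Laplacian kept sparse
b = np.asarray(C.multiply(np.sign(A - A.T)).sum(axis=1)).ravel()
z = splinalg.lsqr(L, b)[0]                           # least-squares solve instead of a dense pinv
Dinv_sqrt = sparse.diags(1 / np.sqrt(degrees))       # assumes no zero-degree neurons
Q = Dinv_sqrt @ L @ Dinv_sqrt                        # degree-normalized Laplacian
eigvals, eigvecs = splinalg.eigsh(Q, k=2, which='SM')
x = Dinv_sqrt @ eigvecs[:, 1]                        # second eigenvector gives the x coordinate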
|
|
162a0cf6f19878f20bd6a7c92aeff2eac2a240c3
|
tests/CYK/OneRuleTest.py
|
tests/CYK/OneRuleTest.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:51
:Licence GNUv3
Part of pyparsers
"""
from unittest import main, TestCase
from grammpy import *
from pyparsers import cyk
class S(Nonterminal): pass
class R(Rule): rule=([S], [0, 1])
class OneRuleTest(TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self.g = None
def setUp(self):
self.g = Grammar(terminals=[0,1],
nonterminals=[S],
rules=[R],
start_symbol=S)
def test_shouldParse(self):
parsed = cyk(self.g, [0, 1])
def test_shouldParseCorrectTypes(self):
parsed = cyk(self.g, [0, 1])
self.assertIsInstance(parsed, Nonterminal)
self.assertIsInstance(parsed.to_rule, Rule)
self.assertIsInstance(parsed.to_rule.to_nonterms[0], Terminal)
self.assertIsInstance(parsed.to_rule.to_nonterms[1], Terminal)
def test_shouldParseCorrectSymbols(self):
parsed = cyk(self.g, [0, 1])
self.assertEqual(parsed.to_rule.to_nonterms[0].s, 0)
self.assertEqual(parsed.to_rule.to_nonterms[1].s, 1)
if __name__ == '__main__':
main()
|
Add test of cyk for one rule
|
Add test of cyk for one rule
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add test of cyk for one rule
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:51
:Licence GNUv3
Part of pyparsers
"""
from unittest import main, TestCase
from grammpy import *
from pyparsers import cyk
class S(Nonterminal): pass
class R(Rule): rule=([S], [0, 1])
class OneRuleTest(TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self.g = None
def setUp(self):
self.g = Grammar(terminals=[0,1],
nonterminals=[S],
rules=[R],
start_symbol=S)
def test_shouldParse(self):
parsed = cyk(self.g, [0, 1])
def test_shouldParseCorrectTypes(self):
parsed = cyk(self.g, [0, 1])
self.assertIsInstance(parsed, Nonterminal)
self.assertIsInstance(parsed.to_rule, Rule)
self.assertIsInstance(parsed.to_rule.to_nonterms[0], Terminal)
self.assertIsInstance(parsed.to_rule.to_nonterms[1], Terminal)
def test_shouldParseCorrectSymbols(self):
parsed = cyk(self.g, [0, 1])
self.assertEqual(parsed.to_rule.to_nonterms[0].s, 0)
self.assertEqual(parsed.to_rule.to_nonterms[1].s, 1)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test of cyk for one rule<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:51
:Licence GNUv3
Part of pyparsers
"""
from unittest import main, TestCase
from grammpy import *
from pyparsers import cyk
class S(Nonterminal): pass
class R(Rule): rule=([S], [0, 1])
class OneRuleTest(TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self.g = None
def setUp(self):
self.g = Grammar(terminals=[0,1],
nonterminals=[S],
rules=[R],
start_symbol=S)
def test_shouldParse(self):
parsed = cyk(self.g, [0, 1])
def test_shouldParseCorrectTypes(self):
parsed = cyk(self.g, [0, 1])
self.assertIsInstance(parsed, Nonterminal)
self.assertIsInstance(parsed.to_rule, Rule)
self.assertIsInstance(parsed.to_rule.to_nonterms[0], Terminal)
self.assertIsInstance(parsed.to_rule.to_nonterms[1], Terminal)
def test_shouldParseCorrectSymbols(self):
parsed = cyk(self.g, [0, 1])
self.assertEqual(parsed.to_rule.to_nonterms[0].s, 0)
self.assertEqual(parsed.to_rule.to_nonterms[1].s, 1)
if __name__ == '__main__':
main()
|
Add test of cyk for one rule#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:51
:Licence GNUv3
Part of pyparsers
"""
from unittest import main, TestCase
from grammpy import *
from pyparsers import cyk
class S(Nonterminal): pass
class R(Rule): rule=([S], [0, 1])
class OneRuleTest(TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self.g = None
def setUp(self):
self.g = Grammar(terminals=[0,1],
nonterminals=[S],
rules=[R],
start_symbol=S)
def test_shouldParse(self):
parsed = cyk(self.g, [0, 1])
def test_shouldParseCorrectTypes(self):
parsed = cyk(self.g, [0, 1])
self.assertIsInstance(parsed, Nonterminal)
self.assertIsInstance(parsed.to_rule, Rule)
self.assertIsInstance(parsed.to_rule.to_nonterms[0], Terminal)
self.assertIsInstance(parsed.to_rule.to_nonterms[1], Terminal)
def test_shouldParseCorrectSymbols(self):
parsed = cyk(self.g, [0, 1])
self.assertEqual(parsed.to_rule.to_nonterms[0].s, 0)
self.assertEqual(parsed.to_rule.to_nonterms[1].s, 1)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test of cyk for one rule<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 31.08.2017 14:51
:Licence GNUv3
Part of pyparsers
"""
from unittest import main, TestCase
from grammpy import *
from pyparsers import cyk
class S(Nonterminal): pass
class R(Rule): rule=([S], [0, 1])
class OneRuleTest(TestCase):
def __init__(self, methodName='runTest'):
super().__init__(methodName)
self.g = None
def setUp(self):
self.g = Grammar(terminals=[0,1],
nonterminals=[S],
rules=[R],
start_symbol=S)
def test_shouldParse(self):
parsed = cyk(self.g, [0, 1])
def test_shouldParseCorrectTypes(self):
parsed = cyk(self.g, [0, 1])
self.assertIsInstance(parsed, Nonterminal)
self.assertIsInstance(parsed.to_rule, Rule)
self.assertIsInstance(parsed.to_rule.to_nonterms[0], Terminal)
self.assertIsInstance(parsed.to_rule.to_nonterms[1], Terminal)
def test_shouldParseCorrectSymbols(self):
parsed = cyk(self.g, [0, 1])
self.assertEqual(parsed.to_rule.to_nonterms[0].s, 0)
self.assertEqual(parsed.to_rule.to_nonterms[1].s, 1)
if __name__ == '__main__':
main()
|
|
f4812deb2bdba677d77b35ad3a97a9367166f5c2
|
polling_stations/apps/data_collection/management/commands/import_kingston.py
|
polling_stations/apps/data_collection/management/commands/import_kingston.py
|
"""
Imports Kingston
"""
import sys
from django.contrib.gis.geos import Point, GEOSGeometry
from data_collection.management.commands import BaseKamlImporter
class Command(BaseKamlImporter):
"""
Imports the Polling Station data from Kingston Council
"""
council_id = 'E09000021'
districts_name = 'Polling_districts.kmz'
stations_name = 'Polling_stations.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
point = Point(float(record.eastings), float(record.northings), srid=self.get_srid())
# split out address and postcode
address = record.location
address_parts = address.split(', ')
postcode = address_parts[-1]
del(address_parts[-1])
address = "\n".join(address_parts)
return {
'internal_council_id': record.polling_station_address,
'postcode': postcode,
'address': address,
'location': point
}
|
Add import script for Kingston
|
Add import script for Kingston
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations
|
Add import script for Kingston
|
"""
Imports Kingston
"""
import sys
from django.contrib.gis.geos import Point, GEOSGeometry
from data_collection.management.commands import BaseKamlImporter
class Command(BaseKamlImporter):
"""
Imports the Polling Station data from Kingston Council
"""
council_id = 'E09000021'
districts_name = 'Polling_districts.kmz'
stations_name = 'Polling_stations.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
point = Point(float(record.eastings), float(record.northings), srid=self.get_srid())
# split out address and postcode
address = record.location
address_parts = address.split(', ')
postcode = address_parts[-1]
del(address_parts[-1])
address = "\n".join(address_parts)
return {
'internal_council_id': record.polling_station_address,
'postcode': postcode,
'address': address,
'location': point
}
|
<commit_before><commit_msg>Add import script for Kingston<commit_after>
|
"""
Imports Kingston
"""
import sys
from django.contrib.gis.geos import Point, GEOSGeometry
from data_collection.management.commands import BaseKamlImporter
class Command(BaseKamlImporter):
"""
Imports the Polling Station data from Kingston Council
"""
council_id = 'E09000021'
districts_name = 'Polling_districts.kmz'
stations_name = 'Polling_stations.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
point = Point(float(record.eastings), float(record.northings), srid=self.get_srid())
# split out address and postcode
address = record.location
address_parts = address.split(', ')
postcode = address_parts[-1]
del(address_parts[-1])
address = "\n".join(address_parts)
return {
'internal_council_id': record.polling_station_address,
'postcode': postcode,
'address': address,
'location': point
}
|
Add import script for Kingston"""
Imports Kingston
"""
import sys
from django.contrib.gis.geos import Point, GEOSGeometry
from data_collection.management.commands import BaseKamlImporter
class Command(BaseKamlImporter):
"""
Imports the Polling Station data from Kingston Council
"""
council_id = 'E09000021'
districts_name = 'Polling_districts.kmz'
stations_name = 'Polling_stations.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
point = Point(float(record.eastings), float(record.northings), srid=self.get_srid())
# split out address and postcode
address = record.location
address_parts = address.split(', ')
postcode = address_parts[-1]
del(address_parts[-1])
address = "\n".join(address_parts)
return {
'internal_council_id': record.polling_station_address,
'postcode': postcode,
'address': address,
'location': point
}
|
<commit_before><commit_msg>Add import script for Kingston<commit_after>"""
Imports Kingston
"""
import sys
from django.contrib.gis.geos import Point, GEOSGeometry
from data_collection.management.commands import BaseKamlImporter
class Command(BaseKamlImporter):
"""
Imports the Polling Station data from Kingston Council
"""
council_id = 'E09000021'
districts_name = 'Polling_districts.kmz'
stations_name = 'Polling_stations.csv'
csv_encoding = 'latin-1'
def station_record_to_dict(self, record):
point = Point(float(record.eastings), float(record.northings), srid=self.get_srid())
# split out address and postcode
address = record.location
address_parts = address.split(', ')
postcode = address_parts[-1]
del(address_parts[-1])
address = "\n".join(address_parts)
return {
'internal_council_id': record.polling_station_address,
'postcode': postcode,
'address': address,
'location': point
}
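A quick editorial illustration of the address/postcode split performed by station_record_to_dict above, using an invented location string rather than a value from the council CSV:
location = "Guildhall, 1 Example Road, Kingston upon Thames, KT1 1EU"   # hypothetical input
address_parts = location.split(', ')
postcode = address_parts[-1]                 # 'KT1 1EU'
address = "\n".join(address_parts[:-1])      # remaining parts joined as a multi-line address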
|
|
35d34a9fe3bb12cc42ed9c91331687eff883a378
|
remove-deprecated-json.py
|
remove-deprecated-json.py
|
#!/usr/bin/python
import argparse
import json
import sys
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
'infile',
type=argparse.FileType('r'),
help='01.org style json to remove deprecated events from')
ap.add_argument(
'outfile',
nargs='?',
type=argparse.FileType('w'),
help='Generated file name',
default=sys.stdout)
args = ap.parse_args()
args.outfile.write(
json.dumps([
x for x in json.load(args.infile)
if 'Deprecated' not in x or x['Deprecated'] != '1'
], sort_keys=True, indent=4, separators=(',', ': ')))
if __name__ == '__main__':
main()
|
Add tool to create a json with deprecated events removed
|
Add tool to create a json with deprecated events removed
The new tool takes an input json filename and an optional output
filename (default is stdout) and writes the input to the output removing
any events that have a "Deprecated":"1".
|
Python
|
bsd-3-clause
|
intel/event-converter-for-linux-perf,intel/event-converter-for-linux-perf
|
Add tool to create a json with deprecated events removed
The new tool takes an input json filename and an optional output
filename (default is stdout) and writes the input to the output removing
any events that have a "Deprecated":"1".
|
#!/usr/bin/python
import argparse
import json
import sys
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
'infile',
type=argparse.FileType('r'),
help='01.org style json to remove deprecated events from')
ap.add_argument(
'outfile',
nargs='?',
type=argparse.FileType('w'),
help='Generated file name',
default=sys.stdout)
args = ap.parse_args()
args.outfile.write(
json.dumps([
x for x in json.load(args.infile)
if 'Deprecated' not in x or x['Deprecated'] != '1'
], sort_keys=True, indent=4, separators=(',', ': ')))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to create a json with deprecated events removed
The new tool takes an input json filename and an optional output
filename (default is stdout) and writes the input to the output removing
any events that have a "Deprecated":"1".<commit_after>
|
#!/usr/bin/python
import argparse
import json
import sys
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
'infile',
type=argparse.FileType('r'),
help='01.org style json to remove deprecated events from')
ap.add_argument(
'outfile',
nargs='?',
type=argparse.FileType('w'),
help='Generated file name',
default=sys.stdout)
args = ap.parse_args()
args.outfile.write(
json.dumps([
x for x in json.load(args.infile)
if 'Deprecated' not in x or x['Deprecated'] != '1'
], sort_keys=True, indent=4, separators=(',', ': ')))
if __name__ == '__main__':
main()
|
Add tool to create a json with deprecated events removed
The new tool takes an input json filename and an optional output
filename (default is stdout) and writes the input to the output removing
any events that have a "Deprecated":"1".#!/usr/bin/python
import argparse
import json
import sys
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
'infile',
type=argparse.FileType('r'),
help='01.org style json to remove deprecated events from')
ap.add_argument(
'outfile',
nargs='?',
type=argparse.FileType('w'),
help='Generated file name',
default=sys.stdout)
args = ap.parse_args()
args.outfile.write(
json.dumps([
x for x in json.load(args.infile)
if 'Deprecated' not in x or x['Deprecated'] != '1'
], sort_keys=True, indent=4, separators=(',', ': ')))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add tool to create a json with deprecated events removed
The new tool takes an input json filename and an optional output
filename (default is stdout) and writes the input to the output removing
any events that have a "Deprecated":"1".<commit_after>#!/usr/bin/python
import argparse
import json
import sys
def main():
ap = argparse.ArgumentParser()
ap.add_argument(
'infile',
type=argparse.FileType('r'),
help='01.org style json to remove deprecated events from')
ap.add_argument(
'outfile',
nargs='?',
type=argparse.FileType('w'),
help='Generated file name',
default=sys.stdout)
args = ap.parse_args()
args.outfile.write(
json.dumps([
x for x in json.load(args.infile)
if 'Deprecated' not in x or x['Deprecated'] != '1'
], sort_keys=True, indent=4, separators=(',', ': ')))
if __name__ == '__main__':
main()
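To make the filtering rule concrete, here is a short editorial sketch of the same list comprehension applied to hypothetical event entries (the event names are invented, not taken from the 01.org files):
import json
events = [
    {"EventName": "OLD_EVENT", "Deprecated": "1"},    # dropped: explicitly deprecated
    {"EventName": "NEW_EVENT"},                       # kept: no Deprecated key
    {"EventName": "OTHER_EVENT", "Deprecated": "0"},  # kept: not flagged with "1"
]
kept = [x for x in events if 'Deprecated' not in x or x['Deprecated'] != '1']
print(json.dumps(kept, sort_keys=True, indent=4, separators=(',', ': ')))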
|
|
47f6859fa11f7cc02f340e96567a0a4a78310b42
|
whats_fresh/whats_fresh_api/migrations/0002_auto_20141120_2246.py
|
whats_fresh/whats_fresh_api/migrations/0002_auto_20141120_2246.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.contrib.auth.models import Group
def create_group(apps, schema_editor):
if Group.objects.filter(name='Data Entry Users').exists():
return
else:
group = Group.objects.create(name='Data Entry Users')
group.save()
return
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0001_initial'),
]
operations = [
migrations.RunPython(create_group)
]
|
Add default group data migration
|
Add default group data migration
refs #17433
|
Python
|
apache-2.0
|
osu-cass/whats-fresh-api,osu-cass/whats-fresh-api,osu-cass/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,iCHAIT/whats-fresh-api,osu-cass/whats-fresh-api
|
Add default group data migration
refs #17433
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.contrib.auth.models import Group
def create_group(apps, schema_editor):
if Group.objects.filter(name='Data Entry Users').exists():
return
else:
group = Group.objects.create(name='Data Entry Users')
group.save()
return
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0001_initial'),
]
operations = [
migrations.RunPython(create_group)
]
|
<commit_before><commit_msg>Add default group data migration
refs #17433<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.contrib.auth.models import Group
def create_group(apps, schema_editor):
if Group.objects.filter(name='Data Entry Users').exists():
return
else:
group = Group.objects.create(name='Data Entry Users')
group.save()
return
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0001_initial'),
]
operations = [
migrations.RunPython(create_group)
]
|
Add default group data migration
refs #17433# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.contrib.auth.models import Group
def create_group(apps, schema_editor):
if Group.objects.filter(name='Data Entry Users').exists():
return
else:
group = Group.objects.create(name='Data Entry Users')
group.save()
return
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0001_initial'),
]
operations = [
migrations.RunPython(create_group)
]
|
<commit_before><commit_msg>Add default group data migration
refs #17433<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.contrib.auth.models import Group
def create_group(apps, schema_editor):
if Group.objects.filter(name='Data Entry Users').exists():
return
else:
group = Group.objects.create(name='Data Entry Users')
group.save()
return
class Migration(migrations.Migration):
dependencies = [
('whats_fresh_api', '0001_initial'),
]
operations = [
migrations.RunPython(create_group)
]
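A possible refinement that is not part of the commit above: pairing create_group with a reverse function so the data migration can also be unapplied. A hedged sketch, reusing the Group import already present in the migration module:
def remove_group(apps, schema_editor):
    # reverse of create_group: drop the group again if it exists
    Group.objects.filter(name='Data Entry Users').delete()
# the operation would then read:
#     migrations.RunPython(create_group, remove_group)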
|
|
ac7a9cc262890976cefd0c83551f6ea7e2a0e6a3
|
tests/test_endpoints_check.py
|
tests/test_endpoints_check.py
|
# -*- coding: utf-8 -*-
import os
import sshrc.endpoints.check
def test_mainfunc_ok(cliargs_default, templater, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
result = main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(cliargs_default, templater, mock_get_content):
mock_get_content.side_effect = Exception
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
assert main() != os.EX_OK
|
Add unittests for check app
|
Add unittests for check app
|
Python
|
mit
|
9seconds/concierge,9seconds/sshrc
|
Add unittests for check app
|
# -*- coding: utf-8 -*-
import os
import sshrc.endpoints.check
def test_mainfunc_ok(cliargs_default, templater, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
result = main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(cliargs_default, templater, mock_get_content):
mock_get_content.side_effect = Exception
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
assert main() != os.EX_OK
|
<commit_before><commit_msg>Add unittests for check app<commit_after>
|
# -*- coding: utf-8 -*-
import os
import sshrc.endpoints.check
def test_mainfunc_ok(cliargs_default, templater, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
result = main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(cliargs_default, templater, mock_get_content):
mock_get_content.side_effect = Exception
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
assert main() != os.EX_OK
|
Add unittests for check app# -*- coding: utf-8 -*-
import os
import sshrc.endpoints.check
def test_mainfunc_ok(cliargs_default, templater, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
result = main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(cliargs_default, templater, mock_get_content):
mock_get_content.side_effect = Exception
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
assert main() != os.EX_OK
|
<commit_before><commit_msg>Add unittests for check app<commit_after># -*- coding: utf-8 -*-
import os
import sshrc.endpoints.check
def test_mainfunc_ok(cliargs_default, templater, mock_get_content):
mock_get_content.return_value = """\
Compression yes
Host q
HostName e
Host b
HostName lalala
"""
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
result = main()
assert result is None or result == os.EX_OK
def test_mainfunc_exception(cliargs_default, templater, mock_get_content):
mock_get_content.side_effect = Exception
main = sshrc.endpoints.common.main(sshrc.endpoints.check.CheckApp)
assert main() != os.EX_OK
|
|
2cf1b0c8080864a46ede994ece3211c1a4a35db6
|
tools/testr_to_stestr.py
|
tools/testr_to_stestr.py
|
#!/usr/bin/env python
import os
import sys
import six
if not os.path.isfile('.testr.conf'):
print("Testr config file not found")
sys.exit(1)
testr_conf_file = open('.testr.conf', 'r')
config = six.moves.configparser.ConfigParser()
config.readfp(testr_conf_file)
test_command = config.get('DEFAULT', 'test_command')
group_regex = None
if config.has_option('DEFAULT', 'group_regex'):
group_regex = config.get('DEFAULT', 'group_regex')
top_dir = None
test_dir = None
for line in test_command.split('\n'):
if 'subunit.run discover' in line:
command_parts = line.split(' ')
top_dir_present = '-t' in line
for idx, val in enumerate(command_parts):
if top_dir_present:
if val == '-t':
top_dir = command_parts[idx + 1]
test_dir = command_parts[idx + 2]
else:
if val == 'discover':
test_dir = command_parts[idx + 2]
stestr_conf_file = open('.stestr.conf', 'w')
stestr_conf_file.write('[DEFAULT]\n')
stestr_conf_file.write('test_path=%s\n' % test_dir)
if top_dir:
stestr_conf_file.write('top_dir=%s\n' % top_dir)
if group_regex:
stestr_conf_file.write('group_regex=%s\n' % group_regex)
stestr_conf_file.close()
|
Add tool to convert testr.conf to stestr.conf
|
Add tool to convert testr.conf to stestr.conf
Since stestr isn't strictly backwards compatible with testr user
intervention will be required to start using stestr as a replacement for
testrepository. This commit adds a tools directory and populates it with
a naive script that attempts to automate creating a .stestr.conf file
from the contents of a pre-existing .testr.conf.
|
Python
|
apache-2.0
|
masayukig/stestr,mtreinish/stestr,mtreinish/stestr,masayukig/stestr
|
Add tool to convert testr.conf to stestr.conf
Since stestr isn't strictly backwards compatible with testr user
intervention will be required to start using stestr as a replacement for
testrepository. This commit adds a tools directory and populates it with
a naive script that attempts to automate creating a .stestr.conf file
from the contents of a pre-existing .testr.conf.
|
#!/usr/bin/env python
import os
import sys
import six
if not os.path.isfile('.testr.conf'):
print("Testr config file not found")
sys.exit(1)
testr_conf_file = open('.testr.conf', 'r')
config = six.moves.configparser.ConfigParser()
config.readfp(testr_conf_file)
test_command = config.get('DEFAULT', 'test_command')
group_regex = None
if config.has_option('DEFAULT', 'group_regex'):
group_regex = config.get('DEFAULT', 'group_regex')
top_dir = None
test_dir = None
for line in test_command.split('\n'):
if 'subunit.run discover' in line:
command_parts = line.split(' ')
top_dir_present = '-t' in line
for idx, val in enumerate(command_parts):
if top_dir_present:
if val == '-t':
top_dir = command_parts[idx + 1]
test_dir = command_parts[idx + 2]
else:
if val == 'discover':
test_dir = command_parts[idx + 2]
stestr_conf_file = open('.stestr.conf', 'w')
stestr_conf_file.write('[DEFAULT]\n')
stestr_conf_file.write('test_path=%s\n' % test_dir)
if top_dir:
stestr_conf_file.write('top_dir=%s\n' % top_dir)
if group_regex:
stestr_conf_file.write('group_regex=%s\n' % group_regex)
stestr_conf_file.close()
|
<commit_before><commit_msg>Add tool to convert testr.conf to stestr.conf
Since stestr isn't strictly backwards compatible with testr user
intervention will be required to start using stestr as a replacement for
testrepository. This commit adds a tools directory and populates it with
a naive script that attempts to automate creating a .stestr.conf file
from the contents of a pre-existing .testr.conf.<commit_after>
|
#!/usr/bin/env python
import os
import sys
import six
if not os.path.isfile('.testr.conf'):
print("Testr config file not found")
sys.exit(1)
testr_conf_file = open('.testr.conf', 'r')
config = six.moves.configparser.ConfigParser()
config.readfp(testr_conf_file)
test_command = config.get('DEFAULT', 'test_command')
group_regex = None
if config.has_option('DEFAULT', 'group_regex'):
group_regex = config.get('DEFAULT', 'group_regex')
top_dir = None
test_dir = None
for line in test_command.split('\n'):
if 'subunit.run discover' in line:
command_parts = line.split(' ')
top_dir_present = '-t' in line
for idx, val in enumerate(command_parts):
if top_dir_present:
if val == '-t':
top_dir = command_parts[idx + 1]
test_dir = command_parts[idx + 2]
else:
if val == 'discover':
test_dir = command_parts[idx + 2]
stestr_conf_file = open('.stestr.conf', 'w')
stestr_conf_file.write('[DEFAULT]\n')
stestr_conf_file.write('test_path=%s\n' % test_dir)
if top_dir:
stestr_conf_file.write('top_dir=%s\n' % top_dir)
if group_regex:
stestr_conf_file.write('group_regex=%s\n' % group_regex)
stestr_conf_file.close()
|
Add tool to convert testr.conf to stestr.conf
Since stestr isn't strictly backwards compatible with testr user
intervention will be required to start using stestr as a replacement for
testrepository. This commit adds a tools directory and populates it with
a naive script that attempts to automate creating a .stestr.conf file
from the contents of a pre-existing .testr.conf.#!/usr/bin/env python
import os
import sys
import six
if not os.path.isfile('.testr.conf'):
print("Testr config file not found")
sys.exit(1)
testr_conf_file = open('.testr.conf', 'r')
config = six.moves.configparser.ConfigParser()
config.readfp(testr_conf_file)
test_command = config.get('DEFAULT', 'test_command')
group_regex = None
if config.has_option('DEFAULT', 'group_regex'):
group_regex = config.get('DEFAULT', 'group_regex')
top_dir = None
test_dir = None
for line in test_command.split('\n'):
if 'subunit.run discover' in line:
command_parts = line.split(' ')
top_dir_present = '-t' in line
for idx, val in enumerate(command_parts):
if top_dir_present:
if val == '-t':
top_dir = command_parts[idx + 1]
test_dir = command_parts[idx + 2]
else:
if val == 'discover':
test_dir = command_parts[idx + 2]
stestr_conf_file = open('.stestr.conf', 'w')
stestr_conf_file.write('[DEFAULT]\n')
stestr_conf_file.write('test_path=%s\n' % test_dir)
if top_dir:
stestr_conf_file.write('top_dir=%s\n' % top_dir)
if group_regex:
stestr_conf_file.write('group_regex=%s\n' % group_regex)
stestr_conf_file.close()
|
<commit_before><commit_msg>Add tool to convert testr.conf to stestr.conf
Since stestr isn't strictly backwards compatible with testr user
intervention will be required to start using stestr as a replacement for
testrepository. This commit adds a tools directory and populates it with
a naive script that attempts to automate creating a .stestr.conf file
from the contents of a pre-existing .testr.conf.<commit_after>#!/usr/bin/env python
import os
import sys
import six
if not os.path.isfile('.testr.conf'):
print("Testr config file not found")
sys.exit(1)
testr_conf_file = open('.testr.conf', 'r')
config = six.moves.configparser.ConfigParser()
config.readfp(testr_conf_file)
test_command = config.get('DEFAULT', 'test_command')
group_regex = None
if config.has_option('DEFAULT', 'group_regex'):
group_regex = config.get('DEFAULT', 'group_regex')
top_dir = None
test_dir = None
for line in test_command.split('\n'):
if 'subunit.run discover' in line:
command_parts = line.split(' ')
top_dir_present = '-t' in line
for idx, val in enumerate(command_parts):
if top_dir_present:
if val == '-t':
top_dir = command_parts[idx + 1]
test_dir = command_parts[idx + 2]
else:
if val == 'discover':
test_dir = command_parts[idx + 2]
stestr_conf_file = open('.stestr.conf', 'w')
stestr_conf_file.write('[DEFAULT]\n')
stestr_conf_file.write('test_path=%s\n' % test_dir)
if top_dir:
stestr_conf_file.write('top_dir=%s\n' % top_dir)
if group_regex:
stestr_conf_file.write('group_regex=%s\n' % group_regex)
stestr_conf_file.close()
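For orientation, an editorial example of what the conversion produces for a typical test_command line of the 'discover -t' form; the paths are illustrative only:
line = "${PYTHON:-python} -m subunit.run discover -t ./ ./myproject/tests $LISTOPT $IDOPTION"
parts = line.split(' ')
idx = parts.index('-t')
top_dir, test_dir = parts[idx + 1], parts[idx + 2]   # './', './myproject/tests'
# the generated .stestr.conf would then contain:
#     [DEFAULT]
#     test_path=./myproject/tests
#     top_dir=./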
|
|
405c3c3a0264d158a878934d6e23078d250aec57
|
hash.py
|
hash.py
|
class HashItem(object):
def __init__(self, key, value):
self.key = key
self.value = value
class HashTable(object):
def __init__(self, tablesize=1024):
self.table = []
for i in range(tablesize):
self.table.append(tuple())
t = HashTable(100)
t.table[5] = ('hash', 'hash')
print t.table
|
Initialize Table and Hash item
|
Initialize Table and Hash item
|
Python
|
mit
|
nbeck90/data_structures_2
|
Initialize Table and Hash item
|
class HashItem(object):
def __init__(self, key, value):
self.key = key
self.value = value
class HashTable(object):
def __init__(self, tablesize=1024):
self.table = []
for i in range(tablesize):
self.table.append(tuple())
t = HashTable(100)
t.table[5] = ('hash', 'hash')
print t.table
|
<commit_before><commit_msg>Initialize Table and Hash item<commit_after>
|
class HashItem(object):
def __init__(self, key, value):
self.key = key
self.value = value
class HashTable(object):
def __init__(self, tablesize=1024):
self.table = []
for i in range(tablesize):
self.table.append(tuple())
t = HashTable(100)
t.table[5] = ('hash', 'hash')
print t.table
|
Initialize Table and Hash itemclass HashItem(object):
def __init__(self, key, value):
self.key = key
self.value = value
class HashTable(object):
def __init__(self, tablesize=1024):
self.table = []
for i in range(tablesize):
self.table.append(tuple())
t = HashTable(100)
t.table[5] = ('hash', 'hash')
print t.table
|
<commit_before><commit_msg>Initialize Table and Hash item<commit_after>class HashItem(object):
def __init__(self, key, value):
self.key = key
self.value = value
class HashTable(object):
def __init__(self, tablesize=1024):
self.table = []
for i in range(tablesize):
self.table.append(tuple())
t = HashTable(100)
t.table[5] = ('hash', 'hash')
print t.table
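The commit above only initializes the table; as a hedged sketch of the next step (not present in the commit), setting an item would normally pick a bucket with modulo hashing:
def bucket_index(key, tablesize=1024):
    # map a key to a slot in the table using the usual modulo scheme
    return hash(key) % tablesize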
|
|
af28c1449cf525460de16304c231616873b2ca3d
|
tests/test_memory_leak.py
|
tests/test_memory_leak.py
|
import resource
import pytest
from .models import TestModel as DirtyMixinModel
pytestmark = pytest.mark.django_db
def test_rss_usage():
DirtyMixinModel()
rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
for _ in range(1000):
DirtyMixinModel()
rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
assert rss_2 == rss_1, 'There is a memory leak!'
|
Add test for detecting memory leaks
|
Add test for detecting memory leaks
|
Python
|
bsd-3-clause
|
romgar/django-dirtyfields,smn/django-dirtyfields
|
Add test for detecting memory leaks
|
import resource
import pytest
from .models import TestModel as DirtyMixinModel
pytestmark = pytest.mark.django_db
def test_rss_usage():
DirtyMixinModel()
rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
for _ in range(1000):
DirtyMixinModel()
rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
assert rss_2 == rss_1, 'There is a memory leak!'
|
<commit_before><commit_msg>Add test for detecting memory leaks<commit_after>
|
import resource
import pytest
from .models import TestModel as DirtyMixinModel
pytestmark = pytest.mark.django_db
def test_rss_usage():
DirtyMixinModel()
rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
for _ in range(1000):
DirtyMixinModel()
rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
assert rss_2 == rss_1, 'There is a memory leak!'
|
Add test for detecting memory leaksimport resource
import pytest
from .models import TestModel as DirtyMixinModel
pytestmark = pytest.mark.django_db
def test_rss_usage():
DirtyMixinModel()
rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
for _ in range(1000):
DirtyMixinModel()
rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
assert rss_2 == rss_1, 'There is a memory leak!'
|
<commit_before><commit_msg>Add test for detecting memory leaks<commit_after>import resource
import pytest
from .models import TestModel as DirtyMixinModel
pytestmark = pytest.mark.django_db
def test_rss_usage():
DirtyMixinModel()
rss_1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
for _ in range(1000):
DirtyMixinModel()
rss_2 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
assert rss_2 == rss_1, 'There is a memory leak!'
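An editorial portability note on the test above: ru_maxrss is reported in kilobytes on Linux but in bytes on macOS, so any variant of this check that compares against an absolute size needs a platform-dependent scale, for example:
import resource
import sys
rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
rss_kb = rss // 1024 if sys.platform == 'darwin' else rss   # normalize to kilobytes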
|
|
28af79e1f4362db812247911b3cda1831bed75bf
|
utils/result_analysis.py
|
utils/result_analysis.py
|
__author__ = 'quinnosha'
"""Tools for viewing and analyzing prediction results
.. moduleauthor:: Quinn Osha
"""
from os.path import abspath, dirname, join
from utils.data_paths import RESULTS_DIR_PATH
def find_lowest_rmse(rmse_file_name):
rmse_file_path = join(RESULTS_DIR_PATH, rmse_file_name)
read_format = 'r'
rmse_values = []
with open(rmse_file_path, read_format) as rmse_file:
for line in rmse_file:
rmse_value = line.strip();
rmse_values.append(rmse_value)
return min(rmse_values)
if __name__ == '__main__':
rmse_file_name = 'svd_base_8epochs_100features__rmse_valid_Apr-26-12h-43m.txt'
lowest_rmse = find_lowest_rmse(rmse_file_name)
print(lowest_rmse)
|
Add script to find lowest rmse result values
|
Add script to find lowest rmse result values
|
Python
|
mit
|
jvanbrug/netflix,jvanbrug/netflix
|
Add script to find lowest rmse result values
|
__author__ = 'quinnosha'
"""Tools for viewing and analyzing prediction results
.. moduleauthor:: Quinn Osha
"""
from os.path import abspath, dirname, join
from utils.data_paths import RESULTS_DIR_PATH
def find_lowest_rmse(rmse_file_name):
rmse_file_path = join(RESULTS_DIR_PATH, rmse_file_name)
read_format = 'r'
rmse_values = []
with open(rmse_file_path, read_format) as rmse_file:
for line in rmse_file:
rmse_value = line.strip();
rmse_values.append(rmse_value)
return min(rmse_values)
if __name__ == '__main__':
rmse_file_name = 'svd_base_8epochs_100features__rmse_valid_Apr-26-12h-43m.txt'
lowest_rmse = find_lowest_rmse(rmse_file_name)
print(lowest_rmse)
|
<commit_before><commit_msg>Add script to find lowest rmse result values<commit_after>
|
__author__ = 'quinnosha'
"""Tools for viewing and analyzing prediction results
.. moduleauthor:: Quinn Osha
"""
from os.path import abspath, dirname, join
from utils.data_paths import RESULTS_DIR_PATH
def find_lowest_rmse(rmse_file_name):
rmse_file_path = join(RESULTS_DIR_PATH, rmse_file_name)
read_format = 'r'
rmse_values = []
with open(rmse_file_path, read_format) as rmse_file:
for line in rmse_file:
rmse_value = line.strip();
rmse_values.append(rmse_value)
return min(rmse_values)
if __name__ == '__main__':
rmse_file_name = 'svd_base_8epochs_100features__rmse_valid_Apr-26-12h-43m.txt'
lowest_rmse = find_lowest_rmse(rmse_file_name)
print(lowest_rmse)
|
Add script to find lowest rmse result values__author__ = 'quinnosha'
"""Tools for viewing and analyzing prediction results
.. moduleauthor:: Quinn Osha
"""
from os.path import abspath, dirname, join
from utils.data_paths import RESULTS_DIR_PATH
def find_lowest_rmse(rmse_file_name):
rmse_file_path = join(RESULTS_DIR_PATH, rmse_file_name)
read_format = 'r'
rmse_values = []
with open(rmse_file_path, read_format) as rmse_file:
for line in rmse_file:
rmse_value = line.strip();
rmse_values.append(rmse_value)
return min(rmse_values)
if __name__ == '__main__':
rmse_file_name = 'svd_base_8epochs_100features__rmse_valid_Apr-26-12h-43m.txt'
lowest_rmse = find_lowest_rmse(rmse_file_name)
print(lowest_rmse)
|
<commit_before><commit_msg>Add script to find lowest rmse result values<commit_after>__author__ = 'quinnosha'
"""Tools for viewing and analyzing prediction results
.. moduleauthor:: Quinn Osha
"""
from os.path import abspath, dirname, join
from utils.data_paths import RESULTS_DIR_PATH
def find_lowest_rmse(rmse_file_name):
rmse_file_path = join(RESULTS_DIR_PATH, rmse_file_name)
read_format = 'r'
rmse_values = []
with open(rmse_file_path, read_format) as rmse_file:
for line in rmse_file:
rmse_value = line.strip();
rmse_values.append(rmse_value)
return min(rmse_values)
if __name__ == '__main__':
rmse_file_name = 'svd_base_8epochs_100features__rmse_valid_Apr-26-12h-43m.txt'
lowest_rmse = find_lowest_rmse(rmse_file_name)
print(lowest_rmse)
|
|
203fceedd93a56032f7b78154620f4da89cb4768
|
sequana/vcf_filter.py
|
sequana/vcf_filter.py
|
"""
Python script to filter a vcf-file generated by freebayes.
"""
import sys
import vcf
class VCF(vcf.Reader):
def __init__(self, filename, **kwargs):
"""
Filter vcf file with a dictionary.
It takes a vcf file as entry.
"""
try:
filin = open(filename, "r")
vcf.Reader.__init__(self, fsock=filin, **kwargs)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
def _calcul_freq(self, vcf_line):
alt_freq = []
for count in vcf_line.INFO["AO"]:
alt_freq.append(count/vcf_line.INFO["DP"])
return alt_freq
def _filter_info_field(self, info_value, treshold):
if(treshold.startswith("<")):
if(info_value < int(treshold[1:])):
return False
else:
if(info_value > int(treshold[1:])):
return False
return True
def _filter_line(self, vcf_line, filter_dict):
if(vcf_line.QUAL < filter_dict["QUAL"]):
return False
alt_freq = self._calcul_freq(vcf_line)
if(alt_freq[0] < filter_dict["FREQ"]):
return False
for key, value in filter_dict["INFO"].items():
if(type(vcf_line.INFO[key]) != list):
if(self._filter_info_field(vcf_line.INFO[key], value)):
return False
else:
if(self._filter_info_field(vcf_line.INFO[key][0], value)):
return False
return True
def filter_vcf(self, filter_dict, output):
"""
Read the vcf file and write the filter vcf file.
"""
with open(output, "w") as fp:
vcf_writer = vcf.Writer(fp, self)
for variant in self:
if(self._filter_line(variant, filter_dict)):
vcf_writer.write_record(variant)
|
Add script to filter VCF.
|
Add script to filter VCF.
|
Python
|
bsd-3-clause
|
sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana
|
Add script to filter VCF.
|
"""
Python script to filter a vcf-file generated by freebayes.
"""
import sys
import vcf
class VCF(vcf.Reader):
def __init__(self, filename, **kwargs):
"""
Filter vcf file with a dictionary.
It takes a vcf file as entry.
"""
try:
filin = open(filename, "r")
vcf.Reader.__init__(self, fsock=filin, **kwargs)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
def _calcul_freq(self, vcf_line):
alt_freq = []
for count in vcf_line.INFO["AO"]:
alt_freq.append(count/vcf_line.INFO["DP"])
return alt_freq
def _filter_info_field(self, info_value, treshold):
if(treshold.startswith("<")):
if(info_value < int(treshold[1:])):
return False
else:
if(info_value > int(treshold[1:])):
return False
return True
def _filter_line(self, vcf_line, filter_dict):
if(vcf_line.QUAL < filter_dict["QUAL"]):
return False
alt_freq = self._calcul_freq(vcf_line)
if(alt_freq[0] < filter_dict["FREQ"]):
return False
for key, value in filter_dict["INFO"].items():
if(type(vcf_line.INFO[key]) != list):
if(self._filter_info_field(vcf_line.INFO[key], value)):
return False
else:
if(self._filter_info_field(vcf_line.INFO[key][0], value)):
return False
return True
def filter_vcf(self, filter_dict, output):
"""
Read the vcf file and write the filter vcf file.
"""
with open(output, "w") as fp:
vcf_writer = vcf.Writer(fp, self)
for variant in self:
if(self._filter_line(variant, filter_dict)):
vcf_writer.write_record(variant)
|
<commit_before><commit_msg>Add script to filter VCF.<commit_after>
|
"""
Python script to filter a vcf-file generated by freebayes.
"""
import sys
import vcf
class VCF(vcf.Reader):
def __init__(self, filename, **kwargs):
"""
Filter vcf file with a dictionary.
It takes a vcf file as entry.
"""
try:
filin = open(filename, "r")
vcf.Reader.__init__(self, fsock=filin, **kwargs)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
def _calcul_freq(self, vcf_line):
alt_freq = []
for count in vcf_line.INFO["AO"]:
alt_freq.append(count/vcf_line.INFO["DP"])
return alt_freq
def _filter_info_field(self, info_value, treshold):
if(treshold.startswith("<")):
if(info_value < int(treshold[1:])):
return False
else:
if(info_value > int(treshold[1:])):
return False
return True
def _filter_line(self, vcf_line, filter_dict):
if(vcf_line.QUAL < filter_dict["QUAL"]):
return False
alt_freq = self._calcul_freq(vcf_line)
if(alt_freq[0] < filter_dict["FREQ"]):
return False
for key, value in filter_dict["INFO"].items():
if(type(vcf_line.INFO[key]) != list):
if(self._filter_info_field(vcf_line.INFO[key], value)):
return False
else:
if(self._filter_info_field(vcf_line.INFO[key][0], value)):
return False
return True
def filter_vcf(self, filter_dict, output):
"""
Read the vcf file and write the filter vcf file.
"""
with open(output, "w") as fp:
vcf_writer = vcf.Writer(fp, self)
for variant in self:
if(self._filter_line(variant, filter_dict)):
vcf_writer.write_record(variant)
|
Add script to filter VCF."""
Python script to filter a vcf-file generated by freebayes.
"""
import sys
import vcf
class VCF(vcf.Reader):
def __init__(self, filename, **kwargs):
"""
Filter vcf file with a dictionary.
It takes a vcf file as entry.
"""
try:
filin = open(filename, "r")
vcf.Reader.__init__(self, fsock=filin, **kwargs)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
def _calcul_freq(self, vcf_line):
alt_freq = []
for count in vcf_line.INFO["AO"]:
alt_freq.append(count/vcf_line.INFO["DP"])
return alt_freq
def _filter_info_field(self, info_value, treshold):
if(treshold.startswith("<")):
if(info_value < int(treshold[1:])):
return False
else:
if(info_value > int(treshold[1:])):
return False
return True
def _filter_line(self, vcf_line, filter_dict):
if(vcf_line.QUAL < filter_dict["QUAL"]):
return False
alt_freq = self._calcul_freq(vcf_line)
if(alt_freq[0] < filter_dict["FREQ"]):
return False
for key, value in filter_dict["INFO"].items():
if(type(vcf_line.INFO[key]) != list):
if(self._filter_info_field(vcf_line.INFO[key], value)):
return False
else:
if(self._filter_info_field(vcf_line.INFO[key][0], value)):
return False
return True
def filter_vcf(self, filter_dict, output):
"""
Read the vcf file and write the filter vcf file.
"""
with open(output, "w") as fp:
vcf_writer = vcf.Writer(fp, self)
for variant in self:
if(self._filter_line(variant, filter_dict)):
vcf_writer.write_record(variant)
|
<commit_before><commit_msg>Add script to filter VCF.<commit_after>"""
Python script to filter a vcf-file generated by freebayes.
"""
import sys
import vcf
class VCF(vcf.Reader):
def __init__(self, filename, **kwargs):
"""
Filter vcf file with a dictionary.
It takes a vcf file as entry.
"""
try:
filin = open(filename, "r")
vcf.Reader.__init__(self, fsock=filin, **kwargs)
except IOError as e:
print("I/O error({0}): {1}".format(e.errno, e.strerror))
def _calcul_freq(self, vcf_line):
alt_freq = []
for count in vcf_line.INFO["AO"]:
alt_freq.append(count/vcf_line.INFO["DP"])
return alt_freq
def _filter_info_field(self, info_value, treshold):
if(treshold.startswith("<")):
if(info_value < int(treshold[1:])):
return False
else:
if(info_value > int(treshold[1:])):
return False
return True
def _filter_line(self, vcf_line, filter_dict):
if(vcf_line.QUAL < filter_dict["QUAL"]):
return False
alt_freq = self._calcul_freq(vcf_line)
if(alt_freq[0] < filter_dict["FREQ"]):
return False
for key, value in filter_dict["INFO"].items():
if(type(vcf_line.INFO[key]) != list):
if(self._filter_info_field(vcf_line.INFO[key], value)):
return False
else:
if(self._filter_info_field(vcf_line.INFO[key][0], value)):
return False
return True
def filter_vcf(self, filter_dict, output):
"""
Read the vcf file and write the filter vcf file.
"""
with open(output, "w") as fp:
vcf_writer = vcf.Writer(fp, self)
for variant in self:
if(self._filter_line(variant, filter_dict)):
vcf_writer.write_record(variant)
|
|
9b618c71bb7d8cc0d38c5cbafb40e9775205a544
|
fluent/tests/test_scanner.py
|
fluent/tests/test_scanner.py
|
from djangae.test import TestCase
from fluent.scanner import parse_file, DEFAULT_TRANSLATION_GROUP
TEST_CONTENT = """{% trans "Test trans string with group" group "public" %}
{% trans "Test trans string without group" %}
Regular string
{% blocktrans group "public" %}
Test trans block with group
{% endblocktrans %}
{% blocktrans %}
Test trans block without group
{% endblocktrans %}"""
class ScannerTests(TestCase):
def setUp(self):
pass
def test_basic_html_parsing(self):
results = parse_file(TEST_CONTENT, ".html")
expected = [
('Test trans string with group', '', '', 'public'),
('Test trans string without group', '', '', DEFAULT_TRANSLATION_GROUP),
('\nTest trans block with group\n', '', '', 'public'),
('\nTest trans block without group\n', '', '', DEFAULT_TRANSLATION_GROUP),
]
self.assertEqual(results, expected)
|
Add basic test for scanner parsing html block with specified groups correctly
|
Add basic test for scanner parsing html block with specified groups correctly
|
Python
|
mit
|
potatolondon/fluent-2.0,potatolondon/fluent-2.0
|
Add basic test for scanner parsing html block with specified groups correctly
|
from djangae.test import TestCase
from fluent.scanner import parse_file, DEFAULT_TRANSLATION_GROUP
TEST_CONTENT = """{% trans "Test trans string with group" group "public" %}
{% trans "Test trans string without group" %}
Regular string
{% blocktrans group "public" %}
Test trans block with group
{% endblocktrans %}
{% blocktrans %}
Test trans block without group
{% endblocktrans %}"""
class ScannerTests(TestCase):
def setUp(self):
pass
def test_basic_html_parsing(self):
results = parse_file(TEST_CONTENT, ".html")
expected = [
('Test trans string with group', '', '', 'public'),
('Test trans string without group', '', '', DEFAULT_TRANSLATION_GROUP),
('\nTest trans block with group\n', '', '', 'public'),
('\nTest trans block without group\n', '', '', DEFAULT_TRANSLATION_GROUP),
]
self.assertEqual(results, expected)
|
<commit_before><commit_msg>Add basic test for scanner parsing html block with specified groups correctly<commit_after>
|
from djangae.test import TestCase
from fluent.scanner import parse_file, DEFAULT_TRANSLATION_GROUP
TEST_CONTENT = """{% trans "Test trans string with group" group "public" %}
{% trans "Test trans string without group" %}
Regular string
{% blocktrans group "public" %}
Test trans block with group
{% endblocktrans %}
{% blocktrans %}
Test trans block without group
{% endblocktrans %}"""
class ScannerTests(TestCase):
def setUp(self):
pass
def test_basic_html_parsing(self):
results = parse_file(TEST_CONTENT, ".html")
expected = [
('Test trans string with group', '', '', 'public'),
('Test trans string without group', '', '', DEFAULT_TRANSLATION_GROUP),
('\nTest trans block with group\n', '', '', 'public'),
('\nTest trans block without group\n', '', '', DEFAULT_TRANSLATION_GROUP),
]
self.assertEqual(results, expected)
|
Add basic test for scanner parsing html block with specified groups correctlyfrom djangae.test import TestCase
from fluent.scanner import parse_file, DEFAULT_TRANSLATION_GROUP
TEST_CONTENT = """{% trans "Test trans string with group" group "public" %}
{% trans "Test trans string without group" %}
Regular string
{% blocktrans group "public" %}
Test trans block with group
{% endblocktrans %}
{% blocktrans %}
Test trans block without group
{% endblocktrans %}"""
class ScannerTests(TestCase):
def setUp(self):
pass
def test_basic_html_parsing(self):
results = parse_file(TEST_CONTENT, ".html")
expected = [
('Test trans string with group', '', '', 'public'),
('Test trans string without group', '', '', DEFAULT_TRANSLATION_GROUP),
('\nTest trans block with group\n', '', '', 'public'),
('\nTest trans block without group\n', '', '', DEFAULT_TRANSLATION_GROUP),
]
self.assertEqual(results, expected)
|
<commit_before><commit_msg>Add basic test for scanner parsing html block with specified groups correctly<commit_after>from djangae.test import TestCase
from fluent.scanner import parse_file, DEFAULT_TRANSLATION_GROUP
TEST_CONTENT = """{% trans "Test trans string with group" group "public" %}
{% trans "Test trans string without group" %}
Regular string
{% blocktrans group "public" %}
Test trans block with group
{% endblocktrans %}
{% blocktrans %}
Test trans block without group
{% endblocktrans %}"""
class ScannerTests(TestCase):
def setUp(self):
pass
def test_basic_html_parsing(self):
results = parse_file(TEST_CONTENT, ".html")
expected = [
('Test trans string with group', '', '', 'public'),
('Test trans string without group', '', '', DEFAULT_TRANSLATION_GROUP),
('\nTest trans block with group\n', '', '', 'public'),
('\nTest trans block without group\n', '', '', DEFAULT_TRANSLATION_GROUP),
]
self.assertEqual(results, expected)
|
|
1874d7c9a0da128ae640ad7bb2fc64a292369c0a
|
test/test_webservice.py
|
test/test_webservice.py
|
import unittest
from test_database import make_test_jobs
from memory_database import MemoryDatabase
from saliweb.backend import WebService, Config, Job
class WebServiceTest(unittest.TestCase):
"""Check WebService class"""
def test_init(self):
"""Check WebService init"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
def test_get_job_by_name(self):
"""Check WebService.get_job_by_name()"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
db.create_tables()
make_test_jobs(db.conn)
job = ws.get_job_by_name('RUNNING', 'job3')
self.assertEqual(job.name, 'job3')
job = ws.get_job_by_name('RUNNING', 'job9')
self.assertEqual(job, None)
if __name__ == '__main__':
unittest.main()
|
Add basic testing of the WebService class.
|
Add basic testing of the WebService class.
|
Python
|
lgpl-2.1
|
salilab/saliweb,salilab/saliweb,salilab/saliweb,salilab/saliweb,salilab/saliweb
|
Add basic testing of the WebService class.
|
import unittest
from test_database import make_test_jobs
from memory_database import MemoryDatabase
from saliweb.backend import WebService, Config, Job
class WebServiceTest(unittest.TestCase):
"""Check WebService class"""
def test_init(self):
"""Check WebService init"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
def test_get_job_by_name(self):
"""Check WebService.get_job_by_name()"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
db.create_tables()
make_test_jobs(db.conn)
job = ws.get_job_by_name('RUNNING', 'job3')
self.assertEqual(job.name, 'job3')
job = ws.get_job_by_name('RUNNING', 'job9')
self.assertEqual(job, None)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic testing of the WebService class.<commit_after>
|
import unittest
from test_database import make_test_jobs
from memory_database import MemoryDatabase
from saliweb.backend import WebService, Config, Job
class WebServiceTest(unittest.TestCase):
"""Check WebService class"""
def test_init(self):
"""Check WebService init"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
def test_get_job_by_name(self):
"""Check WebService.get_job_by_name()"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
db.create_tables()
make_test_jobs(db.conn)
job = ws.get_job_by_name('RUNNING', 'job3')
self.assertEqual(job.name, 'job3')
job = ws.get_job_by_name('RUNNING', 'job9')
self.assertEqual(job, None)
if __name__ == '__main__':
unittest.main()
|
Add basic testing of the WebService class.import unittest
from test_database import make_test_jobs
from memory_database import MemoryDatabase
from saliweb.backend import WebService, Config, Job
class WebServiceTest(unittest.TestCase):
"""Check WebService class"""
def test_init(self):
"""Check WebService init"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
def test_get_job_by_name(self):
"""Check WebService.get_job_by_name()"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
db.create_tables()
make_test_jobs(db.conn)
job = ws.get_job_by_name('RUNNING', 'job3')
self.assertEqual(job.name, 'job3')
job = ws.get_job_by_name('RUNNING', 'job9')
self.assertEqual(job, None)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic testing of the WebService class.<commit_after>import unittest
from test_database import make_test_jobs
from memory_database import MemoryDatabase
from saliweb.backend import WebService, Config, Job
class WebServiceTest(unittest.TestCase):
"""Check WebService class"""
def test_init(self):
"""Check WebService init"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
def test_get_job_by_name(self):
"""Check WebService.get_job_by_name()"""
db = MemoryDatabase(Job)
c = Config(None)
ws = WebService(c, db)
db.create_tables()
make_test_jobs(db.conn)
job = ws.get_job_by_name('RUNNING', 'job3')
self.assertEqual(job.name, 'job3')
job = ws.get_job_by_name('RUNNING', 'job9')
self.assertEqual(job, None)
if __name__ == '__main__':
unittest.main()
|
|
d689ac08e6d0b928d674997aafb383c8bf2a4861
|
geotrek/common/migrations/0026_auto_20220425_0938.py
|
geotrek/common/migrations/0026_auto_20220425_0938.py
|
# Generated by Django 3.1.14 on 2022-04-25 09:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0025_auto_20220425_0850'),
]
operations = [
migrations.AlterModelOptions(
name='license',
options={'ordering': ['label'], 'verbose_name': 'Attachment license', 'verbose_name_plural': 'Attachment licenses'},
),
migrations.AddField(
model_name='accessibilityattachment',
name='license',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.license', verbose_name='License'),
),
]
|
Add api v2 license, add license accessibility attachment
|
Add api v2 license, add license accessibility attachment
|
Python
|
bsd-2-clause
|
makinacorpus/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin
|
Add api v2 license, add license accessibility attachment
|
# Generated by Django 3.1.14 on 2022-04-25 09:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0025_auto_20220425_0850'),
]
operations = [
migrations.AlterModelOptions(
name='license',
options={'ordering': ['label'], 'verbose_name': 'Attachment license', 'verbose_name_plural': 'Attachment licenses'},
),
migrations.AddField(
model_name='accessibilityattachment',
name='license',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.license', verbose_name='License'),
),
]
|
<commit_before><commit_msg>Add api v2 license, add license accessibility attachment<commit_after>
|
# Generated by Django 3.1.14 on 2022-04-25 09:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0025_auto_20220425_0850'),
]
operations = [
migrations.AlterModelOptions(
name='license',
options={'ordering': ['label'], 'verbose_name': 'Attachment license', 'verbose_name_plural': 'Attachment licenses'},
),
migrations.AddField(
model_name='accessibilityattachment',
name='license',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.license', verbose_name='License'),
),
]
|
Add api v2 license, add license accessibility attachment# Generated by Django 3.1.14 on 2022-04-25 09:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0025_auto_20220425_0850'),
]
operations = [
migrations.AlterModelOptions(
name='license',
options={'ordering': ['label'], 'verbose_name': 'Attachment license', 'verbose_name_plural': 'Attachment licenses'},
),
migrations.AddField(
model_name='accessibilityattachment',
name='license',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.license', verbose_name='License'),
),
]
|
<commit_before><commit_msg>Add api v2 license, add license accessibility attachment<commit_after># Generated by Django 3.1.14 on 2022-04-25 09:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('common', '0025_auto_20220425_0850'),
]
operations = [
migrations.AlterModelOptions(
name='license',
options={'ordering': ['label'], 'verbose_name': 'Attachment license', 'verbose_name_plural': 'Attachment licenses'},
),
migrations.AddField(
model_name='accessibilityattachment',
name='license',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='common.license', verbose_name='License'),
),
]
|
|
19ab63c1f5c377a58b3b91ae2c716f6d3ed6b78b
|
euler036.py
|
euler036.py
|
#!/usr/bin/python
from math import pow
LIMIT = 1000000
palindrome_sum = 0
def is_palindrome(res):
return res == res[::-1]
def binary(x):
res = []
while(x):
res.insert(0, x % 2)
x //= 2
return res
for palindrome in range(1, LIMIT):
if(is_palindrome(list(str(palindrome)))):
binary_n = binary(palindrome)
if(is_palindrome(binary_n)):
palindrome_sum += palindrome
print(palindrome_sum)
|
Add solution for problem 36, after long time...
|
Add solution for problem 36, after long time...
|
Python
|
mit
|
cifvts/PyEuler
|
Add solution for problem 36, after long time...
|
#!/usr/bin/python
from math import pow
LIMIT = 1000000
palindrome_sum = 0
def is_palindrome(res):
return res == res[::-1]
def binary(x):
res = []
while(x):
res.insert(0, x % 2)
x //= 2
return res
for palindrome in range(1, LIMIT):
if(is_palindrome(list(str(palindrome)))):
binary_n = binary(palindrome)
if(is_palindrome(binary_n)):
palindrome_sum += palindrome
print(palindrome_sum)
|
<commit_before><commit_msg>Add solution for problem 36, after long time...<commit_after>
|
#!/usr/bin/python
from math import pow
LIMIT = 1000000
palindrome_sum = 0
def is_palindrome(res):
return res == res[::-1]
def binary(x):
res = []
while(x):
res.insert(0, x % 2)
x //= 2
return res
for palindrome in range(1, LIMIT):
if(is_palindrome(list(str(palindrome)))):
binary_n = binary(palindrome)
if(is_palindrome(binary_n)):
palindrome_sum += palindrome
print(palindrome_sum)
|
Add solution for problem 36, after long time...#!/usr/bin/python
from math import pow
LIMIT = 1000000
palindrome_sum = 0
def is_palindrome(res):
return res == res[::-1]
def binary(x):
res = []
while(x):
res.insert(0, x % 2)
x //= 2
return res
for palindrome in range(1, LIMIT):
if(is_palindrome(list(str(palindrome)))):
binary_n = binary(palindrome)
if(is_palindrome(binary_n)):
palindrome_sum += palindrome
print(palindrome_sum)
|
<commit_before><commit_msg>Add solution for problem 36, after long time...<commit_after>#!/usr/bin/python
from math import pow
LIMIT = 1000000
palindrome_sum = 0
def is_palindrome(res):
return res == res[::-1]
def binary(x):
res = []
while(x):
res.insert(0, x % 2)
x //= 2
return res
for palindrome in range(1, LIMIT):
if(is_palindrome(list(str(palindrome)))):
binary_n = binary(palindrome)
if(is_palindrome(binary_n)):
palindrome_sum += palindrome
print(palindrome_sum)
|
|
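The committed solution above builds the binary digit list by hand. As an illustrative sketch only (not part of the record), the same double-base palindrome check for Project Euler 36 can be written more compactly with Python's built-in bin():

# Equivalent check: a number qualifies if it reads the same both ways in base 10 and base 2.
def is_double_base_palindrome(n):
    decimal = str(n)
    binary = bin(n)[2:]  # drop the '0b' prefix
    return decimal == decimal[::-1] and binary == binary[::-1]

print(sum(n for n in range(1, 1000000) if is_double_base_palindrome(n)))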
6e4d457244b49c903e706d8deffaf74c4a737990
|
mail_extractor/mail_extractor.py
|
mail_extractor/mail_extractor.py
|
import email
import sys
import os
import argparse
import textwrap
def make_person_schema(msg, schemaFile):
schema = """\
<div itemscope itemtype="http://schema.org/Person">
<span itemprop="email">%s</span>
</div>""" % msg['from']
schemaFile.write(textwrap.dedent(schema))
def mails2schema(mailDir, outputDir):
i = 0
for mail in os.listdir(mailDir):
mailFilename = mailDir + "/" + mail
if(os.path.isfile(mailFilename)):
schemaFilename = "%s/person%d.html" % (outputDir,i)
i = i + 1
with open(mailFilename, 'r') as mailFile, open(schemaFilename, 'w') as schemaFile:
make_person_schema(email.message_from_file(mailFile), schemaFile)
def main():
parser = argparse.ArgumentParser(description='Mail to schema')
parser.add_argument('-d', required=True, help='Directory containing mail files (.eml).')
parser.add_argument('-o', required=True, help='Output directory for the schemas.')
args = parser.parse_args()
    if not os.path.isdir(args.d):
        print('%s is not a directory (option -d).' % args.d)
    elif not os.path.isdir(args.o):
        print('%s is not a directory (option -o).' % args.o)
else:
mails2schema(args.d, args.o)
if __name__ == "__main__":
main()
|
Add a simple mail extractor of eml files. Convert the email to schema format.
|
Add a simple mail extractor of eml files. Convert the email to schema format.
|
Python
|
mit
|
ptal/people-link
|
Add a simple mail extractor of eml files. Convert the email to schema format.
|
import email
import sys
import os
import argparse
import textwrap
def make_person_schema(msg, schemaFile):
schema = """\
<div itemscope itemtype="http://schema.org/Person">
<span itemprop="email">%s</span>
</div>""" % msg['from']
schemaFile.write(textwrap.dedent(schema))
def mails2schema(mailDir, outputDir):
i = 0
for mail in os.listdir(mailDir):
mailFilename = mailDir + "/" + mail
if(os.path.isfile(mailFilename)):
schemaFilename = "%s/person%d.html" % (outputDir,i)
i = i + 1
with open(mailFilename, 'r') as mailFile, open(schemaFilename, 'w') as schemaFile:
make_person_schema(email.message_from_file(mailFile), schemaFile)
def main():
parser = argparse.ArgumentParser(description='Mail to schema')
parser.add_argument('-d', required=True, help='Directory containing mail files (.eml).')
parser.add_argument('-o', required=True, help='Output directory for the schemas.')
args = parser.parse_args()
    if not os.path.isdir(args.d):
        print('%s is not a directory (option -d).' % args.d)
    elif not os.path.isdir(args.o):
        print('%s is not a directory (option -o).' % args.o)
else:
mails2schema(args.d, args.o)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a simple mail extractor of eml files. Convert the email to schema format.<commit_after>
|
import email
import sys
import os
import argparse
import textwrap
def make_person_schema(msg, schemaFile):
schema = """\
<div itemscope itemtype="http://schema.org/Person">
<span itemprop="email">%s</span>
</div>""" % msg['from']
schemaFile.write(textwrap.dedent(schema))
def mails2schema(mailDir, outputDir):
i = 0
for mail in os.listdir(mailDir):
mailFilename = mailDir + "/" + mail
if(os.path.isfile(mailFilename)):
schemaFilename = "%s/person%d.html" % (outputDir,i)
i = i + 1
with open(mailFilename, 'r') as mailFile, open(schemaFilename, 'w') as schemaFile:
make_person_schema(email.message_from_file(mailFile), schemaFile)
def main():
parser = argparse.ArgumentParser(description='Mail to schema')
parser.add_argument('-d', required=True, help='Directory containing mail files (.eml).')
parser.add_argument('-o', required=True, help='Output directory for the schemas.')
args = parser.parse_args()
    if not os.path.isdir(args.d):
        print('%s is not a directory (option -d).' % args.d)
    elif not os.path.isdir(args.o):
        print('%s is not a directory (option -o).' % args.o)
else:
mails2schema(args.d, args.o)
if __name__ == "__main__":
main()
|
Add a simple mail extractor of eml files. Convert the email to schema format.import email
import sys
import os
import argparse
import textwrap
def make_person_schema(msg, schemaFile):
schema = """\
<div itemscope itemtype="http://schema.org/Person">
<span itemprop="email">%s</span>
</div>""" % msg['from']
schemaFile.write(textwrap.dedent(schema))
def mails2schema(mailDir, outputDir):
i = 0
for mail in os.listdir(mailDir):
mailFilename = mailDir + "/" + mail
if(os.path.isfile(mailFilename)):
schemaFilename = "%s/person%d.html" % (outputDir,i)
i = i + 1
with open(mailFilename, 'r') as mailFile, open(schemaFilename, 'w') as schemaFile:
make_person_schema(email.message_from_file(mailFile), schemaFile)
def main():
parser = argparse.ArgumentParser(description='Mail to schema')
parser.add_argument('-d', required=True, help='Directory containing mail files (.eml).')
parser.add_argument('-o', required=True, help='Output directory for the schemas.')
args = parser.parse_args()
    if not os.path.isdir(args.d):
        print('%s is not a directory (option -d).' % args.d)
    elif not os.path.isdir(args.o):
        print('%s is not a directory (option -o).' % args.o)
else:
mails2schema(args.d, args.o)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a simple mail extractor of eml files. Convert the email to schema format.<commit_after>import email
import sys
import os
import argparse
import textwrap
def make_person_schema(msg, schemaFile):
schema = """\
<div itemscope itemtype="http://schema.org/Person">
<span itemprop="email">%s</span>
</div>""" % msg['from']
schemaFile.write(textwrap.dedent(schema))
def mails2schema(mailDir, outputDir):
i = 0
for mail in os.listdir(mailDir):
mailFilename = mailDir + "/" + mail
if(os.path.isfile(mailFilename)):
schemaFilename = "%s/person%d.html" % (outputDir,i)
i = i + 1
with open(mailFilename, 'r') as mailFile, open(schemaFilename, 'w') as schemaFile:
make_person_schema(email.message_from_file(mailFile), schemaFile)
def main():
parser = argparse.ArgumentParser(description='Mail to schema')
parser.add_argument('-d', required=True, help='Directory containing mail files (.eml).')
parser.add_argument('-o', required=True, help='Output directory for the schemas.')
args = parser.parse_args()
    if not os.path.isdir(args.d):
        print('%s is not a directory (option -d).' % args.d)
    elif not os.path.isdir(args.o):
        print('%s is not a directory (option -o).' % args.o)
else:
mails2schema(args.d, args.o)
if __name__ == "__main__":
main()
|
|
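A minimal usage sketch for the extractor above; the import path mail_extractor and the sample address are assumptions for illustration (the committed script itself is meant to be run as a command with -d and -o pointing at existing directories):

import io
import email
from mail_extractor import make_person_schema  # assumed module name

raw = "From: alice@example.com\nSubject: hello\n\nbody"
buf = io.StringIO()
make_person_schema(email.message_from_string(raw), buf)
print(buf.getvalue())
# Prints roughly:
# <div itemscope itemtype="http://schema.org/Person">
#     <span itemprop="email">alice@example.com</span>
# </div>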
41bc62e8cd9e52f443670f326dad84b39d4d9ca1
|
tests/test_managers/test_experiment_job.py
|
tests/test_managers/test_experiment_job.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_schemas.experiment import ExperimentJobConfig
class TestJobManager(TestCase):
def test_default_props(self):
assert ExperimentJobManager.IS_GLOBAL is False
assert ExperimentJobManager.IS_POLYAXON_DIR is True
assert ExperimentJobManager.CONFIG_FILE_NAME == '.polyaxonjob'
assert ExperimentJobManager.CONFIG == ExperimentJobConfig
|
Add experiment job manager tests
|
Add experiment job manager tests
|
Python
|
apache-2.0
|
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
|
Add experiment job manager tests
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_schemas.experiment import ExperimentJobConfig
class TestJobManager(TestCase):
def test_default_props(self):
assert ExperimentJobManager.IS_GLOBAL is False
assert ExperimentJobManager.IS_POLYAXON_DIR is True
assert ExperimentJobManager.CONFIG_FILE_NAME == '.polyaxonjob'
assert ExperimentJobManager.CONFIG == ExperimentJobConfig
|
<commit_before><commit_msg>Add experiment job manager tests<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_schemas.experiment import ExperimentJobConfig
class TestJobManager(TestCase):
def test_default_props(self):
assert ExperimentJobManager.IS_GLOBAL is False
assert ExperimentJobManager.IS_POLYAXON_DIR is True
assert ExperimentJobManager.CONFIG_FILE_NAME == '.polyaxonjob'
assert ExperimentJobManager.CONFIG == ExperimentJobConfig
|
Add experiment job manager tests# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_schemas.experiment import ExperimentJobConfig
class TestJobManager(TestCase):
def test_default_props(self):
assert ExperimentJobManager.IS_GLOBAL is False
assert ExperimentJobManager.IS_POLYAXON_DIR is True
assert ExperimentJobManager.CONFIG_FILE_NAME == '.polyaxonjob'
assert ExperimentJobManager.CONFIG == ExperimentJobConfig
|
<commit_before><commit_msg>Add experiment job manager tests<commit_after># -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from unittest import TestCase
from polyaxon_cli.managers.experiment_job import ExperimentJobManager
from polyaxon_schemas.experiment import ExperimentJobConfig
class TestJobManager(TestCase):
def test_default_props(self):
assert ExperimentJobManager.IS_GLOBAL is False
assert ExperimentJobManager.IS_POLYAXON_DIR is True
assert ExperimentJobManager.CONFIG_FILE_NAME == '.polyaxonjob'
assert ExperimentJobManager.CONFIG == ExperimentJobConfig
|
|
caaec1dd39b10ceae04736fd2fb1278d6f74d7cd
|
tests/testnet/aio/test_jsonrpc.py
|
tests/testnet/aio/test_jsonrpc.py
|
# -*- coding: utf-8 -*-
import asyncio
import pytest
import logging
from bitshares.aio.asset import Asset
logger = logging.getLogger("websockets")
logger.setLevel(logging.DEBUG)
log = logging.getLogger("grapheneapi")
log.setLevel(logging.DEBUG)
@pytest.mark.asyncio
async def test_parallel_queries(event_loop, bitshares, assets):
""" When performing multiple calls at once from different coroutines, responses should correctly match with queries
"""
async def get_asset(asset):
a = await Asset(asset, blockchain_instance=bitshares)
assert a["symbol"] == asset
async def get_info():
await bitshares.info()
for _ in range(0, 40):
tasks = []
tasks.append(asyncio.ensure_future(get_asset("USD")))
tasks.append(asyncio.ensure_future(get_asset("GOLD")))
tasks.append(asyncio.ensure_future(get_info()))
await asyncio.gather(*tasks)
|
Add test for parallel queries
|
Add test for parallel queries
|
Python
|
mit
|
xeroc/python-bitshares
|
Add test for parallel queries
|
# -*- coding: utf-8 -*-
import asyncio
import pytest
import logging
from bitshares.aio.asset import Asset
logger = logging.getLogger("websockets")
logger.setLevel(logging.DEBUG)
log = logging.getLogger("grapheneapi")
log.setLevel(logging.DEBUG)
@pytest.mark.asyncio
async def test_parallel_queries(event_loop, bitshares, assets):
""" When performing multiple calls at once from different coroutines, responses should correctly match with queries
"""
async def get_asset(asset):
a = await Asset(asset, blockchain_instance=bitshares)
assert a["symbol"] == asset
async def get_info():
await bitshares.info()
for _ in range(0, 40):
tasks = []
tasks.append(asyncio.ensure_future(get_asset("USD")))
tasks.append(asyncio.ensure_future(get_asset("GOLD")))
tasks.append(asyncio.ensure_future(get_info()))
await asyncio.gather(*tasks)
|
<commit_before><commit_msg>Add test for parallel queries<commit_after>
|
# -*- coding: utf-8 -*-
import asyncio
import pytest
import logging
from bitshares.aio.asset import Asset
logger = logging.getLogger("websockets")
logger.setLevel(logging.DEBUG)
log = logging.getLogger("grapheneapi")
log.setLevel(logging.DEBUG)
@pytest.mark.asyncio
async def test_parallel_queries(event_loop, bitshares, assets):
""" When performing multiple calls at once from different coroutines, responses should correctly match with queries
"""
async def get_asset(asset):
a = await Asset(asset, blockchain_instance=bitshares)
assert a["symbol"] == asset
async def get_info():
await bitshares.info()
for _ in range(0, 40):
tasks = []
tasks.append(asyncio.ensure_future(get_asset("USD")))
tasks.append(asyncio.ensure_future(get_asset("GOLD")))
tasks.append(asyncio.ensure_future(get_info()))
await asyncio.gather(*tasks)
|
Add test for parallel queries# -*- coding: utf-8 -*-
import asyncio
import pytest
import logging
from bitshares.aio.asset import Asset
logger = logging.getLogger("websockets")
logger.setLevel(logging.DEBUG)
log = logging.getLogger("grapheneapi")
log.setLevel(logging.DEBUG)
@pytest.mark.asyncio
async def test_parallel_queries(event_loop, bitshares, assets):
""" When performing multiple calls at once from different coroutines, responses should correctly match with queries
"""
async def get_asset(asset):
a = await Asset(asset, blockchain_instance=bitshares)
assert a["symbol"] == asset
async def get_info():
await bitshares.info()
for _ in range(0, 40):
tasks = []
tasks.append(asyncio.ensure_future(get_asset("USD")))
tasks.append(asyncio.ensure_future(get_asset("GOLD")))
tasks.append(asyncio.ensure_future(get_info()))
await asyncio.gather(*tasks)
|
<commit_before><commit_msg>Add test for parallel queries<commit_after># -*- coding: utf-8 -*-
import asyncio
import pytest
import logging
from bitshares.aio.asset import Asset
logger = logging.getLogger("websockets")
logger.setLevel(logging.DEBUG)
log = logging.getLogger("grapheneapi")
log.setLevel(logging.DEBUG)
@pytest.mark.asyncio
async def test_parallel_queries(event_loop, bitshares, assets):
""" When performing multiple calls at once from different coroutines, responses should correctly match with queries
"""
async def get_asset(asset):
a = await Asset(asset, blockchain_instance=bitshares)
assert a["symbol"] == asset
async def get_info():
await bitshares.info()
for _ in range(0, 40):
tasks = []
tasks.append(asyncio.ensure_future(get_asset("USD")))
tasks.append(asyncio.ensure_future(get_asset("GOLD")))
tasks.append(asyncio.ensure_future(get_info()))
await asyncio.gather(*tasks)
|
|
8f341dcb684e852b7a920ee930f4714e83bb180f
|
test_radix_sort.py
|
test_radix_sort.py
|
# -*- coding: utf-8 -*-
from radix_sort import int_radix, str_radix
def test_sorted():
my_list = list(range(100))
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_empty():
my_list = []
new_list = str_radix(my_list)
assert new_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
def test_words():
my_list = ['apple', 'berry', 'candle', 'deck', 'equal']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
my_list = ['equal', 'deck', 'candle', 'berry', 'apple']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
def test_mixed_case():
my_list = ['doG', 'Apple', 'aPPle', 'DOG', 'anVIL', 'applE']
new_list = str_radix(my_list)
assert new_list == ['anVIL', 'Apple', 'aPPle', 'applE', 'doG', 'DOG']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
new_list = int_radix(my_list)
assert new_list == [1, 2, 2, 3, 5]
|
Add tests for Radix sort
|
Add tests for Radix sort
|
Python
|
mit
|
nbeck90/data_structures_2
|
Add tests for Radix sort
|
# -*- coding: utf-8 -*-
from radix_sort import int_radix, str_radix
def test_sorted():
my_list = list(range(100))
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_empty():
my_list = []
new_list = str_radix(my_list)
assert new_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
def test_words():
my_list = ['apple', 'berry', 'candle', 'deck', 'equal']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
my_list = ['equal', 'deck', 'candle', 'berry', 'apple']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
def test_mixed_case():
my_list = ['doG', 'Apple', 'aPPle', 'DOG', 'anVIL', 'applE']
new_list = str_radix(my_list)
assert new_list == ['anVIL', 'Apple', 'aPPle', 'applE', 'doG', 'DOG']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
new_list = int_radix(my_list)
assert new_list == [1, 2, 2, 3, 5]
|
<commit_before><commit_msg>Add tests for Radix sort<commit_after>
|
# -*- coding: utf-8 -*-
from radix_sort import int_radix, str_radix
def test_sorted():
my_list = list(range(100))
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_empty():
my_list = []
new_list = str_radix(my_list)
assert new_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
def test_words():
my_list = ['apple', 'berry', 'candle', 'deck', 'equal']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
my_list = ['equal', 'deck', 'candle', 'berry', 'apple']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
def test_mixed_case():
my_list = ['doG', 'Apple', 'aPPle', 'DOG', 'anVIL', 'applE']
new_list = str_radix(my_list)
assert new_list == ['anVIL', 'Apple', 'aPPle', 'applE', 'doG', 'DOG']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
new_list = int_radix(my_list)
assert new_list == [1, 2, 2, 3, 5]
|
Add tests for Radix sort# -*- coding: utf-8 -*-
from radix_sort import int_radix, str_radix
def test_sorted():
my_list = list(range(100))
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_empty():
my_list = []
new_list = str_radix(my_list)
assert new_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
def test_words():
my_list = ['apple', 'berry', 'candle', 'deck', 'equal']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
my_list = ['equal', 'deck', 'candle', 'berry', 'apple']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
def test_mixed_case():
my_list = ['doG', 'Apple', 'aPPle', 'DOG', 'anVIL', 'applE']
new_list = str_radix(my_list)
assert new_list == ['anVIL', 'Apple', 'aPPle', 'applE', 'doG', 'DOG']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
new_list = int_radix(my_list)
assert new_list == [1, 2, 2, 3, 5]
|
<commit_before><commit_msg>Add tests for Radix sort<commit_after># -*- coding: utf-8 -*-
from radix_sort import int_radix, str_radix
def test_sorted():
my_list = list(range(100))
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
new_list = int_radix(my_list)
assert new_list == list(range(100))
def test_empty():
my_list = []
new_list = str_radix(my_list)
assert new_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
new_list = str_radix(my_list)
assert new_list == ['a', 'b', 'c', 'd', 'e']
def test_words():
my_list = ['apple', 'berry', 'candle', 'deck', 'equal']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
my_list = ['equal', 'deck', 'candle', 'berry', 'apple']
new_list = str_radix(my_list)
assert new_list == ['apple', 'berry', 'candle', 'deck', 'equal']
def test_mixed_case():
my_list = ['doG', 'Apple', 'aPPle', 'DOG', 'anVIL', 'applE']
new_list = str_radix(my_list)
assert new_list == ['anVIL', 'Apple', 'aPPle', 'applE', 'doG', 'DOG']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
new_list = int_radix(my_list)
assert new_list == [1, 2, 2, 3, 5]
|
|
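The record above adds only the tests; the int_radix and str_radix functions they exercise are not part of the commit. A minimal LSD radix sort sketch that would satisfy these tests (the case-insensitive, stable string ordering is inferred from test_mixed_case, not from any committed implementation):

def int_radix(values):
    # Least-significant-digit radix sort for non-negative integers, base 10.
    values = list(values)
    if not values:
        return values
    digits = len(str(max(values)))
    for place in range(digits):
        buckets = [[] for _ in range(10)]
        for v in values:
            buckets[(v // 10 ** place) % 10].append(v)
        values = [v for bucket in buckets for v in bucket]
    return values

def str_radix(values):
    # LSD radix sort for strings; compares characters case-insensitively and
    # keeps the input order for ties because every pass is stable.
    values = list(values)
    if not values:
        return values
    width = max(len(s) for s in values)
    for pos in range(width - 1, -1, -1):
        buckets = [[] for _ in range(257)]  # bucket 0 = "past the end of the string"; ASCII assumed
        for s in values:
            key = ord(s[pos].lower()) + 1 if pos < len(s) else 0
            buckets[key].append(s)
        values = [s for bucket in buckets for s in bucket]
    return values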
3fd7cda9be34dd0bbf884aae8012096d3962fad3
|
tests/test_web_urldispatcher.py
|
tests/test_web_urldispatcher.py
|
import pytest
import tempfile
import aiohttp
from aiohttp import web
import os
import shutil
import asyncio
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 8080
# Timeout in seconds for an asynchronous test:
ASYNC_TEST_TIMEOUT = 1
class ExceptAsyncTestTimeout(Exception): pass
def run_timeout(cor,loop,timeout=ASYNC_TEST_TIMEOUT):
"""
Run a given coroutine with timeout.
"""
task_with_timeout = asyncio.wait_for(cor,timeout,loop=loop)
try:
return loop.run_until_complete(task_with_timeout)
except asyncio.futures.TimeoutError:
# Timeout:
raise ExceptAsyncTestTimeout()
@pytest.fixture(scope='function')
def tloop(request):
"""
Obtain a test loop. We want each test case to have its own loop.
"""
# Create a new test loop:
tloop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def teardown():
# Close the test loop:
tloop.close()
request.addfinalizer(teardown)
return tloop
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
def test_access_root_of_static_handler(tloop,tmp_dir_path):
"""
Tests the operation of static file server.
Try to access the root of static file server, and make
sure that a proper not found error is returned.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path,'my_file')
with open(my_file_path,'w') as fw:
fw.write('hello')
asyncio.set_event_loop(None)
app = web.Application(loop=tloop)
# Register global static route:
app.router.add_static('/', tmp_dir_path)
@asyncio.coroutine
def inner_cor():
handler = app.make_handler()
srv = yield from tloop.create_server(handler,\
SERVER_HOST,SERVER_PORT ,reuse_address=True)
# Request the root of the static directory.
        # Expect a 404 error page.
url = 'http://{}:{}/'.format(\
SERVER_HOST,SERVER_PORT)
r = ( yield from aiohttp.get(url,loop=tloop) )
assert r.status == 404
# data = (yield from r.read())
yield from r.release()
srv.close()
yield from srv.wait_closed()
yield from app.shutdown()
yield from handler.finish_connections(10.0)
yield from app.cleanup()
run_timeout(inner_cor(),tloop,timeout=5)
|
Test for accessing the root of a statically served dir.
|
Test for accessing the root of a statically served dir.
|
Python
|
apache-2.0
|
z2v/aiohttp,esaezgil/aiohttp,AraHaanOrg/aiohttp,elastic-coders/aiohttp,moden-py/aiohttp,vaskalas/aiohttp,panda73111/aiohttp,jashandeep-sohi/aiohttp,elastic-coders/aiohttp,hellysmile/aiohttp,juliatem/aiohttp,panda73111/aiohttp,moden-py/aiohttp,esaezgil/aiohttp,jashandeep-sohi/aiohttp,KeepSafe/aiohttp,singulared/aiohttp,z2v/aiohttp,vaskalas/aiohttp,z2v/aiohttp,vaskalas/aiohttp,elastic-coders/aiohttp,playpauseandstop/aiohttp,singulared/aiohttp,mind1master/aiohttp,panda73111/aiohttp,moden-py/aiohttp,AraHaanOrg/aiohttp,mind1master/aiohttp,jashandeep-sohi/aiohttp,rutsky/aiohttp,arthurdarcet/aiohttp,KeepSafe/aiohttp,KeepSafe/aiohttp,arthurdarcet/aiohttp,arthurdarcet/aiohttp,esaezgil/aiohttp,pfreixes/aiohttp,jettify/aiohttp,pfreixes/aiohttp,singulared/aiohttp,Eyepea/aiohttp,rutsky/aiohttp,alex-eri/aiohttp-1,juliatem/aiohttp,rutsky/aiohttp,alex-eri/aiohttp-1,hellysmile/aiohttp,mind1master/aiohttp,alex-eri/aiohttp-1,jettify/aiohttp,jettify/aiohttp
|
Test for accessing the root of a statically served dir.
|
import pytest
import tempfile
import aiohttp
from aiohttp import web
import os
import shutil
import asyncio
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 8080
# Timeout in seconds for an asynchronous test:
ASYNC_TEST_TIMEOUT = 1
class ExceptAsyncTestTimeout(Exception): pass
def run_timeout(cor,loop,timeout=ASYNC_TEST_TIMEOUT):
"""
Run a given coroutine with timeout.
"""
task_with_timeout = asyncio.wait_for(cor,timeout,loop=loop)
try:
return loop.run_until_complete(task_with_timeout)
except asyncio.futures.TimeoutError:
# Timeout:
raise ExceptAsyncTestTimeout()
@pytest.fixture(scope='function')
def tloop(request):
"""
Obtain a test loop. We want each test case to have its own loop.
"""
# Create a new test loop:
tloop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def teardown():
# Close the test loop:
tloop.close()
request.addfinalizer(teardown)
return tloop
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
def test_access_root_of_static_handler(tloop,tmp_dir_path):
"""
Tests the operation of static file server.
Try to access the root of static file server, and make
sure that a proper not found error is returned.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path,'my_file')
with open(my_file_path,'w') as fw:
fw.write('hello')
asyncio.set_event_loop(None)
app = web.Application(loop=tloop)
# Register global static route:
app.router.add_static('/', tmp_dir_path)
@asyncio.coroutine
def inner_cor():
handler = app.make_handler()
srv = yield from tloop.create_server(handler,\
SERVER_HOST,SERVER_PORT ,reuse_address=True)
# Request the root of the static directory.
        # Expect a 404 error page.
url = 'http://{}:{}/'.format(\
SERVER_HOST,SERVER_PORT)
r = ( yield from aiohttp.get(url,loop=tloop) )
assert r.status == 404
# data = (yield from r.read())
yield from r.release()
srv.close()
yield from srv.wait_closed()
yield from app.shutdown()
yield from handler.finish_connections(10.0)
yield from app.cleanup()
run_timeout(inner_cor(),tloop,timeout=5)
|
<commit_before><commit_msg>Test for accessing the root of a statically served dir.<commit_after>
|
import pytest
import tempfile
import aiohttp
from aiohttp import web
import os
import shutil
import asyncio
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 8080
# Timeout in seconds for an asynchronous test:
ASYNC_TEST_TIMEOUT = 1
class ExceptAsyncTestTimeout(Exception): pass
def run_timeout(cor,loop,timeout=ASYNC_TEST_TIMEOUT):
"""
Run a given coroutine with timeout.
"""
task_with_timeout = asyncio.wait_for(cor,timeout,loop=loop)
try:
return loop.run_until_complete(task_with_timeout)
except asyncio.futures.TimeoutError:
# Timeout:
raise ExceptAsyncTestTimeout()
@pytest.fixture(scope='function')
def tloop(request):
"""
Obtain a test loop. We want each test case to have its own loop.
"""
# Create a new test loop:
tloop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def teardown():
# Close the test loop:
tloop.close()
request.addfinalizer(teardown)
return tloop
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
def test_access_root_of_static_handler(tloop,tmp_dir_path):
"""
Tests the operation of static file server.
Try to access the root of static file server, and make
sure that a proper not found error is returned.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path,'my_file')
with open(my_file_path,'w') as fw:
fw.write('hello')
asyncio.set_event_loop(None)
app = web.Application(loop=tloop)
# Register global static route:
app.router.add_static('/', tmp_dir_path)
@asyncio.coroutine
def inner_cor():
handler = app.make_handler()
srv = yield from tloop.create_server(handler,\
SERVER_HOST,SERVER_PORT ,reuse_address=True)
# Request the root of the static directory.
        # Expect a 404 error page.
url = 'http://{}:{}/'.format(\
SERVER_HOST,SERVER_PORT)
r = ( yield from aiohttp.get(url,loop=tloop) )
assert r.status == 404
# data = (yield from r.read())
yield from r.release()
srv.close()
yield from srv.wait_closed()
yield from app.shutdown()
yield from handler.finish_connections(10.0)
yield from app.cleanup()
run_timeout(inner_cor(),tloop,timeout=5)
|
Test for accessing the root of a statically served dir.import pytest
import tempfile
import aiohttp
from aiohttp import web
import os
import shutil
import asyncio
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 8080
# Timeout in seconds for an asynchronous test:
ASYNC_TEST_TIMEOUT = 1
class ExceptAsyncTestTimeout(Exception): pass
def run_timeout(cor,loop,timeout=ASYNC_TEST_TIMEOUT):
"""
Run a given coroutine with timeout.
"""
task_with_timeout = asyncio.wait_for(cor,timeout,loop=loop)
try:
return loop.run_until_complete(task_with_timeout)
except asyncio.futures.TimeoutError:
# Timeout:
raise ExceptAsyncTestTimeout()
@pytest.fixture(scope='function')
def tloop(request):
"""
Obtain a test loop. We want each test case to have its own loop.
"""
# Create a new test loop:
tloop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def teardown():
# Close the test loop:
tloop.close()
request.addfinalizer(teardown)
return tloop
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
def test_access_root_of_static_handler(tloop,tmp_dir_path):
"""
Tests the operation of static file server.
Try to access the root of static file server, and make
sure that a proper not found error is returned.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path,'my_file')
with open(my_file_path,'w') as fw:
fw.write('hello')
asyncio.set_event_loop(None)
app = web.Application(loop=tloop)
# Register global static route:
app.router.add_static('/', tmp_dir_path)
@asyncio.coroutine
def inner_cor():
handler = app.make_handler()
srv = yield from tloop.create_server(handler,\
SERVER_HOST,SERVER_PORT ,reuse_address=True)
# Request the root of the static directory.
        # Expect a 404 error page.
url = 'http://{}:{}/'.format(\
SERVER_HOST,SERVER_PORT)
r = ( yield from aiohttp.get(url,loop=tloop) )
assert r.status == 404
# data = (yield from r.read())
yield from r.release()
srv.close()
yield from srv.wait_closed()
yield from app.shutdown()
yield from handler.finish_connections(10.0)
yield from app.cleanup()
run_timeout(inner_cor(),tloop,timeout=5)
|
<commit_before><commit_msg>Test for accessing the root of a statically served dir.<commit_after>import pytest
import tempfile
import aiohttp
from aiohttp import web
import os
import shutil
import asyncio
SERVER_HOST = '127.0.0.1'
SERVER_PORT = 8080
# Timeout in seconds for an asynchronous test:
ASYNC_TEST_TIMEOUT = 1
class ExceptAsyncTestTimeout(Exception): pass
def run_timeout(cor,loop,timeout=ASYNC_TEST_TIMEOUT):
"""
Run a given coroutine with timeout.
"""
task_with_timeout = asyncio.wait_for(cor,timeout,loop=loop)
try:
return loop.run_until_complete(task_with_timeout)
except asyncio.futures.TimeoutError:
# Timeout:
raise ExceptAsyncTestTimeout()
@pytest.fixture(scope='function')
def tloop(request):
"""
Obtain a test loop. We want each test case to have its own loop.
"""
# Create a new test loop:
tloop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def teardown():
# Close the test loop:
tloop.close()
request.addfinalizer(teardown)
return tloop
@pytest.fixture(scope='function')
def tmp_dir_path(request):
"""
Give a path for a temporary directory
The directory is destroyed at the end of the test.
"""
# Temporary directory.
tmp_dir = tempfile.mkdtemp()
def teardown():
# Delete the whole directory:
shutil.rmtree(tmp_dir)
request.addfinalizer(teardown)
return tmp_dir
def test_access_root_of_static_handler(tloop,tmp_dir_path):
"""
Tests the operation of static file server.
Try to access the root of static file server, and make
sure that a proper not found error is returned.
"""
# Put a file inside tmp_dir_path:
my_file_path = os.path.join(tmp_dir_path,'my_file')
with open(my_file_path,'w') as fw:
fw.write('hello')
asyncio.set_event_loop(None)
app = web.Application(loop=tloop)
# Register global static route:
app.router.add_static('/', tmp_dir_path)
@asyncio.coroutine
def inner_cor():
handler = app.make_handler()
srv = yield from tloop.create_server(handler,\
SERVER_HOST,SERVER_PORT ,reuse_address=True)
# Request the root of the static directory.
        # Expect a 404 error page.
url = 'http://{}:{}/'.format(\
SERVER_HOST,SERVER_PORT)
r = ( yield from aiohttp.get(url,loop=tloop) )
assert r.status == 404
# data = (yield from r.read())
yield from r.release()
srv.close()
yield from srv.wait_closed()
yield from app.shutdown()
yield from handler.finish_connections(10.0)
yield from app.cleanup()
run_timeout(inner_cor(),tloop,timeout=5)
|
|
62f6f127a8b74daa74d38850d9b9d9346c7b1144
|
gpmcc/tests/inference/test_simulate.py
|
gpmcc/tests/inference/test_simulate.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pylab
import gpmcc.utils.sampling as su
import gpmcc.utils.general as gu
def test_predictive_draw(state, N=None):
if state.n_cols != 2:
print("state must have exactly 2 columns")
return
if N is None:
N = state.n_rows
view_1 = state.Zv[0]
view_2 = state.Zv[1]
if view_1 != view_2:
print("Columns not in same view")
return
log_crp = su.get_cluster_crps(state, 0)
K = len(log_crp)
X = np.zeros(N)
Y = np.zeros(N)
clusters_col_1 = su.create_cluster_set(state, 0)
clusters_col_2 = su.create_cluster_set(state, 1)
for i in range(N):
c = gu.log_pflip(log_crp)
x = clusters_col_1[c].predictive_draw()
y = clusters_col_2[c].predictive_draw()
X[i] = x
Y[i] = y
pylab.scatter(X,Y, color='red', label='inferred')
pylab.scatter(state.dims[0].X, state.dims[1].X, color='blue', label='actual')
pylab.show()
|
Create separate file for simulate.
|
Create separate file for simulate.
|
Python
|
apache-2.0
|
probcomp/cgpm,probcomp/cgpm
|
Create separate file for simulate.
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pylab
import gpmcc.utils.sampling as su
import gpmcc.utils.general as gu
def test_predictive_draw(state, N=None):
if state.n_cols != 2:
print("state must have exactly 2 columns")
return
if N is None:
N = state.n_rows
view_1 = state.Zv[0]
view_2 = state.Zv[1]
if view_1 != view_2:
print("Columns not in same view")
return
log_crp = su.get_cluster_crps(state, 0)
K = len(log_crp)
X = np.zeros(N)
Y = np.zeros(N)
clusters_col_1 = su.create_cluster_set(state, 0)
clusters_col_2 = su.create_cluster_set(state, 1)
for i in range(N):
c = gu.log_pflip(log_crp)
x = clusters_col_1[c].predictive_draw()
y = clusters_col_2[c].predictive_draw()
X[i] = x
Y[i] = y
pylab.scatter(X,Y, color='red', label='inferred')
pylab.scatter(state.dims[0].X, state.dims[1].X, color='blue', label='actual')
pylab.show()
|
<commit_before><commit_msg>Create separate file for simulate.<commit_after>
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010-2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pylab
import gpmcc.utils.sampling as su
import gpmcc.utils.general as gu
def test_predictive_draw(state, N=None):
if state.n_cols != 2:
print("state must have exactly 2 columns")
return
if N is None:
N = state.n_rows
view_1 = state.Zv[0]
view_2 = state.Zv[1]
if view_1 != view_2:
print("Columns not in same view")
return
log_crp = su.get_cluster_crps(state, 0)
K = len(log_crp)
X = np.zeros(N)
Y = np.zeros(N)
clusters_col_1 = su.create_cluster_set(state, 0)
clusters_col_2 = su.create_cluster_set(state, 1)
for i in range(N):
c = gu.log_pflip(log_crp)
x = clusters_col_1[c].predictive_draw()
y = clusters_col_2[c].predictive_draw()
X[i] = x
Y[i] = y
pylab.scatter(X,Y, color='red', label='inferred')
pylab.scatter(state.dims[0].X, state.dims[1].X, color='blue', label='actual')
pylab.show()
|
Create separate file for simulate.# -*- coding: utf-8 -*-
# Copyright (c) 2010-2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pylab
import gpmcc.utils.sampling as su
import gpmcc.utils.general as gu
def test_predictive_draw(state, N=None):
if state.n_cols != 2:
print("state must have exactly 2 columns")
return
if N is None:
N = state.n_rows
view_1 = state.Zv[0]
view_2 = state.Zv[1]
if view_1 != view_2:
print("Columns not in same view")
return
log_crp = su.get_cluster_crps(state, 0)
K = len(log_crp)
X = np.zeros(N)
Y = np.zeros(N)
clusters_col_1 = su.create_cluster_set(state, 0)
clusters_col_2 = su.create_cluster_set(state, 1)
for i in range(N):
c = gu.log_pflip(log_crp)
x = clusters_col_1[c].predictive_draw()
y = clusters_col_2[c].predictive_draw()
X[i] = x
Y[i] = y
pylab.scatter(X,Y, color='red', label='inferred')
pylab.scatter(state.dims[0].X, state.dims[1].X, color='blue', label='actual')
pylab.show()
|
<commit_before><commit_msg>Create separate file for simulate.<commit_after># -*- coding: utf-8 -*-
# Copyright (c) 2010-2015, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import pylab
import gpmcc.utils.sampling as su
import gpmcc.utils.general as gu
def test_predictive_draw(state, N=None):
if state.n_cols != 2:
print("state must have exactly 2 columns")
return
if N is None:
N = state.n_rows
view_1 = state.Zv[0]
view_2 = state.Zv[1]
if view_1 != view_2:
print("Columns not in same view")
return
log_crp = su.get_cluster_crps(state, 0)
K = len(log_crp)
X = np.zeros(N)
Y = np.zeros(N)
clusters_col_1 = su.create_cluster_set(state, 0)
clusters_col_2 = su.create_cluster_set(state, 1)
for i in range(N):
c = gu.log_pflip(log_crp)
x = clusters_col_1[c].predictive_draw()
y = clusters_col_2[c].predictive_draw()
X[i] = x
Y[i] = y
pylab.scatter(X,Y, color='red', label='inferred')
pylab.scatter(state.dims[0].X, state.dims[1].X, color='blue', label='actual')
pylab.show()
|
|
fb61f99e97e0d6f630e9f17132a33e06e4a64d4c
|
plotpoints.py
|
plotpoints.py
|
# Quick hack to visualize non-gridified tSNE placed images
# Adapted from https://github.com/ml4a/ml4a-guides/blob/master/notebooks/image-tsne.ipynb
import json
import matplotlib.pyplot
from PIL import Image
with open('points.json') as json_file:
data = json.load(json_file)
arr = []
for tup in data:
point = tup['point']
arr.append([point[0], point[1], tup['path']])
width = 4000
height = 3000
max_dim = 100
full_image = Image.new('RGBA', (width, height))
for x, y, img in arr:
tile = Image.open(img)
rs = max(1, tile.width/max_dim, tile.height/max_dim)
tile = tile.resize((int(tile.width/rs), int(tile.height/rs)), Image.ANTIALIAS)
full_image.paste(tile, (int((width-max_dim)*x), int((height-max_dim)*y)), mask=tile.convert('RGBA'))
full_image.save("preview.png");
|
Add script for generating a preview image for non-gridified tSNE placed images
|
Add script for generating a preview image for non-gridified tSNE placed images
|
Python
|
apache-2.0
|
tokee/juxta,tokee/juxta,tokee/juxta
|
Add script for generating a preview image for non-gridified tSNE placed images
|
# Quick hack to visualize non-gridified tSNE placed images
# Adapted from https://github.com/ml4a/ml4a-guides/blob/master/notebooks/image-tsne.ipynb
import json
import matplotlib.pyplot
from PIL import Image
with open('points.json') as json_file:
data = json.load(json_file)
arr = []
for tup in data:
point = tup['point']
arr.append([point[0], point[1], tup['path']])
width = 4000
height = 3000
max_dim = 100
full_image = Image.new('RGBA', (width, height))
for x, y, img in arr:
tile = Image.open(img)
rs = max(1, tile.width/max_dim, tile.height/max_dim)
tile = tile.resize((int(tile.width/rs), int(tile.height/rs)), Image.ANTIALIAS)
full_image.paste(tile, (int((width-max_dim)*x), int((height-max_dim)*y)), mask=tile.convert('RGBA'))
full_image.save("preview.png");
|
<commit_before><commit_msg>Add script for generating a preview image for non-gridified tSNE placed images<commit_after>
|
# Quick hack to visualize non-gridified tSNE placed images
# Adapted from https://github.com/ml4a/ml4a-guides/blob/master/notebooks/image-tsne.ipynb
import json
import matplotlib.pyplot
from PIL import Image
with open('points.json') as json_file:
data = json.load(json_file)
arr = []
for tup in data:
point = tup['point']
arr.append([point[0], point[1], tup['path']])
width = 4000
height = 3000
max_dim = 100
full_image = Image.new('RGBA', (width, height))
for x, y, img in arr:
tile = Image.open(img)
rs = max(1, tile.width/max_dim, tile.height/max_dim)
tile = tile.resize((int(tile.width/rs), int(tile.height/rs)), Image.ANTIALIAS)
full_image.paste(tile, (int((width-max_dim)*x), int((height-max_dim)*y)), mask=tile.convert('RGBA'))
full_image.save("preview.png");
|
Add script for generating a preview image for non-gridified tSNE placed images# Quick hack to visualize non-gridified tSNE placed images
# Adapted from https://github.com/ml4a/ml4a-guides/blob/master/notebooks/image-tsne.ipynb
import json
import matplotlib.pyplot
from PIL import Image
with open('points.json') as json_file:
data = json.load(json_file)
arr = []
for tup in data:
point = tup['point']
arr.append([point[0], point[1], tup['path']])
width = 4000
height = 3000
max_dim = 100
full_image = Image.new('RGBA', (width, height))
for x, y, img in arr:
tile = Image.open(img)
rs = max(1, tile.width/max_dim, tile.height/max_dim)
tile = tile.resize((int(tile.width/rs), int(tile.height/rs)), Image.ANTIALIAS)
full_image.paste(tile, (int((width-max_dim)*x), int((height-max_dim)*y)), mask=tile.convert('RGBA'))
full_image.save("preview.png");
|
<commit_before><commit_msg>Add script for generating a preview image for non-gridified tSNE placed images<commit_after># Quick hack to visualize non-gridified tSNE placed images
# Adapted from https://github.com/ml4a/ml4a-guides/blob/master/notebooks/image-tsne.ipynb
import json
import matplotlib.pyplot
from PIL import Image
with open('points.json') as json_file:
data = json.load(json_file)
arr = []
for tup in data:
point = tup['point']
arr.append([point[0], point[1], tup['path']])
width = 4000
height = 3000
max_dim = 100
full_image = Image.new('RGBA', (width, height))
for x, y, img in arr:
tile = Image.open(img)
rs = max(1, tile.width/max_dim, tile.height/max_dim)
tile = tile.resize((int(tile.width/rs), int(tile.height/rs)), Image.ANTIALIAS)
full_image.paste(tile, (int((width-max_dim)*x), int((height-max_dim)*y)), mask=tile.convert('RGBA'))
full_image.save("preview.png");
|
|
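The script above expects a points.json file whose entries pair a tile path with a 2-D position, and the paste arithmetic int((width - max_dim) * x) only lands tiles inside the canvas when those coordinates are roughly normalized to [0, 1]. A sketch of how such a file might be written (paths and values are illustrative):

import json

# Illustrative tSNE output: normalized (x, y) coordinates per image tile.
points = [
    {"point": [0.12, 0.83], "path": "images/img_0001.png"},
    {"point": [0.47, 0.05], "path": "images/img_0002.png"},
]
with open("points.json", "w") as json_file:
    json.dump(points, json_file, indent=2)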
1218583a097575e7befd6eea339e34a0cbfda76f
|
test_goeslc.py
|
test_goeslc.py
|
from sunpy.time import TimeRange
from sunpy.lightcurve import GOESLightCurve
dt = TimeRange('1981/01/10 00:00', '2014/04/18 23:00')
tr_not_found = []
time_ranges = dt.window(60*60*24, 60*60*24)
total_days = len(time_ranges)
total_fails = 0
# missing files http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
# http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
for time_range in time_ranges:
print(time_range.start())
try:
goes = GOESLightCurve.create(time_range)
print(goes.data['xrsa'].max())
print(goes.data['xrsb'].max())
except:
print("File Not Found!")
tr_not_found.append(time_range)
total_fails = total_fails + 1
print('Number of fails:%i' % total_fails)
print('Number of tries:%i' % total_days)
print('Percent Fail: %d' % (float(total_fails)/total_days * 100))
for tr in tr_not_found:
print(tr.start())
|
Test for GOES light curve
|
Test for GOES light curve
|
Python
|
apache-2.0
|
ehsteve/sunpy-tests
|
Test for GOES light curve
|
from sunpy.time import TimeRange
from sunpy.lightcurve import GOESLightCurve
dt = TimeRange('1981/01/10 00:00', '2014/04/18 23:00')
tr_not_found = []
time_ranges = dt.window(60*60*24, 60*60*24)
total_days = len(time_ranges)
total_fails = 0
# missing files http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
# http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
for time_range in time_ranges:
print(time_range.start())
try:
goes = GOESLightCurve.create(time_range)
print(goes.data['xrsa'].max())
print(goes.data['xrsb'].max())
except:
print("File Not Found!")
tr_not_found.append(time_range)
total_fails = total_fails + 1
print('Number of fails:%i' % total_fails)
print('Number of tries:%i' % total_days)
print('Percent Fail: %d' % (float(total_fails)/total_days * 100))
for tr in tr_not_found:
print(tr.start())
|
<commit_before><commit_msg>Test for GOES light curve<commit_after>
|
from sunpy.time import TimeRange
from sunpy.lightcurve import GOESLightCurve
dt = TimeRange('1981/01/10 00:00', '2014/04/18 23:00')
tr_not_found = []
time_ranges = dt.window(60*60*24, 60*60*24)
total_days = len(time_ranges)
total_fails = 0
# missing files http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
# http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
for time_range in time_ranges:
print(time_range.start())
try:
goes = GOESLightCurve.create(time_range)
print(goes.data['xrsa'].max())
print(goes.data['xrsb'].max())
except:
print("File Not Found!")
tr_not_found.append(time_range)
total_fails = total_fails + 1
print('Number of fails:%i' % total_fails)
print('Number of tries:%i' % total_days)
print('Percent Fail: %d' % (float(total_fails)/total_days * 100))
for tr in tr_not_found:
print(tr.start())
|
Test for GOES light curvefrom sunpy.time import TimeRange
from sunpy.lightcurve import GOESLightCurve
dt = TimeRange('1981/01/10 00:00', '2014/04/18 23:00')
tr_not_found = []
time_ranges = dt.window(60*60*24, 60*60*24)
total_days = len(time_ranges)
total_fails = 0
# missing files http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
# http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
for time_range in time_ranges:
print(time_range.start())
try:
goes = GOESLightCurve.create(time_range)
print(goes.data['xrsa'].max())
print(goes.data['xrsb'].max())
except:
print("File Not Found!")
tr_not_found.append(time_range)
total_fails = total_fails + 1
print('Number of fails:%i' % total_fails)
print('Number of tries:%i' % total_days)
print('Percent Fail: %d' % (float(total_fails)/total_days * 100))
for tr in tr_not_found:
print(tr.start())
|
<commit_before><commit_msg>Test for GOES light curve<commit_after>from sunpy.time import TimeRange
from sunpy.lightcurve import GOESLightCurve
dt = TimeRange('1981/01/10 00:00', '2014/04/18 23:00')
tr_not_found = []
time_ranges = dt.window(60*60*24, 60*60*24)
total_days = len(time_ranges)
total_fails = 0
# missing files http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
# http://umbra.nascom.nasa.gov/goes/fits/2005/go1220051116.fits
for time_range in time_ranges:
print(time_range.start())
try:
goes = GOESLightCurve.create(time_range)
print(goes.data['xrsa'].max())
print(goes.data['xrsb'].max())
except:
print("File Not Found!")
tr_not_found.append(time_range)
total_fails = total_fails + 1
print('Number of fails:%i' % total_fails)
print('Number of tries:%i' % total_days)
print('Percent Fail: %d' % (float(total_fails)/total_days * 100))
for tr in tr_not_found:
print(tr.start())
|
|
95366beb54dfecc43e6ba3dc651fe5fd12aeb5a5
|
python/gui.py
|
python/gui.py
|
import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load '+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension] gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
|
import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load "'+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension]" gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
|
Fix spaces in install dir
|
Fix spaces in install dir
|
Python
|
lgpl-2.1
|
looooo/netgen,live-clones/netgen,looooo/netgen,looooo/netgen,live-clones/netgen,looooo/netgen,live-clones/netgen,looooo/netgen,looooo/netgen,live-clones/netgen,live-clones/netgen,live-clones/netgen
|
import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load '+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension] gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
Fix spaces in install dir
|
import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load "'+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension]" gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
|
<commit_before>import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load '+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension] gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
<commit_msg>Fix spaces in install dir<commit_after>
|
import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load "'+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension]" gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
|
import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load '+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension] gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
Fix spaces in install dir
import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load "'+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension]" gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
|
<commit_before>import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load '+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension] gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
<commit_msg>Fix spaces in install dir<commit_after>import netgen
def StartGUI():
from tkinter import Tk
global win
win = Tk()
win.tk.eval('lappend ::auto_path ' + netgen._netgen_lib_dir)
win.tk.eval('lappend ::auto_path ' + netgen._netgen_bin_dir)
# load with absolute path to avoid issues on MacOS
win.tk.eval('load "'+netgen._netgen_lib_dir.replace('\\','/')+'/libgui[info sharedlibextension]" gui')
win.tk.eval( netgen.libngpy._meshing._ngscript)
if not netgen.libngpy._meshing._netgen_executable_started:
StartGUI()
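
A small illustration of why the added quotes matter: Tcl's load command splits its arguments on whitespace, so an unquoted install path containing a space gets truncated. The path below is hypothetical.

# Hypothetical install path containing a space.
lib_dir = 'C:/Program Files/netgen/lib'
unquoted = 'load ' + lib_dir + '/libgui[info sharedlibextension] gui'
quoted = 'load "' + lib_dir + '/libgui[info sharedlibextension]" gui'
# Tcl would split the unquoted command at the space after "Program",
# while the quoted form keeps the whole path as a single word.
print(unquoted)
print(quoted)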
|
a3d70cd799126f489737b193f6917fe607a03652
|
wafer/sponsors/migrations/0005_sponsorshippackage_symbol.py
|
wafer/sponsors/migrations/0005_sponsorshippackage_symbol.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0004_auto_20160813_1328'),
]
operations = [
migrations.AddField(
model_name='sponsorshippackage',
name='symbol',
            field=models.CharField(help_text='Optional symbol to display next to sponsors backing at this level in the sponsors list', max_length=1, blank=True),
),
]
|
Add migration for sponsor package changes
|
Add migration for sponsor package changes
|
Python
|
isc
|
CTPUG/wafer,CTPUG/wafer,CTPUG/wafer,CTPUG/wafer
|
Add migration for sponsor package changes
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0004_auto_20160813_1328'),
]
operations = [
migrations.AddField(
model_name='sponsorshippackage',
name='symbol',
            field=models.CharField(help_text='Optional symbol to display next to sponsors backing at this level in the sponsors list', max_length=1, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for sponsor package changes<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0004_auto_20160813_1328'),
]
operations = [
migrations.AddField(
model_name='sponsorshippackage',
name='symbol',
            field=models.CharField(help_text='Optional symbol to display next to sponsors backing at this level in the sponsors list', max_length=1, blank=True),
),
]
|
Add migration for sponsor package changes
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0004_auto_20160813_1328'),
]
operations = [
migrations.AddField(
model_name='sponsorshippackage',
name='symbol',
            field=models.CharField(help_text='Optional symbol to display next to sponsors backing at this level in the sponsors list', max_length=1, blank=True),
),
]
|
<commit_before><commit_msg>Add migration for sponsor package changes<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0004_auto_20160813_1328'),
]
operations = [
migrations.AddField(
model_name='sponsorshippackage',
name='symbol',
            field=models.CharField(help_text='Optional symbol to display next to sponsors backing at this level in the sponsors list', max_length=1, blank=True),
),
]
|
|
5e74bfcf03afdc0eaf58c668e063fd41432b6da0
|
text_filter.py
|
text_filter.py
|
from abc import ABCMeta, abstractmethod
class TextFilter(object, metaclass=ABCMeta):
""" An intertface for text filters
Only has one method: apply() which recieves a string as
an argument and returns a string.
"""
@abstractmethod
def apply(self, text):
""" Recieves a string, filters it, and returns a string. """
pass
class Pipeline(object):
""" A composite TextFilter class
Uses the Composite Pattern with TextFilters
"""
def __init__(self, filters=None):
self._filters = []
if filters is not None:
for fil in filters:
                self.add(fil)
    def add(self, fil):
        if not isinstance(fil, TextFilter):
            raise TypeError("fil must be a subclass of TextFilter")
self._filters += [fil]
return self # allow chained additions
def apply(self, string):
result = string
for fil in self._filters:
result = fil.apply(result)
return result
|
Add text filter and text filter composite (pipeline)
|
Add text filter and text filter composite (pipeline)
|
Python
|
mit
|
iluxonchik/lyricist
|
Add text filter and text filter composite (pipeline)
|
from abc import ABCMeta, abstractmethod
class TextFilter(object, metaclass=ABCMeta):
""" An intertface for text filters
Only has one method: apply() which recieves a string as
an argument and returns a string.
"""
@abstractmethod
def apply(self, text):
""" Recieves a string, filters it, and returns a string. """
pass
class Pipeline(object):
""" A composite TextFilter class
Uses the Composite Pattern with TextFilters
"""
def __init__(self, filters=None):
self._filters = []
if filters is not None:
for fil in filters:
                self.add(fil)
    def add(self, fil):
        if not isinstance(fil, TextFilter):
            raise TypeError("fil must be a subclass of TextFilter")
self._filters += [fil]
return self # allow chained additions
def apply(self, string):
result = string
for fil in self._filters:
result = fil.apply(result)
return result
|
<commit_before><commit_msg>Add text filter and text filter composite (pipeline)<commit_after>
|
from abc import ABCMeta, abstractmethod
class TextFilter(object, metaclass=ABCMeta):
""" An intertface for text filters
Only has one method: apply() which recieves a string as
an argument and returns a string.
"""
@abstractmethod
def apply(self, text):
""" Recieves a string, filters it, and returns a string. """
pass
class Pipeline(object):
""" A composite TextFilter class
Uses the Composite Pattern with TextFilters
"""
def __init__(self, filters=None):
self._filters = []
if filters is not None:
for fil in filters:
                self.add(fil)
    def add(self, fil):
        if not isinstance(fil, TextFilter):
            raise TypeError("fil must be a subclass of TextFilter")
self._filters += [fil]
return self # allow chained additions
def apply(self, string):
result = string
for fil in self._filters:
result = fil.apply(result)
return result
|
Add text filter and text filter composite (pipeline)
from abc import ABCMeta, abstractmethod
class TextFilter(object, metaclass=ABCMeta):
""" An intertface for text filters
Only has one method: apply() which recieves a string as
an argument and returns a string.
"""
@abstractmethod
def apply(self, text):
""" Recieves a string, filters it, and returns a string. """
pass
class Pipeline(object):
""" A composite TextFilter class
Uses the Composite Pattern with TextFilters
"""
def __init__(self, filters=None):
self._filters = []
if filters is not None:
for fil in filters:
                self.add(fil)
    def add(self, fil):
        if not isinstance(fil, TextFilter):
            raise TypeError("fil must be a subclass of TextFilter")
self._filters += [fil]
return self # allow chained additions
def apply(self, string):
result = string
for fil in self._filters:
result = fil.apply(result)
return result
|
<commit_before><commit_msg>Add text filter and text filter composite (pipeline)<commit_after>from abc import ABCMeta, abstractmethod
class TextFilter(object, metaclass=ABCMeta):
""" An intertface for text filters
Only has one method: apply() which recieves a string as
an argument and returns a string.
"""
@abstractmethod
def apply(self, text):
""" Recieves a string, filters it, and returns a string. """
pass
class Pipeline(object):
""" A composite TextFilter class
Uses the Composite Pattern with TextFilters
"""
def __init__(self, filters=None):
self._filters = []
if filters is not None:
for fil in filters:
                self.add(fil)
    def add(self, fil):
        if not isinstance(fil, TextFilter):
            raise TypeError("fil must be a subclass of TextFilter")
self._filters += [fil]
return self # allow chained additions
def apply(self, string):
result = string
for fil in self._filters:
result = fil.apply(result)
return result
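
A usage sketch for the classes above, assuming TextFilter and Pipeline are in scope; the concrete filter names are illustrative, not part of the original module.

class LowercaseFilter(TextFilter):
    def apply(self, text):
        return text.lower()

class StripFilter(TextFilter):
    def apply(self, text):
        return text.strip()

# add() returns self, so filters can be chained.
pipeline = Pipeline().add(LowercaseFilter()).add(StripFilter())
print(pipeline.apply("  Hello WORLD  "))  # -> "hello world"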
|
|
78454dcde6be6da73d99fd77c79b6af21dfe02c1
|
xml_hidden_extensions_hotfix.py
|
xml_hidden_extensions_hotfix.py
|
# The XML package includes a `XML.sublime-settings` file that sets `hidden_extensions` to some
# of the extensions we want to highlight with our package.
# There is currently no other way to override this, so we manually remove these extensions from the
# setting with a plugin.
#
# See also:
# https://github.com/sublimehq/Packages/issues/823
# https://github.com/SublimeTextIssues/Core/issues/1326
import sublime
DEFAULT_VALUE = ["rss", "sublime-snippet", "vcproj", "tmLanguage", "tmTheme", "tmSnippet",
"tmPreferences", "dae"]
MODIFIED_VALUE = ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"]
# Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
BOOTRSTAP_VALUE = [3126, 2017, 3, 13]
def plugin_loaded():
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('hidden_extensions') == DEFAULT_VALUE:
settings.set('hidden_extensions', MODIFIED_VALUE)
settings.set('package_dev.bootstrapped', BOOTRSTAP_VALUE)
sublime.save_settings("XML.sublime-settings")
print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")
|
Add plugin to make syntax detection work
|
Add plugin to make syntax detection work
|
Python
|
mit
|
SublimeText/PackageDev,SublimeText/AAAPackageDev,SublimeText/AAAPackageDev
|
Add plugin to make syntax detection work
|
# The XML package includes a `XML.sublime-settings` file that sets `hidden_extensions` to some
# of the extensions we want to highlight with our package.
# There is currently no other way to override this, so we manually remove these extensions from the
# setting with a plugin.
#
# See also:
# https://github.com/sublimehq/Packages/issues/823
# https://github.com/SublimeTextIssues/Core/issues/1326
import sublime
DEFAULT_VALUE = ["rss", "sublime-snippet", "vcproj", "tmLanguage", "tmTheme", "tmSnippet",
"tmPreferences", "dae"]
MODIFIED_VALUE = ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"]
# Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
BOOTRSTAP_VALUE = [3126, 2017, 3, 13]
def plugin_loaded():
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('hidden_extensions') == DEFAULT_VALUE:
settings.set('hidden_extensions', MODIFIED_VALUE)
settings.set('package_dev.bootstrapped', BOOTRSTAP_VALUE)
sublime.save_settings("XML.sublime-settings")
print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")
|
<commit_before><commit_msg>Add plugin to make syntax detection work<commit_after>
|
# The XML package includes a `XML.sublime-settings` file that sets `hidden_extensions` to some
# of the extensions we want to highlight with our package.
# There is currently no other way to override this, so we manually remove these extensions from the
# setting with a plugin.
#
# See also:
# https://github.com/sublimehq/Packages/issues/823
# https://github.com/SublimeTextIssues/Core/issues/1326
import sublime
DEFAULT_VALUE = ["rss", "sublime-snippet", "vcproj", "tmLanguage", "tmTheme", "tmSnippet",
"tmPreferences", "dae"]
MODIFIED_VALUE = ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"]
# Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
BOOTRSTAP_VALUE = [3126, 2017, 3, 13]
def plugin_loaded():
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('hidden_extensions') == DEFAULT_VALUE:
settings.set('hidden_extensions', MODIFIED_VALUE)
settings.set('package_dev.bootstrapped', BOOTRSTAP_VALUE)
sublime.save_settings("XML.sublime-settings")
print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")
|
Add plugin to make syntax detection work
# The XML package includes a `XML.sublime-settings` file that sets `hidden_extensions` to some
# of the extensions we want to highlight with our package.
# There is currently no other way to override this, so we manually remove these extensions from the
# setting with a plugin.
#
# See also:
# https://github.com/sublimehq/Packages/issues/823
# https://github.com/SublimeTextIssues/Core/issues/1326
import sublime
DEFAULT_VALUE = ["rss", "sublime-snippet", "vcproj", "tmLanguage", "tmTheme", "tmSnippet",
"tmPreferences", "dae"]
MODIFIED_VALUE = ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"]
# Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
BOOTRSTAP_VALUE = [3126, 2017, 3, 13]
def plugin_loaded():
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('hidden_extensions') == DEFAULT_VALUE:
settings.set('hidden_extensions', MODIFIED_VALUE)
settings.set('package_dev.bootstrapped', BOOTRSTAP_VALUE)
sublime.save_settings("XML.sublime-settings")
print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")
|
<commit_before><commit_msg>Add plugin to make syntax detection work<commit_after># The XML package includes a `XML.sublime-settings` file that sets `hidden_extensions` to some
# of the extensions we want to highlight with our package.
# There is currently no other way to override this, so we manually remove these extensions from the
# setting with a plugin.
#
# See also:
# https://github.com/sublimehq/Packages/issues/823
# https://github.com/SublimeTextIssues/Core/issues/1326
import sublime
DEFAULT_VALUE = ["rss", "sublime-snippet", "vcproj", "tmLanguage", "tmTheme", "tmSnippet",
"tmPreferences", "dae"]
MODIFIED_VALUE = ["rss", "vcproj", "tmLanguage", "tmTheme", "tmSnippet", "dae"]
# Encode ST build and date of last change (of this file) into the bootstrap value.
# I'm not sure what exactly I'm gonna do with it, so just include info I might find useful later.
BOOTRSTAP_VALUE = [3126, 2017, 3, 13]
def plugin_loaded():
settings = sublime.load_settings("XML.sublime-settings")
if settings.get('hidden_extensions') == DEFAULT_VALUE:
settings.set('hidden_extensions', MODIFIED_VALUE)
settings.set('package_dev.bootstrapped', BOOTRSTAP_VALUE)
sublime.save_settings("XML.sublime-settings")
print("[PackageDev] Bootstrapped XML's `hidden_extensions` setting")
|
|
029183cfd855c1fc157df771d1b5a705aec38854
|
py/minimum-absolute-difference-in-bst.py
|
py/minimum-absolute-difference-in-bst.py
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrder(self, cur):
if cur:
self.inOrder(cur.left)
if self.prev is not None:
d = abs(cur.val - self.prev)
if self.ans is None:
self.ans = d
else:
self.ans = min(self.ans, d)
self.prev = cur.val
self.inOrder(cur.right)
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.prev = None
self.ans = None
self.inOrder(root)
return self.ans
|
Add py solution for 530. Minimum Absolute Difference in BST
|
Add py solution for 530. Minimum Absolute Difference in BST
530. Minimum Absolute Difference in BST: https://leetcode.com/problems/minimum-absolute-difference-in-bst/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 530. Minimum Absolute Difference in BST
530. Minimum Absolute Difference in BST: https://leetcode.com/problems/minimum-absolute-difference-in-bst/
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrder(self, cur):
if cur:
self.inOrder(cur.left)
if self.prev is not None:
d = abs(cur.val - self.prev)
if self.ans is None:
self.ans = d
else:
self.ans = min(self.ans, d)
self.prev = cur.val
self.inOrder(cur.right)
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.prev = None
self.ans = None
self.inOrder(root)
return self.ans
|
<commit_before><commit_msg>Add py solution for 530. Minimum Absolute Difference in BST
530. Minimum Absolute Difference in BST: https://leetcode.com/problems/minimum-absolute-difference-in-bst/<commit_after>
|
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrder(self, cur):
if cur:
self.inOrder(cur.left)
if self.prev is not None:
d = abs(cur.val - self.prev)
if self.ans is None:
self.ans = d
else:
self.ans = min(self.ans, d)
self.prev = cur.val
self.inOrder(cur.right)
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.prev = None
self.ans = None
self.inOrder(root)
return self.ans
|
Add py solution for 530. Minimum Absolute Difference in BST
530. Minimum Absolute Difference in BST: https://leetcode.com/problems/minimum-absolute-difference-in-bst/
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrder(self, cur):
if cur:
self.inOrder(cur.left)
if self.prev is not None:
d = abs(cur.val - self.prev)
if self.ans is None:
self.ans = d
else:
self.ans = min(self.ans, d)
self.prev = cur.val
self.inOrder(cur.right)
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.prev = None
self.ans = None
self.inOrder(root)
return self.ans
|
<commit_before><commit_msg>Add py solution for 530. Minimum Absolute Difference in BST
530. Minimum Absolute Difference in BST: https://leetcode.com/problems/minimum-absolute-difference-in-bst/<commit_after># Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def inOrder(self, cur):
if cur:
self.inOrder(cur.left)
if self.prev is not None:
d = abs(cur.val - self.prev)
if self.ans is None:
self.ans = d
else:
self.ans = min(self.ans, d)
self.prev = cur.val
self.inOrder(cur.right)
def getMinimumDifference(self, root):
"""
:type root: TreeNode
:rtype: int
"""
self.prev = None
self.ans = None
self.inOrder(root)
return self.ans
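
A usage sketch, assuming the Solution class above is in scope; the hand-built BST holds 1, 2, 3, 4, 6, so the closest pair of values differs by 1.

class TreeNode(object):
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

root = TreeNode(4)
root.left = TreeNode(2)
root.right = TreeNode(6)
root.left.left = TreeNode(1)
root.left.right = TreeNode(3)

print(Solution().getMinimumDifference(root))  # -> 1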
|
|
aa37928593ae84526d56f2c50ba7c21b2be6c5e8
|
rapidtide/tests/test_mi.py
|
rapidtide/tests/test_mi.py
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2019 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
import pylab as plt
import rapidtide.io as tide_io
from rapidtide.correlate import calc_MI
def test_calc_MI(display=False):
inlen = 1000
offset = 100
filename1 = "testdata/lforegressor.txt"
filename2 = "testdata/lforegressor.txt"
sig1 = tide_io.readvec(filename1)
sig2 = np.power(sig1, 2.0)
sig3 = np.power(sig1, 3.0)
kstart = 3
kend = 100
linmivals = []
sqmivals = []
cubemivals = []
for clustersize in range(kstart, kend, 2):
linmivals.append(calc_MI(sig1, sig1, clustersize) / np.log(clustersize))
sqmivals.append(calc_MI(sig2, sig1, clustersize) / np.log(clustersize))
cubemivals.append(calc_MI(sig3, sig1, clustersize) / np.log(clustersize))
if display:
plt.figure()
#plt.ylim([-1.0, 3.0])
plt.plot(np.array(range(kstart, kend, 2)), np.array(linmivals), 'r')
plt.plot(np.array(range(kstart, kend, 2)), np.array(sqmivals), 'g')
plt.plot(np.array(range(kstart, kend, 2)), np.array(cubemivals), 'b')
#print('maximum occurs at offset', np.argmax(stdcorrelate_result) - midpoint + 1)
plt.legend(['Mutual information', 'Squared mutual information', 'Cubed mutual information'])
plt.show()
aethresh = 10
np.testing.assert_almost_equal(1.0, 1.0, 1e-5)
def main():
test_calc_MI(display=True)
if __name__ == '__main__':
main()
|
Test for mutual information function
|
Test for mutual information function
|
Python
|
apache-2.0
|
bbfrederick/rapidtide,bbfrederick/rapidtide
|
Test for mutual information function
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2019 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
import pylab as plt
import rapidtide.io as tide_io
from rapidtide.correlate import calc_MI
def test_calc_MI(display=False):
inlen = 1000
offset = 100
filename1 = "testdata/lforegressor.txt"
filename2 = "testdata/lforegressor.txt"
sig1 = tide_io.readvec(filename1)
sig2 = np.power(sig1, 2.0)
sig3 = np.power(sig1, 3.0)
kstart = 3
kend = 100
linmivals = []
sqmivals = []
cubemivals = []
for clustersize in range(kstart, kend, 2):
linmivals.append(calc_MI(sig1, sig1, clustersize) / np.log(clustersize))
sqmivals.append(calc_MI(sig2, sig1, clustersize) / np.log(clustersize))
cubemivals.append(calc_MI(sig3, sig1, clustersize) / np.log(clustersize))
if display:
plt.figure()
#plt.ylim([-1.0, 3.0])
plt.plot(np.array(range(kstart, kend, 2)), np.array(linmivals), 'r')
plt.plot(np.array(range(kstart, kend, 2)), np.array(sqmivals), 'g')
plt.plot(np.array(range(kstart, kend, 2)), np.array(cubemivals), 'b')
#print('maximum occurs at offset', np.argmax(stdcorrelate_result) - midpoint + 1)
plt.legend(['Mutual information', 'Squared mutual information', 'Cubed mutual information'])
plt.show()
aethresh = 10
np.testing.assert_almost_equal(1.0, 1.0, 1e-5)
def main():
test_calc_MI(display=True)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Test for mutual information function<commit_after>
|
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2019 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
import pylab as plt
import rapidtide.io as tide_io
from rapidtide.correlate import calc_MI
def test_calc_MI(display=False):
inlen = 1000
offset = 100
filename1 = "testdata/lforegressor.txt"
filename2 = "testdata/lforegressor.txt"
sig1 = tide_io.readvec(filename1)
sig2 = np.power(sig1, 2.0)
sig3 = np.power(sig1, 3.0)
kstart = 3
kend = 100
linmivals = []
sqmivals = []
cubemivals = []
for clustersize in range(kstart, kend, 2):
linmivals.append(calc_MI(sig1, sig1, clustersize) / np.log(clustersize))
sqmivals.append(calc_MI(sig2, sig1, clustersize) / np.log(clustersize))
cubemivals.append(calc_MI(sig3, sig1, clustersize) / np.log(clustersize))
if display:
plt.figure()
#plt.ylim([-1.0, 3.0])
plt.plot(np.array(range(kstart, kend, 2)), np.array(linmivals), 'r')
plt.plot(np.array(range(kstart, kend, 2)), np.array(sqmivals), 'g')
plt.plot(np.array(range(kstart, kend, 2)), np.array(cubemivals), 'b')
#print('maximum occurs at offset', np.argmax(stdcorrelate_result) - midpoint + 1)
plt.legend(['Mutual information', 'Squared mutual information', 'Cubed mutual information'])
plt.show()
aethresh = 10
np.testing.assert_almost_equal(1.0, 1.0, 1e-5)
def main():
test_calc_MI(display=True)
if __name__ == '__main__':
main()
|
Test for mutual information function
#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2019 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
import pylab as plt
import rapidtide.io as tide_io
from rapidtide.correlate import calc_MI
def test_calc_MI(display=False):
inlen = 1000
offset = 100
filename1 = "testdata/lforegressor.txt"
filename2 = "testdata/lforegressor.txt"
sig1 = tide_io.readvec(filename1)
sig2 = np.power(sig1, 2.0)
sig3 = np.power(sig1, 3.0)
kstart = 3
kend = 100
linmivals = []
sqmivals = []
cubemivals = []
for clustersize in range(kstart, kend, 2):
linmivals.append(calc_MI(sig1, sig1, clustersize) / np.log(clustersize))
sqmivals.append(calc_MI(sig2, sig1, clustersize) / np.log(clustersize))
cubemivals.append(calc_MI(sig3, sig1, clustersize) / np.log(clustersize))
if display:
plt.figure()
#plt.ylim([-1.0, 3.0])
plt.plot(np.array(range(kstart, kend, 2)), np.array(linmivals), 'r')
plt.plot(np.array(range(kstart, kend, 2)), np.array(sqmivals), 'g')
plt.plot(np.array(range(kstart, kend, 2)), np.array(cubemivals), 'b')
#print('maximum occurs at offset', np.argmax(stdcorrelate_result) - midpoint + 1)
plt.legend(['Mutual information', 'Squared mutual information', 'Cubed mutual information'])
plt.show()
aethresh = 10
np.testing.assert_almost_equal(1.0, 1.0, 1e-5)
def main():
test_calc_MI(display=True)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Test for mutual information function<commit_after>#!/usr/bin/env python
# -*- coding: latin-1 -*-
#
# Copyright 2016-2019 Blaise Frederick
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import numpy as np
import pylab as plt
import rapidtide.io as tide_io
from rapidtide.correlate import calc_MI
def test_calc_MI(display=False):
inlen = 1000
offset = 100
filename1 = "testdata/lforegressor.txt"
filename2 = "testdata/lforegressor.txt"
sig1 = tide_io.readvec(filename1)
sig2 = np.power(sig1, 2.0)
sig3 = np.power(sig1, 3.0)
kstart = 3
kend = 100
linmivals = []
sqmivals = []
cubemivals = []
for clustersize in range(kstart, kend, 2):
linmivals.append(calc_MI(sig1, sig1, clustersize) / np.log(clustersize))
sqmivals.append(calc_MI(sig2, sig1, clustersize) / np.log(clustersize))
cubemivals.append(calc_MI(sig3, sig1, clustersize) / np.log(clustersize))
if display:
plt.figure()
#plt.ylim([-1.0, 3.0])
plt.plot(np.array(range(kstart, kend, 2)), np.array(linmivals), 'r')
plt.plot(np.array(range(kstart, kend, 2)), np.array(sqmivals), 'g')
plt.plot(np.array(range(kstart, kend, 2)), np.array(cubemivals), 'b')
#print('maximum occurs at offset', np.argmax(stdcorrelate_result) - midpoint + 1)
plt.legend(['Mutual information', 'Squared mutual information', 'Cubed mutual information'])
plt.show()
aethresh = 10
np.testing.assert_almost_equal(1.0, 1.0, 1e-5)
def main():
test_calc_MI(display=True)
if __name__ == '__main__':
main()
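
A plot-free sanity check one might run alongside the test above, reusing calc_MI with the same three-argument call pattern on synthetic signals; the prints only illustrate that a signal shares far more information with itself than with unrelated noise.

import numpy as np
from rapidtide.correlate import calc_MI

rng = np.random.RandomState(0)
sig = np.sin(np.linspace(0.0, 20.0, 1000))
noise = rng.randn(1000)
print(calc_MI(sig, sig, 10))    # self-information: comparatively large
print(calc_MI(sig, noise, 10))  # independent signals: close to zero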
|
|
68ef5177f0519d1bb889a51974fdd5f075c8b0f0
|
netbox/utilities/templatetags/perms.py
|
netbox/utilities/templatetags/perms.py
|
from django import template
register = template.Library()
def _check_permission(user, instance, action):
return user.has_perm(
perm=f'{instance._meta.app_label}.{action}_{instance._meta.model_name}',
obj=instance
)
@register.filter()
def can_view(user, instance):
return _check_permission(user, instance, 'view')
@register.filter()
def can_add(user, instance):
return _check_permission(user, instance, 'add')
@register.filter()
def can_change(user, instance):
return _check_permission(user, instance, 'change')
@register.filter()
def can_delete(user, instance):
return _check_permission(user, instance, 'delete')
|
Introduce template filters for checking dynamic permissions
|
Introduce template filters for checking dynamic permissions
|
Python
|
apache-2.0
|
digitalocean/netbox,digitalocean/netbox,digitalocean/netbox,digitalocean/netbox
|
Introduce template filters for checking dynamic permissions
|
from django import template
register = template.Library()
def _check_permission(user, instance, action):
return user.has_perm(
perm=f'{instance._meta.app_label}.{action}_{instance._meta.model_name}',
obj=instance
)
@register.filter()
def can_view(user, instance):
return _check_permission(user, instance, 'view')
@register.filter()
def can_add(user, instance):
return _check_permission(user, instance, 'add')
@register.filter()
def can_change(user, instance):
return _check_permission(user, instance, 'change')
@register.filter()
def can_delete(user, instance):
return _check_permission(user, instance, 'delete')
|
<commit_before><commit_msg>Introduce template filters for checking dynamic permissions<commit_after>
|
from django import template
register = template.Library()
def _check_permission(user, instance, action):
return user.has_perm(
perm=f'{instance._meta.app_label}.{action}_{instance._meta.model_name}',
obj=instance
)
@register.filter()
def can_view(user, instance):
return _check_permission(user, instance, 'view')
@register.filter()
def can_add(user, instance):
return _check_permission(user, instance, 'add')
@register.filter()
def can_change(user, instance):
return _check_permission(user, instance, 'change')
@register.filter()
def can_delete(user, instance):
return _check_permission(user, instance, 'delete')
|
Introduce template filters for checking dynamic permissions
from django import template
register = template.Library()
def _check_permission(user, instance, action):
return user.has_perm(
perm=f'{instance._meta.app_label}.{action}_{instance._meta.model_name}',
obj=instance
)
@register.filter()
def can_view(user, instance):
return _check_permission(user, instance, 'view')
@register.filter()
def can_add(user, instance):
return _check_permission(user, instance, 'add')
@register.filter()
def can_change(user, instance):
return _check_permission(user, instance, 'change')
@register.filter()
def can_delete(user, instance):
return _check_permission(user, instance, 'delete')
|
<commit_before><commit_msg>Introduce template filters for checking dynamic permissions<commit_after>from django import template
register = template.Library()
def _check_permission(user, instance, action):
return user.has_perm(
perm=f'{instance._meta.app_label}.{action}_{instance._meta.model_name}',
obj=instance
)
@register.filter()
def can_view(user, instance):
return _check_permission(user, instance, 'view')
@register.filter()
def can_add(user, instance):
return _check_permission(user, instance, 'add')
@register.filter()
def can_change(user, instance):
return _check_permission(user, instance, 'change')
@register.filter()
def can_delete(user, instance):
return _check_permission(user, instance, 'delete')
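
The permission string the helper assembles can be illustrated without Django at all; the 'dcim'/'device' names below are stand-ins, not taken from the module above.

class _Meta:
    app_label = 'dcim'
    model_name = 'device'

class FakeDevice:
    _meta = _Meta()

instance, action = FakeDevice(), 'change'
print(f'{instance._meta.app_label}.{action}_{instance._meta.model_name}')  # -> 'dcim.change_device'

In a template the corresponding check would look something like {% load perms %} followed by {% if request.user|can_change:object %}, assuming the object names used there exist in the template context.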
|
|
4c354a4941b15ea90e72483881c95c7f8c496f71
|
scripts/turn_on_and_off.py
|
scripts/turn_on_and_off.py
|
#!/usr/bin/python
#Definindo a biblioteca GPIO
import RPi.GPIO as GPIO
#Definindo a biblioteca TIME
import time
import sys
#Aqui definimos que vamos usar o numero de ordem de Porta, e nao o numero que refere a BOARD.
# Para alterar troque GPIO.BCM para GPIO.BOARD
GPIO.setmode(GPIO.BCM)
# Aqui vamos desativar msg de log no shell (ex. >>> RuntimeWarning: This channel is already in use, continuing anyway.
GPIO.setwarnings(False)
# Para ativar novamente apenas comente linha de cima.
# Aqui criamos um Array com as portas GPIO que seram utilizadas.
pinList = [sys.argv[1]]
# Criamos um laco com as portas listadas em pinList (ex. 2,3,4,7...) e setamos o valor como OUT (False/0)
for i in pinList:
GPIO.setup(int(i), GPIO.OUT)
# GPIO.output(i, GPIO.HIGH) # Comentei para se saber o oposto, ou seja, valor como HIGH (Verdadeiro/1)
# Criando o loop e imprime no shell ON
try:
GPIO.output(int(sys.argv[1]), GPIO.HIGH)
print "ON"
GPIO.output(int(sys.argv[1]), GPIO.LOW)
print "OFF"
# Para sair/cancelar (crtl + c) e imprime Sair
except KeyboardInterrupt:
print "Sair"
# Reseta/Limpa configuracao da GPIO
GPIO.cleanup()
|
Create turn on and off script
|
Create turn on and off script
|
Python
|
mit
|
biorreator/bioreator-api,biorreator/bioreator-api
|
Create turn on and off script
|
#!/usr/bin/python
#Definindo a biblioteca GPIO
import RPi.GPIO as GPIO
#Definindo a biblioteca TIME
import time
import sys
#Aqui definimos que vamos usar o numero de ordem de Porta, e nao o numero que refere a BOARD.
# Para alterar troque GPIO.BCM para GPIO.BOARD
GPIO.setmode(GPIO.BCM)
# Aqui vamos desativar msg de log no shell (ex. >>> RuntimeWarning: This channel is already in use, continuing anyway.
GPIO.setwarnings(False)
# Para ativar novamente apenas comente linha de cima.
# Aqui criamos um Array com as portas GPIO que seram utilizadas.
pinList = [sys.argv[1]]
# Criamos um laco com as portas listadas em pinList (ex. 2,3,4,7...) e setamos o valor como OUT (False/0)
for i in pinList:
GPIO.setup(int(i), GPIO.OUT)
# GPIO.output(i, GPIO.HIGH) # Comentei para se saber o oposto, ou seja, valor como HIGH (Verdadeiro/1)
# Criando o loop e imprime no shell ON
try:
GPIO.output(int(sys.argv[1]), GPIO.HIGH)
print "ON"
GPIO.output(int(sys.argv[1]), GPIO.LOW)
print "OFF"
# Para sair/cancelar (crtl + c) e imprime Sair
except KeyboardInterrupt:
print "Sair"
# Reseta/Limpa configuracao da GPIO
GPIO.cleanup()
|
<commit_before><commit_msg>Create turn on and off script<commit_after>
|
#!/usr/bin/python
#Definindo a biblioteca GPIO
import RPi.GPIO as GPIO
#Definindo a biblioteca TIME
import time
import sys
#Aqui definimos que vamos usar o numero de ordem de Porta, e nao o numero que refere a BOARD.
# Para alterar troque GPIO.BCM para GPIO.BOARD
GPIO.setmode(GPIO.BCM)
# Aqui vamos desativar msg de log no shell (ex. >>> RuntimeWarning: This channel is already in use, continuing anyway.
GPIO.setwarnings(False)
# Para ativar novamente apenas comente linha de cima.
# Aqui criamos um Array com as portas GPIO que seram utilizadas.
pinList = [sys.argv[1]]
# Criamos um laco com as portas listadas em pinList (ex. 2,3,4,7...) e setamos o valor como OUT (False/0)
for i in pinList:
GPIO.setup(int(i), GPIO.OUT)
# GPIO.output(i, GPIO.HIGH) # Comentei para se saber o oposto, ou seja, valor como HIGH (Verdadeiro/1)
# Criando o loop e imprime no shell ON
try:
GPIO.output(int(sys.argv[1]), GPIO.HIGH)
print "ON"
GPIO.output(int(sys.argv[1]), GPIO.LOW)
print "OFF"
# Para sair/cancelar (crtl + c) e imprime Sair
except KeyboardInterrupt:
print "Sair"
# Reseta/Limpa configuracao da GPIO
GPIO.cleanup()
|
Create turn on and off script
#!/usr/bin/python
#Definindo a biblioteca GPIO
import RPi.GPIO as GPIO
#Definindo a biblioteca TIME
import time
import sys
#Aqui definimos que vamos usar o numero de ordem de Porta, e nao o numero que refere a BOARD.
# Para alterar troque GPIO.BCM para GPIO.BOARD
GPIO.setmode(GPIO.BCM)
# Aqui vamos desativar msg de log no shell (ex. >>> RuntimeWarning: This channel is already in use, continuing anyway.
GPIO.setwarnings(False)
# Para ativar novamente apenas comente linha de cima.
# Aqui criamos um Array com as portas GPIO que seram utilizadas.
pinList = [sys.argv[1]]
# Criamos um laco com as portas listadas em pinList (ex. 2,3,4,7...) e setamos o valor como OUT (False/0)
for i in pinList:
GPIO.setup(int(i), GPIO.OUT)
# GPIO.output(i, GPIO.HIGH) # Comentei para se saber o oposto, ou seja, valor como HIGH (Verdadeiro/1)
# Criando o loop e imprime no shell ON
try:
GPIO.output(int(sys.argv[1]), GPIO.HIGH)
print "ON"
GPIO.output(int(sys.argv[1]), GPIO.LOW)
print "OFF"
# Para sair/cancelar (crtl + c) e imprime Sair
except KeyboardInterrupt:
print "Sair"
# Reseta/Limpa configuracao da GPIO
GPIO.cleanup()
|
<commit_before><commit_msg>Create turn on and off script<commit_after>#!/usr/bin/python
#Definindo a biblioteca GPIO
import RPi.GPIO as GPIO
#Definindo a biblioteca TIME
import time
import sys
#Aqui definimos que vamos usar o numero de ordem de Porta, e nao o numero que refere a BOARD.
# Para alterar troque GPIO.BCM para GPIO.BOARD
GPIO.setmode(GPIO.BCM)
# Aqui vamos desativar msg de log no shell (ex. >>> RuntimeWarning: This channel is already in use, continuing anyway.
GPIO.setwarnings(False)
# Para ativar novamente apenas comente linha de cima.
# Aqui criamos um Array com as portas GPIO que seram utilizadas.
pinList = [sys.argv[1]]
# Criamos um laco com as portas listadas em pinList (ex. 2,3,4,7...) e setamos o valor como OUT (False/0)
for i in pinList:
GPIO.setup(int(i), GPIO.OUT)
# GPIO.output(i, GPIO.HIGH) # Comentei para se saber o oposto, ou seja, valor como HIGH (Verdadeiro/1)
# Criando o loop e imprime no shell ON
try:
GPIO.output(int(sys.argv[1]), GPIO.HIGH)
print "ON"
GPIO.output(int(sys.argv[1]), GPIO.LOW)
print "OFF"
# Para sair/cancelar (crtl + c) e imprime Sair
except KeyboardInterrupt:
print "Sair"
# Reseta/Limpa configuracao da GPIO
GPIO.cleanup()
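
The script expects a single BCM pin number on the command line; a hedged example of driving it from another process (pin 18 is only an example, and GPIO access usually requires root).

import subprocess

# Equivalent to running: sudo python turn_on_and_off.py 18
subprocess.call(["sudo", "python", "turn_on_and_off.py", "18"])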
|
|
73321680795cff9b1c680ba20fc6ebf2fb87eeb9
|
dockci/migrations/0003.py
|
dockci/migrations/0003.py
|
"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
Add migration to rename version -> tag field
|
Add migration to rename version -> tag field
|
Python
|
isc
|
sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,sprucedev/DockCI-Agent,RickyCook/DockCI,RickyCook/DockCI,sprucedev/DockCI,RickyCook/DockCI
|
Add migration to rename version -> tag field
|
"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
<commit_before><commit_msg>Add migration to rename version -> tag field<commit_after>
|
"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
Add migration to rename version -> tag field
"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
|
<commit_before><commit_msg>Add migration to rename version -> tag field<commit_after>"""
Migrate version to tag in build models
"""
import os
import shutil
import yaml
job_dirs = (
filename for filename in
os.listdir(os.path.join('data', 'builds'))
if os.path.isdir(os.path.join('data', 'builds', filename))
)
for job_dir in job_dirs:
build_files = (
filename for filename in
os.listdir(os.path.join('data', 'builds', job_dir))
if filename[-5:] == '.yaml'
)
for build_file in build_files:
build_slug = build_file[:-5]
build_file_path = os.path.join('data', 'builds', job_dir, build_file)
with open(build_file_path) as handle:
build_dict = yaml.load(handle)
try:
version = build_dict.pop('version')
if version:
build_dict['tag'] = version
with open(build_file_path, 'w') as handle:
yaml.dump(build_dict, handle, default_flow_style=False)
except KeyError:
pass
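
What the migration does to one build document can be reproduced in isolation; the field values below are invented, and only the version-to-tag rename mirrors the script above.

import yaml

before = yaml.safe_load("""
project_slug: example
version: v1.2.3
""")

version = before.pop('version', None)
if version:
    before['tag'] = version
print(yaml.dump(before, default_flow_style=False))  # the value now lives under 'tag'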
|
|
5eb33cd8ab278be2f8b4e879ef39dfa5ade3c3d7
|
pymatbridge/tests/test_set_variable.py
|
pymatbridge/tests/test_set_variable.py
|
import pymatbridge as pymat
import random as rd
import numpy as np
import numpy.testing as npt
import test_utils as tu
class TestArray:
# Start a Matlab session before any tests
@classmethod
def setup_class(cls):
cls.mlab = tu.connect_to_matlab()
# Tear down the Matlab session after all the tests are done
@classmethod
def teardown_class(cls):
tu.stop_matlab(cls.mlab)
    # Pass a 50*50 array to Matlab
def test_array_size(self):
array = np.random.random_sample((50,50))
res = self.mlab.run_func("array_size.m",{'val':array})['result']
npt.assert_almost_equal(res, array, decimal=8, err_msg = "test_array_size: error")
def test_array_content(self):
test_array = np.random.random_integers(2, 20, (5, 10))
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
test_array = np.asfortranarray(test_array)
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
# force non-contiguous
test_array = test_array[::-1]
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_object_array(self):
test_array = np.array(['hello', 1])
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_others(self):
self.mlab.set_variable('test', np.float(1.5))
npt.assert_equal(self.mlab.get_variable('test'), 1.5)
self.mlab.set_variable('test', 'hello')
npt.assert_equal(self.mlab.get_variable('test'), 'hello')
|
Rename test_array and add more variations
|
Rename test_array and add more variations
|
Python
|
bsd-3-clause
|
jjangsangy/python-matlab-bridge,jjangsangy/python-matlab-bridge,arokem/python-matlab-bridge,arokem/python-matlab-bridge,blink1073/python-matlab-bridge,jjangsangy/python-matlab-bridge,blink1073/python-matlab-bridge,blink1073/python-matlab-bridge,arokem/python-matlab-bridge
|
Rename test_array and add more variations
|
import pymatbridge as pymat
import random as rd
import numpy as np
import numpy.testing as npt
import test_utils as tu
class TestArray:
# Start a Matlab session before any tests
@classmethod
def setup_class(cls):
cls.mlab = tu.connect_to_matlab()
# Tear down the Matlab session after all the tests are done
@classmethod
def teardown_class(cls):
tu.stop_matlab(cls.mlab)
    # Pass a 50*50 array to Matlab
def test_array_size(self):
array = np.random.random_sample((50,50))
res = self.mlab.run_func("array_size.m",{'val':array})['result']
npt.assert_almost_equal(res, array, decimal=8, err_msg = "test_array_size: error")
def test_array_content(self):
test_array = np.random.random_integers(2, 20, (5, 10))
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
test_array = np.asfortranarray(test_array)
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
# force non-contiguous
test_array = test_array[::-1]
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_object_array(self):
test_array = np.array(['hello', 1])
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_others(self):
self.mlab.set_variable('test', np.float(1.5))
npt.assert_equal(self.mlab.get_variable('test'), 1.5)
self.mlab.set_variable('test', 'hello')
npt.assert_equal(self.mlab.get_variable('test'), 'hello')
|
<commit_before><commit_msg>Rename test_array and add more variations<commit_after>
|
import pymatbridge as pymat
import random as rd
import numpy as np
import numpy.testing as npt
import test_utils as tu
class TestArray:
# Start a Matlab session before any tests
@classmethod
def setup_class(cls):
cls.mlab = tu.connect_to_matlab()
# Tear down the Matlab session after all the tests are done
@classmethod
def teardown_class(cls):
tu.stop_matlab(cls.mlab)
    # Pass a 50*50 array to Matlab
def test_array_size(self):
array = np.random.random_sample((50,50))
res = self.mlab.run_func("array_size.m",{'val':array})['result']
npt.assert_almost_equal(res, array, decimal=8, err_msg = "test_array_size: error")
def test_array_content(self):
test_array = np.random.random_integers(2, 20, (5, 10))
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
test_array = np.asfortranarray(test_array)
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
# force non-contiguous
test_array = test_array[::-1]
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_object_array(self):
test_array = np.array(['hello', 1])
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_others(self):
self.mlab.set_variable('test', np.float(1.5))
npt.assert_equal(self.mlab.get_variable('test'), 1.5)
self.mlab.set_variable('test', 'hello')
npt.assert_equal(self.mlab.get_variable('test'), 'hello')
|
Rename test_array and add more variations
import pymatbridge as pymat
import random as rd
import numpy as np
import numpy.testing as npt
import test_utils as tu
class TestArray:
# Start a Matlab session before any tests
@classmethod
def setup_class(cls):
cls.mlab = tu.connect_to_matlab()
# Tear down the Matlab session after all the tests are done
@classmethod
def teardown_class(cls):
tu.stop_matlab(cls.mlab)
    # Pass a 50*50 array to Matlab
def test_array_size(self):
array = np.random.random_sample((50,50))
res = self.mlab.run_func("array_size.m",{'val':array})['result']
npt.assert_almost_equal(res, array, decimal=8, err_msg = "test_array_size: error")
def test_array_content(self):
test_array = np.random.random_integers(2, 20, (5, 10))
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
test_array = np.asfortranarray(test_array)
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
# force non-contiguous
test_array = test_array[::-1]
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_object_array(self):
test_array = np.array(['hello', 1])
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_others(self):
self.mlab.set_variable('test', np.float(1.5))
npt.assert_equal(self.mlab.get_variable('test'), 1.5)
self.mlab.set_variable('test', 'hello')
npt.assert_equal(self.mlab.get_variable('test'), 'hello')
|
<commit_before><commit_msg>Rename test_array and add more variations<commit_after>import pymatbridge as pymat
import random as rd
import numpy as np
import numpy.testing as npt
import test_utils as tu
class TestArray:
# Start a Matlab session before any tests
@classmethod
def setup_class(cls):
cls.mlab = tu.connect_to_matlab()
# Tear down the Matlab session after all the tests are done
@classmethod
def teardown_class(cls):
tu.stop_matlab(cls.mlab)
    # Pass a 50*50 array to Matlab
def test_array_size(self):
array = np.random.random_sample((50,50))
res = self.mlab.run_func("array_size.m",{'val':array})['result']
npt.assert_almost_equal(res, array, decimal=8, err_msg = "test_array_size: error")
def test_array_content(self):
test_array = np.random.random_integers(2, 20, (5, 10))
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
test_array = np.asfortranarray(test_array)
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
# force non-contiguous
test_array = test_array[::-1]
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_object_array(self):
test_array = np.array(['hello', 1])
self.mlab.set_variable('test', test_array)
npt.assert_equal(self.mlab.get_variable('test'), test_array)
def test_others(self):
self.mlab.set_variable('test', np.float(1.5))
npt.assert_equal(self.mlab.get_variable('test'), 1.5)
self.mlab.set_variable('test', 'hello')
npt.assert_equal(self.mlab.get_variable('test'), 'hello')
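
A minimal round-trip outside the test harness, using the same set_variable/get_variable calls the tests exercise; this assumes a local MATLAB installation that pymatbridge can launch.

import numpy as np
import pymatbridge

mlab = pymatbridge.Matlab()
mlab.start()
mlab.set_variable('test', np.arange(6).reshape(2, 3))
print(mlab.get_variable('test'))
mlab.stop()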
|
|
11b139e7e4fa42d58157b2c872677fe807127a3d
|
tree/108.py
|
tree/108.py
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#recursive solution
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
pivot = len(nums) // 2
root = TreeNode(nums[pivot])
root.left = self.sortedArrayToBST(nums[:pivot])
root.right = self.sortedArrayToBST(nums[pivot+1:])
return root
|
Convert Sorted Array to Binary Search Tree
|
Convert Sorted Array to Binary Search Tree
|
Python
|
apache-2.0
|
MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode,MingfeiPan/leetcode
|
Convert Sorted Array to Binary Search Tree
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#recursive solution
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
pivot = len(nums) // 2
root = TreeNode(nums[pivot])
root.left = self.sortedArrayToBST(nums[:pivot])
root.right = self.sortedArrayToBST(nums[pivot+1:])
return root
|
<commit_before><commit_msg>Convert Sorted Array to Binary Search Tree<commit_after>
|
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#recursive solution
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
pivot = len(nums) // 2
root = TreeNode(nums[pivot])
root.left = self.sortedArrayToBST(nums[:pivot])
root.right = self.sortedArrayToBST(nums[pivot+1:])
return root
|
Convert Sorted Array to Binary Search Tree
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#recursive solution
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
pivot = len(nums) // 2
root = TreeNode(nums[pivot])
root.left = self.sortedArrayToBST(nums[:pivot])
root.right = self.sortedArrayToBST(nums[pivot+1:])
return root
|
<commit_before><commit_msg>Convert Sorted Array to Binary Search Tree<commit_after># Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#recursive solution
class Solution:
def sortedArrayToBST(self, nums: List[int]) -> TreeNode:
if not nums:
return None
pivot = len(nums) // 2
root = TreeNode(nums[pivot])
root.left = self.sortedArrayToBST(nums[:pivot])
root.right = self.sortedArrayToBST(nums[pivot+1:])
return root
|
|
c5aa8f0c6b631ee7f4d0beefb20c8537ecbcf011
|
tests/test_c_solutions.py
|
tests/test_c_solutions.py
|
import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("c"), "*.c"))
@pytest.mark.c
def test_c_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.c
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
Add separate test file for c
|
Add separate test file for c
|
Python
|
mit
|
project-lovelace/lovelace-engine,project-lovelace/lovelace-engine,project-lovelace/lovelace-engine
|
Add separate test file for c
|
import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("c"), "*.c"))
@pytest.mark.c
def test_c_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.c
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
<commit_before><commit_msg>Add separate test file for c<commit_after>
|
import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("c"), "*.c"))
@pytest.mark.c
def test_c_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.c
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
Add separate test file for cimport glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("c"), "*.c"))
@pytest.mark.c
def test_c_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.c
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
<commit_before><commit_msg>Add separate test file for c<commit_after>import glob
import json
import os
import pytest
from helpers import solutions_dir
# NOTE: If we make solution_files a fixture instead of a normal attr/function,
# then we can't use it in pytest's parametrize
solution_files = glob.glob(os.path.join(solutions_dir("c"), "*.c"))
@pytest.mark.c
def test_c_solutions_exist():
assert solution_files
def id_func(param):
problem_name, ext = os.path.splitext(os.path.basename(param))
return problem_name
@pytest.mark.c
@pytest.mark.parametrize("solution_file", solution_files, ids=id_func)
def test_submit_file(solution_file, submit_solution):
result = submit_solution(solution_file)
assert result.get("success") is True, "Failed. Engine output:\n{:}".format(
json.dumps(result, indent=4)
)
|
|
58d9ac5372d6d013750e570a37f6a425dfc244e1
|
tests/test_mau_a_vs_an.py
|
tests/test_mau_a_vs_an.py
|
from check import Check
from proselint.checks import mau_a_vs_an as chk
class TestCheck(Check):
__test__ = True
@property
def this_check(self):
return chk
def test_clean(self):
assert self.check(
"""An apple a day keeps the doctor away."""
)
def test_a_apple(self):
assert not self.check(
"""A apple a day keeps the doctor away."""
)
def test_an_day(self):
assert not self.check(
"""An apple an day keeps the doctor away."""
)
def test_linebreak(self):
assert not self.check(
"""An apple an\nday keeps the doctor away."""
)
def test_mid_word(self):
assert self.check(
"""The Epicurean garden."""
)
|
Add tests for 'a' vs. 'an' rule
|
Add tests for 'a' vs. 'an' rule
Closes #31.
|
Python
|
bsd-3-clause
|
amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,amperser/proselint,jstewmon/proselint,amperser/proselint,jstewmon/proselint
|
Add tests for 'a' vs. 'an' rule
Closes #31.
|
from check import Check
from proselint.checks import mau_a_vs_an as chk
class TestCheck(Check):
__test__ = True
@property
def this_check(self):
return chk
def test_clean(self):
assert self.check(
"""An apple a day keeps the doctor away."""
)
def test_a_apple(self):
assert not self.check(
"""A apple a day keeps the doctor away."""
)
def test_an_day(self):
assert not self.check(
"""An apple an day keeps the doctor away."""
)
def test_linebreak(self):
assert not self.check(
"""An apple an\nday keeps the doctor away."""
)
def test_mid_word(self):
assert self.check(
"""The Epicurean garden."""
)
|
<commit_before><commit_msg>Add tests for 'a' vs. 'an' rule
Closes #31.<commit_after>
|
from check import Check
from proselint.checks import mau_a_vs_an as chk
class TestCheck(Check):
__test__ = True
@property
def this_check(self):
return chk
def test_clean(self):
assert self.check(
"""An apple a day keeps the doctor away."""
)
def test_a_apple(self):
assert not self.check(
"""A apple a day keeps the doctor away."""
)
def test_an_day(self):
assert not self.check(
"""An apple an day keeps the doctor away."""
)
def test_linebreak(self):
assert not self.check(
"""An apple an\nday keeps the doctor away."""
)
def test_mid_word(self):
assert self.check(
"""The Epicurean garden."""
)
|
Add tests for 'a' vs. 'an' rule
Closes #31.from check import Check
from proselint.checks import mau_a_vs_an as chk
class TestCheck(Check):
__test__ = True
@property
def this_check(self):
return chk
def test_clean(self):
assert self.check(
"""An apple a day keeps the doctor away."""
)
def test_a_apple(self):
assert not self.check(
"""A apple a day keeps the doctor away."""
)
def test_an_day(self):
assert not self.check(
"""An apple an day keeps the doctor away."""
)
def test_linebreak(self):
assert not self.check(
"""An apple an\nday keeps the doctor away."""
)
def test_mid_word(self):
assert self.check(
"""The Epicurean garden."""
)
|
<commit_before><commit_msg>Add tests for 'a' vs. 'an' rule
Closes #31.<commit_after>from check import Check
from proselint.checks import mau_a_vs_an as chk
class TestCheck(Check):
__test__ = True
@property
def this_check(self):
return chk
def test_clean(self):
assert self.check(
"""An apple a day keeps the doctor away."""
)
def test_a_apple(self):
assert not self.check(
"""A apple a day keeps the doctor away."""
)
def test_an_day(self):
assert not self.check(
"""An apple an day keeps the doctor away."""
)
def test_linebreak(self):
assert not self.check(
"""An apple an\nday keeps the doctor away."""
)
def test_mid_word(self):
assert self.check(
"""The Epicurean garden."""
)
|
|
8f91a2e8dea6076cdb148b53f6f938faad693ab1
|
kerrokantasi/migrations/0005_alter_user_first_name.py
|
kerrokantasi/migrations/0005_alter_user_first_name.py
|
# Generated by Django 3.2.13 on 2022-06-22 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kerrokantasi', '0004_auto_20200225_1349'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
Add missing migration for User
|
Add missing migration for User
|
Python
|
mit
|
City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi,City-of-Helsinki/kerrokantasi
|
Add missing migration for User
|
# Generated by Django 3.2.13 on 2022-06-22 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kerrokantasi', '0004_auto_20200225_1349'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
<commit_before><commit_msg>Add missing migration for User<commit_after>
|
# Generated by Django 3.2.13 on 2022-06-22 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kerrokantasi', '0004_auto_20200225_1349'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
Add missing migration for User# Generated by Django 3.2.13 on 2022-06-22 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kerrokantasi', '0004_auto_20200225_1349'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
<commit_before><commit_msg>Add missing migration for User<commit_after># Generated by Django 3.2.13 on 2022-06-22 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('kerrokantasi', '0004_auto_20200225_1349'),
]
operations = [
migrations.AlterField(
model_name='user',
name='first_name',
field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
),
]
|
|
1644f743996c950264a4be58ba0c58b013102828
|
testing/test_generate_movie.py
|
testing/test_generate_movie.py
|
import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import mimetypes
from create_movie import generate_movie
def test_generate_movie(tmpdir):
images_dir = 'images'
output_filename = tmpdir.join('out.mp4')
generate_movie(images_dir, str(output_filename))
assert (os.path.isfile(str(output_filename)) and
mimetypes.guess_type(str(output_filename)) == ('video/mp4', None))
|
Add test for generating movie
|
Add test for generating movie
|
Python
|
mit
|
NGTS/frame-movies,NGTS/frame-movies
|
Add test for generating movie
|
import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import mimetypes
from create_movie import generate_movie
def test_generate_movie(tmpdir):
images_dir = 'images'
output_filename = tmpdir.join('out.mp4')
generate_movie(images_dir, str(output_filename))
assert (os.path.isfile(str(output_filename)) and
mimetypes.guess_type(str(output_filename)) == ('video/mp4', None))
|
<commit_before><commit_msg>Add test for generating movie<commit_after>
|
import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import mimetypes
from create_movie import generate_movie
def test_generate_movie(tmpdir):
images_dir = 'images'
output_filename = tmpdir.join('out.mp4')
generate_movie(images_dir, str(output_filename))
assert (os.path.isfile(str(output_filename)) and
mimetypes.guess_type(str(output_filename)) == ('video/mp4', None))
|
Add test for generating movieimport sys
import os
sys.path.insert(0, os.path.realpath('.'))
import mimetypes
from create_movie import generate_movie
def test_generate_movie(tmpdir):
images_dir = 'images'
output_filename = tmpdir.join('out.mp4')
generate_movie(images_dir, str(output_filename))
assert (os.path.isfile(str(output_filename)) and
mimetypes.guess_type(str(output_filename)) == ('video/mp4', None))
|
<commit_before><commit_msg>Add test for generating movie<commit_after>import sys
import os
sys.path.insert(0, os.path.realpath('.'))
import mimetypes
from create_movie import generate_movie
def test_generate_movie(tmpdir):
images_dir = 'images'
output_filename = tmpdir.join('out.mp4')
generate_movie(images_dir, str(output_filename))
assert (os.path.isfile(str(output_filename)) and
mimetypes.guess_type(str(output_filename)) == ('video/mp4', None))
|
|
e7f75db6e2b1be77ce0382737a4679c15cac7167
|
website/tests/helpers/test_bioinf.py
|
website/tests/helpers/test_bioinf.py
|
import pytest
import helpers.bioinf as bioinf
def test_complement():
test_sequences = (
('ACTCGGTAA', 'TGAGCCATT'),
('TGAGCCATT', 'ACTCGGTAA'),
('TTAAGGCC', 'AATTCCGG'),
)
for sequence, complement in test_sequences:
assert bioinf.complement(sequence) == complement
def test_decode_mutation():
test_mutations = (
('c.G130A', ('G', 130, 'A')),
('p.V44I', ('V', 44, 'I')),
('c.C617T', ('C', 617, 'T')),
('p.S206L', ('S', 206, 'L')),
)
for mutation_string, result in test_mutations:
assert bioinf.decode_mutation(mutation_string) == result
def test_decode_mutation_wrong():
incorrect_mutations = ('p-F10A', 'pF10A')
for mutation in incorrect_mutations:
with pytest.raises(AssertionError):
bioinf.decode_mutation(mutation)
|
Add few tests for bioinf helpers
|
Add few tests for bioinf helpers
|
Python
|
lgpl-2.1
|
reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations
|
Add few tests for bioinf helpers
|
import pytest
import helpers.bioinf as bioinf
def test_complement():
test_sequences = (
('ACTCGGTAA', 'TGAGCCATT'),
('TGAGCCATT', 'ACTCGGTAA'),
('TTAAGGCC', 'AATTCCGG'),
)
for sequence, complement in test_sequences:
assert bioinf.complement(sequence) == complement
def test_decode_mutation():
test_mutations = (
('c.G130A', ('G', 130, 'A')),
('p.V44I', ('V', 44, 'I')),
('c.C617T', ('C', 617, 'T')),
('p.S206L', ('S', 206, 'L')),
)
for mutation_string, result in test_mutations:
assert bioinf.decode_mutation(mutation_string) == result
def test_decode_mutation_wrong():
incorrect_mutations = ('p-F10A', 'pF10A')
for mutation in incorrect_mutations:
with pytest.raises(AssertionError):
bioinf.decode_mutation(mutation)
|
<commit_before><commit_msg>Add few tests for bioinf helpers<commit_after>
|
import pytest
import helpers.bioinf as bioinf
def test_complement():
test_sequences = (
('ACTCGGTAA', 'TGAGCCATT'),
('TGAGCCATT', 'ACTCGGTAA'),
('TTAAGGCC', 'AATTCCGG'),
)
for sequence, complement in test_sequences:
assert bioinf.complement(sequence) == complement
def test_decode_mutation():
test_mutations = (
('c.G130A', ('G', 130, 'A')),
('p.V44I', ('V', 44, 'I')),
('c.C617T', ('C', 617, 'T')),
('p.S206L', ('S', 206, 'L')),
)
for mutation_string, result in test_mutations:
assert bioinf.decode_mutation(mutation_string) == result
def test_decode_mutation_wrong():
incorrect_mutations = ('p-F10A', 'pF10A')
for mutation in incorrect_mutations:
with pytest.raises(AssertionError):
bioinf.decode_mutation(mutation)
|
Add few tests for bioinf helpersimport pytest
import helpers.bioinf as bioinf
def test_complement():
test_sequences = (
('ACTCGGTAA', 'TGAGCCATT'),
('TGAGCCATT', 'ACTCGGTAA'),
('TTAAGGCC', 'AATTCCGG'),
)
for sequence, complement in test_sequences:
assert bioinf.complement(sequence) == complement
def test_decode_mutation():
test_mutations = (
('c.G130A', ('G', 130, 'A')),
('p.V44I', ('V', 44, 'I')),
('c.C617T', ('C', 617, 'T')),
('p.S206L', ('S', 206, 'L')),
)
for mutation_string, result in test_mutations:
assert bioinf.decode_mutation(mutation_string) == result
def test_decode_mutation_wrong():
incorrect_mutations = ('p-F10A', 'pF10A')
for mutation in incorrect_mutations:
with pytest.raises(AssertionError):
bioinf.decode_mutation(mutation)
|
<commit_before><commit_msg>Add few tests for bioinf helpers<commit_after>import pytest
import helpers.bioinf as bioinf
def test_complement():
test_sequences = (
('ACTCGGTAA', 'TGAGCCATT'),
('TGAGCCATT', 'ACTCGGTAA'),
('TTAAGGCC', 'AATTCCGG'),
)
for sequence, complement in test_sequences:
assert bioinf.complement(sequence) == complement
def test_decode_mutation():
test_mutations = (
('c.G130A', ('G', 130, 'A')),
('p.V44I', ('V', 44, 'I')),
('c.C617T', ('C', 617, 'T')),
('p.S206L', ('S', 206, 'L')),
)
for mutation_string, result in test_mutations:
assert bioinf.decode_mutation(mutation_string) == result
def test_decode_mutation_wrong():
incorrect_mutations = ('p-F10A', 'pF10A')
for mutation in incorrect_mutations:
with pytest.raises(AssertionError):
bioinf.decode_mutation(mutation)
|
|
f381214b4d05fb0c809888ea6362a4125ae3b779
|
test/Configure/VariantDir2.py
|
test/Configure/VariantDir2.py
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
Add test case for configure failure.
|
Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@4363 fdb21ef1-2011-0410-befe-b5e4ea1792b1
|
Python
|
mit
|
azverkan/scons,azverkan/scons,azverkan/scons,azverkan/scons,azverkan/scons
|
Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@4363 fdb21ef1-2011-0410-befe-b5e4ea1792b1
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
<commit_before><commit_msg>Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@4363 fdb21ef1-2011-0410-befe-b5e4ea1792b1<commit_after>
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@4363 fdb21ef1-2011-0410-befe-b5e4ea1792b1#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
<commit_before><commit_msg>Add test case for configure failure.
TryRun fails to find the executable when VariantDir is set up from
SConscript/SConstruct.
git-svn-id: 7892167f69f80ee5d3024affce49f20c74bcb41d@4363 fdb21ef1-2011-0410-befe-b5e4ea1792b1<commit_after>#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Verify that Configure contexts work with SConstruct/SConscript structure
"""
import os
import TestSCons
test = TestSCons.TestSCons()
test.write('SConstruct', """\
SConscript('SConscript', build_dir='build', src='.')
""")
test.write('SConscript', """\
env = Environment()
config = env.Configure(conf_dir='sconf', log_file='config.log')
config.TryRun("int main() {}", ".c")
config.Finish()
""")
test.run()
test.pass_test()
|
|
0d3e94a4043347d671e7d898e382b11b1a5e1c23
|
qutip/tests/test_heom.py
|
qutip/tests/test_heom.py
|
"""
Tests for qutip.nonmarkov.heom.
"""
from qutip.nonmarkov.heom import (
BathExponent,
Bath,
BosonicBath,
DrudeLorentzBath,
DrudeLorentzPadeBath,
FermionicBath,
HEOMSolver,
BosonicHEOMSolver,
FermionicHEOMSolver,
HSolverDL,
)
class TestBathAPI:
def test_api(self):
# just assert that the baths are importable
assert BathExponent
assert Bath
assert BosonicBath
assert DrudeLorentzBath
assert DrudeLorentzPadeBath
assert FermionicBath
class TestSolverAPI:
def test_api(self):
        # just assert that the solvers are importable
assert HEOMSolver
assert BosonicHEOMSolver
assert FermionicHEOMSolver
assert HSolverDL
|
Add tests for the heom module API.
|
Add tests for the heom module API.
|
Python
|
bsd-3-clause
|
qutip/qutip,qutip/qutip,cgranade/qutip,cgranade/qutip
|
Add tests for the heom module API.
|
"""
Tests for qutip.nonmarkov.heom.
"""
from qutip.nonmarkov.heom import (
BathExponent,
Bath,
BosonicBath,
DrudeLorentzBath,
DrudeLorentzPadeBath,
FermionicBath,
HEOMSolver,
BosonicHEOMSolver,
FermionicHEOMSolver,
HSolverDL,
)
class TestBathAPI:
def test_api(self):
# just assert that the baths are importable
assert BathExponent
assert Bath
assert BosonicBath
assert DrudeLorentzBath
assert DrudeLorentzPadeBath
assert FermionicBath
class TestSolverAPI:
def test_api(self):
        # just assert that the solvers are importable
assert HEOMSolver
assert BosonicHEOMSolver
assert FermionicHEOMSolver
assert HSolverDL
|
<commit_before><commit_msg>Add tests for the heom module API.<commit_after>
|
"""
Tests for qutip.nonmarkov.heom.
"""
from qutip.nonmarkov.heom import (
BathExponent,
Bath,
BosonicBath,
DrudeLorentzBath,
DrudeLorentzPadeBath,
FermionicBath,
HEOMSolver,
BosonicHEOMSolver,
FermionicHEOMSolver,
HSolverDL,
)
class TestBathAPI:
def test_api(self):
# just assert that the baths are importable
assert BathExponent
assert Bath
assert BosonicBath
assert DrudeLorentzBath
assert DrudeLorentzPadeBath
assert FermionicBath
class TestSolverAPI:
def test_api(self):
        # just assert that the solvers are importable
assert HEOMSolver
assert BosonicHEOMSolver
assert FermionicHEOMSolver
assert HSolverDL
|
Add tests for the heom module API."""
Tests for qutip.nonmarkov.heom.
"""
from qutip.nonmarkov.heom import (
BathExponent,
Bath,
BosonicBath,
DrudeLorentzBath,
DrudeLorentzPadeBath,
FermionicBath,
HEOMSolver,
BosonicHEOMSolver,
FermionicHEOMSolver,
HSolverDL,
)
class TestBathAPI:
def test_api(self):
# just assert that the baths are importable
assert BathExponent
assert Bath
assert BosonicBath
assert DrudeLorentzBath
assert DrudeLorentzPadeBath
assert FermionicBath
class TestSolverAPI:
def test_api(self):
        # just assert that the solvers are importable
assert HEOMSolver
assert BosonicHEOMSolver
assert FermionicHEOMSolver
assert HSolverDL
|
<commit_before><commit_msg>Add tests for the heom module API.<commit_after>"""
Tests for qutip.nonmarkov.heom.
"""
from qutip.nonmarkov.heom import (
BathExponent,
Bath,
BosonicBath,
DrudeLorentzBath,
DrudeLorentzPadeBath,
FermionicBath,
HEOMSolver,
BosonicHEOMSolver,
FermionicHEOMSolver,
HSolverDL,
)
class TestBathAPI:
def test_api(self):
# just assert that the baths are importable
assert BathExponent
assert Bath
assert BosonicBath
assert DrudeLorentzBath
assert DrudeLorentzPadeBath
assert FermionicBath
class TestSolverAPI:
def test_api(self):
        # just assert that the solvers are importable
assert HEOMSolver
assert BosonicHEOMSolver
assert FermionicHEOMSolver
assert HSolverDL
|
|
8a2b6649481e204458b8a3895330e059a3d631c5
|
tests/unit/test_gitops.py
|
tests/unit/test_gitops.py
|
"""Tests for the GitOps deployment strategy."""
from . import pytest_generate_tests # noqa, pylint: disable=unused-import
# pylint: disable=too-few-public-methods
class TestGitops:
"""
Tests for verifying generated projects deployed using GitOps.
"""
scenarios = [
('Pipeline', {
'deployment_strategy': 'pipeline',
'project_slug': 'monolith',
'framework': 'SpringBoot',
'files_present': [
'monolith/.git/config',
'monolith/.gitignore',
'monolith/deployment/application/base/kustomization.yaml',
'monolith/deployment/database/base/kustomization.yaml',
'monolith/deployment/webserver/',
'monolith/docker-compose.yml',
'monolith/Dockerfile',
'monolith/README.rst',
],
'files_absent': [
'monolith/gitops/',
'monolith-gitops/',
],
}),
('GitOps', {
'deployment_strategy': 'gitops',
'project_slug': 'microsrvc',
'framework': 'SpringBoot',
'files_present': [
'microsrvc/.git/config',
'microsrvc/.gitignore',
'microsrvc/.dockerignore',
'microsrvc/README.rst',
'microsrvc/docker-compose.yml',
'microsrvc/Dockerfile',
'microsrvc-gitops/.git/config',
'microsrvc-gitops/.gitignore',
'microsrvc-gitops/deployment/application/base/kustomization.yaml', # noqa
'microsrvc-gitops/deployment/database/base/kustomization.yaml',
'microsrvc-gitops/deployment/webserver/',
'microsrvc-gitops/README.rst',
],
'files_absent': [
'microsrvc/deployment/',
'microsrvc/gitops/',
'microsrvc-gitops/docker-compose.yml',
'microsrvc-gitops/Dockerfile',
],
}),
]
# pylint: disable=too-many-arguments,no-self-use
def test_gitops(self, cookies, deployment_strategy, project_slug,
framework, files_present, files_absent):
"""
Generate a project with a specific deployment strategy and verify
it is complete and working.
"""
result = cookies.bake(extra_context={
'deployment_strategy': deployment_strategy,
'project_slug': project_slug,
'framework': framework,
})
assert result.exit_code == 0
assert result.exception is None
for filename in files_present:
thefile = result.project.join('..').join(filename)
assert thefile.exists(), \
'File %s missing in generated project.' % filename
for filename in files_absent:
thefile = result.project.join('..').join(filename)
assert not thefile.exists(), \
'File %s found in generated project.' % filename
|
Add tests for GitOps deployment strategy
|
Add tests for GitOps deployment strategy
|
Python
|
apache-2.0
|
painless-software/painless-continuous-delivery,painless-software/painless-continuous-delivery,painless-software/painless-continuous-delivery,painless-software/painless-continuous-delivery
|
Add tests for GitOps deployment strategy
|
"""Tests for the GitOps deployment strategy."""
from . import pytest_generate_tests # noqa, pylint: disable=unused-import
# pylint: disable=too-few-public-methods
class TestGitops:
"""
Tests for verifying generated projects deployed using GitOps.
"""
scenarios = [
('Pipeline', {
'deployment_strategy': 'pipeline',
'project_slug': 'monolith',
'framework': 'SpringBoot',
'files_present': [
'monolith/.git/config',
'monolith/.gitignore',
'monolith/deployment/application/base/kustomization.yaml',
'monolith/deployment/database/base/kustomization.yaml',
'monolith/deployment/webserver/',
'monolith/docker-compose.yml',
'monolith/Dockerfile',
'monolith/README.rst',
],
'files_absent': [
'monolith/gitops/',
'monolith-gitops/',
],
}),
('GitOps', {
'deployment_strategy': 'gitops',
'project_slug': 'microsrvc',
'framework': 'SpringBoot',
'files_present': [
'microsrvc/.git/config',
'microsrvc/.gitignore',
'microsrvc/.dockerignore',
'microsrvc/README.rst',
'microsrvc/docker-compose.yml',
'microsrvc/Dockerfile',
'microsrvc-gitops/.git/config',
'microsrvc-gitops/.gitignore',
'microsrvc-gitops/deployment/application/base/kustomization.yaml', # noqa
'microsrvc-gitops/deployment/database/base/kustomization.yaml',
'microsrvc-gitops/deployment/webserver/',
'microsrvc-gitops/README.rst',
],
'files_absent': [
'microsrvc/deployment/',
'microsrvc/gitops/',
'microsrvc-gitops/docker-compose.yml',
'microsrvc-gitops/Dockerfile',
],
}),
]
# pylint: disable=too-many-arguments,no-self-use
def test_gitops(self, cookies, deployment_strategy, project_slug,
framework, files_present, files_absent):
"""
Generate a project with a specific deployment strategy and verify
it is complete and working.
"""
result = cookies.bake(extra_context={
'deployment_strategy': deployment_strategy,
'project_slug': project_slug,
'framework': framework,
})
assert result.exit_code == 0
assert result.exception is None
for filename in files_present:
thefile = result.project.join('..').join(filename)
assert thefile.exists(), \
'File %s missing in generated project.' % filename
for filename in files_absent:
thefile = result.project.join('..').join(filename)
assert not thefile.exists(), \
'File %s found in generated project.' % filename
|
<commit_before><commit_msg>Add tests for GitOps deployment strategy<commit_after>
|
"""Tests for the GitOps deployment strategy."""
from . import pytest_generate_tests # noqa, pylint: disable=unused-import
# pylint: disable=too-few-public-methods
class TestGitops:
"""
Tests for verifying generated projects deployed using GitOps.
"""
scenarios = [
('Pipeline', {
'deployment_strategy': 'pipeline',
'project_slug': 'monolith',
'framework': 'SpringBoot',
'files_present': [
'monolith/.git/config',
'monolith/.gitignore',
'monolith/deployment/application/base/kustomization.yaml',
'monolith/deployment/database/base/kustomization.yaml',
'monolith/deployment/webserver/',
'monolith/docker-compose.yml',
'monolith/Dockerfile',
'monolith/README.rst',
],
'files_absent': [
'monolith/gitops/',
'monolith-gitops/',
],
}),
('GitOps', {
'deployment_strategy': 'gitops',
'project_slug': 'microsrvc',
'framework': 'SpringBoot',
'files_present': [
'microsrvc/.git/config',
'microsrvc/.gitignore',
'microsrvc/.dockerignore',
'microsrvc/README.rst',
'microsrvc/docker-compose.yml',
'microsrvc/Dockerfile',
'microsrvc-gitops/.git/config',
'microsrvc-gitops/.gitignore',
'microsrvc-gitops/deployment/application/base/kustomization.yaml', # noqa
'microsrvc-gitops/deployment/database/base/kustomization.yaml',
'microsrvc-gitops/deployment/webserver/',
'microsrvc-gitops/README.rst',
],
'files_absent': [
'microsrvc/deployment/',
'microsrvc/gitops/',
'microsrvc-gitops/docker-compose.yml',
'microsrvc-gitops/Dockerfile',
],
}),
]
# pylint: disable=too-many-arguments,no-self-use
def test_gitops(self, cookies, deployment_strategy, project_slug,
framework, files_present, files_absent):
"""
Generate a project with a specific deployment strategy and verify
it is complete and working.
"""
result = cookies.bake(extra_context={
'deployment_strategy': deployment_strategy,
'project_slug': project_slug,
'framework': framework,
})
assert result.exit_code == 0
assert result.exception is None
for filename in files_present:
thefile = result.project.join('..').join(filename)
assert thefile.exists(), \
'File %s missing in generated project.' % filename
for filename in files_absent:
thefile = result.project.join('..').join(filename)
assert not thefile.exists(), \
'File %s found in generated project.' % filename
|
Add tests for GitOps deployment strategy"""Tests for the GitOps deployment strategy."""
from . import pytest_generate_tests # noqa, pylint: disable=unused-import
# pylint: disable=too-few-public-methods
class TestGitops:
"""
Tests for verifying generated projects deployed using GitOps.
"""
scenarios = [
('Pipeline', {
'deployment_strategy': 'pipeline',
'project_slug': 'monolith',
'framework': 'SpringBoot',
'files_present': [
'monolith/.git/config',
'monolith/.gitignore',
'monolith/deployment/application/base/kustomization.yaml',
'monolith/deployment/database/base/kustomization.yaml',
'monolith/deployment/webserver/',
'monolith/docker-compose.yml',
'monolith/Dockerfile',
'monolith/README.rst',
],
'files_absent': [
'monolith/gitops/',
'monolith-gitops/',
],
}),
('GitOps', {
'deployment_strategy': 'gitops',
'project_slug': 'microsrvc',
'framework': 'SpringBoot',
'files_present': [
'microsrvc/.git/config',
'microsrvc/.gitignore',
'microsrvc/.dockerignore',
'microsrvc/README.rst',
'microsrvc/docker-compose.yml',
'microsrvc/Dockerfile',
'microsrvc-gitops/.git/config',
'microsrvc-gitops/.gitignore',
'microsrvc-gitops/deployment/application/base/kustomization.yaml', # noqa
'microsrvc-gitops/deployment/database/base/kustomization.yaml',
'microsrvc-gitops/deployment/webserver/',
'microsrvc-gitops/README.rst',
],
'files_absent': [
'microsrvc/deployment/',
'microsrvc/gitops/',
'microsrvc-gitops/docker-compose.yml',
'microsrvc-gitops/Dockerfile',
],
}),
]
# pylint: disable=too-many-arguments,no-self-use
def test_gitops(self, cookies, deployment_strategy, project_slug,
framework, files_present, files_absent):
"""
Generate a project with a specific deployment strategy and verify
it is complete and working.
"""
result = cookies.bake(extra_context={
'deployment_strategy': deployment_strategy,
'project_slug': project_slug,
'framework': framework,
})
assert result.exit_code == 0
assert result.exception is None
for filename in files_present:
thefile = result.project.join('..').join(filename)
assert thefile.exists(), \
'File %s missing in generated project.' % filename
for filename in files_absent:
thefile = result.project.join('..').join(filename)
assert not thefile.exists(), \
'File %s found in generated project.' % filename
|
<commit_before><commit_msg>Add tests for GitOps deployment strategy<commit_after>"""Tests for the GitOps deployment strategy."""
from . import pytest_generate_tests # noqa, pylint: disable=unused-import
# pylint: disable=too-few-public-methods
class TestGitops:
"""
Tests for verifying generated projects deployed using GitOps.
"""
scenarios = [
('Pipeline', {
'deployment_strategy': 'pipeline',
'project_slug': 'monolith',
'framework': 'SpringBoot',
'files_present': [
'monolith/.git/config',
'monolith/.gitignore',
'monolith/deployment/application/base/kustomization.yaml',
'monolith/deployment/database/base/kustomization.yaml',
'monolith/deployment/webserver/',
'monolith/docker-compose.yml',
'monolith/Dockerfile',
'monolith/README.rst',
],
'files_absent': [
'monolith/gitops/',
'monolith-gitops/',
],
}),
('GitOps', {
'deployment_strategy': 'gitops',
'project_slug': 'microsrvc',
'framework': 'SpringBoot',
'files_present': [
'microsrvc/.git/config',
'microsrvc/.gitignore',
'microsrvc/.dockerignore',
'microsrvc/README.rst',
'microsrvc/docker-compose.yml',
'microsrvc/Dockerfile',
'microsrvc-gitops/.git/config',
'microsrvc-gitops/.gitignore',
'microsrvc-gitops/deployment/application/base/kustomization.yaml', # noqa
'microsrvc-gitops/deployment/database/base/kustomization.yaml',
'microsrvc-gitops/deployment/webserver/',
'microsrvc-gitops/README.rst',
],
'files_absent': [
'microsrvc/deployment/',
'microsrvc/gitops/',
'microsrvc-gitops/docker-compose.yml',
'microsrvc-gitops/Dockerfile',
],
}),
]
# pylint: disable=too-many-arguments,no-self-use
def test_gitops(self, cookies, deployment_strategy, project_slug,
framework, files_present, files_absent):
"""
Generate a project with a specific deployment strategy and verify
it is complete and working.
"""
result = cookies.bake(extra_context={
'deployment_strategy': deployment_strategy,
'project_slug': project_slug,
'framework': framework,
})
assert result.exit_code == 0
assert result.exception is None
for filename in files_present:
thefile = result.project.join('..').join(filename)
assert thefile.exists(), \
'File %s missing in generated project.' % filename
for filename in files_absent:
thefile = result.project.join('..').join(filename)
assert not thefile.exists(), \
'File %s found in generated project.' % filename
|
|
465f0156a6e92991ed277cd0434cc65f25ef8af8
|
examples/gto/20-soc_ao_integrals.py
|
examples/gto/20-soc_ao_integrals.py
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import gto
'''
Integrals for spin-orbit coupling
'''
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
# J Chem Phys, 122, 034107, Eq (2)
mat = 0
for atm_id in range(mol.natm):
mol.set_rinv_orig(mol.coord_of_atm(atm_id))
chg = mol.charge_of_atm(atm_id)
mat += chg * mol.intor('cint1e_prinvxp_sph', 3)
# J Chem Phys, 122, 034107, Eq (3)
mat = mol.intor('cint2e_p1vxp1_sph', comp=3)
# spin-spin dipole-dipole coupling integrals
# Chem Phys 279, 133, Eq (1)
def ss(mol):
n = mol.nao_nr()
mat1 = mol.intor('cint2e_ip1ip2_sph', comp=9).reshape(3,3,n,n,n,n) # <nabla1 nabla2 | 1 2>
mat2 =-mat1.transpose(0,1,2,3,5,4) # <nabla1 2 | 1 nabla2>
mat3 =-mat2.transpose(1,0,3,2,4,5) # <1 nabla2 | nabla1 2>
mat4 = mat1.transpose(0,1,3,2,5,4) # <1 2 | nabla1 nabla2>
mat = mat1 - mat2 - mat3 + mat4
s = numpy.array((((0, 1),
(1, 0)),
((0, -1j),
(1j, 0)),
((1, 0),
(0, -1)))) * .5
    # wxyz are the spin indices, ijkl are the AO indices
mat = numpy.einsum('swx,tyz,stijkl->wxyzijkl', s[:,0,0], s[:,0,0], mat)
return mat
|
Add example for SOC integrals
|
Add example for SOC integrals
|
Python
|
apache-2.0
|
gkc1000/pyscf,sunqm/pyscf,sunqm/pyscf,sunqm/pyscf,gkc1000/pyscf,gkc1000/pyscf,gkc1000/pyscf,gkc1000/pyscf,sunqm/pyscf
|
Add example for SOC integrals
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import gto
'''
Integrals for spin-orbit coupling
'''
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
# J Chem Phys, 122, 034107, Eq (2)
mat = 0
for atm_id in range(mol.natm):
mol.set_rinv_orig(mol.coord_of_atm(atm_id))
chg = mol.charge_of_atm(atm_id)
mat += chg * mol.intor('cint1e_prinvxp_sph', 3)
# J Chem Phys, 122, 034107, Eq (3)
mat = mol.intor('cint2e_p1vxp1_sph', comp=3)
# spin-spin dipole-dipole coupling integrals
# Chem Phys 279, 133, Eq (1)
def ss(mol):
n = mol.nao_nr()
mat1 = mol.intor('cint2e_ip1ip2_sph', comp=9).reshape(3,3,n,n,n,n) # <nabla1 nabla2 | 1 2>
mat2 =-mat1.transpose(0,1,2,3,5,4) # <nabla1 2 | 1 nabla2>
mat3 =-mat2.transpose(1,0,3,2,4,5) # <1 nabla2 | nabla1 2>
mat4 = mat1.transpose(0,1,3,2,5,4) # <1 2 | nabla1 nabla2>
mat = mat1 - mat2 - mat3 + mat4
s = numpy.array((((0, 1),
(1, 0)),
((0, -1j),
(1j, 0)),
((1, 0),
(0, -1)))) * .5
    # wxyz are the spin indices, ijkl are the AO indices
mat = numpy.einsum('swx,tyz,stijkl->wxyzijkl', s[:,0,0], s[:,0,0], mat)
return mat
|
<commit_before><commit_msg>Add example for SOC integrals<commit_after>
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import gto
'''
Integrals for spin-orbit coupling
'''
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
# J Chem Phys, 122, 034107, Eq (2)
mat = 0
for atm_id in range(mol.natm):
mol.set_rinv_orig(mol.coord_of_atm(atm_id))
chg = mol.charge_of_atm(atm_id)
mat += chg * mol.intor('cint1e_prinvxp_sph', 3)
# J Chem Phys, 122, 034107, Eq (3)
mat = mol.intor('cint2e_p1vxp1_sph', comp=3)
# spin-spin dipole-dipole coupling integrals
# Chem Phys 279, 133, Eq (1)
def ss(mol):
n = mol.nao_nr()
mat1 = mol.intor('cint2e_ip1ip2_sph', comp=9).reshape(3,3,n,n,n,n) # <nabla1 nabla2 | 1 2>
mat2 =-mat1.transpose(0,1,2,3,5,4) # <nabla1 2 | 1 nabla2>
mat3 =-mat2.transpose(1,0,3,2,4,5) # <1 nabla2 | nabla1 2>
mat4 = mat1.transpose(0,1,3,2,5,4) # <1 2 | nabla1 nabla2>
mat = mat1 - mat2 - mat3 + mat4
s = numpy.array((((0, 1),
(1, 0)),
((0, -1j),
(1j, 0)),
((1, 0),
(0, -1)))) * .5
    # wxyz are the spin indices, ijkl are the AO indices
mat = numpy.einsum('swx,tyz,stijkl->wxyzijkl', s[:,0,0], s[:,0,0], mat)
return mat
|
Add example for SOC integrals#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import gto
'''
Integrals for spin-orbit coupling
'''
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
# J Chem Phys, 122, 034107, Eq (2)
mat = 0
for atm_id in range(mol.natm):
mol.set_rinv_orig(mol.coord_of_atm(atm_id))
chg = mol.charge_of_atm(atm_id)
mat += chg * mol.intor('cint1e_prinvxp_sph', 3)
# J Chem Phys, 122, 034107, Eq (3)
mat = mol.intor('cint2e_p1vxp1_sph', comp=3)
# spin-spin dipole-dipole coupling integrals
# Chem Phys 279, 133, Eq (1)
def ss(mol):
n = mol.nao_nr()
mat1 = mol.intor('cint2e_ip1ip2_sph', comp=9).reshape(3,3,n,n,n,n) # <nabla1 nabla2 | 1 2>
mat2 =-mat1.transpose(0,1,2,3,5,4) # <nabla1 2 | 1 nabla2>
mat3 =-mat2.transpose(1,0,3,2,4,5) # <1 nabla2 | nabla1 2>
mat4 = mat1.transpose(0,1,3,2,5,4) # <1 2 | nabla1 nabla2>
mat = mat1 - mat2 - mat3 + mat4
s = numpy.array((((0, 1),
(1, 0)),
((0, -1j),
(1j, 0)),
((1, 0),
(0, -1)))) * .5
    # wxyz are the spin indices, ijkl are the AO indices
mat = numpy.einsum('swx,tyz,stijkl->wxyzijkl', s[:,0,0], s[:,0,0], mat)
return mat
|
<commit_before><commit_msg>Add example for SOC integrals<commit_after>#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy
from pyscf import gto
'''
Integrals for spin-orbit coupling
'''
mol = gto.M(
verbose = 0,
atom = 'C 0 0 0; O 0 0 1.5',
basis = 'ccpvdz'
)
# J Chem Phys, 122, 034107, Eq (2)
mat = 0
for atm_id in range(mol.natm):
mol.set_rinv_orig(mol.coord_of_atm(atm_id))
chg = mol.charge_of_atm(atm_id)
mat += chg * mol.intor('cint1e_prinvxp_sph', 3)
# J Chem Phys, 122, 034107, Eq (3)
mat = mol.intor('cint2e_p1vxp1_sph', comp=3)
# spin-spin dipole-dipole coupling integrals
# Chem Phys 279, 133, Eq (1)
def ss(mol):
n = mol.nao_nr()
mat1 = mol.intor('cint2e_ip1ip2_sph', comp=9).reshape(3,3,n,n,n,n) # <nabla1 nabla2 | 1 2>
mat2 =-mat1.transpose(0,1,2,3,5,4) # <nabla1 2 | 1 nabla2>
mat3 =-mat2.transpose(1,0,3,2,4,5) # <1 nabla2 | nabla1 2>
mat4 = mat1.transpose(0,1,3,2,5,4) # <1 2 | nabla1 nabla2>
mat = mat1 - mat2 - mat3 + mat4
s = numpy.array((((0, 1),
(1, 0)),
((0, -1j),
(1j, 0)),
((1, 0),
(0, -1)))) * .5
    # wxyz are the spin indices, ijkl are the AO indices
mat = numpy.einsum('swx,tyz,stijkl->wxyzijkl', s[:,0,0], s[:,0,0], mat)
return mat
|
|
1c5a7141e9f80e275acb3f758ca0657b1390fe24
|
python_scripts/extractor_python_readability_server.py
|
python_scripts/extractor_python_readability_server.py
|
#!/usr/bin/python
import sys
import glob
sys.path.append("python_scripts/gen-py")
sys.path.append("gen-py/thrift_solr/")
from thrift.transport import TSocket
from thrift.server import TServer
#import thrift_solr
import ExtractorService
import readability
import readability
def extract_with_python_readability( raw_content ):
doc = readability.Document( raw_content )
return [ doc.short_title(),
doc.summary() ]
class ExtractorHandler:
def extract_html( self, raw_html ):
return extract_with_python_readability( raw_html )
handler = ExtractorHandler()
processor = ExtractorService.Processor(handler)
listening_socket = TSocket.TServerSocket(port=9090)
server = TServer.TThreadPoolServer(processor, listening_socket)
print ("[Server] Started")
server.serve()
|
Add thrift Python readability server.
|
Add thrift Python readability server.
|
Python
|
agpl-3.0
|
AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud
|
Add thrift Python readability server.
|
#!/usr/bin/python
import sys
import glob
sys.path.append("python_scripts/gen-py")
sys.path.append("gen-py/thrift_solr/")
from thrift.transport import TSocket
from thrift.server import TServer
#import thrift_solr
import ExtractorService
import readability
import readability
def extract_with_python_readability( raw_content ):
doc = readability.Document( raw_content )
return [ doc.short_title(),
doc.summary() ]
class ExtractorHandler:
def extract_html( self, raw_html ):
return extract_with_python_readability( raw_html )
handler = ExtractorHandler()
processor = ExtractorService.Processor(handler)
listening_socket = TSocket.TServerSocket(port=9090)
server = TServer.TThreadPoolServer(processor, listening_socket)
print ("[Server] Started")
server.serve()
|
<commit_before><commit_msg>Add thrift Python readability server.<commit_after>
|
#!/usr/bin/python
import sys
import glob
sys.path.append("python_scripts/gen-py")
sys.path.append("gen-py/thrift_solr/")
from thrift.transport import TSocket
from thrift.server import TServer
#import thrift_solr
import ExtractorService
import readability
import readability
def extract_with_python_readability( raw_content ):
doc = readability.Document( raw_content )
return [ doc.short_title(),
doc.summary() ]
class ExtractorHandler:
def extract_html( self, raw_html ):
return extract_with_python_readability( raw_html )
handler = ExtractorHandler()
processor = ExtractorService.Processor(handler)
listening_socket = TSocket.TServerSocket(port=9090)
server = TServer.TThreadPoolServer(processor, listening_socket)
print ("[Server] Started")
server.serve()
|
Add thrift Python readability server.#!/usr/bin/python
import sys
import glob
sys.path.append("python_scripts/gen-py")
sys.path.append("gen-py/thrift_solr/")
from thrift.transport import TSocket
from thrift.server import TServer
#import thrift_solr
import ExtractorService
import readability
import readability
def extract_with_python_readability( raw_content ):
doc = readability.Document( raw_content )
return [ doc.short_title(),
doc.summary() ]
class ExtractorHandler:
def extract_html( self, raw_html ):
return extract_with_python_readability( raw_html )
handler = ExtractorHandler()
processor = ExtractorService.Processor(handler)
listening_socket = TSocket.TServerSocket(port=9090)
server = TServer.TThreadPoolServer(processor, listening_socket)
print ("[Server] Started")
server.serve()
|
<commit_before><commit_msg>Add thrift Python readability server.<commit_after>#!/usr/bin/python
import sys
import glob
sys.path.append("python_scripts/gen-py")
sys.path.append("gen-py/thrift_solr/")
from thrift.transport import TSocket
from thrift.server import TServer
#import thrift_solr
import ExtractorService
import readability
import readability
def extract_with_python_readability( raw_content ):
doc = readability.Document( raw_content )
return [ doc.short_title(),
doc.summary() ]
class ExtractorHandler:
def extract_html( self, raw_html ):
return extract_with_python_readability( raw_html )
handler = ExtractorHandler()
processor = ExtractorService.Processor(handler)
listening_socket = TSocket.TServerSocket(port=9090)
server = TServer.TThreadPoolServer(processor, listening_socket)
print ("[Server] Started")
server.serve()
|
|
85e0c4d75bf8ccff20eff338242c6e37af1c705c
|
examples/dwaq_map_to_nc.py
|
examples/dwaq_map_to_nc.py
|
#!/usr/bin/env python
"""
Command-line tool to convert a binary map output to netcdf.
"""
from __future__ import print_function
import argparse
import sys,os
import numpy as np
import stompy.model.delft.io as dio
parser = argparse.ArgumentParser(description='Convert D-WAQ binary map output to NetCDF.')
parser.add_argument('map_fn', metavar='somefile.map', type=str,
help='path to map file output')
parser.add_argument('hyd_fn', metavar='other.hyd', type=str,
help='path to hyd file')
parser.add_argument('--totaldepth',default='TotalDepth',
help='output variable to use as total depth. none to disable sigma coordinate')
args = parser.parse_args()
# DBG args=parser.parse_args(['--totaldepth','none',"wy2011.map","com-wy2011.hyd"])
map_fn=args.map_fn
hyd_fn=args.hyd_fn
output_fn=map_fn.replace('.map','.nc')
if os.path.exists(output_fn):
print("Output file '%s' exists. Aborting"%output_fn)
sys.exit(1)
print("Reading map data and grid")
map_ds=dio.read_map(map_fn,hyd_fn)
if args.totaldepth != 'none':
total_depth=args.totaldepth
print("Adding minor metadata")
if total_depth not in map_ds:
print("Fabricating a total-depth variable to allow ugrid-ish output")
map_ds[total_depth]=('time','layer','face'),np.ones( (len(map_ds.time),
len(map_ds.layer),
len(map_ds.face)), '<i1')
dio.map_add_z_coordinate(map_ds,total_depth=total_depth,coord_type='sigma',
layer_dim='layer')
print("Writing to %s"%output_fn)
map_ds.to_netcdf(output_fn)
|
Add CLI for converting dwaq binary map to nc
|
Add CLI for converting dwaq binary map to nc
|
Python
|
mit
|
rustychris/stompy,rustychris/stompy
|
Add CLI for converting dwaq binary map to nc
|
#!/usr/bin/env python
"""
Command-line tool to convert a binary map output to netcdf.
"""
from __future__ import print_function
import argparse
import sys,os
import numpy as np
import stompy.model.delft.io as dio
parser = argparse.ArgumentParser(description='Convert D-WAQ binary map output to NetCDF.')
parser.add_argument('map_fn', metavar='somefile.map', type=str,
help='path to map file output')
parser.add_argument('hyd_fn', metavar='other.hyd', type=str,
help='path to hyd file')
parser.add_argument('--totaldepth',default='TotalDepth',
help='output variable to use as total depth. none to disable sigma coordinate')
args = parser.parse_args()
# DBG args=parser.parse_args(['--totaldepth','none',"wy2011.map","com-wy2011.hyd"])
map_fn=args.map_fn
hyd_fn=args.hyd_fn
output_fn=map_fn.replace('.map','.nc')
if os.path.exists(output_fn):
print("Output file '%s' exists. Aborting"%output_fn)
sys.exit(1)
print("Reading map data and grid")
map_ds=dio.read_map(map_fn,hyd_fn)
if args.totaldepth != 'none':
total_depth=args.totaldepth
print("Adding minor metadata")
if total_depth not in map_ds:
print("Fabricating a total-depth variable to allow ugrid-ish output")
map_ds[total_depth]=('time','layer','face'),np.ones( (len(map_ds.time),
len(map_ds.layer),
len(map_ds.face)), '<i1')
dio.map_add_z_coordinate(map_ds,total_depth=total_depth,coord_type='sigma',
layer_dim='layer')
print("Writing to %s"%output_fn)
map_ds.to_netcdf(output_fn)
|
<commit_before><commit_msg>Add CLI for converting dwaq binary map to nc<commit_after>
|
#!/usr/bin/env python
"""
Command-line tool to convert a binary map output to netcdf.
"""
from __future__ import print_function
import argparse
import sys,os
import numpy as np
import stompy.model.delft.io as dio
parser = argparse.ArgumentParser(description='Convert D-WAQ binary map output to NetCDF.')
parser.add_argument('map_fn', metavar='somefile.map', type=str,
help='path to map file output')
parser.add_argument('hyd_fn', metavar='other.hyd', type=str,
help='path to hyd file')
parser.add_argument('--totaldepth',default='TotalDepth',
help='output variable to use as total depth. none to disable sigma coordinate')
args = parser.parse_args()
# DBG args=parser.parse_args(['--totaldepth','none',"wy2011.map","com-wy2011.hyd"])
map_fn=args.map_fn
hyd_fn=args.hyd_fn
output_fn=map_fn.replace('.map','.nc')
if os.path.exists(output_fn):
print("Output file '%s' exists. Aborting"%output_fn)
sys.exit(1)
print("Reading map data and grid")
map_ds=dio.read_map(map_fn,hyd_fn)
if args.totaldepth != 'none':
total_depth=args.totaldepth
print("Adding minor metadata")
if total_depth not in map_ds:
print("Fabricating a total-depth variable to allow ugrid-ish output")
map_ds[total_depth]=('time','layer','face'),np.ones( (len(map_ds.time),
len(map_ds.layer),
len(map_ds.face)), '<i1')
dio.map_add_z_coordinate(map_ds,total_depth=total_depth,coord_type='sigma',
layer_dim='layer')
print("Writing to %s"%output_fn)
map_ds.to_netcdf(output_fn)
|
Add CLI for converting dwaq binary map to nc#!/usr/bin/env python
"""
Command-line tool to convert a binary map output to netcdf.
"""
from __future__ import print_function
import argparse
import sys,os
import numpy as np
import stompy.model.delft.io as dio
parser = argparse.ArgumentParser(description='Convert D-WAQ binary map output to NetCDF.')
parser.add_argument('map_fn', metavar='somefile.map', type=str,
help='path to map file output')
parser.add_argument('hyd_fn', metavar='other.hyd', type=str,
help='path to hyd file')
parser.add_argument('--totaldepth',default='TotalDepth',
help='output variable to use as total depth. none to disable sigma coordinate')
args = parser.parse_args()
# DBG args=parser.parse_args(['--totaldepth','none',"wy2011.map","com-wy2011.hyd"])
map_fn=args.map_fn
hyd_fn=args.hyd_fn
output_fn=map_fn.replace('.map','.nc')
if os.path.exists(output_fn):
print("Output file '%s' exists. Aborting"%output_fn)
sys.exit(1)
print("Reading map data and grid")
map_ds=dio.read_map(map_fn,hyd_fn)
if args.totaldepth != 'none':
total_depth=args.totaldepth
print("Adding minor metadata")
if total_depth not in map_ds:
print("Fabricating a total-depth variable to allow ugrid-ish output")
map_ds[total_depth]=('time','layer','face'),np.ones( (len(map_ds.time),
len(map_ds.layer),
len(map_ds.face)), '<i1')
dio.map_add_z_coordinate(map_ds,total_depth=total_depth,coord_type='sigma',
layer_dim='layer')
print("Writing to %s"%output_fn)
map_ds.to_netcdf(output_fn)
|
<commit_before><commit_msg>Add CLI for converting dwaq binary map to nc<commit_after>#!/usr/bin/env python
"""
Command-line tool to convert a binary map output to netcdf.
"""
from __future__ import print_function
import argparse
import sys,os
import numpy as np
import stompy.model.delft.io as dio
parser = argparse.ArgumentParser(description='Convert D-WAQ binary map output to NetCDF.')
parser.add_argument('map_fn', metavar='somefile.map', type=str,
help='path to map file output')
parser.add_argument('hyd_fn', metavar='other.hyd', type=str,
help='path to hyd file')
parser.add_argument('--totaldepth',default='TotalDepth',
help='output variable to use as total depth. none to disable sigma coordinate')
args = parser.parse_args()
# DBG args=parser.parse_args(['--totaldepth','none',"wy2011.map","com-wy2011.hyd"])
map_fn=args.map_fn
hyd_fn=args.hyd_fn
output_fn=map_fn.replace('.map','.nc')
if os.path.exists(output_fn):
print("Output file '%s' exists. Aborting"%output_fn)
sys.exit(1)
print("Reading map data and grid")
map_ds=dio.read_map(map_fn,hyd_fn)
if args.totaldepth != 'none':
total_depth=args.totaldepth
print("Adding minor metadata")
if total_depth not in map_ds:
print("Fabricating a total-depth variable to allow ugrid-ish output")
map_ds[total_depth]=('time','layer','face'),np.ones( (len(map_ds.time),
len(map_ds.layer),
len(map_ds.face)), '<i1')
dio.map_add_z_coordinate(map_ds,total_depth=total_depth,coord_type='sigma',
layer_dim='layer')
print("Writing to %s"%output_fn)
map_ds.to_netcdf(output_fn)
|
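
For context on the converter recorded above, a minimal usage sketch follows (not part of the commit). The script file name is an assumption; the .map/.hyd names are the ones from the DBG comment in the source, and the output name is derived by the script itself.

# Illustrative only: how the parser above resolves a typical invocation.
# Equivalent command line (script name hypothetical):
#   python dwaq_map2nc.py --totaldepth none wy2011.map com-wy2011.hyd
args = parser.parse_args(['--totaldepth', 'none', 'wy2011.map', 'com-wy2011.hyd'])
assert args.map_fn == 'wy2011.map' and args.hyd_fn == 'com-wy2011.hyd'
# The NetCDF output would then be written to wy2011.nc, and an existing file aborts the run.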
|
cf978fed09339dc31a665fc508516961c52565f2
|
examples/modelsummaries.py
|
examples/modelsummaries.py
|
"""
This example:
1. Connects to the current controller
2. Starts a model summary watcher
3. Prints each model summary received from the watcher
4. Runs forever (kill with Ctrl-C)
"""
import asyncio
import logging
from juju import loop
from juju.controller import Controller
from juju.model import Model
async def watch():
controller = Controller()
# connect to current
# controller with current user, per Juju CLI
await controller.connect()
# Need to call the WatchModelSummaries or WatchAllModelSummaries on the
# controller.
def callback(summary):
print("-- change --\n{}\n".format(summary))
event = await controller.watch_model_summaries(callback)
while True:
await asyncio.sleep(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
ws_logger = logging.getLogger('websockets.protocol')
ws_logger.setLevel(logging.INFO)
logging.getLogger('juju.client.connection').setLevel(logging.INFO)
# Run loop until the process is manually stopped (watch will loop
# forever).
loop.run(watch())
|
Add the model summary example.
|
Add the model summary example.
|
Python
|
apache-2.0
|
juju/python-libjuju,juju/python-libjuju
|
Add the model summary example.
|
"""
This example:
1. Connects to the current controller
2. Starts a model summary watcher
3. Prints each model summary received from the watcher
4. Runs forever (kill with Ctrl-C)
"""
import asyncio
import logging
from juju import loop
from juju.controller import Controller
from juju.model import Model
async def watch():
controller = Controller()
# connect to current
# controller with current user, per Juju CLI
await controller.connect()
# Need to call the WatchModelSummaries or WatchAllModelSummaries on the
# controller.
def callback(summary):
print("-- change --\n{}\n".format(summary))
event = await controller.watch_model_summaries(callback)
while True:
await asyncio.sleep(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
ws_logger = logging.getLogger('websockets.protocol')
ws_logger.setLevel(logging.INFO)
logging.getLogger('juju.client.connection').setLevel(logging.INFO)
# Run loop until the process is manually stopped (watch will loop
# forever).
loop.run(watch())
|
<commit_before><commit_msg>Add the model summary example.<commit_after>
|
"""
This example:
1. Connects to the current controller
2. Starts a model summary watcher
3. Prints each model summary received from the watcher
4. Runs forever (kill with Ctrl-C)
"""
import asyncio
import logging
from juju import loop
from juju.controller import Controller
from juju.model import Model
async def watch():
controller = Controller()
# connect to current
# controller with current user, per Juju CLI
await controller.connect()
# Need to call the WatchModelSummaries or WatchAllModelSummaries on the
# controller.
def callback(summary):
print("-- change --\n{}\n".format(summary))
event = await controller.watch_model_summaries(callback)
while True:
await asyncio.sleep(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
ws_logger = logging.getLogger('websockets.protocol')
ws_logger.setLevel(logging.INFO)
logging.getLogger('juju.client.connection').setLevel(logging.INFO)
# Run loop until the process is manually stopped (watch will loop
# forever).
loop.run(watch())
|
Add the model summary example."""
This example:
1. Connects to the current controller
2. Starts a model summary watcher
3. Prints each model summary received from the watcher
4. Runs forever (kill with Ctrl-C)
"""
import asyncio
import logging
from juju import loop
from juju.controller import Controller
from juju.model import Model
async def watch():
controller = Controller()
# connect to current
# controller with current user, per Juju CLI
await controller.connect()
# Need to call the WatchModelSummaries or WatchAllModelSummaries on the
# controller.
def callback(summary):
print("-- change --\n{}\n".format(summary))
event = await controller.watch_model_summaries(callback)
while True:
await asyncio.sleep(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
ws_logger = logging.getLogger('websockets.protocol')
ws_logger.setLevel(logging.INFO)
logging.getLogger('juju.client.connection').setLevel(logging.INFO)
# Run loop until the process is manually stopped (watch will loop
# forever).
loop.run(watch())
|
<commit_before><commit_msg>Add the model summary example.<commit_after>"""
This example:
1. Connects to the current controller
2. Starts a model summary watcher
3. Prints each model summary received from the watcher
4. Runs forever (kill with Ctrl-C)
"""
import asyncio
import logging
from juju import loop
from juju.controller import Controller
from juju.model import Model
async def watch():
controller = Controller()
# connect to current
# controller with current user, per Juju CLI
await controller.connect()
# Need to call the WatchModelSummaries or WatchAllModelSummaries on the
# controller.
def callback(summary):
print("-- change --\n{}\n".format(summary))
event = await controller.watch_model_summaries(callback)
while True:
await asyncio.sleep(1)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
ws_logger = logging.getLogger('websockets.protocol')
ws_logger.setLevel(logging.INFO)
logging.getLogger('juju.client.connection').setLevel(logging.INFO)
# Run loop until the process is manually stopped (watch will loop
# forever).
loop.run(watch())
|
|
4ae010a286f6660fbbad38f6f867c5d185eac784
|
test.py
|
test.py
|
#!/usr/bin/env python
import sys; sys.path.insert(0, './src/')
import libcask.container
cont = libcask.container.Container(
name='Example',
root_path='/home/dev/cask/containers/example/',
pid_path='/home/dev/cask/pid/example',
hostname='exampl',
ipaddr='10.1.1.1',
ipaddr_host='10.1.1.2',
entry_point='/busybox sh',
)
if sys.argv[1] == 'start':
cont.start()
elif sys.argv[1] == 'shell':
cont.attach().run('/busybox sh')
elif sys.argv[1] == 'kill':
cont.kill()
elif sys.argv[1] == 'pid':
print 'Container running with PID:', cont.pid()
else:
print 'usage: {} <command>'.format(sys.argv[0])
|
Add placeholder CLI interface to libcask.container.Container
|
Add placeholder CLI interface to libcask.container.Container
|
Python
|
mit
|
ianpreston/cask,ianpreston/cask
|
Add placeholder CLI interface to libcask.container.Container
|
#!/usr/bin/env python
import sys; sys.path.insert(0, './src/')
import libcask.container
cont = libcask.container.Container(
name='Example',
root_path='/home/dev/cask/containers/example/',
pid_path='/home/dev/cask/pid/example',
hostname='exampl',
ipaddr='10.1.1.1',
ipaddr_host='10.1.1.2',
entry_point='/busybox sh',
)
if sys.argv[1] == 'start':
cont.start()
elif sys.argv[1] == 'shell':
cont.attach().run('/busybox sh')
elif sys.argv[1] == 'kill':
cont.kill()
elif sys.argv[1] == 'pid':
print 'Container running with PID:', cont.pid()
else:
print 'usage: {} <command>'.format(sys.argv[0])
|
<commit_before><commit_msg>Add placeholder CLI interface to libcask.container.Container<commit_after>
|
#!/usr/bin/env python
import sys; sys.path.insert(0, './src/')
import libcask.container
cont = libcask.container.Container(
name='Example',
root_path='/home/dev/cask/containers/example/',
pid_path='/home/dev/cask/pid/example',
hostname='exampl',
ipaddr='10.1.1.1',
ipaddr_host='10.1.1.2',
entry_point='/busybox sh',
)
if sys.argv[1] == 'start':
cont.start()
elif sys.argv[1] == 'shell':
cont.attach().run('/busybox sh')
elif sys.argv[1] == 'kill':
cont.kill()
elif sys.argv[1] == 'pid':
print 'Container running with PID:', cont.pid()
else:
print 'usage: {} <command>'.format(sys.argv[0])
|
Add placeholder CLI interface to libcask.container.Container#!/usr/bin/env python
import sys; sys.path.insert(0, './src/')
import libcask.container
cont = libcask.container.Container(
name='Example',
root_path='/home/dev/cask/containers/example/',
pid_path='/home/dev/cask/pid/example',
hostname='exampl',
ipaddr='10.1.1.1',
ipaddr_host='10.1.1.2',
entry_point='/busybox sh',
)
if sys.argv[1] == 'start':
cont.start()
elif sys.argv[1] == 'shell':
cont.attach().run('/busybox sh')
elif sys.argv[1] == 'kill':
cont.kill()
elif sys.argv[1] == 'pid':
print 'Container running with PID:', cont.pid()
else:
print 'usage: {} <command>'.format(sys.argv[0])
|
<commit_before><commit_msg>Add placeholder CLI interface to libcask.container.Container<commit_after>#!/usr/bin/env python
import sys; sys.path.insert(0, './src/')
import libcask.container
cont = libcask.container.Container(
name='Example',
root_path='/home/dev/cask/containers/example/',
pid_path='/home/dev/cask/pid/example',
hostname='exampl',
ipaddr='10.1.1.1',
ipaddr_host='10.1.1.2',
entry_point='/busybox sh',
)
if sys.argv[1] == 'start':
cont.start()
elif sys.argv[1] == 'shell':
cont.attach().run('/busybox sh')
elif sys.argv[1] == 'kill':
cont.kill()
elif sys.argv[1] == 'pid':
print 'Container running with PID:', cont.pid()
else:
print 'usage: {} <command>'.format(sys.argv[0])
|
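
The argument dispatch in the record above implies a small set of command lines; the sketch below spells them out (illustrative only; the Container paths are hard-coded to the author's machine, so this is a reading of intended use, not something runnable as-is).

# Hypothetical invocations of the placeholder CLI above:
#   ./test.py start   -> cont.start()                      boot the container
#   ./test.py shell   -> cont.attach().run('/busybox sh')  open a shell inside it
#   ./test.py pid     -> print the container's PID
#   ./test.py kill    -> cont.kill()
# Any other argument falls through to the usage message.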
|
099e42bb6264ddb67136d8c6d13926605c2f525c
|
tube.py
|
tube.py
|
from __future__ import unicode_literals
import dateutil.parser
def get_playlists(client):
playlists = []
def process(item):
snippet = item['snippet']
p = {
'id': item['id'],
'title': snippet['title'],
'description': snippet['description'],
}
playlists.append(p)
params = {
'part': 'snippet',
'mine': 'true',
}
client.get('/playlists', params, process)
return playlists
def get_playlist_videos(client, playlist_id):
videos = []
def process(item):
snippet = item['snippet']
v = {
'watch_id': snippet['resourceId']['videoId'],
'title': snippet['title'],
'description': snippet['description'],
'position': snippet['position'],
'date_published': dateutil.parser.parse(snippet['publishedAt']),
}
videos.append(v)
params = {
'part': 'snippet',
'playlistId': playlist_id,
}
client.get('/playlistItems', params, process)
return videos
|
Add playlist list and playlist video list getters
|
Add playlist list and playlist video list getters
|
Python
|
mit
|
drkitty/metatube,drkitty/metatube
|
Add playlist list and playlist video list getters
|
from __future__ import unicode_literals
import dateutil.parser
def get_playlists(client):
playlists = []
def process(item):
snippet = item['snippet']
p = {
'id': item['id'],
'title': snippet['title'],
'description': snippet['description'],
}
playlists.append(p)
params = {
'part': 'snippet',
'mine': 'true',
}
client.get('/playlists', params, process)
return playlists
def get_playlist_videos(client, playlist_id):
videos = []
def process(item):
snippet = item['snippet']
v = {
'watch_id': snippet['resourceId']['videoId'],
'title': snippet['title'],
'description': snippet['description'],
'position': snippet['position'],
'date_published': dateutil.parser.parse(snippet['publishedAt']),
}
videos.append(v)
params = {
'part': 'snippet',
'playlistId': playlist_id,
}
client.get('/playlistItems', params, process)
return videos
|
<commit_before><commit_msg>Add playlist list and playlist video list getters<commit_after>
|
from __future__ import unicode_literals
import dateutil.parser
def get_playlists(client):
playlists = []
def process(item):
snippet = item['snippet']
p = {
'id': item['id'],
'title': snippet['title'],
'description': snippet['description'],
}
playlists.append(p)
params = {
'part': 'snippet',
'mine': 'true',
}
client.get('/playlists', params, process)
return playlists
def get_playlist_videos(client, playlist_id):
videos = []
def process(item):
snippet = item['snippet']
v = {
'watch_id': snippet['resourceId']['videoId'],
'title': snippet['title'],
'description': snippet['description'],
'position': snippet['position'],
'date_published': dateutil.parser.parse(snippet['publishedAt']),
}
videos.append(v)
params = {
'part': 'snippet',
'playlistId': playlist_id,
}
client.get('/playlistItems', params, process)
return videos
|
Add playlist list and playlist video list gettersfrom __future__ import unicode_literals
import dateutil.parser
def get_playlists(client):
playlists = []
def process(item):
snippet = item['snippet']
p = {
'id': item['id'],
'title': snippet['title'],
'description': snippet['description'],
}
playlists.append(p)
params = {
'part': 'snippet',
'mine': 'true',
}
client.get('/playlists', params, process)
return playlists
def get_playlist_videos(client, playlist_id):
videos = []
def process(item):
snippet = item['snippet']
v = {
'watch_id': snippet['resourceId']['videoId'],
'title': snippet['title'],
'description': snippet['description'],
'position': snippet['position'],
'date_published': dateutil.parser.parse(snippet['publishedAt']),
}
videos.append(v)
params = {
'part': 'snippet',
'playlistId': playlist_id,
}
client.get('/playlistItems', params, process)
return videos
|
<commit_before><commit_msg>Add playlist list and playlist video list getters<commit_after>from __future__ import unicode_literals
import dateutil.parser
def get_playlists(client):
playlists = []
def process(item):
snippet = item['snippet']
p = {
'id': item['id'],
'title': snippet['title'],
'description': snippet['description'],
}
playlists.append(p)
params = {
'part': 'snippet',
'mine': 'true',
}
client.get('/playlists', params, process)
return playlists
def get_playlist_videos(client, playlist_id):
videos = []
def process(item):
snippet = item['snippet']
v = {
'watch_id': snippet['resourceId']['videoId'],
'title': snippet['title'],
'description': snippet['description'],
'position': snippet['position'],
'date_published': dateutil.parser.parse(snippet['publishedAt']),
}
videos.append(v)
params = {
'part': 'snippet',
'playlistId': playlist_id,
}
client.get('/playlistItems', params, process)
return videos
|
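
A minimal sketch of how the two helpers above might be combined (illustrative only; `client` is assumed to be the same object the record expects, i.e. one exposing get(path, params, per_item_callback), and dump_channel is a hypothetical name).

# Illustrative only -- relies on get_playlists/get_playlist_videos as defined above.
def dump_channel(client):
    for playlist in get_playlists(client):
        print('%s (%s)' % (playlist['title'], playlist['id']))
        for video in get_playlist_videos(client, playlist['id']):
            print('  %d. %s [%s]' % (video['position'], video['title'],
                                     video['date_published'].date()))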
|
ecaaaf3ef2373563a19be39981251b4201a20dc2
|
chrome_frame/tools/helper_shutdown.py
|
chrome_frame/tools/helper_shutdown.py
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This is a simple helper script to shut down the Chrome Frame helper process.
It needs the Python Win32 extensions.'''
import pywintypes
import sys
import win32gui
import win32con
def main():
exit_code = 0
window = win32gui.FindWindow('ChromeFrameHelperWindowClass',
'ChromeFrameHelperWindowName')
if not window:
print 'Chrome Frame helper process not running.'
else:
try:
win32gui.PostMessage(window, win32con.WM_CLOSE, 0, 0)
print 'Chrome Frame helper process shut down.'
except pywintypes.error as ex:
print 'Failed to shutdown Chrome Frame helper process: '
print ex
exit_code = 1
return exit_code
if __name__ == '__main__':
sys.exit(main())
|
Add a tiny helper script to shutdown the chrome frame helper process.
|
Add a tiny helper script to shutdown the chrome frame helper process.
BUG=53127
TEST=Run script, chrome_frame_helper.exe is shut down.
Review URL: http://codereview.chromium.org/3312010
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@58587 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
adobe/chromium,Crystalnix/house-of-life-chromium,adobe/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,gavinp/chromium,yitian134/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,yitian134/chromium,yitian134/chromium,adobe/chromium,gavinp/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,adobe/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,ropik/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,ropik/chromium,yitian134/chromium,yitian134/chromium,yitian134/chromium,adobe/chromium,ropik/chromium,gavinp/chromium,ropik/chromium,ropik/chromium,yitian134/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,adobe/chromium
|
Add a tiny helper script to shutdown the chrome frame helper process.
BUG=53127
TEST=Run script, chrome_frame_helper.exe is shut down.
Review URL: http://codereview.chromium.org/3312010
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@58587 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This is a simple helper script to shut down the Chrome Frame helper process.
It needs the Python Win32 extensions.'''
import pywintypes
import sys
import win32gui
import win32con
def main():
exit_code = 0
window = win32gui.FindWindow('ChromeFrameHelperWindowClass',
'ChromeFrameHelperWindowName')
if not window:
print 'Chrome Frame helper process not running.'
else:
try:
win32gui.PostMessage(window, win32con.WM_CLOSE, 0, 0)
print 'Chrome Frame helper process shut down.'
except pywintypes.error as ex:
print 'Failed to shutdown Chrome Frame helper process: '
print ex
exit_code = 1
return exit_code
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a tiny helper script to shutdown the chrome frame helper process.
BUG=53127
TEST=Run script, chrome_frame_helper.exe is shut down.
Review URL: http://codereview.chromium.org/3312010
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@58587 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This is a simple helper script to shut down the Chrome Frame helper process.
It needs the Python Win32 extensions.'''
import pywintypes
import sys
import win32gui
import win32con
def main():
exit_code = 0
window = win32gui.FindWindow('ChromeFrameHelperWindowClass',
'ChromeFrameHelperWindowName')
if not window:
print 'Chrome Frame helper process not running.'
else:
try:
win32gui.PostMessage(window, win32con.WM_CLOSE, 0, 0)
print 'Chrome Frame helper process shut down.'
except pywintypes.error as ex:
print 'Failed to shutdown Chrome Frame helper process: '
print ex
exit_code = 1
return exit_code
if __name__ == '__main__':
sys.exit(main())
|
Add a tiny helper script to shutdown the chrome frame helper process.
BUG=53127
TEST=Run script, chrome_frame_helper.exe is shut down.
Review URL: http://codereview.chromium.org/3312010
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@58587 0039d316-1c4b-4281-b951-d872f2087c98# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This is a simple helper script to shut down the Chrome Frame helper process.
It needs the Python Win32 extensions.'''
import pywintypes
import sys
import win32gui
import win32con
def main():
exit_code = 0
window = win32gui.FindWindow('ChromeFrameHelperWindowClass',
'ChromeFrameHelperWindowName')
if not window:
print 'Chrome Frame helper process not running.'
else:
try:
win32gui.PostMessage(window, win32con.WM_CLOSE, 0, 0)
print 'Chrome Frame helper process shut down.'
except pywintypes.error as ex:
print 'Failed to shutdown Chrome Frame helper process: '
print ex
exit_code = 1
return exit_code
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add a tiny helper script to shutdown the chrome frame helper process.
BUG=53127
TEST=Run script, chrome_frame_helper.exe is shut down.
Review URL: http://codereview.chromium.org/3312010
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@58587 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''This is a simple helper script to shut down the Chrome Frame helper process.
It needs the Python Win32 extensions.'''
import pywintypes
import sys
import win32gui
import win32con
def main():
exit_code = 0
window = win32gui.FindWindow('ChromeFrameHelperWindowClass',
'ChromeFrameHelperWindowName')
if not window:
print 'Chrome Frame helper process not running.'
else:
try:
win32gui.PostMessage(window, win32con.WM_CLOSE, 0, 0)
print 'Chrome Frame helper process shut down.'
except pywintypes.error as ex:
print 'Failed to shutdown Chrome Frame helper process: '
print ex
exit_code = 1
return exit_code
if __name__ == '__main__':
sys.exit(main())
|
|
225fb524d67faeaa6d5af837f53c25b232b0b43e
|
altair/examples/scatter_linked_table.py
|
altair/examples/scatter_linked_table.py
|
"""
Brushing Scatter Plot to show data on a table
----------------------------------------------
A scatter plot of the cars dataset, with data tables for horsepower, MPG, and origin.
The tables update to reflect the selection on the scatter plot.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.cars()
# Brush for selection
brush = alt.selection(type='interval')
# Scatter Plot
points = alt.Chart().mark_point().encode(
x='Horsepower:Q',
y='Miles_per_Gallon:Q',
color=alt.condition(brush, 'Cylinders:O', alt.value('grey'))
).add_selection(brush)
# Base chart for data tables
ranked_text = alt.Chart().mark_text().encode(
y=alt.Y('row_number:O',axis=None)
).transform_window(
row_number='row_number()'
).transform_filter(
brush
).transform_window(
rank='rank(row_number)'
).transform_filter(
alt.datum.rank<20
)
# Data Tables
horsepower = ranked_text.mark_text().encode(text='Horsepower:N')
mpg = ranked_text.mark_text().encode(text='Miles_per_Gallon:N')
origin = ranked_text.mark_text().encode(text='Origin:N')
# Assign titles to data tables
horsepower.title = 'Horsepower'
mpg.title = 'MPG'
origin.title = 'Origin'
# Combine data tables
text = alt.hconcat(horsepower, mpg, origin)
# Build chart
alt.hconcat(
points,
text,
data=source
).resolve_legend(
color="independent"
)
|
Create brushing scatter plot to show data on a table example
|
Create brushing scatter plot to show data on a table example
|
Python
|
bsd-3-clause
|
altair-viz/altair,jakevdp/altair
|
Create brushing scatter plot to show data on a table example
|
"""
Brushing Scatter Plot to show data on a table
----------------------------------------------
A scatter plot of the cars dataset, with data tables for horsepower, MPG, and origin.
The tables update to reflect the selection on the scatter plot.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.cars()
# Brush for selection
brush = alt.selection(type='interval')
# Scatter Plot
points = alt.Chart().mark_point().encode(
x='Horsepower:Q',
y='Miles_per_Gallon:Q',
color=alt.condition(brush, 'Cylinders:O', alt.value('grey'))
).add_selection(brush)
# Base chart for data tables
ranked_text = alt.Chart().mark_text().encode(
y=alt.Y('row_number:O',axis=None)
).transform_window(
row_number='row_number()'
).transform_filter(
brush
).transform_window(
rank='rank(row_number)'
).transform_filter(
alt.datum.rank<20
)
# Data Tables
horsepower = ranked_text.mark_text().encode(text='Horsepower:N')
mpg = ranked_text.mark_text().encode(text='Miles_per_Gallon:N')
origin = ranked_text.mark_text().encode(text='Origin:N')
# Assign titles to data tables
horsepower.title = 'Horsepower'
mpg.title = 'MPG'
origin.title = 'Origin'
# Combine data tables
text = alt.hconcat(horsepower, mpg, origin)
# Build chart
alt.hconcat(
points,
text,
data=source
).resolve_legend(
color="independent"
)
|
<commit_before><commit_msg>Create brushing scatter plot to show data on a table example<commit_after>
|
"""
Brushing Scatter Plot to show data on a table
----------------------------------------------
A scatter plot of the cars dataset, with data tables for horsepower, MPG, and origin.
The tables update to reflect the selection on the scatter plot.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.cars()
# Brush for selection
brush = alt.selection(type='interval')
# Scatter Plot
points = alt.Chart().mark_point().encode(
x='Horsepower:Q',
y='Miles_per_Gallon:Q',
color=alt.condition(brush, 'Cylinders:O', alt.value('grey'))
).add_selection(brush)
# Base chart for data tables
ranked_text = alt.Chart().mark_text().encode(
y=alt.Y('row_number:O',axis=None)
).transform_window(
row_number='row_number()'
).transform_filter(
brush
).transform_window(
rank='rank(row_number)'
).transform_filter(
alt.datum.rank<20
)
# Data Tables
horsepower = ranked_text.mark_text().encode(text='Horsepower:N')
mpg = ranked_text.mark_text().encode(text='Miles_per_Gallon:N')
origin = ranked_text.mark_text().encode(text='Origin:N')
# Assign titles to data tables
horsepower.title = 'Horsepower'
mpg.title = 'MPG'
origin.title = 'Origin'
# Combine data tables
text = alt.hconcat(horsepower, mpg, origin)
# Build chart
alt.hconcat(
points,
text,
data=source
).resolve_legend(
color="independent"
)
|
Create brushing scatter plot to show data on a table example"""
Brushing Scatter Plot to show data on a table
----------------------------------------------
A scatter plot of the cars dataset, with data tables for horsepower, MPG, and origin.
The tables update to reflect the selection on the scatter plot.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.cars()
# Brush for selection
brush = alt.selection(type='interval')
# Scatter Plot
points = alt.Chart().mark_point().encode(
x='Horsepower:Q',
y='Miles_per_Gallon:Q',
color=alt.condition(brush, 'Cylinders:O', alt.value('grey'))
).add_selection(brush)
# Base chart for data tables
ranked_text = alt.Chart().mark_text().encode(
y=alt.Y('row_number:O',axis=None)
).transform_window(
row_number='row_number()'
).transform_filter(
brush
).transform_window(
rank='rank(row_number)'
).transform_filter(
alt.datum.rank<20
)
# Data Tables
horsepower = ranked_text.mark_text().encode(text='Horsepower:N')
mpg = ranked_text.mark_text().encode(text='Miles_per_Gallon:N')
origin = ranked_text.mark_text().encode(text='Origin:N')
# Assign titles to data tables
horsepower.title = 'Horsepower'
mpg.title = 'MPG'
origin.title = 'Origin'
# Combine data tables
text = alt.hconcat(horsepower, mpg, origin)
# Build chart
alt.hconcat(
points,
text,
data=source
).resolve_legend(
color="independent"
)
|
<commit_before><commit_msg>Create brushing scatter plot to show data on a table example<commit_after>"""
Brushing Scatter Plot to show data on a table
----------------------------------------------
A scatter plot of the cars dataset, with data tables for horsepower, MPG, and origin.
The tables update to reflect the selection on the scatter plot.
"""
# category: scatter plots
import altair as alt
from vega_datasets import data
source = data.cars()
# Brush for selection
brush = alt.selection(type='interval')
# Scatter Plot
points = alt.Chart().mark_point().encode(
x='Horsepower:Q',
y='Miles_per_Gallon:Q',
color=alt.condition(brush, 'Cylinders:O', alt.value('grey'))
).add_selection(brush)
# Base chart for data tables
ranked_text = alt.Chart().mark_text().encode(
y=alt.Y('row_number:O',axis=None)
).transform_window(
row_number='row_number()'
).transform_filter(
brush
).transform_window(
rank='rank(row_number)'
).transform_filter(
alt.datum.rank<20
)
# Data Tables
horsepower = ranked_text.mark_text().encode(text='Horsepower:N')
mpg = ranked_text.mark_text().encode(text='Miles_per_Gallon:N')
origin = ranked_text.mark_text().encode(text='Origin:N')
# Assign titles to data tables
horsepower.title = 'Horsepower'
mpg.title = 'MPG'
origin.title = 'Origin'
# Combine data tables
text = alt.hconcat(horsepower, mpg, origin)
# Build chart
alt.hconcat(
points,
text,
data=source
).resolve_legend(
color="independent"
)
|
|
ba6673615deeb776919ef98fb4f0b3a07fb80801
|
hackerrank/mini_max_sum.py
|
hackerrank/mini_max_sum.py
|
#!/bin/python3
# https://www.hackerrank.com/challenges/mini-max-sum/problem
"""
The algorithm used to solve this problem is the following:
1. Get the minimum element of the array
2. Get the maximum element of the array
3. Get the sum of all the elements in the array
4. Calculate the min sum -> sum(arr) - max_element
5. Calculate the max sum -> sum(arr) - min_element
The complexity analysis of the proposed algorithm is O(1). Why?
Because the input of the problem is fixed, an array of 5 elements.
According to the book Introduction to Algorithms:
> When we look at input sizes large enough to make only the order of growth of
> the running time relevant, we are studying the asymptotic efficiency of
> algorithms. That is, we are concerned with how the running time of an algorithm
> increases with the size of the input in the limit, as the size of the input
> increases without bound. Usually, an algorithm that is asymptotically more
> efficient will be the best choice for all but very small inputs.
"3 Growth of Functions." Introduction to Algorithms, by Thomas H. Cormen,
MIT Press, 2009.
In short, asymptotic analysis of a problem with a fixed input can be
simplified as O(1).
For a longer explanation please see: https://stackoverflow.com/a/2027842/2420718
"""
def solve(arr):
min_element = min(arr)
max_element = max(arr)
sum_arr = sum(arr)
return (sum_arr - max_element, sum_arr - min_element)
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
(min_sum, max_sum) = solve(arr)
print(f'{min_sum} {max_sum}')
|
Add solution to the problem Mini-Max Sum
|
Add solution to the problem Mini-Max Sum
|
Python
|
mit
|
julianespinel/training,julianespinel/training,julianespinel/training,julianespinel/trainning,julianespinel/trainning,julianespinel/training
|
Add solution to the problem Mini-Max Sum
|
#!/bin/python3
# https://www.hackerrank.com/challenges/mini-max-sum/problem
"""
The algorithm used to solve this problem is the following:
1. Get the minimum element of the array
2. Get the maximum element of the array
3. Get the sum of all the elements in the array
4. Calculate the min sum -> sum(arr) - max_element
5. Calculate the max sum -> sum(arr) - min_element
The complexity analysis of the proposed algorithm is O(1). Why?
Because the input of the problem is fixed, an array of 5 elements.
According to the book Introduction to Algorithms:
> When we look at input sizes large enough to make only the order of growth of
> the running time relevant, we are studying the asymptotic efficiency of
> algorithms. That is, we are concerned with how the running time of an algorithm
> increases with the size of the input in the limit, as the size of the input
> increases without bound. Usually, an algorithm that is asymptotically more
> efficient will be the best choice for all but very small inputs.
"3 Growth of Functions." Introduction to Algorithms, by Thomas H. Cormen,
MIT Press, 2009.
In short, asymptotic analysis of a problem with a fixed input can be
simplified as O(1).
For a longer explanation please see: https://stackoverflow.com/a/2027842/2420718
"""
def solve(arr):
min_element = min(arr)
max_element = max(arr)
sum_arr = sum(arr)
return (sum_arr - max_element, sum_arr - min_element)
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
(min_sum, max_sum) = solve(arr)
print(f'{min_sum} {max_sum}')
|
<commit_before><commit_msg>Add solution to the problem Mini-Max Sum<commit_after>
|
#!/bin/python3
# https://www.hackerrank.com/challenges/mini-max-sum/problem
"""
The algorithm used to solve this problem is the following:
1. Get the minimum element of the array
2. Get the maximum element of the array
3. Get the sum of all the elements in the array
4. Calculate the min sum -> sum(arr) - max_element
5. Calculate the max sum -> sum(arr) - min_element
The complexity analysis of the proposed algorithm is O(1). Why?
Because the input of the problem is fixed, an array of 5 elements.
According to the book Introduction to Algorithms:
> When we look at input sizes large enough to make only the order of growth of
> the running time relevant, we are studying the asymptotic efficiency of
> algorithms. That is, we are concerned with how the running time of an algorithm
> increases with the size of the input in the limit, as the size of the input
> increases without bound. Usually, an algorithm that is asymptotically more
> efficient will be the best choice for all but very small inputs.
"3 Growth of Functions." Introduction to Algorithms, by Thomas H. Cormen,
MIT Press, 2009.
In short, asymptotic analysis of a problem with a fixed input can be
simplified as O(1).
For a longer explanation please see: https://stackoverflow.com/a/2027842/2420718
"""
def solve(arr):
min_element = min(arr)
max_element = max(arr)
sum_arr = sum(arr)
return (sum_arr - max_element, sum_arr - min_element)
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
(min_sum, max_sum) = solve(arr)
print(f'{min_sum} {max_sum}')
|
Add solution to the problem Mini-Max Sum#!/bin/python3
# https://www.hackerrank.com/challenges/mini-max-sum/problem
"""
The algorithm used to solve this problem is the following:
1. Get the minimum element of the array
2. Get the maximum element of the array
3. Get the sum of all the elements in the array
4. Calculate the min sum -> sum(arr) - max_element
5. Calculate the max sum -> sum(arr) - min_element
The complexity analysis of the proposed algorithm is O(1). Why?
Because the input of the problem is fixed, an array of 5 elements.
According to the book Introduction to Algorithms:
> When we look at input sizes large enough to make only the order of growth of
> the running time relevant, we are studying the asymptotic efficiency of
> algorithms. That is, we are concerned with how the running time of an algorithm
> increases with the size of the input in the limit, as the size of the input
> increases without bound. Usually, an algorithm that is asymptotically more
> efficient will be the best choice for all but very small inputs.
"3 Growth of Functions." Introduction to Algorithms, by Thomas H. Cormen,
MIT Press, 2009.
In short, asymptotic analysis of a problem with a fixed input can be
simplified as O(1).
For a longer explanation please see: https://stackoverflow.com/a/2027842/2420718
"""
def solve(arr):
min_element = min(arr)
max_element = max(arr)
sum_arr = sum(arr)
return (sum_arr - max_element, sum_arr - min_element)
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
(min_sum, max_sum) = solve(arr)
print(f'{min_sum} {max_sum}')
|
<commit_before><commit_msg>Add solution to the problem Mini-Max Sum<commit_after>#!/bin/python3
# https://www.hackerrank.com/challenges/mini-max-sum/problem
"""
The algorithm used to solve this problem is the following:
1. Get the minimum element of the array
2. Get the maximum element of the array
3. Get the sum of all the elements in the array
4. Calculate the min sum -> sum(arr) - max_element
5. Calculate the max sum -> sum(arr) - min_element
The complexity analysis of the proposed algorithm is O(1). Why?
Because the input of the problem is fixed, an array of 5 elements.
According to the book Introduction to Algorithms:
> When we look at input sizes large enough to make only the order of growth of
> the running time relevant, we are studying the asymptotic efficiency of
> algorithms. That is, we are concerned with how the running time of an algorithm
> increases with the size of the input in the limit, as the size of the input
> increases without bound. Usually, an algorithm that is asymptotically more
> efficient will be the best choice for all but very small inputs.
"3 Growth of Functions." Introduction to Algorithms, by Thomas H. Cormen,
MIT Press, 2009.
In short, asymptotic analysis of a problem with a fixed input can be
simplified as O(1).
For a longer explanation please see: https://stackoverflow.com/a/2027842/2420718
"""
def solve(arr):
min_element = min(arr)
max_element = max(arr)
sum_arr = sum(arr)
return (sum_arr - max_element, sum_arr - min_element)
if __name__ == '__main__':
arr = list(map(int, input().rstrip().split()))
(min_sum, max_sum) = solve(arr)
print(f'{min_sum} {max_sum}')
|
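
A worked check of the five steps described in the record above, on a small sample array (illustrative only, not part of the commit):

# Steps 3-5 from the docstring, applied to a sample array.
arr = [1, 2, 3, 4, 5]
total = sum(arr)             # step 3 -> 15
min_sum = total - max(arr)   # step 4 -> 15 - 5 = 10
max_sum = total - min(arr)   # step 5 -> 15 - 1 = 14
assert solve(arr) == (10, 14)
print(min_sum, max_sum)      # prints: 10 14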
|
aa215f1d5e6059517c3f7e13287434fd7e193e26
|
examples/sector-coupling/battery-electric-vehicle-charging.py
|
examples/sector-coupling/battery-electric-vehicle-charging.py
|
## Battery Electric Vehicle Charging
#
#In this example a battery electric vehicle (BEV) is driven 100 km in the morning and 100 km in the evening, to simulate commuting, and charged during the day by a solar panel at the driver's place of work. The size of the panel is computed by the optimisation.
#
#The BEV has a battery of size 100 kWh and an electricity consumption of 0.18 kWh/km.
import pypsa
import pandas as pd
#%matplotlib inline
# NB: this example will use units of kW and kWh, unlike the PyPSA defaults
# use 24 hour period for consideration
index = pd.date_range("2016-01-01 00:00","2016-01-01 23:00",freq="H")
# consumption pattern of BEV
bev_usage = pd.Series([0.]*7 + [9.]*2 + [0.]*8 + [9.]*2 + [0.]*5,index)
# solar PV panel generation per unit of capacity - this is only available while parked at place of work
pv_pu = pd.Series([0.]*9 + [0.6,0.75,0.85,0.9,0.85,0.75,0.6,0.4] + [0.]*7,index)
bev_usage.plot()
pv_pu.plot()
network = pypsa.Network()
network.set_snapshots(index)
network.add("Bus",
"place of work",
carrier="AC")
network.add("Bus",
"battery",
carrier="Li-ion")
network.add("Generator",
"PV panel",
bus="place of work",
dispatch="variable",
p_nom_extendable=True,
p_max_pu=pv_pu)
network.add("Load",
"driving",
bus="battery",
p_set=bev_usage)
network.add("Link",
"charger",
bus0="place of work",
bus1="battery",
p_nom="120", #super-charger
efficiency=0.9,
s_nom_extendable=True)
network.add("Store",
"battery storage",
bus="battery",
e_cyclic=True,
e_nom=100)
network.lopf(network.snapshots)
print("Pannel size [kW]:",network.generators.p_nom_opt["PV panel"])
network.generators_t.p.plot()
network.stores_t.loc[["p","e"],:,"battery storage"].plot(grid=True)
print("Losses [kWh/d]:",network.generators_t.loc["p",:,"PV panel"].sum() - network.loads_t.loc["p",:,"driving"].sum())
|
Add Battery Electric Vehicle (BEV) charging example
|
Add Battery Electric Vehicle (BEV) charging example
|
Python
|
mit
|
PyPSA/PyPSA
|
Add Battery Electric Vehicle (BEV) charging example
|
## Battery Electric Vehicle Charging
#
#In this example a battery electric vehicle (BEV) is driven 100 km in the morning and 100 km in the evening, to simulate commuting, and charged during the day by a solar panel at the driver's place of work. The size of the panel is computed by the optimisation.
#
#The BEV has a battery of size 100 kWh and an electricity consumption of 0.18 kWh/km.
import pypsa
import pandas as pd
#%matplotlib inline
# NB: this example will use units of kW and kWh, unlike the PyPSA defaults
# use 24 hour period for consideration
index = pd.date_range("2016-01-01 00:00","2016-01-01 23:00",freq="H")
# consumption pattern of BEV
bev_usage = pd.Series([0.]*7 + [9.]*2 + [0.]*8 + [9.]*2 + [0.]*5,index)
# solar PV panel generation per unit of capacity - this is only available while parked at place of work
pv_pu = pd.Series([0.]*9 + [0.6,0.75,0.85,0.9,0.85,0.75,0.6,0.4] + [0.]*7,index)
bev_usage.plot()
pv_pu.plot()
network = pypsa.Network()
network.set_snapshots(index)
network.add("Bus",
"place of work",
carrier="AC")
network.add("Bus",
"battery",
carrier="Li-ion")
network.add("Generator",
"PV panel",
bus="place of work",
dispatch="variable",
p_nom_extendable=True,
p_max_pu=pv_pu)
network.add("Load",
"driving",
bus="battery",
p_set=bev_usage)
network.add("Link",
"charger",
bus0="place of work",
bus1="battery",
p_nom="120", #super-charger
efficiency=0.9,
s_nom_extendable=True)
network.add("Store",
"battery storage",
bus="battery",
e_cyclic=True,
e_nom=100)
network.lopf(network.snapshots)
print("Pannel size [kW]:",network.generators.p_nom_opt["PV panel"])
network.generators_t.p.plot()
network.stores_t.loc[["p","e"],:,"battery storage"].plot(grid=True)
print("Losses [kWh/d]:",network.generators_t.loc["p",:,"PV panel"].sum() - network.loads_t.loc["p",:,"driving"].sum())
|
<commit_before><commit_msg>Add Battery Electric Vehicle (BEV) charging example<commit_after>
|
## Battery Electric Vehicle Charging
#
#In this example a battery electric vehicle (BEV) is driven 100 km in the morning and 100 km in the evening, to simulate commuting, and charged during the day by a solar panel at the driver's place of work. The size of the panel is computed by the optimisation.
#
#The BEV has a battery of size 100 kWh and an electricity consumption of 0.18 kWh/km.
import pypsa
import pandas as pd
#%matplotlib inline
# NB: this example will use units of kW and kWh, unlike the PyPSA defaults
# use 24 hour period for consideration
index = pd.date_range("2016-01-01 00:00","2016-01-01 23:00",freq="H")
# consumption pattern of BEV
bev_usage = pd.Series([0.]*7 + [9.]*2 + [0.]*8 + [9.]*2 + [0.]*5,index)
# solar PV panel generation per unit of capacity - this is only available while parked at place of work
pv_pu = pd.Series([0.]*9 + [0.6,0.75,0.85,0.9,0.85,0.75,0.6,0.4] + [0.]*7,index)
bev_usage.plot()
pv_pu.plot()
network = pypsa.Network()
network.set_snapshots(index)
network.add("Bus",
"place of work",
carrier="AC")
network.add("Bus",
"battery",
carrier="Li-ion")
network.add("Generator",
"PV panel",
bus="place of work",
dispatch="variable",
p_nom_extendable=True,
p_max_pu=pv_pu)
network.add("Load",
"driving",
bus="battery",
p_set=bev_usage)
network.add("Link",
"charger",
bus0="place of work",
bus1="battery",
p_nom="120", #super-charger
efficiency=0.9,
s_nom_extendable=True)
network.add("Store",
"battery storage",
bus="battery",
e_cyclic=True,
e_nom=100)
network.lopf(network.snapshots)
print("Pannel size [kW]:",network.generators.p_nom_opt["PV panel"])
network.generators_t.p.plot()
network.stores_t.loc[["p","e"],:,"battery storage"].plot(grid=True)
print("Losses [kWh/d]:",network.generators_t.loc["p",:,"PV panel"].sum() - network.loads_t.loc["p",:,"driving"].sum())
|
Add Battery Electric Vehicle (BEV) charging example## Battery Electric Vehicle Charging
#
#In this example a battery electric vehicle (BEV) is driven 100 km in the morning and 100 km in the evening, to simulate commuting, and charged during the day by a solar panel at the driver's place of work. The size of the panel is computed by the optimisation.
#
#The BEV has a battery of size 100 kWh and an electricity consumption of 0.18 kWh/km.
import pypsa
import pandas as pd
#%matplotlib inline
# NB: this example will use units of kW and kWh, unlike the PyPSA defaults
# use 24 hour period for consideration
index = pd.date_range("2016-01-01 00:00","2016-01-01 23:00",freq="H")
# consumption pattern of BEV
bev_usage = pd.Series([0.]*7 + [9.]*2 + [0.]*8 + [9.]*2 + [0.]*5,index)
# solar PV panel generation per unit of capacity - this is only available while parked at place of work
pv_pu = pd.Series([0.]*9 + [0.6,0.75,0.85,0.9,0.85,0.75,0.6,0.4] + [0.]*7,index)
bev_usage.plot()
pv_pu.plot()
network = pypsa.Network()
network.set_snapshots(index)
network.add("Bus",
"place of work",
carrier="AC")
network.add("Bus",
"battery",
carrier="Li-ion")
network.add("Generator",
"PV panel",
bus="place of work",
dispatch="variable",
p_nom_extendable=True,
p_max_pu=pv_pu)
network.add("Load",
"driving",
bus="battery",
p_set=bev_usage)
network.add("Link",
"charger",
bus0="place of work",
bus1="battery",
p_nom="120", #super-charger
efficiency=0.9,
s_nom_extendable=True)
network.add("Store",
"battery storage",
bus="battery",
e_cyclic=True,
e_nom=100)
network.lopf(network.snapshots)
print("Pannel size [kW]:",network.generators.p_nom_opt["PV panel"])
network.generators_t.p.plot()
network.stores_t.loc[["p","e"],:,"battery storage"].plot(grid=True)
print("Losses [kWh/d]:",network.generators_t.loc["p",:,"PV panel"].sum() - network.loads_t.loc["p",:,"driving"].sum())
|
<commit_before><commit_msg>Add Battery Electric Vehicle (BEV) charging example<commit_after>## Battery Electric Vehicle Charging
#
#In this example a battery electric vehicle (BEV) is driven 100 km in the morning and 100 km in the evening, to simulate commuting, and charged during the day by a solar panel at the driver's place of work. The size of the panel is computed by the optimisation.
#
#The BEV has a battery of size 100 kWh and an electricity consumption of 0.18 kWh/km.
import pypsa
import pandas as pd
#%matplotlib inline
# NB: this example will use units of kW and kWh, unlike the PyPSA defaults
# use 24 hour period for consideration
index = pd.date_range("2016-01-01 00:00","2016-01-01 23:00",freq="H")
# consumption pattern of BEV
bev_usage = pd.Series([0.]*7 + [9.]*2 + [0.]*8 + [9.]*2 + [0.]*5,index)
# solar PV panel generation per unit of capacity - this is only available while parked at place of work
pv_pu = pd.Series([0.]*9 + [0.6,0.75,0.85,0.9,0.85,0.75,0.6,0.4] + [0.]*7,index)
bev_usage.plot()
pv_pu.plot()
network = pypsa.Network()
network.set_snapshots(index)
network.add("Bus",
"place of work",
carrier="AC")
network.add("Bus",
"battery",
carrier="Li-ion")
network.add("Generator",
"PV panel",
bus="place of work",
dispatch="variable",
p_nom_extendable=True,
p_max_pu=pv_pu)
network.add("Load",
"driving",
bus="battery",
p_set=bev_usage)
network.add("Link",
"charger",
bus0="place of work",
bus1="battery",
p_nom="120", #super-charger
efficiency=0.9,
s_nom_extendable=True)
network.add("Store",
"battery storage",
bus="battery",
e_cyclic=True,
e_nom=100)
network.lopf(network.snapshots)
print("Pannel size [kW]:",network.generators.p_nom_opt["PV panel"])
network.generators_t.p.plot()
network.stores_t.loc[["p","e"],:,"battery storage"].plot(grid=True)
print("Losses [kWh/d]:",network.generators_t.loc["p",:,"PV panel"].sum() - network.loads_t.loc["p",:,"driving"].sum())
|
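
As a quick cross-check of the load profile in the record above (a worked example, not part of the commit): the stated 0.18 kWh/km consumption over a 100 km leg is exactly the 9 kW drawn for two hours in the bev_usage series.

# Worked check of the driving load (illustrative only).
kwh_per_km = 0.18                          # stated BEV consumption
km_per_leg = 100                           # commute distance, morning or evening
energy_per_leg = kwh_per_km * km_per_leg   # 18 kWh per leg
hours_per_leg = 2                          # bev_usage draws power for 2 hours per leg
print(energy_per_leg / hours_per_leg)      # 9.0 kW, matching the series above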
|
47cb213be2071ad77f75d350ead4cfd7c7dc2d62
|
py/increasing-triplet-subsequence.py
|
py/increasing-triplet-subsequence.py
|
class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
first, second = None, None
for n in nums:
if first is None or n <= first:
first = n
elif second is None or n <= second:
second = n
else:
return True
return False
|
Add py solution for 334. Increasing Triplet Subsequence
|
Add py solution for 334. Increasing Triplet Subsequence
334. Increasing Triplet Subsequence: https://leetcode.com/problems/increasing-triplet-subsequence/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 334. Increasing Triplet Subsequence
334. Increasing Triplet Subsequence: https://leetcode.com/problems/increasing-triplet-subsequence/
|
class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
first, second = None, None
for n in nums:
if first is None or n <= first:
first = n
elif second is None or n <= second:
second = n
else:
return True
return False
|
<commit_before><commit_msg>Add py solution for 334. Increasing Triplet Subsequence
334. Increasing Triplet Subsequence: https://leetcode.com/problems/increasing-triplet-subsequence/<commit_after>
|
class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
first, second = None, None
for n in nums:
if first is None or n <= first:
first = n
elif second is None or n <= second:
second = n
else:
return True
return False
|
Add py solution for 334. Increasing Triplet Subsequence
334. Increasing Triplet Subsequence: https://leetcode.com/problems/increasing-triplet-subsequence/class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
first, second = None, None
for n in nums:
if first is None or n <= first:
first = n
elif second is None or n <= second:
second = n
else:
return True
return False
|
<commit_before><commit_msg>Add py solution for 334. Increasing Triplet Subsequence
334. Increasing Triplet Subsequence: https://leetcode.com/problems/increasing-triplet-subsequence/<commit_after>class Solution(object):
def increasingTriplet(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
first, second = None, None
for n in nums:
if first is None or n <= first:
first = n
elif second is None or n <= second:
second = n
else:
return True
return False
|
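
A short trace of the greedy scan in the record above (illustrative only). The subtle point is that `first` may be lowered after `second` is set; returning True is still correct because some earlier value no larger than `second` preceded it.

# Trace on [5, 1, 6, 0, 7]:
#   5 -> first=5;  1 -> first=1;  6 -> second=6;
#   0 -> first=0 (second stays 6);  7 -> exceeds second, so return True (e.g. 1 < 6 < 7).
sol = Solution()
assert sol.increasingTriplet([5, 1, 6, 0, 7])
assert not sol.increasingTriplet([5, 4, 3, 2, 1])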
|
fc772d73549d9a99188cf0534eba7d2f1e0a52d7
|
teuthology/test/test_schedule.py
|
teuthology/test/test_schedule.py
|
from ..schedule import build_config
from ..misc import get_user
class TestSchedule(object):
basic_args = {
'--verbose': False,
'--owner': 'OWNER',
'--description': 'DESC',
'--email': 'EMAIL',
'--last-in-suite': True,
'--name': 'NAME',
'--worker': 'tala',
'--timeout': '6',
'--priority': '99',
# TODO: make this work regardless of $PWD
#'<conf_file>': ['../../examples/3node_ceph.yaml',
# '../../examples/3node_rgw.yaml'],
}
def test_basic(self):
expected = {
'description': 'DESC',
'email': 'EMAIL',
'last_in_suite': True,
'machine_type': 'tala',
'name': 'NAME',
'owner': 'OWNER',
'priority': 99,
'results_timeout': '6',
'verbose': False,
'tube': 'tala',
}
job_dict = build_config(self.basic_args)
assert job_dict == expected
def test_owner(self):
args = self.basic_args
args['--owner'] = None
job_dict = build_config(self.basic_args)
assert job_dict['owner'] == 'scheduled_%s' % get_user()
|
Add a couple tests for teuthology.schedule
|
Add a couple tests for teuthology.schedule
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>
|
Python
|
mit
|
michaelsevilla/teuthology,ktdreyer/teuthology,robbat2/teuthology,tchaikov/teuthology,zhouyuan/teuthology,tchaikov/teuthology,t-miyamae/teuthology,dmick/teuthology,yghannam/teuthology,yghannam/teuthology,ivotron/teuthology,dmick/teuthology,ceph/teuthology,SUSE/teuthology,robbat2/teuthology,dreamhost/teuthology,SUSE/teuthology,caibo2014/teuthology,zhouyuan/teuthology,dreamhost/teuthology,caibo2014/teuthology,SUSE/teuthology,dmick/teuthology,ceph/teuthology,michaelsevilla/teuthology,ktdreyer/teuthology,ivotron/teuthology,t-miyamae/teuthology
|
Add a couple tests for teuthology.schedule
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>
|
from ..schedule import build_config
from ..misc import get_user
class TestSchedule(object):
basic_args = {
'--verbose': False,
'--owner': 'OWNER',
'--description': 'DESC',
'--email': 'EMAIL',
'--last-in-suite': True,
'--name': 'NAME',
'--worker': 'tala',
'--timeout': '6',
'--priority': '99',
# TODO: make this work regardless of $PWD
#'<conf_file>': ['../../examples/3node_ceph.yaml',
# '../../examples/3node_rgw.yaml'],
}
def test_basic(self):
expected = {
'description': 'DESC',
'email': 'EMAIL',
'last_in_suite': True,
'machine_type': 'tala',
'name': 'NAME',
'owner': 'OWNER',
'priority': 99,
'results_timeout': '6',
'verbose': False,
'tube': 'tala',
}
job_dict = build_config(self.basic_args)
assert job_dict == expected
def test_owner(self):
args = self.basic_args
args['--owner'] = None
job_dict = build_config(self.basic_args)
assert job_dict['owner'] == 'scheduled_%s' % get_user()
|
<commit_before><commit_msg>Add a couple tests for teuthology.schedule
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com><commit_after>
|
from ..schedule import build_config
from ..misc import get_user
class TestSchedule(object):
basic_args = {
'--verbose': False,
'--owner': 'OWNER',
'--description': 'DESC',
'--email': 'EMAIL',
'--last-in-suite': True,
'--name': 'NAME',
'--worker': 'tala',
'--timeout': '6',
'--priority': '99',
# TODO: make this work regardless of $PWD
#'<conf_file>': ['../../examples/3node_ceph.yaml',
# '../../examples/3node_rgw.yaml'],
}
def test_basic(self):
expected = {
'description': 'DESC',
'email': 'EMAIL',
'last_in_suite': True,
'machine_type': 'tala',
'name': 'NAME',
'owner': 'OWNER',
'priority': 99,
'results_timeout': '6',
'verbose': False,
'tube': 'tala',
}
job_dict = build_config(self.basic_args)
assert job_dict == expected
def test_owner(self):
args = self.basic_args
args['--owner'] = None
job_dict = build_config(self.basic_args)
assert job_dict['owner'] == 'scheduled_%s' % get_user()
|
Add a couple tests for teuthology.schedule
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com>from ..schedule import build_config
from ..misc import get_user
class TestSchedule(object):
basic_args = {
'--verbose': False,
'--owner': 'OWNER',
'--description': 'DESC',
'--email': 'EMAIL',
'--last-in-suite': True,
'--name': 'NAME',
'--worker': 'tala',
'--timeout': '6',
'--priority': '99',
# TODO: make this work regardless of $PWD
#'<conf_file>': ['../../examples/3node_ceph.yaml',
# '../../examples/3node_rgw.yaml'],
}
def test_basic(self):
expected = {
'description': 'DESC',
'email': 'EMAIL',
'last_in_suite': True,
'machine_type': 'tala',
'name': 'NAME',
'owner': 'OWNER',
'priority': 99,
'results_timeout': '6',
'verbose': False,
'tube': 'tala',
}
job_dict = build_config(self.basic_args)
assert job_dict == expected
def test_owner(self):
args = self.basic_args
args['--owner'] = None
job_dict = build_config(self.basic_args)
assert job_dict['owner'] == 'scheduled_%s' % get_user()
|
<commit_before><commit_msg>Add a couple tests for teuthology.schedule
Signed-off-by: Zack Cerza <f801c831581d4150a2793939287636221d62131e@inktank.com><commit_after>from ..schedule import build_config
from ..misc import get_user
class TestSchedule(object):
basic_args = {
'--verbose': False,
'--owner': 'OWNER',
'--description': 'DESC',
'--email': 'EMAIL',
'--last-in-suite': True,
'--name': 'NAME',
'--worker': 'tala',
'--timeout': '6',
'--priority': '99',
# TODO: make this work regardless of $PWD
#'<conf_file>': ['../../examples/3node_ceph.yaml',
# '../../examples/3node_rgw.yaml'],
}
def test_basic(self):
expected = {
'description': 'DESC',
'email': 'EMAIL',
'last_in_suite': True,
'machine_type': 'tala',
'name': 'NAME',
'owner': 'OWNER',
'priority': 99,
'results_timeout': '6',
'verbose': False,
'tube': 'tala',
}
job_dict = build_config(self.basic_args)
assert job_dict == expected
def test_owner(self):
args = self.basic_args
args['--owner'] = None
job_dict = build_config(self.basic_args)
assert job_dict['owner'] == 'scheduled_%s' % get_user()
|
|
4812103b4d9be418aecdc64341fb32be7865f113
|
core/backends/IUnikernelBackend.py
|
core/backends/IUnikernelBackend.py
|
from abc import ABCMeta, abstractmethod
class IUnikernelBackend(object):
"""
Interface that must be implemented by every Unikernel Backend. It contains method stubs used by the REST API
provider and other components.
    Redefinition of functions decorated with @abstractmethod is compulsory.
"""
__metaclass__ = ABCMeta
@abstractmethod
def register(self, _id):
"""
Initialize directory structure for the unikernel, and register it to the database and scheduler.
:param _id: ID of the unikernel
:return: None
"""
pass
@abstractmethod
def configure(self, _id):
"""
Configure the unikernel to be built for the specific backend
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def compile(self, _id):
"""
Build the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def optimize(self, _id):
"""
Optimize the unikernel binary/VM by stripping off debug symbols / applying data compression, etc.
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def start(self, _id):
"""
Launch/boot the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_status(self, _id):
"""
Get status of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_log(self, _id):
"""
Get runtime log of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def stop(self, _id):
"""
Kill execution of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def destroy(self, _id):
"""
Destroy the unikernel, remove all assets, and unregister from database and scheduler.
:param _id: ID of the unikernel
:return:
"""
pass
|
Add interface for unikernel backends to implement
|
Add interface for unikernel backends to implement
|
Python
|
apache-2.0
|
onyb/dune,adyasha/dune,adyasha/dune,adyasha/dune
|
Add interface for unikernel backends to implement
|
from abc import ABCMeta, abstractmethod
class IUnikernelBackend(object):
"""
Interface that must be implemented by every Unikernel Backend. It contains method stubs used by the REST API
provider and other components.
    Redefinition of functions decorated with @abstractmethod is compulsory.
"""
__metaclass__ = ABCMeta
@abstractmethod
def register(self, _id):
"""
Initialize directory structure for the unikernel, and register it to the database and scheduler.
:param _id: ID of the unikernel
:return: None
"""
pass
@abstractmethod
def configure(self, _id):
"""
Configure the unikernel to be built for the specific backend
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def compile(self, _id):
"""
Build the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def optimize(self, _id):
"""
Optimize the unikernel binary/VM by stripping off debug symbols / applying data compression, etc.
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def start(self, _id):
"""
Launch/boot the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_status(self, _id):
"""
Get status of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_log(self, _id):
"""
Get runtime log of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def stop(self, _id):
"""
Kill execution of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def destroy(self, _id):
"""
Destroy the unikernel, remove all assets, and unregister from database and scheduler.
:param _id: ID of the unikernel
:return:
"""
pass
|
<commit_before><commit_msg>Add interface for unikernel backends to implement<commit_after>
|
from abc import ABCMeta, abstractmethod
class IUnikernelBackend(object):
"""
Interface that must be implemented by every Unikernel Backend. It contains method stubs used by the REST API
provider and other components.
    Redefinition of functions decorated with @abstractmethod is compulsory.
"""
__metaclass__ = ABCMeta
@abstractmethod
def register(self, _id):
"""
Initialize directory structure for the unikernel, and register it to the database and scheduler.
:param _id: ID of the unikernel
:return: None
"""
pass
@abstractmethod
def configure(self, _id):
"""
Configure the unikernel to be built for the specific backend
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def compile(self, _id):
"""
Build the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def optimize(self, _id):
"""
Optimize the unikernel binary/VM by stripping off debug symbols / applying data compression, etc.
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def start(self, _id):
"""
Launch/boot the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_status(self, _id):
"""
Get status of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_log(self, _id):
"""
Get runtime log of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def stop(self, _id):
"""
Kill execution of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def destroy(self, _id):
"""
Destroy the unikernel, remove all assets, and unregister from database and scheduler.
:param _id: ID of the unikernel
:return:
"""
pass
|
Add interface for unikernel backends to implementfrom abc import ABCMeta, abstractmethod
class IUnikernelBackend(object):
"""
Interface that must be implemented by every Unikernel Backend. It contains method stubs used by the REST API
provider and other components.
    Redefinition of functions decorated with @abstractmethod is compulsory.
"""
__metaclass__ = ABCMeta
@abstractmethod
def register(self, _id):
"""
Initialize directory structure for the unikernel, and register it to the database and scheduler.
:param _id: ID of the unikernel
:return: None
"""
pass
@abstractmethod
def configure(self, _id):
"""
Configure the unikernel to be built for the specific backend
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def compile(self, _id):
"""
Build the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def optimize(self, _id):
"""
Optimize the unikernel binary/VM by stripping off debug symbols / applying data compression, etc.
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def start(self, _id):
"""
Launch/boot the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_status(self, _id):
"""
Get status of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_log(self, _id):
"""
Get runtime log of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def stop(self, _id):
"""
Kill execution of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def destroy(self, _id):
"""
Destroy the unikernel, remove all assets, and unregister from database and scheduler.
:param _id: ID of the unikernel
:return:
"""
pass
|
<commit_before><commit_msg>Add interface for unikernel backends to implement<commit_after>from abc import ABCMeta, abstractmethod
class IUnikernelBackend(object):
"""
Interface that must be implemented by every Unikernel Backend. It contains method stubs used by the REST API
provider and other components.
    Redefinition of functions decorated with @abstractmethod is compulsory.
"""
__metaclass__ = ABCMeta
@abstractmethod
def register(self, _id):
"""
Initialize directory structure for the unikernel, and register it to the database and scheduler.
:param _id: ID of the unikernel
:return: None
"""
pass
@abstractmethod
def configure(self, _id):
"""
Configure the unikernel to be built for the specific backend
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def compile(self, _id):
"""
Build the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def optimize(self, _id):
"""
Optimize the unikernel binary/VM by stripping off debug symbols / applying data compression, etc.
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def start(self, _id):
"""
Launch/boot the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_status(self, _id):
"""
Get status of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def get_log(self, _id):
"""
Get runtime log of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def stop(self, _id):
"""
Kill execution of the unikernel
:param _id: ID of the unikernel
:return:
"""
pass
@abstractmethod
def destroy(self, _id):
"""
Destroy the unikernel, remove all assets, and unregister from database and scheduler.
:param _id: ID of the unikernel
:return:
"""
pass
|
|
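For context on how an abstract backend interface like the one in this record is typically consumed, here is a minimal illustrative sketch of a concrete subclass. The class name, the event-tracking dict, and the import path (taken from the record's file path) are assumptions for illustration, not part of the dune project.
# Hypothetical no-op backend; import path mirrors the record's file path (assumption).
from core.backends.IUnikernelBackend import IUnikernelBackend
class DummyBackend(IUnikernelBackend):
    """Illustrative backend that only records which lifecycle step ran per unikernel ID."""
    def __init__(self):
        self.events = {}  # maps _id -> ordered list of lifecycle steps
    def _mark(self, _id, step):
        self.events.setdefault(_id, []).append(step)
    def register(self, _id):
        self._mark(_id, 'register')
    def configure(self, _id):
        self._mark(_id, 'configure')
    def compile(self, _id):
        self._mark(_id, 'compile')
    def optimize(self, _id):
        self._mark(_id, 'optimize')
    def start(self, _id):
        self._mark(_id, 'start')
    def get_status(self, _id):
        steps = self.events.get(_id, [])
        return steps[-1] if steps else 'unregistered'
    def get_log(self, _id):
        return list(self.events.get(_id, []))
    def stop(self, _id):
        self._mark(_id, 'stop')
    def destroy(self, _id):
        self.events.pop(_id, None)
backend = DummyBackend()
backend.register('uk-001')
backend.start('uk-001')
print(backend.get_status('uk-001'))  # prints: start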
2f644f8439e63f7e621825e2036701c3bbcf311a
|
zephyr/management/commands/delete_tutorial_streams.py
|
zephyr/management/commands/delete_tutorial_streams.py
|
from django.core.management.base import BaseCommand
from zephyr.models import UserProfile, Subscription, Recipient, Message, Stream
from django.db.models import Q
import datetime
import pytz
from optparse import make_option
class Command(BaseCommand):
help = """Delete all inactive tutorial stream subscriptions."""
option_list = BaseCommand.option_list + (
make_option('-f', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually deactive subscriptions. Default is a dry run."),
)
def has_sent_to(self, user_profile, recipient):
return Message.objects.filter(sender=user_profile, recipient=recipient).count() != 0
def handle(self, **options):
possible_tutorial_streams = Stream.objects.filter(Q(name__startswith='tutorial-'))
tutorial_bot = UserProfile.objects.get(user__email="humbug+tutorial@humbughq.com")
for stream in possible_tutorial_streams:
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
subscribers = Subscription.objects.filter(recipient=recipient, active=True)
if ((subscribers.count() == 1) and self.has_sent_to(tutorial_bot, recipient)):
# This is a tutorial stream.
most_recent_message = Message.objects.filter(
recipient=recipient).latest("pub_date")
# This cutoff must be more generous than the tutorial bot cutoff
# in the client code.
cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(hours=2)
if most_recent_message.pub_date < cutoff:
# The tutorial has expired, so delete the stream.
print stream.name, most_recent_message.pub_date
if options["for_real"]:
tutorial_user = subscribers[0]
tutorial_user.active = False
tutorial_user.save()
if options["for_real"]:
print "Subscriptions deactivated."
else:
print "This was a dry run. Pass -f to actually deactivate."
|
Add a management command to clean up lingering tutorial streams.
|
Add a management command to clean up lingering tutorial streams.
We now clean up the stream subscription in more places, but some
historical tutorial streams are still around and if an error or page
reload happens during the tutorial it'll stick around.
(imported from commit 8cf0ebda26bf752c1a23296a4ba85d194bbb3004)
|
Python
|
apache-2.0
|
bowlofstew/zulip,JPJPJPOPOP/zulip,isht3/zulip,qq1012803704/zulip,hj3938/zulip,Frouk/zulip,Gabriel0402/zulip,karamcnair/zulip,he15his/zulip,hengqujushi/zulip,schatt/zulip,paxapy/zulip,mdavid/zulip,bluesea/zulip,jackrzhang/zulip,littledogboy/zulip,niftynei/zulip,isht3/zulip,LAndreas/zulip,amallia/zulip,mahim97/zulip,JPJPJPOPOP/zulip,shaunstanislaus/zulip,dxq-git/zulip,dawran6/zulip,vakila/zulip,developerfm/zulip,Vallher/zulip,jphilipsen05/zulip,verma-varsha/zulip,glovebx/zulip,hafeez3000/zulip,hackerkid/zulip,MariaFaBella85/zulip,samatdav/zulip,synicalsyntax/zulip,Suninus/zulip,atomic-labs/zulip,hustlzp/zulip,alliejones/zulip,jackrzhang/zulip,mahim97/zulip,j831/zulip,shrikrishnaholla/zulip,wangdeshui/zulip,willingc/zulip,DazWorrall/zulip,zwily/zulip,souravbadami/zulip,jonesgithub/zulip,luyifan/zulip,sonali0901/zulip,Frouk/zulip,Diptanshu8/zulip,natanovia/zulip,vabs22/zulip,Frouk/zulip,tdr130/zulip,technicalpickles/zulip,vabs22/zulip,lfranchi/zulip,amanharitsh123/zulip,stamhe/zulip,noroot/zulip,fw1121/zulip,tiansiyuan/zulip,seapasulli/zulip,bastianh/zulip,swinghu/zulip,atomic-labs/zulip,MayB/zulip,dxq-git/zulip,armooo/zulip,bitemyapp/zulip,Jianchun1/zulip,samatdav/zulip,jackrzhang/zulip,atomic-labs/zulip,ipernet/zulip,nicholasbs/zulip,cosmicAsymmetry/zulip,brainwane/zulip,EasonYi/zulip,yocome/zulip,esander91/zulip,voidException/zulip,saitodisse/zulip,vaidap/zulip,karamcnair/zulip,KingxBanana/zulip,amanharitsh123/zulip,dawran6/zulip,jainayush975/zulip,SmartPeople/zulip,stamhe/zulip,Diptanshu8/zulip,bowlofstew/zulip,wweiradio/zulip,arpitpanwar/zulip,samatdav/zulip,brainwane/zulip,noroot/zulip,mdavid/zulip,kou/zulip,hayderimran7/zulip,JanzTam/zulip,themass/zulip,ApsOps/zulip,PhilSk/zulip,levixie/zulip,Jianchun1/zulip,ericzhou2008/zulip,levixie/zulip,DazWorrall/zulip,j831/zulip,susansls/zulip,wavelets/zulip,tommyip/zulip,mansilladev/zulip,blaze225/zulip,wweiradio/zulip,kou/zulip,codeKonami/zulip,sharmaeklavya2/zulip,joyhchen/zulip,mohsenSy/zulip,guiquanz/zulip,tdr130/zulip,jackrzhang/zulip,ApsOps/zulip,sharmaeklavya2/zulip,deer-hope/zulip,thomasboyt/zulip,natanovia/zulip,synicalsyntax/zulip,JanzTam/zulip,saitodisse/zulip,paxapy/zulip,ipernet/zulip,moria/zulip,gkotian/zulip,suxinde2009/zulip,tbutter/zulip,vikas-parashar/zulip,schatt/zulip,voidException/zulip,rht/zulip,dnmfarrell/zulip,Gabriel0402/zulip,ApsOps/zulip,mohsenSy/zulip,zorojean/zulip,MayB/zulip,umkay/zulip,swinghu/zulip,deer-hope/zulip,Suninus/zulip,blaze225/zulip,krtkmj/zulip,adnanh/zulip,deer-hope/zulip,christi3k/zulip,ikasumiwt/zulip,SmartPeople/zulip,ashwinirudrappa/zulip,eeshangarg/zulip,zwily/zulip,qq1012803704/zulip,JanzTam/zulip,LAndreas/zulip,joshisa/zulip,dwrpayne/zulip,yuvipanda/zulip,LeeRisk/zulip,avastu/zulip,johnnygaddarr/zulip,yocome/zulip,rht/zulip,kokoar/zulip,LAndreas/zulip,praveenaki/zulip,showell/zulip,levixie/zulip,jessedhillon/zulip,samatdav/zulip,ahmadassaf/zulip,schatt/zulip,xuxiao/zulip,themass/zulip,bluesea/zulip,levixie/zulip,susansls/zulip,eeshangarg/zulip,christi3k/zulip,shrikrishnaholla/zulip,jonesgithub/zulip,thomasboyt/zulip,Batterfii/zulip,zulip/zulip,akuseru/zulip,yuvipanda/zulip,codeKonami/zulip,aliceriot/zulip,Jianchun1/zulip,karamcnair/zulip,fw1121/zulip,shaunstanislaus/zulip,PaulPetring/zulip,JanzTam/zulip,wweiradio/zulip,shrikrishnaholla/zulip,shubhamdhama/zulip,udxxabp/zulip,KingxBanana/zulip,Vallher/zulip,hafeez3000/zulip,jonesgithub/zulip,developerfm/zulip,KJin99/zulip,lfranchi/zulip,AZtheAsian/zulip,MayB/zulip,suxinde2009/zulip,codeKonami/zulip,aliceriot/zulip,grave-w-grave/zulip,niftynei/zulip,
arpitpanwar/zulip,m1ssou/zulip,voidException/zulip,mohsenSy/zulip,Cheppers/zulip,luyifan/zulip,xuanhan863/zulip,themass/zulip,sup95/zulip,Galexrt/zulip,avastu/zulip,Galexrt/zulip,hj3938/zulip,ericzhou2008/zulip,peiwei/zulip,kaiyuanheshang/zulip,cosmicAsymmetry/zulip,so0k/zulip,praveenaki/zulip,peiwei/zulip,stamhe/zulip,adnanh/zulip,Vallher/zulip,zulip/zulip,paxapy/zulip,JanzTam/zulip,LAndreas/zulip,Suninus/zulip,suxinde2009/zulip,ryansnowboarder/zulip,dxq-git/zulip,nicholasbs/zulip,levixie/zulip,KingxBanana/zulip,rishig/zulip,wangdeshui/zulip,zhaoweigg/zulip,amallia/zulip,johnny9/zulip,ipernet/zulip,christi3k/zulip,jerryge/zulip,brockwhittaker/zulip,wdaher/zulip,littledogboy/zulip,zofuthan/zulip,cosmicAsymmetry/zulip,dotcool/zulip,stamhe/zulip,amyliu345/zulip,souravbadami/zulip,pradiptad/zulip,firstblade/zulip,Drooids/zulip,esander91/zulip,peguin40/zulip,tommyip/zulip,jainayush975/zulip,Qgap/zulip,dotcool/zulip,codeKonami/zulip,zachallaun/zulip,jonesgithub/zulip,itnihao/zulip,he15his/zulip,AZtheAsian/zulip,ApsOps/zulip,eastlhu/zulip,rishig/zulip,vakila/zulip,easyfmxu/zulip,adnanh/zulip,amallia/zulip,akuseru/zulip,joyhchen/zulip,jerryge/zulip,so0k/zulip,Vallher/zulip,amallia/zulip,Qgap/zulip,cosmicAsymmetry/zulip,kokoar/zulip,PhilSk/zulip,suxinde2009/zulip,brockwhittaker/zulip,dotcool/zulip,umkay/zulip,kou/zulip,DazWorrall/zulip,mansilladev/zulip,gigawhitlocks/zulip,glovebx/zulip,jimmy54/zulip,itnihao/zulip,cosmicAsymmetry/zulip,hengqujushi/zulip,developerfm/zulip,akuseru/zulip,PhilSk/zulip,proliming/zulip,Jianchun1/zulip,kaiyuanheshang/zulip,willingc/zulip,amanharitsh123/zulip,KingxBanana/zulip,seapasulli/zulip,bowlofstew/zulip,arpitpanwar/zulip,joyhchen/zulip,so0k/zulip,jrowan/zulip,thomasboyt/zulip,eeshangarg/zulip,zhaoweigg/zulip,andersk/zulip,pradiptad/zulip,tbutter/zulip,saitodisse/zulip,JPJPJPOPOP/zulip,bssrdf/zulip,Gabriel0402/zulip,RobotCaleb/zulip,xuxiao/zulip,kaiyuanheshang/zulip,jphilipsen05/zulip,umkay/zulip,eastlhu/zulip,krtkmj/zulip,hackerkid/zulip,jessedhillon/zulip,ericzhou2008/zulip,Batterfii/zulip,christi3k/zulip,bitemyapp/zulip,hustlzp/zulip,tbutter/zulip,bitemyapp/zulip,armooo/zulip,showell/zulip,ufosky-server/zulip,dattatreya303/zulip,shubhamdhama/zulip,KJin99/zulip,calvinleenyc/zulip,bssrdf/zulip,jimmy54/zulip,bowlofstew/zulip,hafeez3000/zulip,technicalpickles/zulip,arpith/zulip,dnmfarrell/zulip,swinghu/zulip,AZtheAsian/zulip,udxxabp/zulip,Cheppers/zulip,huangkebo/zulip,timabbott/zulip,dawran6/zulip,hustlzp/zulip,ashwinirudrappa/zulip,Frouk/zulip,vikas-parashar/zulip,eastlhu/zulip,zwily/zulip,andersk/zulip,wangdeshui/zulip,gigawhitlocks/zulip,kokoar/zulip,babbage/zulip,alliejones/zulip,verma-varsha/zulip,PhilSk/zulip,MariaFaBella85/zulip,xuxiao/zulip,MayB/zulip,isht3/zulip,lfranchi/zulip,tiansiyuan/zulip,arpith/zulip,littledogboy/zulip,bssrdf/zulip,souravbadami/zulip,umkay/zulip,Drooids/zulip,hustlzp/zulip,saitodisse/zulip,babbage/zulip,vaidap/zulip,dwrpayne/zulip,reyha/zulip,joshisa/zulip,punchagan/zulip,easyfmxu/zulip,zofuthan/zulip,swinghu/zulip,zulip/zulip,zofuthan/zulip,peguin40/zulip,tdr130/zulip,verma-varsha/zulip,vakila/zulip,akuseru/zulip,technicalpickles/zulip,zofuthan/zulip,dattatreya303/zulip,ahmadassaf/zulip,Drooids/zulip,zwily/zulip,easyfmxu/zulip,so0k/zulip,hustlzp/zulip,KJin99/zulip,dotcool/zulip,adnanh/zulip,sonali0901/zulip,akuseru/zulip,rht/zulip,ericzhou2008/zulip,fw1121/zulip,hayderimran7/zulip,babbage/zulip,niftynei/zulip,ryansnowboarder/zulip,jeffcao/zulip,bastianh/zulip,Cheppers/zulip,zulip/zulip,calvinleenyc/zulip,souravbadami/zulip,RobotCaleb/
zulip,yocome/zulip,jessedhillon/zulip,hafeez3000/zulip,esander91/zulip,Vallher/zulip,wweiradio/zulip,isht3/zulip,jimmy54/zulip,zacps/zulip,alliejones/zulip,blaze225/zulip,praveenaki/zulip,Suninus/zulip,hj3938/zulip,hayderimran7/zulip,hj3938/zulip,zachallaun/zulip,MariaFaBella85/zulip,umkay/zulip,samatdav/zulip,wangdeshui/zulip,joyhchen/zulip,souravbadami/zulip,developerfm/zulip,zofuthan/zulip,zacps/zulip,arpitpanwar/zulip,gkotian/zulip,PaulPetring/zulip,aakash-cr7/zulip,zorojean/zulip,nicholasbs/zulip,LeeRisk/zulip,gkotian/zulip,nicholasbs/zulip,DazWorrall/zulip,jeffcao/zulip,vabs22/zulip,PaulPetring/zulip,mansilladev/zulip,alliejones/zulip,codeKonami/zulip,ufosky-server/zulip,aps-sids/zulip,mansilladev/zulip,littledogboy/zulip,zofuthan/zulip,calvinleenyc/zulip,verma-varsha/zulip,gkotian/zulip,SmartPeople/zulip,ryansnowboarder/zulip,schatt/zulip,kaiyuanheshang/zulip,hengqujushi/zulip,luyifan/zulip,moria/zulip,joshisa/zulip,ipernet/zulip,cosmicAsymmetry/zulip,dattatreya303/zulip,amyliu345/zulip,bastianh/zulip,KingxBanana/zulip,LAndreas/zulip,proliming/zulip,SmartPeople/zulip,jeffcao/zulip,jrowan/zulip,arpitpanwar/zulip,dhcrzf/zulip,ashwinirudrappa/zulip,Drooids/zulip,andersk/zulip,avastu/zulip,yuvipanda/zulip,zachallaun/zulip,levixie/zulip,fw1121/zulip,bluesea/zulip,huangkebo/zulip,technicalpickles/zulip,zhaoweigg/zulip,ufosky-server/zulip,niftynei/zulip,codeKonami/zulip,xuanhan863/zulip,sonali0901/zulip,verma-varsha/zulip,bastianh/zulip,eastlhu/zulip,zorojean/zulip,vikas-parashar/zulip,developerfm/zulip,dattatreya303/zulip,hackerkid/zulip,ApsOps/zulip,shubhamdhama/zulip,bluesea/zulip,KJin99/zulip,noroot/zulip,wdaher/zulip,DazWorrall/zulip,j831/zulip,deer-hope/zulip,johnnygaddarr/zulip,shrikrishnaholla/zulip,KingxBanana/zulip,bastianh/zulip,dwrpayne/zulip,ashwinirudrappa/zulip,dhcrzf/zulip,qq1012803704/zulip,Gabriel0402/zulip,ApsOps/zulip,paxapy/zulip,he15his/zulip,LeeRisk/zulip,easyfmxu/zulip,esander91/zulip,voidException/zulip,tdr130/zulip,stamhe/zulip,amanharitsh123/zulip,technicalpickles/zulip,brockwhittaker/zulip,peiwei/zulip,brainwane/zulip,zwily/zulip,ahmadassaf/zulip,peiwei/zulip,babbage/zulip,noroot/zulip,johnny9/zulip,shaunstanislaus/zulip,gigawhitlocks/zulip,timabbott/zulip,Qgap/zulip,glovebx/zulip,so0k/zulip,peiwei/zulip,susansls/zulip,hafeez3000/zulip,tommyip/zulip,kaiyuanheshang/zulip,Diptanshu8/zulip,rishig/zulip,johnnygaddarr/zulip,ryanbackman/zulip,niftynei/zulip,kou/zulip,ryanbackman/zulip,mansilladev/zulip,deer-hope/zulip,ryansnowboarder/zulip,atomic-labs/zulip,jessedhillon/zulip,zachallaun/zulip,zwily/zulip,aliceriot/zulip,ryansnowboarder/zulip,firstblade/zulip,mahim97/zulip,jerryge/zulip,wdaher/zulip,andersk/zulip,natanovia/zulip,Drooids/zulip,guiquanz/zulip,shaunstanislaus/zulip,isht3/zulip,luyifan/zulip,suxinde2009/zulip,showell/zulip,luyifan/zulip,dnmfarrell/zulip,MayB/zulip,xuanhan863/zulip,ufosky-server/zulip,Juanvulcano/zulip,vikas-parashar/zulip,xuanhan863/zulip,sharmaeklavya2/zulip,alliejones/zulip,dnmfarrell/zulip,isht3/zulip,zofuthan/zulip,m1ssou/zulip,Gabriel0402/zulip,calvinleenyc/zulip,ikasumiwt/zulip,hackerkid/zulip,zachallaun/zulip,KJin99/zulip,armooo/zulip,mansilladev/zulip,kou/zulip,pradiptad/zulip,grave-w-grave/zulip,arpith/zulip,themass/zulip,mdavid/zulip,qq1012803704/zulip,johnny9/zulip,zorojean/zulip,glovebx/zulip,shrikrishnaholla/zulip,themass/zulip,qq1012803704/zulip,alliejones/zulip,shubhamdhama/zulip,jainayush975/zulip,shubhamdhama/zulip,dawran6/zulip,tommyip/zulip,ericzhou2008/zulip,paxapy/zulip,jonesgithub/zulip,mahim97/zulip,sup95/zulip,karamc
nair/zulip,dotcool/zulip,Galexrt/zulip,wdaher/zulip,mdavid/zulip,mohsenSy/zulip,krtkmj/zulip,tiansiyuan/zulip,xuanhan863/zulip,adnanh/zulip,susansls/zulip,proliming/zulip,m1ssou/zulip,peguin40/zulip,punchagan/zulip,krtkmj/zulip,nicholasbs/zulip,eastlhu/zulip,lfranchi/zulip,rht/zulip,Juanvulcano/zulip,jainayush975/zulip,Jianchun1/zulip,rht/zulip,aliceriot/zulip,PaulPetring/zulip,rishig/zulip,aps-sids/zulip,suxinde2009/zulip,jackrzhang/zulip,kokoar/zulip,j831/zulip,calvinleenyc/zulip,brockwhittaker/zulip,wangdeshui/zulip,zacps/zulip,xuxiao/zulip,esander91/zulip,JanzTam/zulip,shrikrishnaholla/zulip,joshisa/zulip,he15his/zulip,armooo/zulip,zacps/zulip,jimmy54/zulip,jrowan/zulip,Qgap/zulip,bowlofstew/zulip,vaidap/zulip,TigorC/zulip,jonesgithub/zulip,bitemyapp/zulip,grave-w-grave/zulip,firstblade/zulip,Cheppers/zulip,aliceriot/zulip,kaiyuanheshang/zulip,praveenaki/zulip,amallia/zulip,hayderimran7/zulip,ufosky-server/zulip,joyhchen/zulip,dnmfarrell/zulip,eeshangarg/zulip,jainayush975/zulip,ryanbackman/zulip,calvinleenyc/zulip,ikasumiwt/zulip,peguin40/zulip,reyha/zulip,joshisa/zulip,seapasulli/zulip,Jianchun1/zulip,littledogboy/zulip,shubhamdhama/zulip,yuvipanda/zulip,lfranchi/zulip,deer-hope/zulip,hayderimran7/zulip,zorojean/zulip,wavelets/zulip,nicholasbs/zulip,synicalsyntax/zulip,sup95/zulip,bluesea/zulip,souravbadami/zulip,seapasulli/zulip,krtkmj/zulip,tiansiyuan/zulip,ikasumiwt/zulip,MariaFaBella85/zulip,johnny9/zulip,praveenaki/zulip,dwrpayne/zulip,krtkmj/zulip,avastu/zulip,LeeRisk/zulip,saitodisse/zulip,ryanbackman/zulip,joshisa/zulip,adnanh/zulip,zachallaun/zulip,joyhchen/zulip,jerryge/zulip,proliming/zulip,itnihao/zulip,gkotian/zulip,umkay/zulip,tdr130/zulip,voidException/zulip,dhcrzf/zulip,mdavid/zulip,ryanbackman/zulip,jrowan/zulip,seapasulli/zulip,shaunstanislaus/zulip,amyliu345/zulip,paxapy/zulip,AZtheAsian/zulip,bitemyapp/zulip,hj3938/zulip,he15his/zulip,xuxiao/zulip,dattatreya303/zulip,kokoar/zulip,ikasumiwt/zulip,natanovia/zulip,sharmaeklavya2/zulip,jessedhillon/zulip,aakash-cr7/zulip,wavelets/zulip,hustlzp/zulip,Vallher/zulip,wavelets/zulip,gigawhitlocks/zulip,wavelets/zulip,developerfm/zulip,guiquanz/zulip,willingc/zulip,rishig/zulip,ashwinirudrappa/zulip,zhaoweigg/zulip,punchagan/zulip,bssrdf/zulip,EasonYi/zulip,schatt/zulip,KJin99/zulip,vikas-parashar/zulip,Juanvulcano/zulip,kou/zulip,karamcnair/zulip,natanovia/zulip,gigawhitlocks/zulip,johnny9/zulip,moria/zulip,dnmfarrell/zulip,niftynei/zulip,esander91/zulip,dwrpayne/zulip,TigorC/zulip,reyha/zulip,jimmy54/zulip,hackerkid/zulip,Juanvulcano/zulip,dnmfarrell/zulip,RobotCaleb/zulip,akuseru/zulip,yocome/zulip,codeKonami/zulip,willingc/zulip,themass/zulip,timabbott/zulip,vakila/zulip,kaiyuanheshang/zulip,samatdav/zulip,hengqujushi/zulip,punchagan/zulip,mohsenSy/zulip,levixie/zulip,natanovia/zulip,RobotCaleb/zulip,littledogboy/zulip,synicalsyntax/zulip,zulip/zulip,glovebx/zulip,amallia/zulip,wangdeshui/zulip,jerryge/zulip,KJin99/zulip,qq1012803704/zulip,jerryge/zulip,rht/zulip,mohsenSy/zulip,udxxabp/zulip,dawran6/zulip,shaunstanislaus/zulip,johnnygaddarr/zulip,sharmaeklavya2/zulip,ahmadassaf/zulip,Qgap/zulip,susansls/zulip,arpitpanwar/zulip,ufosky-server/zulip,firstblade/zulip,luyifan/zulip,mahim97/zulip,willingc/zulip,vakila/zulip,showell/zulip,andersk/zulip,hengqujushi/zulip,Diptanshu8/zulip,babbage/zulip,proliming/zulip,jessedhillon/zulip,wdaher/zulip,hackerkid/zulip,bowlofstew/zulip,punchagan/zulip,zulip/zulip,willingc/zulip,dhcrzf/zulip,johnnygaddarr/zulip,guiquanz/zulip,aakash-cr7/zulip,TigorC/zulip,Vallher/zulip,RobotCaleb/zuli
p,EasonYi/zulip,rht/zulip,bssrdf/zulip,dotcool/zulip,tommyip/zulip,peiwei/zulip,hackerkid/zulip,itnihao/zulip,LAndreas/zulip,brainwane/zulip,bitemyapp/zulip,zorojean/zulip,zwily/zulip,Cheppers/zulip,ryansnowboarder/zulip,PaulPetring/zulip,gigawhitlocks/zulip,mansilladev/zulip,firstblade/zulip,showell/zulip,wweiradio/zulip,willingc/zulip,peiwei/zulip,tdr130/zulip,suxinde2009/zulip,Batterfii/zulip,timabbott/zulip,schatt/zulip,Drooids/zulip,luyifan/zulip,johnnygaddarr/zulip,m1ssou/zulip,Galexrt/zulip,themass/zulip,blaze225/zulip,amanharitsh123/zulip,aliceriot/zulip,thomasboyt/zulip,Diptanshu8/zulip,tiansiyuan/zulip,voidException/zulip,Batterfii/zulip,hj3938/zulip,EasonYi/zulip,christi3k/zulip,shrikrishnaholla/zulip,thomasboyt/zulip,aps-sids/zulip,eeshangarg/zulip,Cheppers/zulip,moria/zulip,udxxabp/zulip,dattatreya303/zulip,dhcrzf/zulip,sup95/zulip,tbutter/zulip,TigorC/zulip,jainayush975/zulip,JPJPJPOPOP/zulip,proliming/zulip,vakila/zulip,guiquanz/zulip,jackrzhang/zulip,reyha/zulip,jessedhillon/zulip,JPJPJPOPOP/zulip,hustlzp/zulip,timabbott/zulip,sup95/zulip,eeshangarg/zulip,dawran6/zulip,aakash-cr7/zulip,moria/zulip,schatt/zulip,zulip/zulip,easyfmxu/zulip,ikasumiwt/zulip,mdavid/zulip,wweiradio/zulip,bastianh/zulip,babbage/zulip,Batterfii/zulip,fw1121/zulip,PhilSk/zulip,aakash-cr7/zulip,aliceriot/zulip,sonali0901/zulip,showell/zulip,atomic-labs/zulip,dhcrzf/zulip,natanovia/zulip,jrowan/zulip,he15his/zulip,arpitpanwar/zulip,pradiptad/zulip,Gabriel0402/zulip,tiansiyuan/zulip,sonali0901/zulip,huangkebo/zulip,zhaoweigg/zulip,kou/zulip,vaidap/zulip,PhilSk/zulip,DazWorrall/zulip,umkay/zulip,Cheppers/zulip,Frouk/zulip,saitodisse/zulip,Galexrt/zulip,praveenaki/zulip,pradiptad/zulip,tbutter/zulip,dxq-git/zulip,ahmadassaf/zulip,technicalpickles/zulip,verma-varsha/zulip,firstblade/zulip,PaulPetring/zulip,huangkebo/zulip,hayderimran7/zulip,tiansiyuan/zulip,AZtheAsian/zulip,aps-sids/zulip,joshisa/zulip,LeeRisk/zulip,Batterfii/zulip,praveenaki/zulip,brockwhittaker/zulip,amanharitsh123/zulip,synicalsyntax/zulip,xuanhan863/zulip,synicalsyntax/zulip,babbage/zulip,ryansnowboarder/zulip,punchagan/zulip,DazWorrall/zulip,m1ssou/zulip,avastu/zulip,Batterfii/zulip,eastlhu/zulip,noroot/zulip,johnny9/zulip,TigorC/zulip,RobotCaleb/zulip,qq1012803704/zulip,EasonYi/zulip,arpith/zulip,bluesea/zulip,ufosky-server/zulip,easyfmxu/zulip,SmartPeople/zulip,lfranchi/zulip,so0k/zulip,moria/zulip,dxq-git/zulip,jphilipsen05/zulip,guiquanz/zulip,jeffcao/zulip,timabbott/zulip,tommyip/zulip,jeffcao/zulip,dxq-git/zulip,he15his/zulip,xuxiao/zulip,amyliu345/zulip,MayB/zulip,showell/zulip,fw1121/zulip,jimmy54/zulip,Frouk/zulip,wdaher/zulip,gigawhitlocks/zulip,zachallaun/zulip,Diptanshu8/zulip,LeeRisk/zulip,SmartPeople/zulip,susansls/zulip,ericzhou2008/zulip,vabs22/zulip,reyha/zulip,LAndreas/zulip,shaunstanislaus/zulip,Suninus/zulip,atomic-labs/zulip,dotcool/zulip,esander91/zulip,gkotian/zulip,avastu/zulip,hayderimran7/zulip,vabs22/zulip,swinghu/zulip,huangkebo/zulip,ikasumiwt/zulip,nicholasbs/zulip,dhcrzf/zulip,johnny9/zulip,m1ssou/zulip,sup95/zulip,brainwane/zulip,peguin40/zulip,vabs22/zulip,johnnygaddarr/zulip,kokoar/zulip,brainwane/zulip,itnihao/zulip,vaidap/zulip,hengqujushi/zulip,blaze225/zulip,littledogboy/zulip,tbutter/zulip,seapasulli/zulip,rishig/zulip,Qgap/zulip,saitodisse/zulip,firstblade/zulip,j831/zulip,Juanvulcano/zulip,dxq-git/zulip,dwrpayne/zulip,punchagan/zulip,sharmaeklavya2/zulip,vaidap/zulip,thomasboyt/zulip,ipernet/zulip,jrowan/zulip,udxxabp/zulip,timabbott/zulip,swinghu/zulip,rishig/zulip,arpith/zulip,noroot/zulip,xua
nhan863/zulip,voidException/zulip,yuvipanda/zulip,lfranchi/zulip,developerfm/zulip,reyha/zulip,ashwinirudrappa/zulip,yuvipanda/zulip,yocome/zulip,hafeez3000/zulip,andersk/zulip,bowlofstew/zulip,easyfmxu/zulip,aps-sids/zulip,JPJPJPOPOP/zulip,ApsOps/zulip,zhaoweigg/zulip,shubhamdhama/zulip,hj3938/zulip,Drooids/zulip,jonesgithub/zulip,RobotCaleb/zulip,itnihao/zulip,ahmadassaf/zulip,tdr130/zulip,jeffcao/zulip,bitemyapp/zulip,jackrzhang/zulip,Gabriel0402/zulip,zorojean/zulip,ericzhou2008/zulip,dwrpayne/zulip,andersk/zulip,wweiradio/zulip,akuseru/zulip,jphilipsen05/zulip,wavelets/zulip,christi3k/zulip,zacps/zulip,TigorC/zulip,aps-sids/zulip,mahim97/zulip,jphilipsen05/zulip,ashwinirudrappa/zulip,synicalsyntax/zulip,pradiptad/zulip,so0k/zulip,guiquanz/zulip,Suninus/zulip,MariaFaBella85/zulip,pradiptad/zulip,eeshangarg/zulip,huangkebo/zulip,mdavid/zulip,swinghu/zulip,jphilipsen05/zulip,LeeRisk/zulip,deer-hope/zulip,stamhe/zulip,brainwane/zulip,karamcnair/zulip,moria/zulip,Frouk/zulip,Galexrt/zulip,itnihao/zulip,ipernet/zulip,blaze225/zulip,ahmadassaf/zulip,krtkmj/zulip,hafeez3000/zulip,Juanvulcano/zulip,proliming/zulip,xuxiao/zulip,aakash-cr7/zulip,Galexrt/zulip,grave-w-grave/zulip,PaulPetring/zulip,yocome/zulip,sonali0901/zulip,ipernet/zulip,technicalpickles/zulip,EasonYi/zulip,amyliu345/zulip,udxxabp/zulip,m1ssou/zulip,atomic-labs/zulip,wavelets/zulip,kokoar/zulip,thomasboyt/zulip,tbutter/zulip,j831/zulip,yocome/zulip,jerryge/zulip,amyliu345/zulip,udxxabp/zulip,EasonYi/zulip,tommyip/zulip,aps-sids/zulip,grave-w-grave/zulip,zacps/zulip,Qgap/zulip,armooo/zulip,wangdeshui/zulip,avastu/zulip,armooo/zulip,zhaoweigg/zulip,JanzTam/zulip,arpith/zulip,Suninus/zulip,glovebx/zulip,alliejones/zulip,MariaFaBella85/zulip,amallia/zulip,vikas-parashar/zulip,jeffcao/zulip,bssrdf/zulip,karamcnair/zulip,stamhe/zulip,bastianh/zulip,MariaFaBella85/zulip,grave-w-grave/zulip,yuvipanda/zulip,bluesea/zulip,ryanbackman/zulip,bssrdf/zulip,hengqujushi/zulip,MayB/zulip,adnanh/zulip,huangkebo/zulip,eastlhu/zulip,jimmy54/zulip,vakila/zulip,fw1121/zulip,wdaher/zulip,peguin40/zulip,seapasulli/zulip,AZtheAsian/zulip,brockwhittaker/zulip,gkotian/zulip,glovebx/zulip,noroot/zulip,armooo/zulip
|
Add a management command to clean up lingering tutorial streams.
We now clean up the stream subscription in more places, but some
historical tutorial streams are still around and if an error or page
reload happens during the tutorial it'll stick around.
(imported from commit 8cf0ebda26bf752c1a23296a4ba85d194bbb3004)
|
from django.core.management.base import BaseCommand
from zephyr.models import UserProfile, Subscription, Recipient, Message, Stream
from django.db.models import Q
import datetime
import pytz
from optparse import make_option
class Command(BaseCommand):
help = """Delete all inactive tutorial stream subscriptions."""
option_list = BaseCommand.option_list + (
make_option('-f', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually deactive subscriptions. Default is a dry run."),
)
def has_sent_to(self, user_profile, recipient):
return Message.objects.filter(sender=user_profile, recipient=recipient).count() != 0
def handle(self, **options):
possible_tutorial_streams = Stream.objects.filter(Q(name__startswith='tutorial-'))
tutorial_bot = UserProfile.objects.get(user__email="humbug+tutorial@humbughq.com")
for stream in possible_tutorial_streams:
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
subscribers = Subscription.objects.filter(recipient=recipient, active=True)
if ((subscribers.count() == 1) and self.has_sent_to(tutorial_bot, recipient)):
# This is a tutorial stream.
most_recent_message = Message.objects.filter(
recipient=recipient).latest("pub_date")
# This cutoff must be more generous than the tutorial bot cutoff
# in the client code.
cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(hours=2)
if most_recent_message.pub_date < cutoff:
# The tutorial has expired, so delete the stream.
print stream.name, most_recent_message.pub_date
if options["for_real"]:
tutorial_user = subscribers[0]
tutorial_user.active = False
tutorial_user.save()
if options["for_real"]:
print "Subscriptions deactivated."
else:
print "This was a dry run. Pass -f to actually deactivate."
|
<commit_before><commit_msg>Add a management command to clean up lingering tutorial streams.
We now clean up the stream subscription in more places, but some
historical tutorial streams are still around and if an error or page
reload happens during the tutorial it'll stick around.
(imported from commit 8cf0ebda26bf752c1a23296a4ba85d194bbb3004)<commit_after>
|
from django.core.management.base import BaseCommand
from zephyr.models import UserProfile, Subscription, Recipient, Message, Stream
from django.db.models import Q
import datetime
import pytz
from optparse import make_option
class Command(BaseCommand):
help = """Delete all inactive tutorial stream subscriptions."""
option_list = BaseCommand.option_list + (
make_option('-f', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually deactive subscriptions. Default is a dry run."),
)
def has_sent_to(self, user_profile, recipient):
return Message.objects.filter(sender=user_profile, recipient=recipient).count() != 0
def handle(self, **options):
possible_tutorial_streams = Stream.objects.filter(Q(name__startswith='tutorial-'))
tutorial_bot = UserProfile.objects.get(user__email="humbug+tutorial@humbughq.com")
for stream in possible_tutorial_streams:
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
subscribers = Subscription.objects.filter(recipient=recipient, active=True)
if ((subscribers.count() == 1) and self.has_sent_to(tutorial_bot, recipient)):
# This is a tutorial stream.
most_recent_message = Message.objects.filter(
recipient=recipient).latest("pub_date")
# This cutoff must be more generous than the tutorial bot cutoff
# in the client code.
cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(hours=2)
if most_recent_message.pub_date < cutoff:
# The tutorial has expired, so delete the stream.
print stream.name, most_recent_message.pub_date
if options["for_real"]:
tutorial_user = subscribers[0]
tutorial_user.active = False
tutorial_user.save()
if options["for_real"]:
print "Subscriptions deactivated."
else:
print "This was a dry run. Pass -f to actually deactivate."
|
Add a management command to clean up lingering tutorial streams.
We now clean up the stream subscription in more places, but some
historical tutorial streams are still around and if an error or page
reload happens during the tutorial it'll stick around.
(imported from commit 8cf0ebda26bf752c1a23296a4ba85d194bbb3004)from django.core.management.base import BaseCommand
from zephyr.models import UserProfile, Subscription, Recipient, Message, Stream
from django.db.models import Q
import datetime
import pytz
from optparse import make_option
class Command(BaseCommand):
help = """Delete all inactive tutorial stream subscriptions."""
option_list = BaseCommand.option_list + (
make_option('-f', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually deactive subscriptions. Default is a dry run."),
)
def has_sent_to(self, user_profile, recipient):
return Message.objects.filter(sender=user_profile, recipient=recipient).count() != 0
def handle(self, **options):
possible_tutorial_streams = Stream.objects.filter(Q(name__startswith='tutorial-'))
tutorial_bot = UserProfile.objects.get(user__email="humbug+tutorial@humbughq.com")
for stream in possible_tutorial_streams:
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
subscribers = Subscription.objects.filter(recipient=recipient, active=True)
if ((subscribers.count() == 1) and self.has_sent_to(tutorial_bot, recipient)):
# This is a tutorial stream.
most_recent_message = Message.objects.filter(
recipient=recipient).latest("pub_date")
# This cutoff must be more generous than the tutorial bot cutoff
# in the client code.
cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(hours=2)
if most_recent_message.pub_date < cutoff:
# The tutorial has expired, so delete the stream.
print stream.name, most_recent_message.pub_date
if options["for_real"]:
tutorial_user = subscribers[0]
tutorial_user.active = False
tutorial_user.save()
if options["for_real"]:
print "Subscriptions deactivated."
else:
print "This was a dry run. Pass -f to actually deactivate."
|
<commit_before><commit_msg>Add a management command to clean up lingering tutorial streams.
We now clean up the stream subscription in more places, but some
historical tutorial streams are still around and if an error or page
reload happens during the tutorial it'll stick around.
(imported from commit 8cf0ebda26bf752c1a23296a4ba85d194bbb3004)<commit_after>from django.core.management.base import BaseCommand
from zephyr.models import UserProfile, Subscription, Recipient, Message, Stream
from django.db.models import Q
import datetime
import pytz
from optparse import make_option
class Command(BaseCommand):
help = """Delete all inactive tutorial stream subscriptions."""
option_list = BaseCommand.option_list + (
make_option('-f', '--for-real',
dest='for_real',
action='store_true',
default=False,
help="Actually deactive subscriptions. Default is a dry run."),
)
def has_sent_to(self, user_profile, recipient):
return Message.objects.filter(sender=user_profile, recipient=recipient).count() != 0
def handle(self, **options):
possible_tutorial_streams = Stream.objects.filter(Q(name__startswith='tutorial-'))
tutorial_bot = UserProfile.objects.get(user__email="humbug+tutorial@humbughq.com")
for stream in possible_tutorial_streams:
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
subscribers = Subscription.objects.filter(recipient=recipient, active=True)
if ((subscribers.count() == 1) and self.has_sent_to(tutorial_bot, recipient)):
# This is a tutorial stream.
most_recent_message = Message.objects.filter(
recipient=recipient).latest("pub_date")
# This cutoff must be more generous than the tutorial bot cutoff
# in the client code.
cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(hours=2)
if most_recent_message.pub_date < cutoff:
# The tutorial has expired, so delete the stream.
print stream.name, most_recent_message.pub_date
if options["for_real"]:
tutorial_user = subscribers[0]
tutorial_user.active = False
tutorial_user.save()
if options["for_real"]:
print "Subscriptions deactivated."
else:
print "This was a dry run. Pass -f to actually deactivate."
|
|
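As a side note, the expiry test in this record hinges on comparing timezone-aware datetimes. A minimal standalone sketch of that check, with made-up values, is:
# Illustrative only: the 2-hour cutoff comparison used by the command above.
import datetime
import pytz
cutoff = datetime.datetime.now(tz=pytz.utc) - datetime.timedelta(hours=2)
pub_date = datetime.datetime(2013, 1, 15, 9, 30, tzinfo=pytz.utc)  # hypothetical last message time
if pub_date < cutoff:
    print("tutorial stream has expired")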
d668f4eb7c8dd033eca66cca18bf01ab5fb52233
|
blimp/utils/shortcuts.py
|
blimp/utils/shortcuts.py
|
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import resolve_url
def redirect_with_params(request, to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate
URL for the arguments passed. By default issues a temporary
redirect; pass permanent=True to issue a permanent redirect.
    Copies current request's query parameters and appends them
to the resolved URL with any additional params.
"""
params = kwargs.pop('params', None)
query_dict = request.GET.copy()
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
if params and isinstance(params, dict):
query_dict.update(params)
resolved_url = resolve_url(to, *args, **kwargs)
if query_dict:
resolved_url = '{}?{}'.format(
resolved_url, query_dict.urlencode())
return redirect_class(resolved_url)
|
Implement shortcut method to redirect with params
|
Implement shortcut method to redirect with params
|
Python
|
agpl-3.0
|
jessamynsmith/boards-backend,jessamynsmith/boards-backend,GetBlimp/boards-backend
|
Implement shortcut method to redirect with params
|
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import resolve_url
def redirect_with_params(request, to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate
URL for the arguments passed. By default issues a temporary
redirect; pass permanent=True to issue a permanent redirect.
    Copies current request's query parameters and appends them
to the resolved URL with any additional params.
"""
params = kwargs.pop('params', None)
query_dict = request.GET.copy()
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
if params and isinstance(params, dict):
query_dict.update(params)
resolved_url = resolve_url(to, *args, **kwargs)
if query_dict:
resolved_url = '{}?{}'.format(
resolved_url, query_dict.urlencode())
return redirect_class(resolved_url)
|
<commit_before><commit_msg>Implement shortcut method to redirect with params<commit_after>
|
from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import resolve_url
def redirect_with_params(request, to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate
URL for the arguments passed. By default issues a temporary
redirect; pass permanent=True to issue a permanent redirect.
    Copies current request's query parameters and appends them
to the resolved URL with any additional params.
"""
params = kwargs.pop('params', None)
query_dict = request.GET.copy()
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
if params and isinstance(params, dict):
query_dict.update(params)
resolved_url = resolve_url(to, *args, **kwargs)
if query_dict:
resolved_url = '{}?{}'.format(
resolved_url, query_dict.urlencode())
return redirect_class(resolved_url)
|
Implement shortcut method to redirect with paramsfrom django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import resolve_url
def redirect_with_params(request, to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate
URL for the arguments passed. By default issues a temporary
redirect; pass permanent=True to issue a permanent redirect.
    Copies current request's query parameters and appends them
to the resolved URL with any additional params.
"""
params = kwargs.pop('params', None)
query_dict = request.GET.copy()
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
if params and isinstance(params, dict):
query_dict.update(params)
resolved_url = resolve_url(to, *args, **kwargs)
if query_dict:
resolved_url = '{}?{}'.format(
resolved_url, query_dict.urlencode())
return redirect_class(resolved_url)
|
<commit_before><commit_msg>Implement shortcut method to redirect with params<commit_after>from django.http import HttpResponseRedirect, HttpResponsePermanentRedirect
from django.shortcuts import resolve_url
def redirect_with_params(request, to, *args, **kwargs):
"""
Returns an HttpResponseRedirect to the appropriate
URL for the arguments passed. By default issues a temporary
redirect; pass permanent=True to issue a permanent redirect.
    Copies current request's query parameters and appends them
to the resolved URL with any additional params.
"""
params = kwargs.pop('params', None)
query_dict = request.GET.copy()
if kwargs.pop('permanent', False):
redirect_class = HttpResponsePermanentRedirect
else:
redirect_class = HttpResponseRedirect
if params and isinstance(params, dict):
query_dict.update(params)
resolved_url = resolve_url(to, *args, **kwargs)
if query_dict:
resolved_url = '{}?{}'.format(
resolved_url, query_dict.urlencode())
return redirect_class(resolved_url)
|
|
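A hypothetical usage sketch for the helper in this record, inside a Django view: the view function, the 'welcome' URL name, and the extra params are invented for illustration and assume the import path implied by the record's file path.
# Illustrative only; assumes the helper is importable from blimp.utils.shortcuts
# and that a URL pattern named 'welcome' exists in the project.
from blimp.utils.shortcuts import redirect_with_params
def signup_done(request):
    # With request.GET = {'plan': 'team'} this redirects to /welcome/?plan=team&source=signup
    return redirect_with_params(request, 'welcome', params={'source': 'signup'})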
cb8c664f69dd6eede4d6bee48b6d84f3aeaf2218
|
json_parser/json_parser.py
|
json_parser/json_parser.py
|
import json
jsonFile = open('output.json', 'r')
lines = jsonFile.readlines()
# lines[1] since the first line is an empty result
# the 2 lines combined do not form a valid json structure, but each line itself is a json structure
values = json.loads(lines[1])
transcript = values['result'][0]['alternative'][0]['transcript']
# confidence = values['result'][0]['alternative'][0]['confidence']
jsonFile.close()
print transcript
# print confidence
|
Add json parsing for the curl response (already referenced in the main script)
|
Add json parsing for the curl response (already referenced in the main script)
|
Python
|
mit
|
Nespa32/sm_project,Nespa32/sm_project,Nespa32/sm_project,Nespa32/sm_project,Nespa32/sm_project
|
Add json parsing for the curl response (already referenced in the main script)
|
import json
jsonFile = open('output.json', 'r')
lines = jsonFile.readlines()
# lines[1] since the first line is an empty result
# the 2 lines combined do not form a valid json structure, but each line itself is a json structure
values = json.loads(lines[1])
transcript = values['result'][0]['alternative'][0]['transcript']
# confidence = values['result'][0]['alternative'][0]['confidence']
jsonFile.close()
print transcript
# print confidence
|
<commit_before><commit_msg>Add json parsing for the curl response (already referenced in the main script)<commit_after>
|
import json
jsonFile = open('output.json', 'r')
lines = jsonFile.readlines()
# lines[1] since the first line is an empty result
# the 2 lines combined do not form a valid json structure, but each line itself is a json structure
values = json.loads(lines[1])
transcript = values['result'][0]['alternative'][0]['transcript']
# confidence = values['result'][0]['alternative'][0]['confidence']
jsonFile.close()
print transcript
# print confidence
|
Add json parsing for the curl response (already referenced in the main script)import json
jsonFile = open('output.json', 'r')
lines = jsonFile.readlines()
# lines[1] since the first line is an empty result
# the 2 lines combined do not form a valid json structure, but each line itself is a json structure
values = json.loads(lines[1])
transcript = values['result'][0]['alternative'][0]['transcript']
# confidence = values['result'][0]['alternative'][0]['confidence']
jsonFile.close()
print transcript
# print confidence
|
<commit_before><commit_msg>Add json parsing for the curl response (already referenced in the main script)<commit_after>import json
jsonFile = open('output.json', 'r')
lines = jsonFile.readlines()
# lines[1] since the first line is an empty result
# the 2 lines combined do not form a valid json structure, but each line itself is a json structure
values = json.loads(lines[1])
transcript = values['result'][0]['alternative'][0]['transcript']
# confidence = values['result'][0]['alternative'][0]['confidence']
jsonFile.close()
print transcript
# print confidence
|
|
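A more defensive variant of the same parsing is sketched below, purely as an illustration: rather than assuming the transcript sits on the second line, it scans for the first line whose JSON actually carries a result.
# Sketch: tolerate empty or malformed result lines from the speech API response.
import json
def first_transcript(path='output.json'):
    with open(path) as f:
        for line in f:
            try:
                payload = json.loads(line)
            except ValueError:
                continue
            results = payload.get('result') or []
            if results:
                return results[0]['alternative'][0]['transcript']
    return None
print(first_transcript())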
22198994d98c3d383fa38513f5de968cb8dcde27
|
CheckUpdatedVulnerableEntries.py
|
CheckUpdatedVulnerableEntries.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import re
import sys
import requests
def getCurrentVersion(package):
# https://wiki.archlinux.org/index.php/Official_repositories_web_interface
url = "https://www.archlinux.org/packages/search/json/?name=%s" % package
resp = requests.get(url)
if resp.status_code != 200:
return None
payload = resp.json()
if not payload or not 'results' in payload:
return None
for result in payload['results']:
if not 'pkgname' in result or result['pkgname'] != package:
continue
if not 'pkgrel' in result or not 'pkgver' in result:
continue
return result['pkgver'] + '-' + result['pkgrel']
return None
def checkVulnerableEntriesUpdated(dbFile):
versionRE = re.compile(r'^([<>]?=?)?\s*((\d+:)?[.a-zA-Z\d_-]+(-\d+)?)$')
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
match = versionRE.match(issue['vulnerableVersion'])
if not match:
continue
vulnerableVersion = match.group(2)
currentVersion = getCurrentVersion(issue['packages'][0])
if vulnerableVersion != currentVersion:
print("Package %s is marked as vulnerable in version %s, but is currenly in version %s"
% (issue['packages'][0],
vulnerableVersion,
currentVersion))
if __name__ == "__main__":
nbParams = len(sys.argv) - 1
if nbParams != 1:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
if not os.path.isfile(sys.argv[1]):
sys.exit("JSON database %s does not exist!" % (sys.argv[1]))
checkVulnerableEntriesUpdated(sys.argv[1])
|
Add a tool to detect update in packages with pending issues
|
Add a tool to detect update in packages with pending issues
|
Python
|
mpl-2.0
|
rgacogne/ArchCVEToJSON
|
Add a tool to detect update in packages with pending issues
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import re
import sys
import requests
def getCurrentVersion(package):
# https://wiki.archlinux.org/index.php/Official_repositories_web_interface
url = "https://www.archlinux.org/packages/search/json/?name=%s" % package
resp = requests.get(url)
if resp.status_code != 200:
return None
payload = resp.json()
if not payload or not 'results' in payload:
return None
for result in payload['results']:
if not 'pkgname' in result or result['pkgname'] != package:
continue
if not 'pkgrel' in result or not 'pkgver' in result:
continue
return result['pkgver'] + '-' + result['pkgrel']
return None
def checkVulnerableEntriesUpdated(dbFile):
versionRE = re.compile(r'^([<>]?=?)?\s*((\d+:)?[.a-zA-Z\d_-]+(-\d+)?)$')
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
match = versionRE.match(issue['vulnerableVersion'])
if not match:
continue
vulnerableVersion = match.group(2)
currentVersion = getCurrentVersion(issue['packages'][0])
if vulnerableVersion != currentVersion:
print("Package %s is marked as vulnerable in version %s, but is currenly in version %s"
% (issue['packages'][0],
vulnerableVersion,
currentVersion))
if __name__ == "__main__":
nbParams = len(sys.argv) - 1
if nbParams != 1:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
if not os.path.isfile(sys.argv[1]):
sys.exit("JSON database %s does not exist!" % (sys.argv[1]))
checkVulnerableEntriesUpdated(sys.argv[1])
|
<commit_before><commit_msg>Add a tool to detect update in packages with pending issues<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import re
import sys
import requests
def getCurrentVersion(package):
# https://wiki.archlinux.org/index.php/Official_repositories_web_interface
url = "https://www.archlinux.org/packages/search/json/?name=%s" % package
resp = requests.get(url)
if resp.status_code != 200:
return None
payload = resp.json()
if not payload or not 'results' in payload:
return None
for result in payload['results']:
if not 'pkgname' in result or result['pkgname'] != package:
continue
if not 'pkgrel' in result or not 'pkgver' in result:
continue
return result['pkgver'] + '-' + result['pkgrel']
return None
def checkVulnerableEntriesUpdated(dbFile):
versionRE = re.compile(r'^([<>]?=?)?\s*((\d+:)?[.a-zA-Z\d_-]+(-\d+)?)$')
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
match = versionRE.match(issue['vulnerableVersion'])
if not match:
continue
vulnerableVersion = match.group(2)
currentVersion = getCurrentVersion(issue['packages'][0])
if vulnerableVersion != currentVersion:
print("Package %s is marked as vulnerable in version %s, but is currenly in version %s"
% (issue['packages'][0],
vulnerableVersion,
currentVersion))
if __name__ == "__main__":
nbParams = len(sys.argv) - 1
if nbParams != 1:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
if not os.path.isfile(sys.argv[1]):
sys.exit("JSON database %s does not exist!" % (sys.argv[1]))
checkVulnerableEntriesUpdated(sys.argv[1])
|
Add a tool to detect update in packages with pending issues#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import re
import sys
import requests
def getCurrentVersion(package):
# https://wiki.archlinux.org/index.php/Official_repositories_web_interface
url = "https://www.archlinux.org/packages/search/json/?name=%s" % package
resp = requests.get(url)
if resp.status_code != 200:
return None
payload = resp.json()
if not payload or not 'results' in payload:
return None
for result in payload['results']:
if not 'pkgname' in result or result['pkgname'] != package:
continue
if not 'pkgrel' in result or not 'pkgver' in result:
continue
return result['pkgver'] + '-' + result['pkgrel']
return None
def checkVulnerableEntriesUpdated(dbFile):
versionRE = re.compile(r'^([<>]?=?)?\s*((\d+:)?[.a-zA-Z\d_-]+(-\d+)?)$')
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
match = versionRE.match(issue['vulnerableVersion'])
if not match:
continue
vulnerableVersion = match.group(2)
currentVersion = getCurrentVersion(issue['packages'][0])
if vulnerableVersion != currentVersion:
print("Package %s is marked as vulnerable in version %s, but is currenly in version %s"
% (issue['packages'][0],
vulnerableVersion,
currentVersion))
if __name__ == "__main__":
nbParams = len(sys.argv) - 1
if nbParams != 1:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
if not os.path.isfile(sys.argv[1]):
sys.exit("JSON database %s does not exist!" % (sys.argv[1]))
checkVulnerableEntriesUpdated(sys.argv[1])
|
<commit_before><commit_msg>Add a tool to detect update in packages with pending issues<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import re
import sys
import requests
def getCurrentVersion(package):
# https://wiki.archlinux.org/index.php/Official_repositories_web_interface
url = "https://www.archlinux.org/packages/search/json/?name=%s" % package
resp = requests.get(url)
if resp.status_code != 200:
return None
payload = resp.json()
if not payload or not 'results' in payload:
return None
for result in payload['results']:
if not 'pkgname' in result or result['pkgname'] != package:
continue
if not 'pkgrel' in result or not 'pkgver' in result:
continue
return result['pkgver'] + '-' + result['pkgrel']
return None
def checkVulnerableEntriesUpdated(dbFile):
versionRE = re.compile(r'^([<>]?=?)?\s*((\d+:)?[.a-zA-Z\d_-]+(-\d+)?)$')
with open(dbFile) as db:
issuesJSON = json.load(db)
for issue in issuesJSON:
if issue['status'] == 'Vulnerable':
match = versionRE.match(issue['vulnerableVersion'])
if not match:
continue
vulnerableVersion = match.group(2)
currentVersion = getCurrentVersion(issue['packages'][0])
if vulnerableVersion != currentVersion:
print("Package %s is marked as vulnerable in version %s, but is currenly in version %s"
% (issue['packages'][0],
vulnerableVersion,
currentVersion))
if __name__ == "__main__":
nbParams = len(sys.argv) - 1
if nbParams != 1:
sys.exit('Usage: %s <JSON database>' % (sys.argv[0]))
if not os.path.isfile(sys.argv[1]):
sys.exit("JSON database %s does not exist!" % (sys.argv[1]))
checkVulnerableEntriesUpdated(sys.argv[1])
|
|
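For reference, the Arch Linux packages web interface used in this record can be queried on its own. A tiny illustrative sketch follows; the package name is chosen arbitrarily and network access is assumed.
# Sketch of the same JSON endpoint query, outside the checker script.
from __future__ import print_function
import requests
resp = requests.get("https://www.archlinux.org/packages/search/json/",
                    params={"name": "openssl"})
for result in resp.json().get("results", []):
    print(result["pkgname"], result["pkgver"] + "-" + result["pkgrel"])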
86f276416ee70470d33ec7f481cf2d69b6956b36
|
IPython/kernel/tests/test_kernelspec.py
|
IPython/kernel/tests/test_kernelspec.py
|
import json
import os
from os.path import join as pjoin
import unittest
from IPython.utils.tempdir import TemporaryDirectory
from IPython.kernel import kernelspec
sample_kernel_json = {'argv':['cat', '{connection_file}'],
'display_name':'Test kernel',
'language':'bash',
}
class KernelSpecTests(unittest.TestCase):
def setUp(self):
self.tempdir = td = TemporaryDirectory()
self.sample_kernel_dir = pjoin(td.name, 'kernels', 'Sample')
os.makedirs(self.sample_kernel_dir)
json_file = pjoin(self.sample_kernel_dir, 'kernel.json')
with open(json_file, 'w') as f:
json.dump(sample_kernel_json, f)
self.ksm = kernelspec.KernelSpecManager(ipython_dir=td.name)
def tearDown(self):
self.tempdir.cleanup()
def test_find_kernel_specs(self):
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels['sample'], self.sample_kernel_dir)
def test_get_kernel_spec(self):
ks = self.ksm.get_kernel_spec('SAMPLE') # Case insensitive
self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
self.assertEqual(ks.argv, sample_kernel_json['argv'])
self.assertEqual(ks.display_name, sample_kernel_json['display_name'])
self.assertEqual(ks.language, sample_kernel_json['language'])
self.assertEqual(ks.codemirror_mode, sample_kernel_json['language'])
self.assertEqual(ks.env, {})
|
Add simple tests for kernel spec machinery
|
Add simple tests for kernel spec machinery
|
Python
|
bsd-3-clause
|
ipython/ipython,ipython/ipython
|
Add simple tests for kernel spec machinery
|
import json
import os
from os.path import join as pjoin
import unittest
from IPython.utils.tempdir import TemporaryDirectory
from IPython.kernel import kernelspec
sample_kernel_json = {'argv':['cat', '{connection_file}'],
'display_name':'Test kernel',
'language':'bash',
}
class KernelSpecTests(unittest.TestCase):
def setUp(self):
self.tempdir = td = TemporaryDirectory()
self.sample_kernel_dir = pjoin(td.name, 'kernels', 'Sample')
os.makedirs(self.sample_kernel_dir)
json_file = pjoin(self.sample_kernel_dir, 'kernel.json')
with open(json_file, 'w') as f:
json.dump(sample_kernel_json, f)
self.ksm = kernelspec.KernelSpecManager(ipython_dir=td.name)
def tearDown(self):
self.tempdir.cleanup()
def test_find_kernel_specs(self):
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels['sample'], self.sample_kernel_dir)
def test_get_kernel_spec(self):
ks = self.ksm.get_kernel_spec('SAMPLE') # Case insensitive
self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
self.assertEqual(ks.argv, sample_kernel_json['argv'])
self.assertEqual(ks.display_name, sample_kernel_json['display_name'])
self.assertEqual(ks.language, sample_kernel_json['language'])
self.assertEqual(ks.codemirror_mode, sample_kernel_json['language'])
self.assertEqual(ks.env, {})
|
<commit_before><commit_msg>Add simple tests for kernel spec machinery<commit_after>
|
import json
import os
from os.path import join as pjoin
import unittest
from IPython.utils.tempdir import TemporaryDirectory
from IPython.kernel import kernelspec
sample_kernel_json = {'argv':['cat', '{connection_file}'],
'display_name':'Test kernel',
'language':'bash',
}
class KernelSpecTests(unittest.TestCase):
def setUp(self):
self.tempdir = td = TemporaryDirectory()
self.sample_kernel_dir = pjoin(td.name, 'kernels', 'Sample')
os.makedirs(self.sample_kernel_dir)
json_file = pjoin(self.sample_kernel_dir, 'kernel.json')
with open(json_file, 'w') as f:
json.dump(sample_kernel_json, f)
self.ksm = kernelspec.KernelSpecManager(ipython_dir=td.name)
def tearDown(self):
self.tempdir.cleanup()
def test_find_kernel_specs(self):
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels['sample'], self.sample_kernel_dir)
def test_get_kernel_spec(self):
ks = self.ksm.get_kernel_spec('SAMPLE') # Case insensitive
self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
self.assertEqual(ks.argv, sample_kernel_json['argv'])
self.assertEqual(ks.display_name, sample_kernel_json['display_name'])
self.assertEqual(ks.language, sample_kernel_json['language'])
self.assertEqual(ks.codemirror_mode, sample_kernel_json['language'])
self.assertEqual(ks.env, {})
|
Add simple tests for kernel spec machineryimport json
import os
from os.path import join as pjoin
import unittest
from IPython.utils.tempdir import TemporaryDirectory
from IPython.kernel import kernelspec
sample_kernel_json = {'argv':['cat', '{connection_file}'],
'display_name':'Test kernel',
'language':'bash',
}
class KernelSpecTests(unittest.TestCase):
def setUp(self):
self.tempdir = td = TemporaryDirectory()
self.sample_kernel_dir = pjoin(td.name, 'kernels', 'Sample')
os.makedirs(self.sample_kernel_dir)
json_file = pjoin(self.sample_kernel_dir, 'kernel.json')
with open(json_file, 'w') as f:
json.dump(sample_kernel_json, f)
self.ksm = kernelspec.KernelSpecManager(ipython_dir=td.name)
def tearDown(self):
self.tempdir.cleanup()
def test_find_kernel_specs(self):
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels['sample'], self.sample_kernel_dir)
def test_get_kernel_spec(self):
ks = self.ksm.get_kernel_spec('SAMPLE') # Case insensitive
self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
self.assertEqual(ks.argv, sample_kernel_json['argv'])
self.assertEqual(ks.display_name, sample_kernel_json['display_name'])
self.assertEqual(ks.language, sample_kernel_json['language'])
self.assertEqual(ks.codemirror_mode, sample_kernel_json['language'])
self.assertEqual(ks.env, {})
|
<commit_before><commit_msg>Add simple tests for kernel spec machinery<commit_after>import json
import os
from os.path import join as pjoin
import unittest
from IPython.utils.tempdir import TemporaryDirectory
from IPython.kernel import kernelspec
sample_kernel_json = {'argv':['cat', '{connection_file}'],
'display_name':'Test kernel',
'language':'bash',
}
class KernelSpecTests(unittest.TestCase):
def setUp(self):
self.tempdir = td = TemporaryDirectory()
self.sample_kernel_dir = pjoin(td.name, 'kernels', 'Sample')
os.makedirs(self.sample_kernel_dir)
json_file = pjoin(self.sample_kernel_dir, 'kernel.json')
with open(json_file, 'w') as f:
json.dump(sample_kernel_json, f)
self.ksm = kernelspec.KernelSpecManager(ipython_dir=td.name)
def tearDown(self):
self.tempdir.cleanup()
def test_find_kernel_specs(self):
kernels = self.ksm.find_kernel_specs()
self.assertEqual(kernels['sample'], self.sample_kernel_dir)
def test_get_kernel_spec(self):
ks = self.ksm.get_kernel_spec('SAMPLE') # Case insensitive
self.assertEqual(ks.resource_dir, self.sample_kernel_dir)
self.assertEqual(ks.argv, sample_kernel_json['argv'])
self.assertEqual(ks.display_name, sample_kernel_json['display_name'])
self.assertEqual(ks.language, sample_kernel_json['language'])
self.assertEqual(ks.codemirror_mode, sample_kernel_json['language'])
self.assertEqual(ks.env, {})
|
|
46c64c4612417a16fb8027c39e252f5d4a44378b
|
mindbender/maya/tests/test_workflow.py
|
mindbender/maya/tests/test_workflow.py
|
"""Integration tests
These tests include external libraries in order to test
the integration between them.
"""
import os
import sys
import shutil
import tempfile
from maya import cmds
import pyblish_maya
import pyblish.api
import pyblish.util
from mindbender import api, maya
from nose.tools import (
assert_equals,
with_setup
)
self = sys.modules[__name__]
self.tempdir = None
def setup():
pyblish_maya.setup()
api.install(maya)
self.tempdir = tempfile.mkdtemp()
assetdir = os.path.join(
self.tempdir,
"assets",
"Test"
)
os.makedirs(assetdir)
api.register_root(self.tempdir)
assert api.registered_root() == self.tempdir
api.register_silo("assets")
# Setup environment
os.environ["ASSETDIR"] = assetdir
os.environ["MINDBENDER_SILO"] = "assets"
def teardown():
pyblish_maya.teardown()
api.uninstall()
shutil.rmtree(self.tempdir)
def clear():
pass
def test_setup():
"""Fixture is setup ok"""
assert_equals(next(api.ls())["name"], "Test")
@with_setup(clear)
def test_modeling():
"""Modeling workflow is functional"""
cmds.file(new=True, force=True)
transform, generator = cmds.polyCube(name="body_PLY")
group = cmds.group(transform, name="ROOT")
cmds.select(group, replace=True)
maya.create("modelDefault",
family="mindbender.model",
options={"useSelection": True})
# Comply with save validator
cmds.file(rename="temp.ma")
cmds.file(save=True)
# Comply with ID validator
cmds.addAttr(transform, longName="mbID", dataType="string")
pyblish.util.publish()
asset = next(api.ls())
assert_equals(asset["name"], "Test")
subset = asset["subsets"][0]
assert_equals(subset["name"], "modelDefault")
version = subset["versions"][0]
assert_equals(version["version"], 1)
representation = version["representations"][0]
assert_equals(representation["format"], ".ma")
|
Add initial workflow test. This is an example of how workflow can be automatically tested. In this case, the modeling workflow is tested. If something is done here that isn't valid, and is caught during validation, this test would fail.
|
Add initial workflow test.
This is an example of how workflow can be automatically tested. In this case, the modeling workflow is tested. If something is done here that isn't valid, and is caught during validation, this test would fail.
Update this test whenever the modeling workflow changes, and keep an eye out for when it *hasn't* changed, but the test still fails. It may mean there has been an accidental breakage of a valid workflow.
|
Python
|
mit
|
getavalon/core,MoonShineVFX/core,mindbender-studio/core,MoonShineVFX/core,mindbender-studio/core,getavalon/core
|
Add initial workflow test.
This is an example of how workflow can be automatically tested. In this case, the modeling workflow is tested. If something is done here that isn't valid, and is caught during validation, this test would fail.
Update this test whenever the modeling workflow changes, and keep an eye out for when it *hasn't* changed, but the test still fails. It may mean there has been an accidental breakage of a valid workflow.
|
"""Integration tests
These tests include external libraries in order to test
the integration between them.
"""
import os
import sys
import shutil
import tempfile
from maya import cmds
import pyblish_maya
import pyblish.api
import pyblish.util
from mindbender import api, maya
from nose.tools import (
assert_equals,
with_setup
)
self = sys.modules[__name__]
self.tempdir = None
def setup():
pyblish_maya.setup()
api.install(maya)
self.tempdir = tempfile.mkdtemp()
assetdir = os.path.join(
self.tempdir,
"assets",
"Test"
)
os.makedirs(assetdir)
api.register_root(self.tempdir)
assert api.registered_root() == self.tempdir
api.register_silo("assets")
# Setup environment
os.environ["ASSETDIR"] = assetdir
os.environ["MINDBENDER_SILO"] = "assets"
def teardown():
pyblish_maya.teardown()
api.uninstall()
shutil.rmtree(self.tempdir)
def clear():
pass
def test_setup():
"""Fixture is setup ok"""
assert_equals(next(api.ls())["name"], "Test")
@with_setup(clear)
def test_modeling():
"""Modeling workflow is functional"""
cmds.file(new=True, force=True)
transform, generator = cmds.polyCube(name="body_PLY")
group = cmds.group(transform, name="ROOT")
cmds.select(group, replace=True)
maya.create("modelDefault",
family="mindbender.model",
options={"useSelection": True})
# Comply with save validator
cmds.file(rename="temp.ma")
cmds.file(save=True)
# Comply with ID validator
cmds.addAttr(transform, longName="mbID", dataType="string")
pyblish.util.publish()
asset = next(api.ls())
assert_equals(asset["name"], "Test")
subset = asset["subsets"][0]
assert_equals(subset["name"], "modelDefault")
version = subset["versions"][0]
assert_equals(version["version"], 1)
representation = version["representations"][0]
assert_equals(representation["format"], ".ma")
|
<commit_before><commit_msg>Add initial workflow test.
This is an example of how workflow can be automatically tested. In this case, the modeling workflow is tested. If something is done here that isn't valid, and is caught during validation, this test would fail.
Update this test whenever the modeling workflow changes, and keep an eye out for when it *hasn't* changed, but the test still fails. It may mean there has been an accidental breakage of a valid workflow.<commit_after>
|
"""Integration tests
These tests include external libraries in order to test
the integration between them.
"""
import os
import sys
import shutil
import tempfile
from maya import cmds
import pyblish_maya
import pyblish.api
import pyblish.util
from mindbender import api, maya
from nose.tools import (
assert_equals,
with_setup
)
self = sys.modules[__name__]
self.tempdir = None
def setup():
pyblish_maya.setup()
api.install(maya)
self.tempdir = tempfile.mkdtemp()
assetdir = os.path.join(
self.tempdir,
"assets",
"Test"
)
os.makedirs(assetdir)
api.register_root(self.tempdir)
assert api.registered_root() == self.tempdir
api.register_silo("assets")
# Setup environment
os.environ["ASSETDIR"] = assetdir
os.environ["MINDBENDER_SILO"] = "assets"
def teardown():
pyblish_maya.teardown()
api.uninstall()
shutil.rmtree(self.tempdir)
def clear():
pass
def test_setup():
"""Fixture is setup ok"""
assert_equals(next(api.ls())["name"], "Test")
@with_setup(clear)
def test_modeling():
"""Modeling workflow is functional"""
cmds.file(new=True, force=True)
transform, generator = cmds.polyCube(name="body_PLY")
group = cmds.group(transform, name="ROOT")
cmds.select(group, replace=True)
maya.create("modelDefault",
family="mindbender.model",
options={"useSelection": True})
# Comply with save validator
cmds.file(rename="temp.ma")
cmds.file(save=True)
# Comply with ID validator
cmds.addAttr(transform, longName="mbID", dataType="string")
pyblish.util.publish()
asset = next(api.ls())
assert_equals(asset["name"], "Test")
subset = asset["subsets"][0]
assert_equals(subset["name"], "modelDefault")
version = subset["versions"][0]
assert_equals(version["version"], 1)
representation = version["representations"][0]
assert_equals(representation["format"], ".ma")
|
Add initial workflow test.
This is an example of how workflow can be automatically tested. In this case, the modeling workflow is tested. If something is done here that isn't valid, and is caught during validation, this test would fail.
Update this test whenever the modeling workflow changes, and keep an eye out for when it *hasn't* changed, but the test still fails. It may mean there has been an accidental breakage of a valid workflow."""Integration tests
These tests include external libraries in order to test
the integration between them.
"""
import os
import sys
import shutil
import tempfile
from maya import cmds
import pyblish_maya
import pyblish.api
import pyblish.util
from mindbender import api, maya
from nose.tools import (
assert_equals,
with_setup
)
self = sys.modules[__name__]
self.tempdir = None
def setup():
pyblish_maya.setup()
api.install(maya)
self.tempdir = tempfile.mkdtemp()
assetdir = os.path.join(
self.tempdir,
"assets",
"Test"
)
os.makedirs(assetdir)
api.register_root(self.tempdir)
assert api.registered_root() == self.tempdir
api.register_silo("assets")
# Setup environment
os.environ["ASSETDIR"] = assetdir
os.environ["MINDBENDER_SILO"] = "assets"
def teardown():
pyblish_maya.teardown()
api.uninstall()
shutil.rmtree(self.tempdir)
def clear():
pass
def test_setup():
"""Fixture is setup ok"""
assert_equals(next(api.ls())["name"], "Test")
@with_setup(clear)
def test_modeling():
"""Modeling workflow is functional"""
cmds.file(new=True, force=True)
transform, generator = cmds.polyCube(name="body_PLY")
group = cmds.group(transform, name="ROOT")
cmds.select(group, replace=True)
maya.create("modelDefault",
family="mindbender.model",
options={"useSelection": True})
# Comply with save validator
cmds.file(rename="temp.ma")
cmds.file(save=True)
# Comply with ID validator
cmds.addAttr(transform, longName="mbID", dataType="string")
pyblish.util.publish()
asset = next(api.ls())
assert_equals(asset["name"], "Test")
subset = asset["subsets"][0]
assert_equals(subset["name"], "modelDefault")
version = subset["versions"][0]
assert_equals(version["version"], 1)
representation = version["representations"][0]
assert_equals(representation["format"], ".ma")
|
<commit_before><commit_msg>Add initial workflow test.
This is an example of how workflow can be automatically tested. In this case, the modeling workflow is tested. If something is done here that isn't valid, and is caught during validation, this test would fail.
Update this test whenever the modeling workflow changes, and keep an eye out for when it *hasn't* changed, but the test still fails. It may mean there has been an accidental breakage of a valid workflow.<commit_after>"""Integration tests
These tests include external libraries in order to test
the integration between them.
"""
import os
import sys
import shutil
import tempfile
from maya import cmds
import pyblish_maya
import pyblish.api
import pyblish.util
from mindbender import api, maya
from nose.tools import (
assert_equals,
with_setup
)
self = sys.modules[__name__]
self.tempdir = None
def setup():
pyblish_maya.setup()
api.install(maya)
self.tempdir = tempfile.mkdtemp()
assetdir = os.path.join(
self.tempdir,
"assets",
"Test"
)
os.makedirs(assetdir)
api.register_root(self.tempdir)
assert api.registered_root() == self.tempdir
api.register_silo("assets")
# Setup environment
os.environ["ASSETDIR"] = assetdir
os.environ["MINDBENDER_SILO"] = "assets"
def teardown():
pyblish_maya.teardown()
api.uninstall()
shutil.rmtree(self.tempdir)
def clear():
pass
def test_setup():
"""Fixture is setup ok"""
assert_equals(next(api.ls())["name"], "Test")
@with_setup(clear)
def test_modeling():
"""Modeling workflow is functional"""
cmds.file(new=True, force=True)
transform, generator = cmds.polyCube(name="body_PLY")
group = cmds.group(transform, name="ROOT")
cmds.select(group, replace=True)
maya.create("modelDefault",
family="mindbender.model",
options={"useSelection": True})
# Comply with save validator
cmds.file(rename="temp.ma")
cmds.file(save=True)
# Comply with ID validator
cmds.addAttr(transform, longName="mbID", dataType="string")
pyblish.util.publish()
asset = next(api.ls())
assert_equals(asset["name"], "Test")
subset = asset["subsets"][0]
assert_equals(subset["name"], "modelDefault")
version = subset["versions"][0]
assert_equals(version["version"], 1)
representation = version["representations"][0]
assert_equals(representation["format"], ".ma")
|
|
d42bc4c14ca9a4f819c2ef3dccbfa916b381b959
|
tests/print_view_controller_hierarchy_test.py
|
tests/print_view_controller_hierarchy_test.py
|
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_controller_hierarchy
class PrintViewControllerHierarchyTest(unittest.TestCase):
def testPrintViewControllerHierarchy(self):
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_controller_hierarchy.print_view_controller_hierarchy(debugger,
None,
result,
None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<ViewController 0x\w{12}>, state: appeared, view: <UIView 0x\w{12}>'
self.assertTrue(re.match(expected_output_regex,
result.GetOutput().rstrip()))
debugger.Terminate()
|
Add the first unit test.
|
Add the first unit test.
|
Python
|
mit
|
mrhappyasthma/happydebugging,mrhappyasthma/HappyDebugging
|
Add the first unit test.
|
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_controller_hierarchy
class PrintViewControllerHierarchyTest(unittest.TestCase):
def testPrintViewControllerHierarchy(self):
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_controller_hierarchy.print_view_controller_hierarchy(debugger,
None,
result,
None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<ViewController 0x\w{12}>, state: appeared, view: <UIView 0x\w{12}>'
self.assertTrue(re.match(expected_output_regex,
result.GetOutput().rstrip()))
debugger.Terminate()
|
<commit_before><commit_msg>Add the first unit test.<commit_after>
|
import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_controller_hierarchy
class PrintViewControllerHierarchyTest(unittest.TestCase):
def testPrintViewControllerHierarchy(self):
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_controller_hierarchy.print_view_controller_hierarchy(debugger,
None,
result,
None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<ViewController 0x\w{12}>, state: appeared, view: <UIView 0x\w{12}>'
self.assertTrue(re.match(expected_output_regex,
result.GetOutput().rstrip()))
debugger.Terminate()
|
Add the first unit test.import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_controller_hierarchy
class PrintViewControllerHierarchyTest(unittest.TestCase):
def testPrintViewControllerHierarchy(self):
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_controller_hierarchy.print_view_controller_hierarchy(debugger,
None,
result,
None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<ViewController 0x\w{12}>, state: appeared, view: <UIView 0x\w{12}>'
self.assertTrue(re.match(expected_output_regex,
result.GetOutput().rstrip()))
debugger.Terminate()
|
<commit_before><commit_msg>Add the first unit test.<commit_after>import re
import unittest
from test_utils import import_utils
import_utils.prepare_lldb_import_or_exit()
import lldb
import_utils.prepare_for_scripts_imports()
from scripts import print_view_controller_hierarchy
class PrintViewControllerHierarchyTest(unittest.TestCase):
def testPrintViewControllerHierarchy(self):
debugger = lldb.SBDebugger.Create()
debugger.SetAsync(False)
target = debugger.CreateTarget('')
error = lldb.SBError()
process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp',
False, error)
if not process:
self.assertTrue(False, 'Could not attach to process "TestApp"')
debugger.SetSelectedTarget(target)
result = lldb.SBCommandReturnObject()
print_view_controller_hierarchy.print_view_controller_hierarchy(debugger,
None,
result,
None)
self.assertTrue(result.Succeeded())
expected_output_regex = r'<ViewController 0x\w{12}>, state: appeared, view: <UIView 0x\w{12}>'
self.assertTrue(re.match(expected_output_regex,
result.GetOutput().rstrip()))
debugger.Terminate()
|
|
fe257b061292df122064765db0f06c49b01b3ed7
|
ad-hoc-scripts/lift.py
|
ad-hoc-scripts/lift.py
|
#! /usr/bin/env python3
import sys
import json
with open(sys.argv[1]) as f:
equajson = json.load(f)
field_list = [
"always-an-integer",
"always-positive",
"always-dimensionless",
"bound-variable",
"fixed-constant",
"special-function"
]
for term in equajson["unicode-pretty-print"]["terms"]:
print(term)
if any(x in field_list for x in term.keys()):
term["classification"] = {}
del_list = []
for field in term.keys():
print(field)
if field in field_list:
term["classification"][field] = term[field]
del_list.append(field)
for field in del_list:
del term[field]
with open(sys.argv[1], 'w') as f:
json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
|
Make a script to automatically move to new schema.
|
Make a script to automatically move to new schema.
|
Python
|
mit
|
nbeaver/equajson
|
Make a script to automatically move to new schema.
|
#! /usr/bin/env python3
import sys
import json
with open(sys.argv[1]) as f:
equajson = json.load(f)
field_list = [
"always-an-integer",
"always-positive",
"always-dimensionless",
"bound-variable",
"fixed-constant",
"special-function"
]
for term in equajson["unicode-pretty-print"]["terms"]:
print(term)
if any(x in field_list for x in term.keys()):
term["classification"] = {}
del_list = []
for field in term.keys():
print(field)
if field in field_list:
term["classification"][field] = term[field]
del_list.append(field)
for field in del_list:
del term[field]
with open(sys.argv[1], 'w') as f:
json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
|
<commit_before><commit_msg>Make a script to automatically move to new schema.<commit_after>
|
#! /usr/bin/env python3
import sys
import json
with open(sys.argv[1]) as f:
equajson = json.load(f)
field_list = [
"always-an-integer",
"always-positive",
"always-dimensionless",
"bound-variable",
"fixed-constant",
"special-function"
]
for term in equajson["unicode-pretty-print"]["terms"]:
print(term)
if any(x in field_list for x in term.keys()):
term["classification"] = {}
del_list = []
for field in term.keys():
print(field)
if field in field_list:
term["classification"][field] = term[field]
del_list.append(field)
for field in del_list:
del term[field]
with open(sys.argv[1], 'w') as f:
json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
|
Make a script to automatically move to new schema.#! /usr/bin/env python3
import sys
import json
with open(sys.argv[1]) as f:
equajson = json.load(f)
field_list = [
"always-an-integer",
"always-positive",
"always-dimensionless",
"bound-variable",
"fixed-constant",
"special-function"
]
for term in equajson["unicode-pretty-print"]["terms"]:
print(term)
if any(x in field_list for x in term.keys()):
term["classification"] = {}
del_list = []
for field in term.keys():
print(field)
if field in field_list:
term["classification"][field] = term[field]
del_list.append(field)
for field in del_list:
del term[field]
with open(sys.argv[1], 'w') as f:
json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
|
<commit_before><commit_msg>Make a script to automatically move to new schema.<commit_after>#! /usr/bin/env python3
import sys
import json
with open(sys.argv[1]) as f:
equajson = json.load(f)
field_list = [
"always-an-integer",
"always-positive",
"always-dimensionless",
"bound-variable",
"fixed-constant",
"special-function"
]
for term in equajson["unicode-pretty-print"]["terms"]:
print(term)
if any(x in field_list for x in term.keys()):
term["classification"] = {}
del_list = []
for field in term.keys():
print(field)
if field in field_list:
term["classification"][field] = term[field]
del_list.append(field)
for field in del_list:
del term[field]
with open(sys.argv[1], 'w') as f:
json.dump(equajson, f, indent=4, separators=(',', ': '), ensure_ascii=False, sort_keys=True)
|
|
c90f64a57b02035c87c000e06025c59736fad9ba
|
WDL_Pipelines/GATK_from_bam/check_completion_status.py
|
WDL_Pipelines/GATK_from_bam/check_completion_status.py
|
#! /usr/bin/python
'''
Checks a Google genomics pipeline submission and prints status.
'''
import argparse
import subprocess
import yaml
def check_status(code):
'''
Checks status with locally installed gsutil (in PATH).
'''
script = ' '.join(["gcloud alpha genomics operations describe",
code,
"--format='yaml(done, error, metadata.events)'"])
proc = subprocess.Popen(script, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = proc.communicate()
status = parse_status(stdout)
print '\t'.join([code] + status)
def parse_status(yaml_status):
'''
Pulls completion status from yaml output.
'''
status_map = yaml.safe_load(yaml_status)
done_status = str(status_map["done"])
try:
err = status_map["error"]
return [done_status, "Error"]
except KeyError:
return [done_status]
def main():
'''
Arg parsing and central dispatch.
'''
# Arg parsing
desc = "Checks Google pipeline job for completion"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("codes", metavar="CODE", nargs='+',
help="pipeline code")
args = parser.parse_args()
# Central dispatch
for code in args.codes:
check_status(code)
if __name__ == "__main__":
main()
|
Add post-submission status check script.
|
Add post-submission status check script.
|
Python
|
bsd-2-clause
|
dkdeconti/DFCI-CCCB-GATK-Cloud-pipeline,dkdeconti/DFCI-CCCB-GATK-Cloud-pipeline,dkdeconti/DFCI-CCCB-GATK-Cloud-pipeline
|
Add post-submission status check script.
|
#! /usr/bin/python
'''
Checks a Google genomics pipeline submission and prints status.
'''
import argparse
import subprocess
import yaml
def check_status(code):
'''
Checks status with locally installed gsutil (in PATH).
'''
script = ' '.join(["gcloud alpha genomics operations describe",
code,
"--format='yaml(done, error, metadata.events)'"])
proc = subprocess.Popen(script, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = proc.communicate()
status = parse_status(stdout)
print '\t'.join([code] + status)
def parse_status(yaml_status):
'''
Pulls completion status from yaml output.
'''
status_map = yaml.safe_load(yaml_status)
done_status = str(status_map["done"])
try:
err = status_map["error"]
return [done_status, "Error"]
except KeyError:
return [done_status]
def main():
'''
Arg parsing and central dispatch.
'''
# Arg parsing
desc = "Checks Google pipeline job for completion"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("codes", metavar="CODE", nargs='+',
help="pipeline code")
args = parser.parse_args()
# Central dispatch
for code in args.codes:
check_status(code)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add post-submission status check script.<commit_after>
|
#! /usr/bin/python
'''
Checks a Google genomics pipeline submission and prints status.
'''
import argparse
import subprocess
import yaml
def check_status(code):
'''
Checks status with locally installed gsutil (in PATH).
'''
script = ' '.join(["gcloud alpha genomics operations describe",
code,
"--format='yaml(done, error, metadata.events)'"])
proc = subprocess.Popen(script, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = proc.communicate()
status = parse_status(stdout)
print '\t'.join([code] + status)
def parse_status(yaml_status):
'''
Pulls completion status from yaml output.
'''
status_map = yaml.safe_load(yaml_status)
done_status = str(status_map["done"])
try:
err = status_map["error"]
return [done_status, "Error"]
except KeyError:
return [done_status]
def main():
'''
Arg parsing and central dispatch.
'''
# Arg parsing
desc = "Checks Google pipeline job for completion"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("codes", metavar="CODE", nargs='+',
help="pipeline code")
args = parser.parse_args()
# Central dispatch
for code in args.codes:
check_status(code)
if __name__ == "__main__":
main()
|
Add post-submission status check script.#! /usr/bin/python
'''
Checks a Google genomics pipeline submission and prints status.
'''
import argparse
import subprocess
import yaml
def check_status(code):
'''
Checks status with locally installed gsutil (in PATH).
'''
script = ' '.join(["gcloud alpha genomics operations describe",
code,
"--format='yaml(done, error, metadata.events)'"])
proc = subprocess.Popen(script, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = proc.communicate()
status = parse_status(stdout)
print '\t'.join([code] + status)
def parse_status(yaml_status):
'''
Pulls completion status from yaml output.
'''
status_map = yaml.safe_load(yaml_status)
done_status = str(status_map["done"])
try:
err = status_map["error"]
return [done_status, "Error"]
except KeyError:
return [done_status]
def main():
'''
Arg parsing and central dispatch.
'''
# Arg parsing
desc = "Checks Google pipeline job for completion"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("codes", metavar="CODE", nargs='+',
help="pipeline code")
args = parser.parse_args()
# Central dispatch
for code in args.codes:
check_status(code)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add post-submission status check script.<commit_after>#! /usr/bin/python
'''
Checks a Google genomics pipeline submission and prints status.
'''
import argparse
import subprocess
import yaml
def check_status(code):
'''
Checks status with locally installed gsutil (in PATH).
'''
script = ' '.join(["gcloud alpha genomics operations describe",
code,
"--format='yaml(done, error, metadata.events)'"])
proc = subprocess.Popen(script, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, _ = proc.communicate()
status = parse_status(stdout)
print '\t'.join([code] + status)
def parse_status(yaml_status):
'''
Pulls completion status from yaml output.
'''
status_map = yaml.safe_load(yaml_status)
done_status = str(status_map["done"])
try:
err = status_map["error"]
return [done_status, "Error"]
except KeyError:
return [done_status]
def main():
'''
Arg parsing and central dispatch.
'''
# Arg parsing
desc = "Checks Google pipeline job for completion"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument("codes", metavar="CODE", nargs='+',
help="pipeline code")
args = parser.parse_args()
# Central dispatch
for code in args.codes:
check_status(code)
if __name__ == "__main__":
main()
|
|
52484856bb43401434ad85d44e1a81bccdf4d50c
|
i8c/tests/test_last_op_branch.py
|
i8c/tests/test_last_op_branch.py
|
from i8c.tests import TestCase
SOURCE = """\
define test::last_op_is_branch
argument bool x
argument bool y
goto label2
label1:
return
label2:
bne label1
"""
class TestFuncWithLastOpBra(TestCase):
def test_last_op_is_branch(self):
"""Check that functions whose last op is a branch work.
This is testing the code that adds the synthetic return.
As a side-effect it also exercises the code that stops
us generating unnecessary gotos.
"""
tree, output = self.compile(SOURCE)
self.assertEqual(["ne", "bra"], output.opnames)
|
Test synthetic return addition when last op is branch
|
Test synthetic return addition when last op is branch
|
Python
|
lgpl-2.1
|
gbenson/i8c
|
Test synthetic return addition when last op is branch
|
from i8c.tests import TestCase
SOURCE = """\
define test::last_op_is_branch
argument bool x
argument bool y
goto label2
label1:
return
label2:
bne label1
"""
class TestFuncWithLastOpBra(TestCase):
def test_last_op_is_branch(self):
"""Check that functions whose last op is a branch work.
This is testing the code that adds the synthetic return.
As a side-effect it also exercises the code that stops
us generating unnecessary gotos.
"""
tree, output = self.compile(SOURCE)
self.assertEqual(["ne", "bra"], output.opnames)
|
<commit_before><commit_msg>Test synthetic return addition when last op is branch<commit_after>
|
from i8c.tests import TestCase
SOURCE = """\
define test::last_op_is_branch
argument bool x
argument bool y
goto label2
label1:
return
label2:
bne label1
"""
class TestFuncWithLastOpBra(TestCase):
def test_last_op_is_branch(self):
"""Check that functions whose last op is a branch work.
This is testing the code that adds the synthetic return.
As a side-effect it also exercises the code that stops
us generating unnecessary gotos.
"""
tree, output = self.compile(SOURCE)
self.assertEqual(["ne", "bra"], output.opnames)
|
Test synthetic return addition when last op is branchfrom i8c.tests import TestCase
SOURCE = """\
define test::last_op_is_branch
argument bool x
argument bool y
goto label2
label1:
return
label2:
bne label1
"""
class TestFuncWithLastOpBra(TestCase):
def test_last_op_is_branch(self):
"""Check that functions whose last op is a branch work.
This is testing the code that adds the synthetic return.
As a side-effect it also exercises the code that stops
us generating unnecessary gotos.
"""
tree, output = self.compile(SOURCE)
self.assertEqual(["ne", "bra"], output.opnames)
|
<commit_before><commit_msg>Test synthetic return addition when last op is branch<commit_after>from i8c.tests import TestCase
SOURCE = """\
define test::last_op_is_branch
argument bool x
argument bool y
goto label2
label1:
return
label2:
bne label1
"""
class TestFuncWithLastOpBra(TestCase):
def test_last_op_is_branch(self):
"""Check that functions whose last op is a branch work.
This is testing the code that adds the synthetic return.
As a side-effect it also exercises the code that stops
us generating unnecessary gotos.
"""
tree, output = self.compile(SOURCE)
self.assertEqual(["ne", "bra"], output.opnames)
|
|
d1663520f019364841f673e929ed8095697b7ab9
|
test_http1.py
|
test_http1.py
|
from echo_client import client
def test_ok():
response = client('GET /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 200 OK'
def test_405():
response = client('POST /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 405 Method Not Allowed'
def test_505():
response = client('GET /index.html HTTP/1.0').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 505 HTTP Version Not Supported'
|
Add tests for HTTP server, step 1
|
Add tests for HTTP server, step 1
|
Python
|
mit
|
jwarren116/network-tools,jwarren116/network-tools
|
Add tests for HTTP server, step 1
|
from echo_client import client
def test_ok():
response = client('GET /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 200 OK'
def test_405():
response = client('POST /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 405 Method Not Allowed'
def test_505():
response = client('GET /index.html HTTP/1.0').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 505 HTTP Version Not Supported'
|
<commit_before><commit_msg>Add tests for HTTP server, step 1<commit_after>
|
from echo_client import client
def test_ok():
response = client('GET /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 200 OK'
def test_405():
response = client('POST /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 405 Method Not Allowed'
def test_505():
response = client('GET /index.html HTTP/1.0').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 505 HTTP Version Not Supported'
|
Add tests for HTTP server, step 1from echo_client import client
def test_ok():
response = client('GET /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 200 OK'
def test_405():
response = client('POST /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 405 Method Not Allowed'
def test_505():
response = client('GET /index.html HTTP/1.0').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 505 HTTP Version Not Supported'
|
<commit_before><commit_msg>Add tests for HTTP server, step 1<commit_after>from echo_client import client
def test_ok():
response = client('GET /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 200 OK'
def test_405():
response = client('POST /index.html HTTP/1.1').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 405 Method Not Allowed'
def test_505():
response = client('GET /index.html HTTP/1.0').split('\r\n')
first_line = response[0]
assert first_line == 'HTTP/1.1 505 HTTP Version Not Supported'
|
|
4398fd54672d8149e61b9360465d227b85f0ea12
|
django_auth_policy/management/commands/filter_password_list.py
|
django_auth_policy/management/commands/filter_password_list.py
|
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.conf import settings
from django_auth_policy.handlers import PasswordStrengthPolicyHandler
class Command(BaseCommand):
help = ("Filters a list of password by removing all passwords that do not "
"comply with the configured password strength policies."
"Provide file paths as arguments.")
def handle(self, *args, **options):
if not args:
print ("Please provide one or more file paths for files with "
"password lists.")
return
pw_handler = PasswordStrengthPolicyHandler()
for arg in args:
fh = open(arg, 'r')
for pw in fh:
pw = pw.strip()
try:
pw_handler.validate(pw)
except ValidationError:
continue
print pw
fh.close()
|
Add command which reads password lists and outputs passwords that validate the password policy
|
Add command which reads password lists and outputs passwords that validate the password policy
This comes in handy when disallowing commonly used passwords because it removes all passwords that will never validate anyhow.
|
Python
|
bsd-3-clause
|
Dreamsolution/django-auth-policy,Dreamsolution/django-auth-policy,mcella/django-auth-policy,mcella/django-auth-policy
|
Add command which reads password lists and outputs passwords that validate the password policy
This comes in handy when disallowing commonly used passwords because it removes all passwords that will never validate anyhow.
|
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.conf import settings
from django_auth_policy.handlers import PasswordStrengthPolicyHandler
class Command(BaseCommand):
help = ("Filters a list of password by removing all passwords that do not "
"comply with the configured password strength policies."
"Provide file paths as arguments.")
def handle(self, *args, **options):
if not args:
print ("Please provide one or more file paths for files with "
"password lists.")
return
pw_handler = PasswordStrengthPolicyHandler()
for arg in args:
fh = open(arg, 'r')
for pw in fh:
pw = pw.strip()
try:
pw_handler.validate(pw)
except ValidationError:
continue
print pw
fh.close()
|
<commit_before><commit_msg>Add command which reads password lists and outputs passwords that validate the password policy
This comes in handy when disallowing commonly used passwords because it removes all passwords that will never validate anyhow.<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.conf import settings
from django_auth_policy.handlers import PasswordStrengthPolicyHandler
class Command(BaseCommand):
help = ("Filters a list of password by removing all passwords that do not "
"comply with the configured password strength policies."
"Provide file paths as arguments.")
def handle(self, *args, **options):
if not args:
print ("Please provide one or more file paths for files with "
"password lists.")
return
pw_handler = PasswordStrengthPolicyHandler()
for arg in args:
fh = open(arg, 'r')
for pw in fh:
pw = pw.strip()
try:
pw_handler.validate(pw)
except ValidationError:
continue
print pw
fh.close()
|
Add command which reads password lists and outputs passwords that validate the password policy
This comes in handy when disallowing commonly used passwords because it removes all passwords that will never validate anyhow.from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.conf import settings
from django_auth_policy.handlers import PasswordStrengthPolicyHandler
class Command(BaseCommand):
help = ("Filters a list of password by removing all passwords that do not "
"comply with the configured password strength policies."
"Provide file paths as arguments.")
def handle(self, *args, **options):
if not args:
print ("Please provide one or more file paths for files with "
"password lists.")
return
pw_handler = PasswordStrengthPolicyHandler()
for arg in args:
fh = open(arg, 'r')
for pw in fh:
pw = pw.strip()
try:
pw_handler.validate(pw)
except ValidationError:
continue
print pw
fh.close()
|
<commit_before><commit_msg>Add command which reads password lists and outputs passwords that validate the password policy
This comes in handy when disallowing commonly used passwords because it removes all passwords that will never validate anyhow.<commit_after>from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from django.conf import settings
from django_auth_policy.handlers import PasswordStrengthPolicyHandler
class Command(BaseCommand):
help = ("Filters a list of password by removing all passwords that do not "
"comply with the configured password strength policies."
"Provide file paths as arguments.")
def handle(self, *args, **options):
if not args:
print ("Please provide one or more file paths for files with "
"password lists.")
return
pw_handler = PasswordStrengthPolicyHandler()
for arg in args:
fh = open(arg, 'r')
for pw in fh:
pw = pw.strip()
try:
pw_handler.validate(pw)
except ValidationError:
continue
print pw
fh.close()
|
|
8a089a5a8126110ef0b763294a0a14869787146e
|
examples/long_running_publisher.py
|
examples/long_running_publisher.py
|
# -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
import threading
from time import sleep
from pika import ConnectionParameters, BlockingConnection, PlainCredentials
class Publisher(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.daemon = True
self.is_running = True
self.name = "Publisher"
self.queue = "downstream_queue"
credentials = PlainCredentials("guest", "guest")
parameters = ConnectionParameters("localhost", credentials=credentials)
self.connection = BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queue, auto_delete=True)
def run(self):
while self.is_running:
self.connection.process_data_events(time_limit=1)
def _publish(self, message):
self.channel.basic_publish("", self.queue, body=message.encode())
def publish(self, message):
self.connection.add_callback_threadsafe(lambda: self._publish(message))
def stop(self):
print("Stopping...")
self.is_running = False
# Wait until all the data events have been processed
self.connection.process_data_events(time_limit=1)
if self.connection.is_open:
self.connection.close()
print("Stopped")
if __name__ == "__main__":
publisher = Publisher()
publisher.start()
try:
for i in range(9999):
msg = f"Message {i}"
print(f"Publishing: {msg!r}")
publisher.publish(msg)
sleep(1)
except KeyboardInterrupt:
publisher.stop()
finally:
publisher.join()
|
Add an example of a long running publisher
|
Add an example of a long running publisher
|
Python
|
bsd-3-clause
|
pika/pika
|
Add an example of a long running publisher
|
# -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
import threading
from time import sleep
from pika import ConnectionParameters, BlockingConnection, PlainCredentials
class Publisher(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.daemon = True
self.is_running = True
self.name = "Publisher"
self.queue = "downstream_queue"
credentials = PlainCredentials("guest", "guest")
parameters = ConnectionParameters("localhost", credentials=credentials)
self.connection = BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queue, auto_delete=True)
def run(self):
while self.is_running:
self.connection.process_data_events(time_limit=1)
def _publish(self, message):
self.channel.basic_publish("", self.queue, body=message.encode())
def publish(self, message):
self.connection.add_callback_threadsafe(lambda: self._publish(message))
def stop(self):
print("Stopping...")
self.is_running = False
# Wait until all the data events have been processed
self.connection.process_data_events(time_limit=1)
if self.connection.is_open:
self.connection.close()
print("Stopped")
if __name__ == "__main__":
publisher = Publisher()
publisher.start()
try:
for i in range(9999):
msg = f"Message {i}"
print(f"Publishing: {msg!r}")
publisher.publish(msg)
sleep(1)
except KeyboardInterrupt:
publisher.stop()
finally:
publisher.join()
|
<commit_before><commit_msg>Add an example of a long running publisher<commit_after>
|
# -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
import threading
from time import sleep
from pika import ConnectionParameters, BlockingConnection, PlainCredentials
class Publisher(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.daemon = True
self.is_running = True
self.name = "Publisher"
self.queue = "downstream_queue"
credentials = PlainCredentials("guest", "guest")
parameters = ConnectionParameters("localhost", credentials=credentials)
self.connection = BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queue, auto_delete=True)
def run(self):
while self.is_running:
self.connection.process_data_events(time_limit=1)
def _publish(self, message):
self.channel.basic_publish("", self.queue, body=message.encode())
def publish(self, message):
self.connection.add_callback_threadsafe(lambda: self._publish(message))
def stop(self):
print("Stopping...")
self.is_running = False
# Wait until all the data events have been processed
self.connection.process_data_events(time_limit=1)
if self.connection.is_open:
self.connection.close()
print("Stopped")
if __name__ == "__main__":
publisher = Publisher()
publisher.start()
try:
for i in range(9999):
msg = f"Message {i}"
print(f"Publishing: {msg!r}")
publisher.publish(msg)
sleep(1)
except KeyboardInterrupt:
publisher.stop()
finally:
publisher.join()
|
Add an example of a long running publisher# -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
import threading
from time import sleep
from pika import ConnectionParameters, BlockingConnection, PlainCredentials
class Publisher(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.daemon = True
self.is_running = True
self.name = "Publisher"
self.queue = "downstream_queue"
credentials = PlainCredentials("guest", "guest")
parameters = ConnectionParameters("localhost", credentials=credentials)
self.connection = BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queue, auto_delete=True)
def run(self):
while self.is_running:
self.connection.process_data_events(time_limit=1)
def _publish(self, message):
self.channel.basic_publish("", self.queue, body=message.encode())
def publish(self, message):
self.connection.add_callback_threadsafe(lambda: self._publish(message))
def stop(self):
print("Stopping...")
self.is_running = False
# Wait until all the data events have been processed
self.connection.process_data_events(time_limit=1)
if self.connection.is_open:
self.connection.close()
print("Stopped")
if __name__ == "__main__":
publisher = Publisher()
publisher.start()
try:
for i in range(9999):
msg = f"Message {i}"
print(f"Publishing: {msg!r}")
publisher.publish(msg)
sleep(1)
except KeyboardInterrupt:
publisher.stop()
finally:
publisher.join()
|
<commit_before><commit_msg>Add an example of a long running publisher<commit_after># -*- coding: utf-8 -*-
# pylint: disable=C0111,C0103,R0205
import threading
from time import sleep
from pika import ConnectionParameters, BlockingConnection, PlainCredentials
class Publisher(threading.Thread):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.daemon = True
self.is_running = True
self.name = "Publisher"
self.queue = "downstream_queue"
credentials = PlainCredentials("guest", "guest")
parameters = ConnectionParameters("localhost", credentials=credentials)
self.connection = BlockingConnection(parameters)
self.channel = self.connection.channel()
self.channel.queue_declare(queue=self.queue, auto_delete=True)
def run(self):
while self.is_running:
self.connection.process_data_events(time_limit=1)
def _publish(self, message):
self.channel.basic_publish("", self.queue, body=message.encode())
def publish(self, message):
self.connection.add_callback_threadsafe(lambda: self._publish(message))
def stop(self):
print("Stopping...")
self.is_running = False
# Wait until all the data events have been processed
self.connection.process_data_events(time_limit=1)
if self.connection.is_open:
self.connection.close()
print("Stopped")
if __name__ == "__main__":
publisher = Publisher()
publisher.start()
try:
for i in range(9999):
msg = f"Message {i}"
print(f"Publishing: {msg!r}")
publisher.publish(msg)
sleep(1)
except KeyboardInterrupt:
publisher.stop()
finally:
publisher.join()
|
|
4ba2dc07dcd1394e829fed0168ee64af1bd401ed
|
pyblogit/api_interface.py
|
pyblogit/api_interface.py
|
"""
pyblogit.api_interface
~~~~~~~~~~~~~~~~~~~~~~
This module acts as an interface between pyblogit and various
blogging platform apis.
"""
import gdata.gauth
import gdata.blogger.client
class BloggerInterface(object):
def __init__(self):
self._CLIENT_ID = client_id
self._CLIENT_SECRET = client_secret
self._SCOPE = 'https://www.googleapis.com/auth/blogger'
def get_access_code(self):
"""Opens dafualt browser to the google auth page and provides
them with an access code."""
token = gdata.gauth.OAuth2Token(
client_id = self.CLIENT_ID,
client_secret = self.CLIENT_SECRET,
scope = self.SCOPE,
user_agent = 'pyblogit')
url = token.generate_authorize_url(redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open_new_tab(url)
def generate_token(self, code):
"""Generates new api access token."""
self._token = token.get_access_token(code)
def get_client(self):
"""Returns an authorised blogger api client."""
client = gdata.blogger.client.BloggerClient()
self._token.authorize(client)
return client
|
Add BloggerInterface class to interact with blogger api
|
Add BloggerInterface class to interact with blogger api
|
Python
|
mit
|
jamalmoir/pyblogit
|
Add BloggerInterface class to interact with blogger api
|
"""
pyblogit.api_interface
~~~~~~~~~~~~~~~~~~~~~~
This module acts as an interface between pyblogit and various
blogging platform apis.
"""
import gdata.gauth
import gdata.blogger.client
class BloggerInterface(object):
def __init__(self):
self._CLIENT_ID = client_id
self._CLIENT_SECRET = client_secret
self._SCOPE = 'https://www.googleapis.com/auth/blogger'
def get_access_code(self):
"""Opens dafualt browser to the google auth page and provides
them with an access code."""
token = gdata.gauth.OAuth2Token(
client_id = self.CLIENT_ID,
client_secret = self.CLIENT_SECRET,
scope = self.SCOPE,
user_agent = 'pyblogit')
url = token.generate_authorize_url(redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open_new_tab(url)
def generate_token(self, code):
"""Generates new api access token."""
self._token = token.get_access_token(code)
def get_client(self):
"""Returns an authorised blogger api client."""
client = gdata.blogger.client.BloggerClient()
self._token.authorize(client)
return client
|
<commit_before><commit_msg>Add BloggerInterface class to interact with blogger api<commit_after>
|
"""
pyblogit.api_interface
~~~~~~~~~~~~~~~~~~~~~~
This module acts as an interface between pyblogit and various
blogging platform apis.
"""
import gdata.gauth
import gdata.blogger.client
class BloggerInterface(object):
def __init__(self):
self._CLIENT_ID = client_id
self._CLIENT_SECRET = client_secret
self._SCOPE = 'https://www.googleapis.com/auth/blogger'
def get_access_code(self):
"""Opens dafualt browser to the google auth page and provides
them with an access code."""
token = gdata.gauth.OAuth2Token(
client_id = self.CLIENT_ID,
client_secret = self.CLIENT_SECRET,
scope = self.SCOPE,
user_agent = 'pyblogit')
url = token.generate_authorize_url(redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open_new_tab(url)
def generate_token(self, code):
"""Generates new api access token."""
self._token = token.get_access_token(code)
def get_client(self):
"""Returns an authorised blogger api client."""
client = gdata.blogger.client.BloggerClient()
self._token.authorize(client)
return client
|
Add BloggerInterface class to interact with blogger api"""
pyblogit.api_interface
~~~~~~~~~~~~~~~~~~~~~~
This module acts as an interface between pyblogit and various
blogging platform apis.
"""
import gdata.gauth
import gdata.blogger.client
class BloggerInterface(object):
def __init__(self):
self._CLIENT_ID = client_id
self._CLIENT_SECRET = client_secret
self._SCOPE = 'https://www.googleapis.com/auth/blogger'
def get_access_code(self):
"""Opens dafualt browser to the google auth page and provides
them with an access code."""
token = gdata.gauth.OAuth2Token(
client_id = self.CLIENT_ID,
client_secret = self.CLIENT_SECRET,
scope = self.SCOPE,
user_agent = 'pyblogit')
url = token.generate_authorize_url(redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open_new_tab(url)
def generate_token(self, code):
"""Generates new api access token."""
self._token = token.get_access_token(code)
def get_client(self):
"""Returns an authorised blogger api client."""
client = gdata.blogger.client.BloggerClient()
self._token.authorize(client)
return client
|
<commit_before><commit_msg>Add BloggerInterface class to interact with blogger api<commit_after>"""
pyblogit.api_interface
~~~~~~~~~~~~~~~~~~~~~~
This module acts as an interface between pyblogit and various
blogging platform apis.
"""
import gdata.gauth
import gdata.blogger.client
class BloggerInterface(object):
def __init__(self):
self._CLIENT_ID = client_id
self._CLIENT_SECRET = client_secret
self._SCOPE = 'https://www.googleapis.com/auth/blogger'
def get_access_code(self):
"""Opens dafualt browser to the google auth page and provides
them with an access code."""
token = gdata.gauth.OAuth2Token(
client_id = self.CLIENT_ID,
client_secret = self.CLIENT_SECRET,
scope = self.SCOPE,
user_agent = 'pyblogit')
url = token.generate_authorize_url(redirect_uri='urn:ietf:wg:oauth:2.0:oob')
webbrowser.open_new_tab(url)
def generate_token(self, code):
"""Generates new api access token."""
self._token = token.get_access_token(code)
def get_client(self):
"""Returns an authorised blogger api client."""
client = gdata.blogger.client.BloggerClient()
self._token.authorize(client)
return client
|
|
cb9d7a92ca0a1e0202a9a1238a3b3ebd3882129c
|
stanczyk/util.py
|
stanczyk/util.py
|
"""
Utilities that don't fit well anywhere else.
"""
def _getRemote(namespace):
"""Gets the remote protocol, or raises an error.
This is intended to be used by command implementations.
This isn't implemented as an argument-injecting decorator, because
the console code uses introspection to tell users how to call
console commands.
"""
try:
return namespace["remote"]
except KeyError:
raise RuntimeError("You are not connected to the exercise server. "
"Call ``connect``.")
|
Move logic for accessing the remote attribute of the namespace (and raising a meaningful error when it doesn't exist) into a separate function
|
Move logic for accessing the remote attribute of the namespace (and raising a meaningful error when it doesn't exist) into a separate function
|
Python
|
isc
|
crypto101/stanczyk
|
Move logic for accessing the remote attribute of the namespace (and raising a meaningful error when it doesn't exist) into a separate function
|
"""
Utilities that don't fit well anywhere else.
"""
def _getRemote(namespace):
"""Gets the remote protocol, or raises an error.
This is intended to be used by command implementations.
This isn't implemented as an argument-injecting decorator, because
the console code uses introspection to tell users how to call
console commands.
"""
try:
return namespace["remote"]
except KeyError:
raise RuntimeError("You are not connected to the exercise server. "
"Call ``connect``.")
|
<commit_before><commit_msg>Move logic for accessing the remote attribute of the namespace (and raising a meaningful error when it doesn't exist) into a separate function<commit_after>
|
"""
Utilities that don't fit well anywhere else.
"""
def _getRemote(namespace):
"""Gets the remote protocol, or raises an error.
This is intended to be used by command implementations.
This isn't implemented as an argument-injecting decorator, because
the console code uses introspection to tell users how to call
console commands.
"""
try:
return namespace["remote"]
except KeyError:
raise RuntimeError("You are not connected to the exercise server. "
"Call ``connect``.")
|
Move logic for accessing the remote attribute of the namespace (and raising a meaningful error when it doesn't exist) into a separate function"""
Utilities that don't fit well anywhere else.
"""
def _getRemote(namespace):
"""Gets the remote protocol, or raises an error.
This is intended to be used by command implementations.
This isn't implemented as an argument-injecting decorator, because
the console code uses introspection to tell users how to call
console commands.
"""
try:
return namespace["remote"]
except KeyError:
raise RuntimeError("You are not connected to the exercise server. "
"Call ``connect``.")
|
<commit_before><commit_msg>Move logic for accessing the remote attribute of the namespace (and raising a meaningful error when it doesn't exist) into a separate function<commit_after>"""
Utilities that don't fit well anywhere else.
"""
def _getRemote(namespace):
"""Gets the remote protocol, or raises an error.
This is intended to be used by command implementations.
This isn't implemented as an argument-injecting decorator, because
the console code uses introspection to tell users how to call
console commands.
"""
try:
return namespace["remote"]
except KeyError:
raise RuntimeError("You are not connected to the exercise server. "
"Call ``connect``.")
|
|
0dbc2613fc686471be214ef69f245bc279a7e660
|
http_ping.py
|
http_ping.py
|
from locust import HttpLocust, TaskSet, task
class HttpPingTasks(TaskSet):
@task
def ping(self):
self.client.get("/")
class SayHelloLocust(HttpLocust):
task_set = HttpPingTasks
min_wait = 100
max_wait = 500
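With the pre-1.0 Locust API used here (HttpLocust/TaskSet), a file like this is typically launched with something along the lines of `locust -f http_ping.py --host=http://localhost:8080` (host URL illustrative); each simulated user then issues GET / every 100-500 ms, per the min_wait/max_wait values above.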
|
Add simple pinger for basic locust demo
|
Add simple pinger for basic locust demo
|
Python
|
apache-2.0
|
drednout/locust_on_meetup
|
Add simple pinger for basic locust demo
|
from locust import HttpLocust, TaskSet, task
class HttpPingTasks(TaskSet):
@task
def ping(self):
self.client.get("/")
class SayHelloLocust(HttpLocust):
task_set = HttpPingTasks
min_wait = 100
max_wait = 500
|
<commit_before><commit_msg>Add simple pinger for basic locust demo<commit_after>
|
from locust import HttpLocust, TaskSet, task
class HttpPingTasks(TaskSet):
@task
def ping(self):
self.client.get("/")
class SayHelloLocust(HttpLocust):
task_set = HttpPingTasks
min_wait = 100
max_wait = 500
|
Add simple pinger for basic locust demofrom locust import HttpLocust, TaskSet, task
class HttpPingTasks(TaskSet):
@task
def ping(self):
self.client.get("/")
class SayHelloLocust(HttpLocust):
task_set = HttpPingTasks
min_wait = 100
max_wait = 500
|
<commit_before><commit_msg>Add simple pinger for basic locust demo<commit_after>from locust import HttpLocust, TaskSet, task
class HttpPingTasks(TaskSet):
@task
def ping(self):
self.client.get("/")
class SayHelloLocust(HttpLocust):
task_set = HttpPingTasks
min_wait = 100
max_wait = 500
|
|
bb9fb35d5a143fcd249780e224699413d26cf841
|
Gui/change_license.py
|
Gui/change_license.py
|
import os
opening_dashes = """\
/* -------------------------------------------------------------------------- *
"""
dashes = """\
* -------------------------------------------------------------------------- *
"""
opensim_description = """\
* OpenSim is a toolkit for musculoskeletal modeling and simulation. *
* See http://opensim.stanford.edu and the NOTICE file for more information. *
"""
apache_boilerplate = """\
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain a *
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* -------------------------------------------------------------------------- */
"""
def pad(content):
line = " * "
line += content
line += (77 - len(line)) * " " + " * \n"
return line
def create_new_license_blurb(fname):
blurb = opening_dashes
blurb += pad("OpenSim: %s" % fname)
blurb += dashes
blurb += opensim_description
blurb += pad("")
blurb += pad("Copyright (c) 2005-2016 Stanford University and the Authors")
# TODO detect other authors, like Kevin Xu.
blurb += pad("Author(s): Ayman Habib")
blurb += pad("")
blurb += apache_boilerplate
return blurb
for dirpath, dirnames, filenames in os.walk('.'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if "Stanford University" in filecontents:
start = filecontents.find(" * Copyright")
ending = "SUCH DAMAGE.\n"
end = filecontents.find(ending) + len(ending)
print("%s, %i, %i" % (fname, start, end))
newfilecontents = create_new_license_blurb(fname)
newfilecontents += filecontents[0:start]
newfilecontents += filecontents[end::]
with open(fpath, 'w') as f:
f.write(newfilecontents)
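A worked illustration of the pad() helper above (output shown as a comment; the file name is made up):
sample = pad("OpenSim: Foo.java")
# -> " * OpenSim: Foo.java" followed by spaces and a trailing " * \n", padded so the
#    right-hand border lines up with opening_dashes/dashes in the assembled header box.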
|
Add script to convert to apache 2.0 license.
|
Add script to convert to apache 2.0 license.
|
Python
|
apache-2.0
|
opensim-org/opensim-gui,opensim-org/opensim-gui,opensim-org/opensim-gui,opensim-org/opensim-gui
|
Add script to convert to apache 2.0 license.
|
import os
opening_dashes = """\
/* -------------------------------------------------------------------------- *
"""
dashes = """\
* -------------------------------------------------------------------------- *
"""
opensim_description = """\
* OpenSim is a toolkit for musculoskeletal modeling and simulation. *
* See http://opensim.stanford.edu and the NOTICE file for more information. *
"""
apache_boilerplate = """\
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain a *
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* -------------------------------------------------------------------------- */
"""
def pad(content):
line = " * "
line += content
line += (77 - len(line)) * " " + " * \n"
return line
def create_new_license_blurb(fname):
blurb = opening_dashes
blurb += pad("OpenSim: %s" % fname)
blurb += dashes
blurb += opensim_description
blurb += pad("")
blurb += pad("Copyright (c) 2005-2016 Stanford University and the Authors")
# TODO detect other authors, like Kevin Xu.
blurb += pad("Author(s): Ayman Habib")
blurb += pad("")
blurb += apache_boilerplate
return blurb
for dirpath, dirnames, filenames in os.walk('.'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if "Stanford University" in filecontents:
start = filecontents.find(" * Copyright")
ending = "SUCH DAMAGE.\n"
end = filecontents.find(ending) + len(ending)
print("%s, %i, %i" % (fname, start, end))
newfilecontents = create_new_license_blurb(fname)
newfilecontents += filecontents[0:start]
newfilecontents += filecontents[end::]
with open(fpath, 'w') as f:
f.write(newfilecontents)
|
<commit_before><commit_msg>Add script to convert to apache 2.0 license.<commit_after>
|
import os
opening_dashes = """\
/* -------------------------------------------------------------------------- *
"""
dashes = """\
* -------------------------------------------------------------------------- *
"""
opensim_description = """\
* OpenSim is a toolkit for musculoskeletal modeling and simulation. *
* See http://opensim.stanford.edu and the NOTICE file for more information. *
"""
apache_boilerplate = """\
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain a *
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* -------------------------------------------------------------------------- */
"""
def pad(content):
line = " * "
line += content
line += (77 - len(line)) * " " + " * \n"
return line
def create_new_license_blurb(fname):
blurb = opening_dashes
blurb += pad("OpenSim: %s" % fname)
blurb += dashes
blurb += opensim_description
blurb += pad("")
blurb += pad("Copyright (c) 2005-2016 Stanford University and the Authors")
# TODO detect other authors, like Kevin Xu.
blurb += pad("Author(s): Ayman Habib")
blurb += pad("")
blurb += apache_boilerplate
return blurb
for dirpath, dirnames, filenames in os.walk('.'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if "Stanford University" in filecontents:
start = filecontents.find(" * Copyright")
ending = "SUCH DAMAGE.\n"
end = filecontents.find(ending) + len(ending)
print("%s, %i, %i" % (fname, start, end))
newfilecontents = create_new_license_blurb(fname)
newfilecontents += filecontents[0:start]
newfilecontents += filecontents[end::]
with open(fpath, 'w') as f:
f.write(newfilecontents)
|
Add script to convert to apache 2.0 license.import os
opening_dashes = """\
/* -------------------------------------------------------------------------- *
"""
dashes = """\
* -------------------------------------------------------------------------- *
"""
opensim_description = """\
* OpenSim is a toolkit for musculoskeletal modeling and simulation. *
* See http://opensim.stanford.edu and the NOTICE file for more information. *
"""
apache_boilerplate = """\
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain a *
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* -------------------------------------------------------------------------- */
"""
def pad(content):
line = " * "
line += content
line += (77 - len(line)) * " " + " * \n"
return line
def create_new_license_blurb(fname):
blurb = opening_dashes
blurb += pad("OpenSim: %s" % fname)
blurb += dashes
blurb += opensim_description
blurb += pad("")
blurb += pad("Copyright (c) 2005-2016 Stanford University and the Authors")
# TODO detect other authors, like Kevin Xu.
blurb += pad("Author(s): Ayman Habib")
blurb += pad("")
blurb += apache_boilerplate
return blurb
for dirpath, dirnames, filenames in os.walk('.'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if "Stanford University" in filecontents:
start = filecontents.find(" * Copyright")
ending = "SUCH DAMAGE.\n"
end = filecontents.find(ending) + len(ending)
print("%s, %i, %i" % (fname, start, end))
newfilecontents = create_new_license_blurb(fname)
newfilecontents += filecontents[0:start]
newfilecontents += filecontents[end::]
with open(fpath, 'w') as f:
f.write(newfilecontents)
|
<commit_before><commit_msg>Add script to convert to apache 2.0 license.<commit_after>import os
opening_dashes = """\
/* -------------------------------------------------------------------------- *
"""
dashes = """\
* -------------------------------------------------------------------------- *
"""
opensim_description = """\
* OpenSim is a toolkit for musculoskeletal modeling and simulation. *
* See http://opensim.stanford.edu and the NOTICE file for more information. *
"""
apache_boilerplate = """\
* Licensed under the Apache License, Version 2.0 (the "License"); you may *
* not use this file except in compliance with the License. You may obtain a *
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software *
* distributed under the License is distributed on an "AS IS" BASIS, *
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
* See the License for the specific language governing permissions and *
* limitations under the License. *
* -------------------------------------------------------------------------- */
"""
def pad(content):
line = " * "
line += content
line += (77 - len(line)) * " " + " * \n"
return line
def create_new_license_blurb(fname):
blurb = opening_dashes
blurb += pad("OpenSim: %s" % fname)
blurb += dashes
blurb += opensim_description
blurb += pad("")
blurb += pad("Copyright (c) 2005-2016 Stanford University and the Authors")
# TODO detect other authors, like Kevin Xu.
blurb += pad("Author(s): Ayman Habib")
blurb += pad("")
blurb += apache_boilerplate
return blurb
for dirpath, dirnames, filenames in os.walk('.'):
for fname in filenames:
if fname.endswith('.java'):
fpath = os.path.join(dirpath, fname)
filecontents = ""
with open(fpath, 'r') as f:
filecontents = f.read()
if "Stanford University" in filecontents:
start = filecontents.find(" * Copyright")
ending = "SUCH DAMAGE.\n"
end = filecontents.find(ending) + len(ending)
print("%s, %i, %i" % (fname, start, end))
newfilecontents = create_new_license_blurb(fname)
newfilecontents += filecontents[0:start]
newfilecontents += filecontents[end::]
with open(fpath, 'w') as f:
f.write(newfilecontents)
|
|
3050b09418ab7d807e8f1c136014cd8fb9f61148
|
dojo/db_migrations/0023_cve_fix_1553.py
|
dojo/db_migrations/0023_cve_fix_1553.py
|
# Generated by Django 2.2.4 on 2019-11-18 19:06
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0021_auto_20191102_0956'),
]
operations = [
migrations.AlterField(
model_name='finding',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
migrations.AlterField(
model_name='finding_template',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
]
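A quick illustration of the validator pattern introduced above; the sample IDs are made up and mirror the 'ABC-9999-9999' style named in the error message.
import re
pattern = r'^[A-Z]{1,10}-\d{4}-\d{4,12}$'
assert re.match(pattern, 'CVE-2019-12345')      # classic CVE-style ids pass
assert re.match(pattern, 'ABC-2020-0001')       # other uppercase prefixes are allowed
assert not re.match(pattern, 'cve-2019-12345')  # lowercase prefixes are rejected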
|
Add correct migration file for cve-fix-1553
|
Add correct migration file for cve-fix-1553
|
Python
|
bsd-3-clause
|
rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo,rackerlabs/django-DefectDojo
|
Add correct migration file for cve-fix-1553
|
# Generated by Django 2.2.4 on 2019-11-18 19:06
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0021_auto_20191102_0956'),
]
operations = [
migrations.AlterField(
model_name='finding',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
migrations.AlterField(
model_name='finding_template',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
]
|
<commit_before><commit_msg>Add correct migration file for cve-fix-1553<commit_after>
|
# Generated by Django 2.2.4 on 2019-11-18 19:06
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0021_auto_20191102_0956'),
]
operations = [
migrations.AlterField(
model_name='finding',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
migrations.AlterField(
model_name='finding_template',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
]
|
Add correct migration file for cve-fix-1553# Generated by Django 2.2.4 on 2019-11-18 19:06
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0021_auto_20191102_0956'),
]
operations = [
migrations.AlterField(
model_name='finding',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
migrations.AlterField(
model_name='finding_template',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
]
|
<commit_before><commit_msg>Add correct migration file for cve-fix-1553<commit_after># Generated by Django 2.2.4 on 2019-11-18 19:06
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dojo', '0021_auto_20191102_0956'),
]
operations = [
migrations.AlterField(
model_name='finding',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
migrations.AlterField(
model_name='finding_template',
name='cve',
field=models.TextField(max_length=28, null=True, validators=[django.core.validators.RegexValidator(message="Vulnerability ID must be entered in the format: 'ABC-9999-9999'. ", regex='^[A-Z]{1,10}-\\d{4}-\\d{4,12}$')]),
),
]
|
|
f4b5ccd3fdacad36cb124245591ea5bfaf4de12e
|
test/option--.py
|
test/option--.py
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
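The point of the `--` in the arguments above: the targets themselves start with a dash, so without the separator `-f1.out` and `-f2.out` would be consumed by SCons's option parser instead of being treated as the targets declared in the SConstruct.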
|
Add a test for -- terminating option processing.
|
Add a test for -- terminating option processing.
|
Python
|
mit
|
andrewyoung1991/scons,andrewyoung1991/scons,timj/scons,andrewyoung1991/scons,timj/scons,andrewyoung1991/scons,andrewyoung1991/scons,timj/scons,andrewyoung1991/scons,Distrotech/scons,Distrotech/scons,andrewyoung1991/scons,timj/scons,timj/scons,andrewyoung1991/scons,timj/scons,Distrotech/scons,timj/scons,Distrotech/scons,timj/scons,andrewyoung1991/scons,timj/scons,Distrotech/scons
|
Add a test for -- terminating option processing.
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
<commit_before><commit_msg>Add a test for -- terminating option processing.<commit_after>
|
#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
Add a test for -- terminating option processing.#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
<commit_before><commit_msg>Add a test for -- terminating option processing.<commit_after>#!/usr/bin/env python
__revision__ = "test/option-n.py __REVISION__ __DATE__ __DEVELOPER__"
import TestCmd
import os.path
import string
import sys
test = TestCmd.TestCmd(program = 'scons.py',
workdir = '',
interpreter = 'python')
test.write('build.py', r"""
import sys
file = open(sys.argv[1], 'w')
file.write("build.py: %s\n" % sys.argv[1])
file.close()
""")
test.write('SConstruct', """
MyBuild = Builder(name = "MyBuild",
action = "python build.py %(target)s")
env = Environment(BUILDERS = [MyBuild])
env.MyBuild(target = '-f1.out', source = 'f1.in')
env.MyBuild(target = '-f2.out', source = 'f2.in')
""")
expect = "python build.py -f1.out\npython build.py -f2.out\n"
test.run(chdir = '.', arguments = '-- -f1.out -f2.out')
test.fail_test(test.stdout() != expect)
test.fail_test(test.stderr() != "")
test.fail_test(not os.path.exists(test.workpath('-f1.out')))
test.fail_test(not os.path.exists(test.workpath('-f2.out')))
test.pass_test()
|
|
0454b3e6463585ea3d643b0a9b5b8782d1dcb7d7
|
geocoder/mapzen_reverse.py
|
geocoder/mapzen_reverse.py
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.base import Base
from geocoder.mapzen import Mapzen
from geocoder.location import Location
class MapzenReverse(Base):
"""
Mapzen REST API
=======================
API Reference
-------------
https://pelias.mapzen.com/
"""
provider = 'mapzen'
method = 'reverse'
def __init__(self, location, **kwargs):
t = str(Location(location)).split(",")
print t
self.url = 'https://pelias.mapzen.com/reverse'
self.params = {
'lat': t[0],
'lon': t[1],
'size': 1,
}
self._initialize(**kwargs)
def _exceptions(self): # Seems to always return results, ie: Location: Earth
self._build_tree(self.parse['features'][0]['geometry'])
self._build_tree(self.parse['features'][0]['properties'])
self._build_tree(self.parse['features'][0])
@property
def lat(self):
return self.parse['coordinates'][1]
@property
def lng(self):
return self.parse['coordinates'][0]
@property
def address(self):
return self.parse['properties'].get('text')
@property
def country(self):
return self.parse['properties'].get('alpha3')
@property
def state(self):
return self.parse['properties'].get('admin1')
@property
def city(self):
return self.parse['properties'].get('admin2')
@property
def street(self):
return self.parse['address'].get('street')
@property
def housenumber(self):
return self.parse['address'].get('number')
if __name__ == '__main__':
g = MapzenReverse([45.4049053, -75.7077965])
g.debug()
g = MapzenReverse([45.4049053, -150.7077965])
g.debug()
|
Add support for reverse geocoding with Mapzen
|
Add support for reverse geocoding with Mapzen
|
Python
|
mit
|
akittas/geocoder,DenisCarriere/geocoder
|
Add support for reverse geocoding with Mapzen
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.base import Base
from geocoder.mapzen import Mapzen
from geocoder.location import Location
class MapzenReverse(Base):
"""
Mapzen REST API
=======================
API Reference
-------------
https://pelias.mapzen.com/
"""
provider = 'mapzen'
method = 'reverse'
def __init__(self, location, **kwargs):
t = str(Location(location)).split(",")
print t
self.url = 'https://pelias.mapzen.com/reverse'
self.params = {
'lat': t[0],
'lon': t[1],
'size': 1,
}
self._initialize(**kwargs)
def _exceptions(self): # Seems to always return results, ie: Location: Earth
self._build_tree(self.parse['features'][0]['geometry'])
self._build_tree(self.parse['features'][0]['properties'])
self._build_tree(self.parse['features'][0])
@property
def lat(self):
return self.parse['coordinates'][1]
@property
def lng(self):
return self.parse['coordinates'][0]
@property
def address(self):
return self.parse['properties'].get('text')
@property
def country(self):
return self.parse['properties'].get('alpha3')
@property
def state(self):
return self.parse['properties'].get('admin1')
@property
def city(self):
return self.parse['properties'].get('admin2')
@property
def street(self):
return self.parse['address'].get('street')
@property
def housenumber(self):
return self.parse['address'].get('number')
if __name__ == '__main__':
g = MapzenReverse([45.4049053, -75.7077965])
g.debug()
g = MapzenReverse([45.4049053, -150.7077965])
g.debug()
|
<commit_before><commit_msg>Add support for reverse geocoding with Mapzen<commit_after>
|
#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.base import Base
from geocoder.mapzen import Mapzen
from geocoder.location import Location
class MapzenReverse(Base):
"""
Mapzen REST API
=======================
API Reference
-------------
https://pelias.mapzen.com/
"""
provider = 'mapzen'
method = 'reverse'
def __init__(self, location, **kwargs):
t = str(Location(location)).split(",")
print t
self.url = 'https://pelias.mapzen.com/reverse'
self.params = {
'lat': t[0],
'lon': t[1],
'size': 1,
}
self._initialize(**kwargs)
def _exceptions(self): # Seems to always return results, ie: Location: Earth
self._build_tree(self.parse['features'][0]['geometry'])
self._build_tree(self.parse['features'][0]['properties'])
self._build_tree(self.parse['features'][0])
@property
def lat(self):
return self.parse['coordinates'][1]
@property
def lng(self):
return self.parse['coordinates'][0]
@property
def address(self):
return self.parse['properties'].get('text')
@property
def country(self):
return self.parse['properties'].get('alpha3')
@property
def state(self):
return self.parse['properties'].get('admin1')
@property
def city(self):
return self.parse['properties'].get('admin2')
@property
def street(self):
return self.parse['address'].get('street')
@property
def housenumber(self):
return self.parse['address'].get('number')
if __name__ == '__main__':
g = MapzenReverse([45.4049053, -75.7077965])
g.debug()
g = MapzenReverse([45.4049053, -150.7077965])
g.debug()
|
Add support for reverse geocoding with Mapzen#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.base import Base
from geocoder.mapzen import Mapzen
from geocoder.location import Location
class MapzenReverse(Base):
"""
Mapzen REST API
=======================
API Reference
-------------
https://pelias.mapzen.com/
"""
provider = 'mapzen'
method = 'reverse'
def __init__(self, location, **kwargs):
t = str(Location(location)).split(",")
print t
self.url = 'https://pelias.mapzen.com/reverse'
self.params = {
'lat': t[0],
'lon': t[1],
'size': 1,
}
self._initialize(**kwargs)
def _exceptions(self): # Seems to always return results, ie: Location: Earth
self._build_tree(self.parse['features'][0]['geometry'])
self._build_tree(self.parse['features'][0]['properties'])
self._build_tree(self.parse['features'][0])
@property
def lat(self):
return self.parse['coordinates'][1]
@property
def lng(self):
return self.parse['coordinates'][0]
@property
def address(self):
return self.parse['properties'].get('text')
@property
def country(self):
return self.parse['properties'].get('alpha3')
@property
def state(self):
return self.parse['properties'].get('admin1')
@property
def city(self):
return self.parse['properties'].get('admin2')
@property
def street(self):
return self.parse['address'].get('street')
@property
def housenumber(self):
return self.parse['address'].get('number')
if __name__ == '__main__':
g = MapzenReverse([45.4049053, -75.7077965])
g.debug()
g = MapzenReverse([45.4049053, -150.7077965])
g.debug()
|
<commit_before><commit_msg>Add support for reverse geocoding with Mapzen<commit_after>#!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
from geocoder.base import Base
from geocoder.mapzen import Mapzen
from geocoder.location import Location
class MapzenReverse(Base):
"""
Mapzen REST API
=======================
API Reference
-------------
https://pelias.mapzen.com/
"""
provider = 'mapzen'
method = 'reverse'
def __init__(self, location, **kwargs):
t = str(Location(location)).split(",")
print t
self.url = 'https://pelias.mapzen.com/reverse'
self.params = {
'lat': t[0],
'lon': t[1],
'size': 1,
}
self._initialize(**kwargs)
def _exceptions(self): # Seems to always return results, ie: Location: Earth
self._build_tree(self.parse['features'][0]['geometry'])
self._build_tree(self.parse['features'][0]['properties'])
self._build_tree(self.parse['features'][0])
@property
def lat(self):
return self.parse['coordinates'][1]
@property
def lng(self):
return self.parse['coordinates'][0]
@property
def address(self):
return self.parse['properties'].get('text')
@property
def country(self):
return self.parse['properties'].get('alpha3')
@property
def state(self):
return self.parse['properties'].get('admin1')
@property
def city(self):
return self.parse['properties'].get('admin2')
@property
def street(self):
return self.parse['address'].get('street')
@property
def housenumber(self):
return self.parse['address'].get('number')
if __name__ == '__main__':
g = MapzenReverse([45.4049053, -75.7077965])
g.debug()
g = MapzenReverse([45.4049053, -150.7077965])
g.debug()
|
|
78a5dce20f5a1347f9d4e92143d2efbb8e52e61c
|
bmi_ilamb/tests/test_config.py
|
bmi_ilamb/tests/test_config.py
|
import os
from nose.tools import (raises, assert_equal, assert_is,
assert_is_instance, assert_is_none)
from ..config import Configuration
from .. import data_dir
bmi_ilamb_config = os.path.join(data_dir, 'bmi_ilamb.yaml')
def test_configuration_instantiates():
x = Configuration()
assert_is_instance(x, Configuration)
@raises(TypeError)
def test_load_fails_with_no_argument():
x = Configuration()
x.load()
@raises(IOError)
def test_load_fails_with_nonexistent_file():
x = Configuration()
x.load('foo.txt')
def test_load():
x = Configuration()
x.load(bmi_ilamb_config)
def test_get_ilamb_root_returns_none_before_load():
x = Configuration()
r = x.get_ilamb_root()
assert_is_none(r)
def test_get_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_ilamb_root()
assert_is(type(r), str)
def test_get_arguments_returns_list_before_load():
x = Configuration()
r = x.get_arguments()
assert_is(type(r), list)
def test_get_arguments_omits_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(r.count('ilamb_root'), 0)
def test_get_arguments():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(len(r), 6)
|
Add unit tests for Configuration class
|
Add unit tests for Configuration class
|
Python
|
mit
|
permamodel/bmi-ilamb
|
Add unit tests for Configuration class
|
import os
from nose.tools import (raises, assert_equal, assert_is,
assert_is_instance, assert_is_none)
from ..config import Configuration
from .. import data_dir
bmi_ilamb_config = os.path.join(data_dir, 'bmi_ilamb.yaml')
def test_configuration_instantiates():
x = Configuration()
assert_is_instance(x, Configuration)
@raises(TypeError)
def test_load_fails_with_no_argument():
x = Configuration()
x.load()
@raises(IOError)
def test_load_fails_with_nonexistent_file():
x = Configuration()
x.load('foo.txt')
def test_load():
x = Configuration()
x.load(bmi_ilamb_config)
def test_get_ilamb_root_returns_none_before_load():
x = Configuration()
r = x.get_ilamb_root()
assert_is_none(r)
def test_get_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_ilamb_root()
assert_is(type(r), str)
def test_get_arguments_returns_list_before_load():
x = Configuration()
r = x.get_arguments()
assert_is(type(r), list)
def test_get_arguments_omits_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(r.count('ilamb_root'), 0)
def test_get_arguments():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(len(r), 6)
|
<commit_before><commit_msg>Add unit tests for Configuration class<commit_after>
|
import os
from nose.tools import (raises, assert_equal, assert_is,
assert_is_instance, assert_is_none)
from ..config import Configuration
from .. import data_dir
bmi_ilamb_config = os.path.join(data_dir, 'bmi_ilamb.yaml')
def test_configuration_instantiates():
x = Configuration()
assert_is_instance(x, Configuration)
@raises(TypeError)
def test_load_fails_with_no_argument():
x = Configuration()
x.load()
@raises(IOError)
def test_load_fails_with_nonexistent_file():
x = Configuration()
x.load('foo.txt')
def test_load():
x = Configuration()
x.load(bmi_ilamb_config)
def test_get_ilamb_root_returns_none_before_load():
x = Configuration()
r = x.get_ilamb_root()
assert_is_none(r)
def test_get_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_ilamb_root()
assert_is(type(r), str)
def test_get_arguments_returns_list_before_load():
x = Configuration()
r = x.get_arguments()
assert_is(type(r), list)
def test_get_arguments_omits_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(r.count('ilamb_root'), 0)
def test_get_arguments():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(len(r), 6)
|
Add unit tests for Configuration classimport os
from nose.tools import (raises, assert_equal, assert_is,
assert_is_instance, assert_is_none)
from ..config import Configuration
from .. import data_dir
bmi_ilamb_config = os.path.join(data_dir, 'bmi_ilamb.yaml')
def test_configuration_instantiates():
x = Configuration()
assert_is_instance(x, Configuration)
@raises(TypeError)
def test_load_fails_with_no_argument():
x = Configuration()
x.load()
@raises(IOError)
def test_load_fails_with_nonexistent_file():
x = Configuration()
x.load('foo.txt')
def test_load():
x = Configuration()
x.load(bmi_ilamb_config)
def test_get_ilamb_root_returns_none_before_load():
x = Configuration()
r = x.get_ilamb_root()
assert_is_none(r)
def test_get_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_ilamb_root()
assert_is(type(r), str)
def test_get_arguments_returns_list_before_load():
x = Configuration()
r = x.get_arguments()
assert_is(type(r), list)
def test_get_arguments_omits_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(r.count('ilamb_root'), 0)
def test_get_arguments():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(len(r), 6)
|
<commit_before><commit_msg>Add unit tests for Configuration class<commit_after>import os
from nose.tools import (raises, assert_equal, assert_is,
assert_is_instance, assert_is_none)
from ..config import Configuration
from .. import data_dir
bmi_ilamb_config = os.path.join(data_dir, 'bmi_ilamb.yaml')
def test_configuration_instantiates():
x = Configuration()
assert_is_instance(x, Configuration)
@raises(TypeError)
def test_load_fails_with_no_argument():
x = Configuration()
x.load()
@raises(IOError)
def test_load_fails_with_nonexistent_file():
x = Configuration()
x.load('foo.txt')
def test_load():
x = Configuration()
x.load(bmi_ilamb_config)
def test_get_ilamb_root_returns_none_before_load():
x = Configuration()
r = x.get_ilamb_root()
assert_is_none(r)
def test_get_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_ilamb_root()
assert_is(type(r), str)
def test_get_arguments_returns_list_before_load():
x = Configuration()
r = x.get_arguments()
assert_is(type(r), list)
def test_get_arguments_omits_ilamb_root():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(r.count('ilamb_root'), 0)
def test_get_arguments():
x = Configuration()
x.load(bmi_ilamb_config)
r = x.get_arguments()
assert_equal(len(r), 6)
|
|
5331cfce73d4a5410191cfaee09bbc777c2fa243
|
geojsonify.py
|
geojsonify.py
|
import json
import os
for root, dirs, files in os.walk('data'):
for name in files:
features = []
filepath = os.path.join(root, name)
with open(filepath) as f:
# check to see if the first line is already geojson-ey
first_line = f.readline()
if first_line == '{"type": "FeatureCollection", "features": [\n':
print "Skipping {} because it's already geojson-ey".format(name)
break
f.seek(0)
for line in f:
line = line.rstrip(',\n')
features.append(json.loads(line))
features = sorted(features, key=lambda f: f['properties']['id'])
with open(filepath, 'w') as f:
f.write('{"type": "FeatureCollection", "features": [\n')
for feature in features:
f.write(json.dumps(feature) + ',\n')
f.seek(f.tell() - 2) # Chop off the last comma and newline
f.write('\n]}\n')
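The f.seek(f.tell() - 2) call above backs up over the final ',\n' before the closing bracket; an equivalent (and arguably less fragile) sketch builds the body first:
body = ',\n'.join(json.dumps(feature) for feature in features)
f.write('{"type": "FeatureCollection", "features": [\n' + body + '\n]}\n')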
|
Add a script that makes the output of the scraper be valid GeoJSON
|
Add a script that makes the output of the scraper be valid GeoJSON
|
Python
|
mit
|
iandees/postboxes
|
Add a script that makes the output of the scraper be valid GeoJSON
|
import json
import os
for root, dirs, files in os.walk('data'):
for name in files:
features = []
filepath = os.path.join(root, name)
with open(filepath) as f:
# check to see if the first line is already geojson-ey
first_line = f.readline()
if first_line == '{"type": "FeatureCollection", "features": [\n':
print "Skipping {} because it's already geojson-ey".format(name)
break
f.seek(0)
for line in f:
line = line.rstrip(',\n')
features.append(json.loads(line))
features = sorted(features, key=lambda f: f['properties']['id'])
with open(filepath, 'w') as f:
f.write('{"type": "FeatureCollection", "features": [\n')
for feature in features:
f.write(json.dumps(feature) + ',\n')
f.seek(f.tell() - 2) # Chop off the last comma and newline
f.write('\n]}\n')
|
<commit_before><commit_msg>Add a script that makes the output of the scraper be valid GeoJSON<commit_after>
|
import json
import os
for root, dirs, files in os.walk('data'):
for name in files:
features = []
filepath = os.path.join(root, name)
with open(filepath) as f:
# check to see if the first line is already geojson-ey
first_line = f.readline()
if first_line == '{"type": "FeatureCollection", "features": [\n':
print "Skipping {} because it's already geojson-ey".format(name)
break
f.seek(0)
for line in f:
line = line.rstrip(',\n')
features.append(json.loads(line))
features = sorted(features, key=lambda f: f['properties']['id'])
with open(filepath, 'w') as f:
f.write('{"type": "FeatureCollection", "features": [\n')
for feature in features:
f.write(json.dumps(feature) + ',\n')
f.seek(f.tell() - 2) # Chop off the last comma and newline
f.write('\n]}\n')
|
Add a script that makes the output of the scraper be valid GeoJSONimport json
import os
for root, dirs, files in os.walk('data'):
for name in files:
features = []
filepath = os.path.join(root, name)
with open(filepath) as f:
# check to see if the first line is already geojson-ey
first_line = f.readline()
if first_line == '{"type": "FeatureCollection", "features": [\n':
print "Skipping {} because it's already geojson-ey".format(name)
break
f.seek(0)
for line in f:
line = line.rstrip(',\n')
features.append(json.loads(line))
features = sorted(features, key=lambda f: f['properties']['id'])
with open(filepath, 'w') as f:
f.write('{"type": "FeatureCollection", "features": [\n')
for feature in features:
f.write(json.dumps(feature) + ',\n')
f.seek(f.tell() - 2) # Chop off the last comma and newline
f.write('\n]}\n')
|
<commit_before><commit_msg>Add a script that makes the output of the scraper be valid GeoJSON<commit_after>import json
import os
for root, dirs, files in os.walk('data'):
for name in files:
features = []
filepath = os.path.join(root, name)
with open(filepath) as f:
# check to see if the first line is already geojson-ey
first_line = f.readline()
if first_line == '{"type": "FeatureCollection", "features": [\n':
print "Skipping {} because it's already geojson-ey".format(name)
break
f.seek(0)
for line in f:
line = line.rstrip(',\n')
features.append(json.loads(line))
features = sorted(features, key=lambda f: f['properties']['id'])
with open(filepath, 'w') as f:
f.write('{"type": "FeatureCollection", "features": [\n')
for feature in features:
f.write(json.dumps(feature) + ',\n')
f.seek(f.tell() - 2) # Chop off the last comma and newline
f.write('\n]}\n')
|
|
adfb3e433d0ce226aaa657c5f2ef0dc5b4d0fe2f
|
indra/sources/trips/drum_reader.py
|
indra/sources/trips/drum_reader.py
|
import sys
import random
from indra.sources import trips
from kqml import KQMLModule, KQMLPerformative, KQMLList
class DrumReader(KQMLModule):
def __init__(self, **kwargs):
super(DrumReader, self).__init__(**kwargs)
self.msg_counter = random.randint(1, 100000)
self.ready()
self.extractions = None
self.read_text('MEK phosphorylates ERK1.')
self.read_text('BRAF phosphorylates MEK1.')
def read_text(self, text):
msg_id = 'RT000%s' % self.msg_counter
kqml_perf = _get_perf(text, msg_id)
self.send(kqml_perf)
self.msg_counter += 1
def receive_reply(self, msg, content):
extractions = content.gets(':extractions')
self.extractions = extractions
tp = trips.process_xml(self.extractions)
print(tp.statements)
def _get_perf(text, msg_id):
text = text.encode('utf-8')
msg = KQMLPerformative('REQUEST')
msg.set('receiver', 'DRUM')
content = KQMLList('run-text')
content.sets('text', text)
msg.set('content', content)
msg.set('reply-with', msg_id)
return msg
if __name__ == '__main__':
# NOTE: drum/bin/trips-drum needs to be running
dr = DrumReader(name='DrumReader')
dr.start()
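For readers unfamiliar with KQML, the performative built by _get_perf serializes to roughly the following (message id, parameter order and whitespace are illustrative):
# (REQUEST :receiver DRUM :content (run-text :text "MEK phosphorylates ERK1.") :reply-with RT00042)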
|
Implement basic DRUM reader agent
|
Implement basic DRUM reader agent
|
Python
|
bsd-2-clause
|
sorgerlab/indra,johnbachman/belpy,sorgerlab/belpy,pvtodorov/indra,johnbachman/indra,bgyori/indra,johnbachman/indra,sorgerlab/belpy,johnbachman/belpy,pvtodorov/indra,sorgerlab/indra,bgyori/indra,johnbachman/indra,pvtodorov/indra,pvtodorov/indra,bgyori/indra,sorgerlab/indra,sorgerlab/belpy,johnbachman/belpy
|
Implement basic DRUM reader agent
|
import sys
import random
from indra.sources import trips
from kqml import KQMLModule, KQMLPerformative, KQMLList
class DrumReader(KQMLModule):
def __init__(self, **kwargs):
super(DrumReader, self).__init__(**kwargs)
self.msg_counter = random.randint(1, 100000)
self.ready()
self.extractions = None
self.read_text('MEK phosphorylates ERK1.')
self.read_text('BRAF phosphorylates MEK1.')
def read_text(self, text):
msg_id = 'RT000%s' % self.msg_counter
kqml_perf = _get_perf(text, msg_id)
self.send(kqml_perf)
self.msg_counter += 1
def receive_reply(self, msg, content):
extractions = content.gets(':extractions')
self.extractions = extractions
tp = trips.process_xml(self.extractions)
print(tp.statements)
def _get_perf(text, msg_id):
text = text.encode('utf-8')
msg = KQMLPerformative('REQUEST')
msg.set('receiver', 'DRUM')
content = KQMLList('run-text')
content.sets('text', text)
msg.set('content', content)
msg.set('reply-with', msg_id)
return msg
if __name__ == '__main__':
# NOTE: drum/bin/trips-drum needs to be running
dr = DrumReader(name='DrumReader')
dr.start()
|
<commit_before><commit_msg>Implement basic DRUM reader agent<commit_after>
|
import sys
import random
from indra.sources import trips
from kqml import KQMLModule, KQMLPerformative, KQMLList
class DrumReader(KQMLModule):
def __init__(self, **kwargs):
super(DrumReader, self).__init__(**kwargs)
self.msg_counter = random.randint(1, 100000)
self.ready()
self.extractions = None
self.read_text('MEK phosphorylates ERK1.')
self.read_text('BRAF phosphorylates MEK1.')
def read_text(self, text):
msg_id = 'RT000%s' % self.msg_counter
kqml_perf = _get_perf(text, msg_id)
self.send(kqml_perf)
self.msg_counter += 1
def receive_reply(self, msg, content):
extractions = content.gets(':extractions')
self.extractions = extractions
tp = trips.process_xml(self.extractions)
print(tp.statements)
def _get_perf(text, msg_id):
text = text.encode('utf-8')
msg = KQMLPerformative('REQUEST')
msg.set('receiver', 'DRUM')
content = KQMLList('run-text')
content.sets('text', text)
msg.set('content', content)
msg.set('reply-with', msg_id)
return msg
if __name__ == '__main__':
# NOTE: drum/bin/trips-drum needs to be running
dr = DrumReader(name='DrumReader')
dr.start()
|
Implement basic DRUM reader agentimport sys
import random
from indra.sources import trips
from kqml import KQMLModule, KQMLPerformative, KQMLList
class DrumReader(KQMLModule):
def __init__(self, **kwargs):
super(DrumReader, self).__init__(**kwargs)
self.msg_counter = random.randint(1, 100000)
self.ready()
self.extractions = None
self.read_text('MEK phosphorylates ERK1.')
self.read_text('BRAF phosphorylates MEK1.')
def read_text(self, text):
msg_id = 'RT000%s' % self.msg_counter
kqml_perf = _get_perf(text, msg_id)
self.send(kqml_perf)
self.msg_counter += 1
def receive_reply(self, msg, content):
extractions = content.gets(':extractions')
self.extractions = extractions
tp = trips.process_xml(self.extractions)
print(tp.statements)
def _get_perf(text, msg_id):
text = text.encode('utf-8')
msg = KQMLPerformative('REQUEST')
msg.set('receiver', 'DRUM')
content = KQMLList('run-text')
content.sets('text', text)
msg.set('content', content)
msg.set('reply-with', msg_id)
return msg
if __name__ == '__main__':
# NOTE: drum/bin/trips-drum needs to be running
dr = DrumReader(name='DrumReader')
dr.start()
|
<commit_before><commit_msg>Implement basic DRUM reader agent<commit_after>import sys
import random
from indra.sources import trips
from kqml import KQMLModule, KQMLPerformative, KQMLList
class DrumReader(KQMLModule):
def __init__(self, **kwargs):
super(DrumReader, self).__init__(**kwargs)
self.msg_counter = random.randint(1, 100000)
self.ready()
self.extractions = None
self.read_text('MEK phosphorylates ERK1.')
self.read_text('BRAF phosphorylates MEK1.')
def read_text(self, text):
msg_id = 'RT000%s' % self.msg_counter
kqml_perf = _get_perf(text, msg_id)
self.send(kqml_perf)
self.msg_counter += 1
def receive_reply(self, msg, content):
extractions = content.gets(':extractions')
self.extractions = extractions
tp = trips.process_xml(self.extractions)
print(tp.statements)
def _get_perf(text, msg_id):
text = text.encode('utf-8')
msg = KQMLPerformative('REQUEST')
msg.set('receiver', 'DRUM')
content = KQMLList('run-text')
content.sets('text', text)
msg.set('content', content)
msg.set('reply-with', msg_id)
return msg
if __name__ == '__main__':
# NOTE: drum/bin/trips-drum needs to be running
dr = DrumReader(name='DrumReader')
dr.start()
|
|
7dbdae4cbf8e4d78f84c2b8163cd62c7935d3890
|
bandicoot/tests/generate_regressions.py
|
bandicoot/tests/generate_regressions.py
|
import bandicoot as bc
from os.path import dirname, abspath, join
if __name__ == '__main__':
empty_user = bc.User()
empty_user.attributes['empty'] = True
empty_path = join(dirname(abspath(__file__)), 'samples/empty_user.json')
bc.io.to_json(bc.utils.all(empty_user, summary='extended', flatten=True), empty_path)
sample_user = bc.tests.generate_user.sample_user()
sample_path = join(dirname(abspath(__file__)), 'samples/sample_user_all_metrics.json')
bc.io.to_json(bc.utils.all(sample_user, summary='extended', groupby=None, flatten=True), sample_path)
|
Add a simple command to generate automatic regressions
|
Add a simple command to generate automatic regressions
|
Python
|
mit
|
ulfaslak/bandicoot,yvesalexandre/bandicoot,econandrew/bandicoot,econandrew/bandicoot,yvesalexandre/bandicoot,econandrew/bandicoot,ulfaslak/bandicoot,yvesalexandre/bandicoot,ulfaslak/bandicoot
|
Add a simple command to generate automatic regressions
|
import bandicoot as bc
from os.path import dirname, abspath, join
if __name__ == '__main__':
empty_user = bc.User()
empty_user.attributes['empty'] = True
empty_path = join(dirname(abspath(__file__)), 'samples/empty_user.json')
bc.io.to_json(bc.utils.all(empty_user, summary='extended', flatten=True), empty_path)
sample_user = bc.tests.generate_user.sample_user()
sample_path = join(dirname(abspath(__file__)), 'samples/sample_user_all_metrics.json')
bc.io.to_json(bc.utils.all(sample_user, summary='extended', groupby=None, flatten=True), sample_path)
|
<commit_before><commit_msg>Add a simple command to generate automatic regressions<commit_after>
|
import bandicoot as bc
from os.path import dirname, abspath, join
if __name__ == '__main__':
empty_user = bc.User()
empty_user.attributes['empty'] = True
empty_path = join(dirname(abspath(__file__)), 'samples/empty_user.json')
bc.io.to_json(bc.utils.all(empty_user, summary='extended', flatten=True), empty_path)
sample_user = bc.tests.generate_user.sample_user()
sample_path = join(dirname(abspath(__file__)), 'samples/sample_user_all_metrics.json')
bc.io.to_json(bc.utils.all(sample_user, summary='extended', groupby=None, flatten=True), sample_path)
|
Add a simple command to generate automatic regressionsimport bandicoot as bc
from os.path import dirname, abspath, join
if __name__ == '__main__':
empty_user = bc.User()
empty_user.attributes['empty'] = True
empty_path = join(dirname(abspath(__file__)), 'samples/empty_user.json')
bc.io.to_json(bc.utils.all(empty_user, summary='extended', flatten=True), empty_path)
sample_user = bc.tests.generate_user.sample_user()
sample_path = join(dirname(abspath(__file__)), 'samples/sample_user_all_metrics.json')
bc.io.to_json(bc.utils.all(sample_user, summary='extended', groupby=None, flatten=True), sample_path)
|
<commit_before><commit_msg>Add a simple command to generate automatic regressions<commit_after>import bandicoot as bc
from os.path import dirname, abspath, join
if __name__ == '__main__':
empty_user = bc.User()
empty_user.attributes['empty'] = True
empty_path = join(dirname(abspath(__file__)), 'samples/empty_user.json')
bc.io.to_json(bc.utils.all(empty_user, summary='extended', flatten=True), empty_path)
sample_user = bc.tests.generate_user.sample_user()
sample_path = join(dirname(abspath(__file__)), 'samples/sample_user_all_metrics.json')
bc.io.to_json(bc.utils.all(sample_user, summary='extended', groupby=None, flatten=True), sample_path)
|
|
e59a870a1e039e12da2097401f925146ecc1a5fb
|
tests/modules/test_memory.py
|
tests/modules/test_memory.py
|
# pylint: disable=C0103,C0111
import mock
import unittest
import tests.mocks as mocks
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.memory import Module
class VirtualMemory(object):
def __init__(self, percent):
self.percent = percent
class TestMemoryModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self._psutil = mock.patch("bumblebee.modules.memory.psutil")
self.psutil = self._psutil.start()
def tearDown(self):
self._psutil.stop()
mocks.teardown_test(self)
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(75)
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(81)
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
def test_usage(self):
rv = VirtualMemory(50)
rv.total = 1000
rv.available = 500
self.psutil.virtual_memory.return_value = rv
self.module.update_all()
self.assertEquals("500.00B/1000.00B (50.00%)", self.module.memory_usage(self.anyWidget))
self.assertEquals(None, self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Add unit tests for memory module
|
[tests] Add unit tests for memory module
|
Python
|
mit
|
tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status
|
[tests] Add unit tests for memory module
|
# pylint: disable=C0103,C0111
import mock
import unittest
import tests.mocks as mocks
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.memory import Module
class VirtualMemory(object):
def __init__(self, percent):
self.percent = percent
class TestMemoryModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self._psutil = mock.patch("bumblebee.modules.memory.psutil")
self.psutil = self._psutil.start()
def tearDown(self):
self._psutil.stop()
mocks.teardown_test(self)
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(75)
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(81)
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
def test_usage(self):
rv = VirtualMemory(50)
rv.total = 1000
rv.available = 500
self.psutil.virtual_memory.return_value = rv
self.module.update_all()
self.assertEquals("500.00B/1000.00B (50.00%)", self.module.memory_usage(self.anyWidget))
self.assertEquals(None, self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>[tests] Add unit tests for memory module<commit_after>
|
# pylint: disable=C0103,C0111
import mock
import unittest
import tests.mocks as mocks
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.memory import Module
class VirtualMemory(object):
def __init__(self, percent):
self.percent = percent
class TestMemoryModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self._psutil = mock.patch("bumblebee.modules.memory.psutil")
self.psutil = self._psutil.start()
def tearDown(self):
self._psutil.stop()
mocks.teardown_test(self)
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(75)
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(81)
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
def test_usage(self):
rv = VirtualMemory(50)
rv.total = 1000
rv.available = 500
self.psutil.virtual_memory.return_value = rv
self.module.update_all()
self.assertEquals("500.00B/1000.00B (50.00%)", self.module.memory_usage(self.anyWidget))
self.assertEquals(None, self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
[tests] Add unit tests for memory module# pylint: disable=C0103,C0111
import mock
import unittest
import tests.mocks as mocks
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.memory import Module
class VirtualMemory(object):
def __init__(self, percent):
self.percent = percent
class TestMemoryModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self._psutil = mock.patch("bumblebee.modules.memory.psutil")
self.psutil = self._psutil.start()
def tearDown(self):
self._psutil.stop()
mocks.teardown_test(self)
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(75)
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(81)
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
def test_usage(self):
rv = VirtualMemory(50)
rv.total = 1000
rv.available = 500
self.psutil.virtual_memory.return_value = rv
self.module.update_all()
self.assertEquals("500.00B/1000.00B (50.00%)", self.module.memory_usage(self.anyWidget))
self.assertEquals(None, self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
<commit_before><commit_msg>[tests] Add unit tests for memory module<commit_after># pylint: disable=C0103,C0111
import mock
import unittest
import tests.mocks as mocks
from bumblebee.input import LEFT_MOUSE
from bumblebee.modules.memory import Module
class VirtualMemory(object):
def __init__(self, percent):
self.percent = percent
class TestMemoryModule(unittest.TestCase):
def setUp(self):
mocks.setup_test(self, Module)
self._psutil = mock.patch("bumblebee.modules.memory.psutil")
self.psutil = self._psutil.start()
def tearDown(self):
self._psutil.stop()
mocks.teardown_test(self)
def test_leftclick(self):
mocks.mouseEvent(stdin=self.stdin, button=LEFT_MOUSE, inp=self.input, module=self.module)
self.popen.assert_call("gnome-system-monitor")
def test_warning(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(75)
self.module.update_all()
self.assertTrue("warning" in self.module.state(self.anyWidget))
def test_critical(self):
self.config.set("memory.critical", "80")
self.config.set("memory.warning", "70")
self.psutil.virtual_memory.return_value = VirtualMemory(81)
self.module.update_all()
self.assertTrue("critical" in self.module.state(self.anyWidget))
def test_usage(self):
rv = VirtualMemory(50)
rv.total = 1000
rv.available = 500
self.psutil.virtual_memory.return_value = rv
self.module.update_all()
self.assertEquals("500.00B/1000.00B (50.00%)", self.module.memory_usage(self.anyWidget))
self.assertEquals(None, self.module.state(self.anyWidget))
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
|
01aedda05cbd6cbe98afa71496c5c09465f0e4e6
|
tests/test_end_to_end.py
|
tests/test_end_to_end.py
|
#!/usr/bin/env python
# coding=utf-8
import unittest
from click.testing import CliRunner
from shub import tool
class ShubEndToEndTests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def run_subcmd(self, subcmd):
return self.runner.invoke(tool.cli, [subcmd]).output
def test_usage_is_displayed_if_no_arg_is_provided(self):
output = self.run_subcmd('')
usage_is_displayed = output.startswith('Usage:')
self.assertTrue(usage_is_displayed)
def test_deploy_egg_isnt_broken(self):
output = self.run_subcmd('deploy-egg')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_reqs_isnt_broken(self):
output = self.run_subcmd('deploy-reqs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_isnt_broken(self):
output = self.run_subcmd('deploy')
error = 'Unexpected output: %s' % output
self.assertTrue('requires scrapy' in output, error)
def test_fetch_eggs_isnt_broken(self):
output = self.run_subcmd('fetch-eggs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
|
Add rudimentary end to end tests
|
Add rudimentary end to end tests
Our subcommand tests bypass the click wrappers. I think it's a good idea
to have these tests just to have a little more confidence that no silly
bugs have gone unnoticed.
|
Python
|
bsd-3-clause
|
scrapinghub/shub
|
Add rudimentary end to end tests
Our subcommand tests bypass the click wrappers. I think it's a good idea
to have these tests just to have a little more confidence that no silly
bugs have gone unnoticed.
|
#!/usr/bin/env python
# coding=utf-8
import unittest
from click.testing import CliRunner
from shub import tool
class ShubEndToEndTests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def run_subcmd(self, subcmd):
return self.runner.invoke(tool.cli, [subcmd]).output
def test_usage_is_displayed_if_no_arg_is_provided(self):
output = self.run_subcmd('')
usage_is_displayed = output.startswith('Usage:')
self.assertTrue(usage_is_displayed)
def test_deploy_egg_isnt_broken(self):
output = self.run_subcmd('deploy-egg')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_reqs_isnt_broken(self):
output = self.run_subcmd('deploy-reqs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_isnt_broken(self):
output = self.run_subcmd('deploy')
error = 'Unexpected output: %s' % output
self.assertTrue('requires scrapy' in output, error)
def test_fetch_eggs_isnt_broken(self):
output = self.run_subcmd('fetch-eggs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
|
<commit_before><commit_msg>Add rudimentary end to end tests
Our subcommand tests bypass the click wrappers. I think it's a good idea
to have these tests just to have a little more confidence that no silly
bugs have gone unnoticed.<commit_after>
|
#!/usr/bin/env python
# coding=utf-8
import unittest
from click.testing import CliRunner
from shub import tool
class ShubEndToEndTests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def run_subcmd(self, subcmd):
return self.runner.invoke(tool.cli, [subcmd]).output
def test_usage_is_displayed_if_no_arg_is_provided(self):
output = self.run_subcmd('')
usage_is_displayed = output.startswith('Usage:')
self.assertTrue(usage_is_displayed)
def test_deploy_egg_isnt_broken(self):
output = self.run_subcmd('deploy-egg')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_reqs_isnt_broken(self):
output = self.run_subcmd('deploy-reqs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_isnt_broken(self):
output = self.run_subcmd('deploy')
error = 'Unexpected output: %s' % output
self.assertTrue('requires scrapy' in output, error)
def test_fetch_eggs_isnt_broken(self):
output = self.run_subcmd('fetch-eggs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
|
Add rudimentary end to end tests
Our subcommand tests bypass the click wrappers. I think it's a good idea
to have these tests just to have a little more confidence that no silly
bugs have gone unnoticed.#!/usr/bin/env python
# coding=utf-8
import unittest
from click.testing import CliRunner
from shub import tool
class ShubEndToEndTests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def run_subcmd(self, subcmd):
return self.runner.invoke(tool.cli, [subcmd]).output
def test_usage_is_displayed_if_no_arg_is_provided(self):
output = self.run_subcmd('')
usage_is_displayed = output.startswith('Usage:')
self.assertTrue(usage_is_displayed)
def test_deploy_egg_isnt_broken(self):
output = self.run_subcmd('deploy-egg')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_reqs_isnt_broken(self):
output = self.run_subcmd('deploy-reqs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_isnt_broken(self):
output = self.run_subcmd('deploy')
error = 'Unexpected output: %s' % output
self.assertTrue('requires scrapy' in output, error)
def test_fetch_eggs_isnt_broken(self):
output = self.run_subcmd('fetch-eggs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
|
<commit_before><commit_msg>Add rudimentary end to end tests
Our subcommand tests bypass the click wrappers. I think it's a good idea
to have these tests just to have a little more confidence that no silly
bugs have gone unnoticed.<commit_after>#!/usr/bin/env python
# coding=utf-8
import unittest
from click.testing import CliRunner
from shub import tool
class ShubEndToEndTests(unittest.TestCase):
def setUp(self):
self.runner = CliRunner()
def run_subcmd(self, subcmd):
return self.runner.invoke(tool.cli, [subcmd]).output
def test_usage_is_displayed_if_no_arg_is_provided(self):
output = self.run_subcmd('')
usage_is_displayed = output.startswith('Usage:')
self.assertTrue(usage_is_displayed)
def test_deploy_egg_isnt_broken(self):
output = self.run_subcmd('deploy-egg')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_reqs_isnt_broken(self):
output = self.run_subcmd('deploy-reqs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
def test_deploy_isnt_broken(self):
output = self.run_subcmd('deploy')
error = 'Unexpected output: %s' % output
self.assertTrue('requires scrapy' in output, error)
def test_fetch_eggs_isnt_broken(self):
output = self.run_subcmd('fetch-eggs')
error = 'Unexpected output: %s' % output
self.assertTrue('Missing argument' in output, error)
|
|
df0cf1ddbf236c6cf70563e6ddeb3b7cece80c92
|
tests/test_generation.py
|
tests/test_generation.py
|
from datetime import date
from populous.backends.base import Backend
from populous.blueprint import Blueprint
from populous.item import Item
def test_blueprint_preprocess(mocker):
blueprint = Blueprint()
blueprint.add_item({'name': 'foo', 'table': 'test'})
blueprint.add_item({'name': 'bar', 'table': 'test'})
foo = mocker.Mock(wraps=blueprint.items['foo'])
bar = mocker.Mock(wraps=blueprint.items['bar'])
blueprint.items['foo'] = foo
blueprint.items['bar'] = bar
blueprint.preprocess()
assert foo.preprocess.called is True
assert bar.preprocess.called is True
def test_item_preprocess(mocker):
existing = (
(1, 'Homer', 'Simpson', date(1956, 6, 18), 'M'),
(2, 'Marge', 'Simpson', date(1959, 6, 29), 'F'),
(3, 'Bart', 'Simpson', date(1981, 4, 1), 'M'),
(4, 'Lisa', 'Simpson', date(1984, 5, 9), 'F'),
(5, 'Maggie', 'Simpson', date(1988, 11, 7), 'F')
)
class DummyBackend(Backend):
def select(self, *args, **kwargs):
return iter(existing)
blueprint = Blueprint(backend=mocker.Mock(wraps=DummyBackend()))
item = Item(blueprint, 'person', 'test')
item.add_field('id', 'Integer', unique=True)
item.add_field('firstname', 'Text', unique=['lastname', 'birth'])
item.add_field('lastname', 'Text')
item.add_field('birth', 'Date')
item.add_field('gender', 'Choices', choices=['M', 'F'])
item.preprocess()
assert blueprint.backend.select.call_args == mocker.call(
'test', ['id', 'firstname', 'lastname', 'birth'])
assert 1 in item.fields['id'].seen
assert 5 in item.fields['id'].seen
assert 6 not in item.fields['id'].seen
seen = item.fields['firstname'].seen
assert ('Homer', 'Simpson', date(1956, 6, 18)) in seen
assert ('Lisa', 'Simpson', date(1984, 5, 9)) in seen
assert ('Bart', 'Simpson', date(2016, 10, 9)) not in seen
|
Add tests for item preprocessing
|
Add tests for item preprocessing
|
Python
|
mit
|
novafloss/populous
|
Add tests for item preprocessing
|
from datetime import date
from populous.backends.base import Backend
from populous.blueprint import Blueprint
from populous.item import Item
def test_blueprint_preprocess(mocker):
blueprint = Blueprint()
blueprint.add_item({'name': 'foo', 'table': 'test'})
blueprint.add_item({'name': 'bar', 'table': 'test'})
foo = mocker.Mock(wraps=blueprint.items['foo'])
bar = mocker.Mock(wraps=blueprint.items['bar'])
blueprint.items['foo'] = foo
blueprint.items['bar'] = bar
blueprint.preprocess()
assert foo.preprocess.called is True
assert bar.preprocess.called is True
def test_item_preprocess(mocker):
existing = (
(1, 'Homer', 'Simpson', date(1956, 6, 18), 'M'),
(2, 'Marge', 'Simpson', date(1959, 6, 29), 'F'),
(3, 'Bart', 'Simpson', date(1981, 4, 1), 'M'),
(4, 'Lisa', 'Simpson', date(1984, 5, 9), 'F'),
(5, 'Maggie', 'Simpson', date(1988, 11, 7), 'F')
)
class DummyBackend(Backend):
def select(self, *args, **kwargs):
return iter(existing)
blueprint = Blueprint(backend=mocker.Mock(wraps=DummyBackend()))
item = Item(blueprint, 'person', 'test')
item.add_field('id', 'Integer', unique=True)
item.add_field('firstname', 'Text', unique=['lastname', 'birth'])
item.add_field('lastname', 'Text')
item.add_field('birth', 'Date')
item.add_field('gender', 'Choices', choices=['M', 'F'])
item.preprocess()
assert blueprint.backend.select.call_args == mocker.call(
'test', ['id', 'firstname', 'lastname', 'birth'])
assert 1 in item.fields['id'].seen
assert 5 in item.fields['id'].seen
assert 6 not in item.fields['id'].seen
seen = item.fields['firstname'].seen
assert ('Homer', 'Simpson', date(1956, 6, 18)) in seen
assert ('Lisa', 'Simpson', date(1984, 5, 9)) in seen
assert ('Bart', 'Simpson', date(2016, 10, 9)) not in seen
|
<commit_before><commit_msg>Add tests for item preprocessing<commit_after>
|
from datetime import date
from populous.backends.base import Backend
from populous.blueprint import Blueprint
from populous.item import Item
def test_blueprint_preprocess(mocker):
blueprint = Blueprint()
blueprint.add_item({'name': 'foo', 'table': 'test'})
blueprint.add_item({'name': 'bar', 'table': 'test'})
foo = mocker.Mock(wraps=blueprint.items['foo'])
bar = mocker.Mock(wraps=blueprint.items['bar'])
blueprint.items['foo'] = foo
blueprint.items['bar'] = bar
blueprint.preprocess()
assert foo.preprocess.called is True
assert bar.preprocess.called is True
def test_item_preprocess(mocker):
existing = (
(1, 'Homer', 'Simpson', date(1956, 6, 18), 'M'),
(2, 'Marge', 'Simpson', date(1959, 6, 29), 'F'),
(3, 'Bart', 'Simpson', date(1981, 4, 1), 'M'),
(4, 'Lisa', 'Simpson', date(1984, 5, 9), 'F'),
(5, 'Maggie', 'Simpson', date(1988, 11, 7), 'F')
)
class DummyBackend(Backend):
def select(self, *args, **kwargs):
return iter(existing)
blueprint = Blueprint(backend=mocker.Mock(wraps=DummyBackend()))
item = Item(blueprint, 'person', 'test')
item.add_field('id', 'Integer', unique=True)
item.add_field('firstname', 'Text', unique=['lastname', 'birth'])
item.add_field('lastname', 'Text')
item.add_field('birth', 'Date')
item.add_field('gender', 'Choices', choices=['M', 'F'])
item.preprocess()
assert blueprint.backend.select.call_args == mocker.call(
'test', ['id', 'firstname', 'lastname', 'birth'])
assert 1 in item.fields['id'].seen
assert 5 in item.fields['id'].seen
assert 6 not in item.fields['id'].seen
seen = item.fields['firstname'].seen
assert ('Homer', 'Simpson', date(1956, 6, 18)) in seen
assert ('Lisa', 'Simpson', date(1984, 5, 9)) in seen
assert ('Bart', 'Simpson', date(2016, 10, 9)) not in seen
|
Add tests for item preprocessingfrom datetime import date
from populous.backends.base import Backend
from populous.blueprint import Blueprint
from populous.item import Item
def test_blueprint_preprocess(mocker):
blueprint = Blueprint()
blueprint.add_item({'name': 'foo', 'table': 'test'})
blueprint.add_item({'name': 'bar', 'table': 'test'})
foo = mocker.Mock(wraps=blueprint.items['foo'])
bar = mocker.Mock(wraps=blueprint.items['bar'])
blueprint.items['foo'] = foo
blueprint.items['bar'] = bar
blueprint.preprocess()
assert foo.preprocess.called is True
assert bar.preprocess.called is True
def test_item_preprocess(mocker):
existing = (
(1, 'Homer', 'Simpson', date(1956, 6, 18), 'M'),
(2, 'Marge', 'Simpson', date(1959, 6, 29), 'F'),
(3, 'Bart', 'Simpson', date(1981, 4, 1), 'M'),
(4, 'Lisa', 'Simpson', date(1984, 5, 9), 'F'),
(5, 'Maggie', 'Simpson', date(1988, 11, 7), 'F')
)
class DummyBackend(Backend):
def select(self, *args, **kwargs):
return iter(existing)
blueprint = Blueprint(backend=mocker.Mock(wraps=DummyBackend()))
item = Item(blueprint, 'person', 'test')
item.add_field('id', 'Integer', unique=True)
item.add_field('firstname', 'Text', unique=['lastname', 'birth'])
item.add_field('lastname', 'Text')
item.add_field('birth', 'Date')
item.add_field('gender', 'Choices', choices=['M', 'F'])
item.preprocess()
assert blueprint.backend.select.call_args == mocker.call(
'test', ['id', 'firstname', 'lastname', 'birth'])
assert 1 in item.fields['id'].seen
assert 5 in item.fields['id'].seen
assert 6 not in item.fields['id'].seen
seen = item.fields['firstname'].seen
assert ('Homer', 'Simpson', date(1956, 6, 18)) in seen
assert ('Lisa', 'Simpson', date(1984, 5, 9)) in seen
assert ('Bart', 'Simpson', date(2016, 10, 9)) not in seen
|
<commit_before><commit_msg>Add tests for item preprocessing<commit_after>from datetime import date
from populous.backends.base import Backend
from populous.blueprint import Blueprint
from populous.item import Item
def test_blueprint_preprocess(mocker):
blueprint = Blueprint()
blueprint.add_item({'name': 'foo', 'table': 'test'})
blueprint.add_item({'name': 'bar', 'table': 'test'})
foo = mocker.Mock(wraps=blueprint.items['foo'])
bar = mocker.Mock(wraps=blueprint.items['bar'])
blueprint.items['foo'] = foo
blueprint.items['bar'] = bar
blueprint.preprocess()
assert foo.preprocess.called is True
assert bar.preprocess.called is True
def test_item_preprocess(mocker):
existing = (
(1, 'Homer', 'Simpson', date(1956, 6, 18), 'M'),
(2, 'Marge', 'Simpson', date(1959, 6, 29), 'F'),
(3, 'Bart', 'Simpson', date(1981, 4, 1), 'M'),
(4, 'Lisa', 'Simpson', date(1984, 5, 9), 'F'),
(5, 'Maggie', 'Simpson', date(1988, 11, 7), 'F')
)
class DummyBackend(Backend):
def select(self, *args, **kwargs):
return iter(existing)
blueprint = Blueprint(backend=mocker.Mock(wraps=DummyBackend()))
item = Item(blueprint, 'person', 'test')
item.add_field('id', 'Integer', unique=True)
item.add_field('firstname', 'Text', unique=['lastname', 'birth'])
item.add_field('lastname', 'Text')
item.add_field('birth', 'Date')
item.add_field('gender', 'Choices', choices=['M', 'F'])
item.preprocess()
assert blueprint.backend.select.call_args == mocker.call(
'test', ['id', 'firstname', 'lastname', 'birth'])
assert 1 in item.fields['id'].seen
assert 5 in item.fields['id'].seen
assert 6 not in item.fields['id'].seen
seen = item.fields['firstname'].seen
assert ('Homer', 'Simpson', date(1956, 6, 18)) in seen
assert ('Lisa', 'Simpson', date(1984, 5, 9)) in seen
assert ('Bart', 'Simpson', date(2016, 10, 9)) not in seen
|