column            type            min length   max length
commit            stringlengths   40           40
old_file          stringlengths   4            118
new_file          stringlengths   4            118
old_contents      stringlengths   0            2.94k
new_contents      stringlengths   1            4.43k
subject           stringlengths   15           444
message           stringlengths   16           3.45k
lang              stringclasses   1 value
license           stringclasses   13 values
repos             stringlengths   5            43.2k
prompt            stringlengths   17           4.58k
response          stringlengths   1            4.43k
prompt_tagged     stringlengths   58           4.62k
response_tagged   stringlengths   1            4.43k
text              stringlengths   132          7.29k
text_tagged       stringlengths   173          7.33k
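Each record that follows carries the sixteen columns listed above, one field value per line (empty fields such as a blank old_contents are simply omitted). As a rough, non-authoritative sketch of how one such record could be inspected, assuming the rows were exported as a JSON Lines file — the file name and the export format are placeholders, not part of the original dump:

```python
import json

# Hypothetical path -- assumes the records below were exported as JSON Lines.
DATA_PATH = "commit_dataset.jsonl"

# Column names taken from the schema table above.
COLUMNS = [
    "commit", "old_file", "new_file", "old_contents", "new_contents",
    "subject", "message", "lang", "license", "repos",
    "prompt", "response", "prompt_tagged", "response_tagged",
    "text", "text_tagged",
]

# Read only the first record for a quick structural check.
with open(DATA_PATH, "r", encoding="utf-8") as handle:
    first_line = handle.readline()

record = json.loads(first_line)
for column in COLUMNS:
    value = str(record.get(column, ""))
    # Print a short preview so the long code-bearing fields stay readable.
    print(f"{column}: {value[:60]!r}")
```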
263cd80a16f44e900d939322e3e6e1ce0cea31b7
elements/swift-proxy/check_mk_checks/swift_proxy_healthcheck.py
elements/swift-proxy/check_mk_checks/swift_proxy_healthcheck.py
#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Check_mk script that checks if swift-proxy-server is responding to # http requests. # # The healthcheck middleware in the swift-proxy-server pipeline normally # responds to a GET /healthcheck with 200 status and "OK" in response body. # # Output is in the check_mk format: # # status test_name perf_data output # # For example: (using - for perf_data) # # 0 swift_proxy_healthcheck - Connection Ok import ConfigParser import requests def main(): # Read port value from conf file conf_parser = ConfigParser.ConfigParser() conf_parser.read("/etc/swift/proxy-server.conf") config = dict(conf_parser.items("DEFAULT")) port = config.get("bind_port", "8080") ip = config.get("bind_ip", "localhost") status = 2 # Assume error try: url = "http://" + ip + ":" + port + "/healthcheck" resp = requests.get(url, timeout=5.0) if resp.status_code == 200 and resp.text == "OK": msg = "Ok" status = 0 # Good status elif resp.status_code == 503 and "DISABLED" in resp.text: msg = "disabled by file" status = 1 # Warning status else: msg = "unexpected response (status: {0}".format(resp.status_code) except requests.exceptions.Timeout: msg = "timeout ({0}:{1})".format(ip, port) except requests.exceptions.ConnectionError: msg = "connection error ({0}:{1})".format(ip, port) except Exception as error: msg = "{0} ({1}:{2})".format(str(error), ip, port) print("{0} swift_proxy_healthcheck - {1}".format(status, msg)) if __name__ == '__main__': main()
Add check_mk swift proxy diagnostic
Add check_mk swift proxy diagnostic Added a check_mk script that runs healthcheck to ensure that a proxy node is up and running with proxy services. This diag is also check_mk compliant (ie. the output matches check_mk desired format). Have not added thecheck_mk-agent element to element-deps file as even though it is required to install the file itself into the build image, it can be manually added to images that require diagnostics. Change-Id: If18acfa1b5c967481f59b9f2041d8d7c37db15ca
Python
apache-2.0
rdo-management/tripleo-image-elements,radez/tripleo-image-elements,radez/tripleo-image-elements,rdo-management/tripleo-image-elements,openstack/tripleo-image-elements,openstack/tripleo-image-elements
Add check_mk swift proxy diagnostic Added a check_mk script that runs healthcheck to ensure that a proxy node is up and running with proxy services. This diag is also check_mk compliant (ie. the output matches check_mk desired format). Have not added thecheck_mk-agent element to element-deps file as even though it is required to install the file itself into the build image, it can be manually added to images that require diagnostics. Change-Id: If18acfa1b5c967481f59b9f2041d8d7c37db15ca
#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Check_mk script that checks if swift-proxy-server is responding to # http requests. # # The healthcheck middleware in the swift-proxy-server pipeline normally # responds to a GET /healthcheck with 200 status and "OK" in response body. # # Output is in the check_mk format: # # status test_name perf_data output # # For example: (using - for perf_data) # # 0 swift_proxy_healthcheck - Connection Ok import ConfigParser import requests def main(): # Read port value from conf file conf_parser = ConfigParser.ConfigParser() conf_parser.read("/etc/swift/proxy-server.conf") config = dict(conf_parser.items("DEFAULT")) port = config.get("bind_port", "8080") ip = config.get("bind_ip", "localhost") status = 2 # Assume error try: url = "http://" + ip + ":" + port + "/healthcheck" resp = requests.get(url, timeout=5.0) if resp.status_code == 200 and resp.text == "OK": msg = "Ok" status = 0 # Good status elif resp.status_code == 503 and "DISABLED" in resp.text: msg = "disabled by file" status = 1 # Warning status else: msg = "unexpected response (status: {0}".format(resp.status_code) except requests.exceptions.Timeout: msg = "timeout ({0}:{1})".format(ip, port) except requests.exceptions.ConnectionError: msg = "connection error ({0}:{1})".format(ip, port) except Exception as error: msg = "{0} ({1}:{2})".format(str(error), ip, port) print("{0} swift_proxy_healthcheck - {1}".format(status, msg)) if __name__ == '__main__': main()
<commit_before><commit_msg>Add check_mk swift proxy diagnostic Added a check_mk script that runs healthcheck to ensure that a proxy node is up and running with proxy services. This diag is also check_mk compliant (ie. the output matches check_mk desired format). Have not added thecheck_mk-agent element to element-deps file as even though it is required to install the file itself into the build image, it can be manually added to images that require diagnostics. Change-Id: If18acfa1b5c967481f59b9f2041d8d7c37db15ca<commit_after>
#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Check_mk script that checks if swift-proxy-server is responding to # http requests. # # The healthcheck middleware in the swift-proxy-server pipeline normally # responds to a GET /healthcheck with 200 status and "OK" in response body. # # Output is in the check_mk format: # # status test_name perf_data output # # For example: (using - for perf_data) # # 0 swift_proxy_healthcheck - Connection Ok import ConfigParser import requests def main(): # Read port value from conf file conf_parser = ConfigParser.ConfigParser() conf_parser.read("/etc/swift/proxy-server.conf") config = dict(conf_parser.items("DEFAULT")) port = config.get("bind_port", "8080") ip = config.get("bind_ip", "localhost") status = 2 # Assume error try: url = "http://" + ip + ":" + port + "/healthcheck" resp = requests.get(url, timeout=5.0) if resp.status_code == 200 and resp.text == "OK": msg = "Ok" status = 0 # Good status elif resp.status_code == 503 and "DISABLED" in resp.text: msg = "disabled by file" status = 1 # Warning status else: msg = "unexpected response (status: {0}".format(resp.status_code) except requests.exceptions.Timeout: msg = "timeout ({0}:{1})".format(ip, port) except requests.exceptions.ConnectionError: msg = "connection error ({0}:{1})".format(ip, port) except Exception as error: msg = "{0} ({1}:{2})".format(str(error), ip, port) print("{0} swift_proxy_healthcheck - {1}".format(status, msg)) if __name__ == '__main__': main()
Add check_mk swift proxy diagnostic Added a check_mk script that runs healthcheck to ensure that a proxy node is up and running with proxy services. This diag is also check_mk compliant (ie. the output matches check_mk desired format). Have not added thecheck_mk-agent element to element-deps file as even though it is required to install the file itself into the build image, it can be manually added to images that require diagnostics. Change-Id: If18acfa1b5c967481f59b9f2041d8d7c37db15ca#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Check_mk script that checks if swift-proxy-server is responding to # http requests. # # The healthcheck middleware in the swift-proxy-server pipeline normally # responds to a GET /healthcheck with 200 status and "OK" in response body. # # Output is in the check_mk format: # # status test_name perf_data output # # For example: (using - for perf_data) # # 0 swift_proxy_healthcheck - Connection Ok import ConfigParser import requests def main(): # Read port value from conf file conf_parser = ConfigParser.ConfigParser() conf_parser.read("/etc/swift/proxy-server.conf") config = dict(conf_parser.items("DEFAULT")) port = config.get("bind_port", "8080") ip = config.get("bind_ip", "localhost") status = 2 # Assume error try: url = "http://" + ip + ":" + port + "/healthcheck" resp = requests.get(url, timeout=5.0) if resp.status_code == 200 and resp.text == "OK": msg = "Ok" status = 0 # Good status elif resp.status_code == 503 and "DISABLED" in resp.text: msg = "disabled by file" status = 1 # Warning status else: msg = "unexpected response (status: {0}".format(resp.status_code) except requests.exceptions.Timeout: msg = "timeout ({0}:{1})".format(ip, port) except requests.exceptions.ConnectionError: msg = "connection error ({0}:{1})".format(ip, port) except Exception as error: msg = "{0} ({1}:{2})".format(str(error), ip, port) print("{0} swift_proxy_healthcheck - {1}".format(status, msg)) if __name__ == '__main__': main()
<commit_before><commit_msg>Add check_mk swift proxy diagnostic Added a check_mk script that runs healthcheck to ensure that a proxy node is up and running with proxy services. This diag is also check_mk compliant (ie. the output matches check_mk desired format). Have not added thecheck_mk-agent element to element-deps file as even though it is required to install the file itself into the build image, it can be manually added to images that require diagnostics. Change-Id: If18acfa1b5c967481f59b9f2041d8d7c37db15ca<commit_after>#!/usr/bin/python # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Check_mk script that checks if swift-proxy-server is responding to # http requests. # # The healthcheck middleware in the swift-proxy-server pipeline normally # responds to a GET /healthcheck with 200 status and "OK" in response body. # # Output is in the check_mk format: # # status test_name perf_data output # # For example: (using - for perf_data) # # 0 swift_proxy_healthcheck - Connection Ok import ConfigParser import requests def main(): # Read port value from conf file conf_parser = ConfigParser.ConfigParser() conf_parser.read("/etc/swift/proxy-server.conf") config = dict(conf_parser.items("DEFAULT")) port = config.get("bind_port", "8080") ip = config.get("bind_ip", "localhost") status = 2 # Assume error try: url = "http://" + ip + ":" + port + "/healthcheck" resp = requests.get(url, timeout=5.0) if resp.status_code == 200 and resp.text == "OK": msg = "Ok" status = 0 # Good status elif resp.status_code == 503 and "DISABLED" in resp.text: msg = "disabled by file" status = 1 # Warning status else: msg = "unexpected response (status: {0}".format(resp.status_code) except requests.exceptions.Timeout: msg = "timeout ({0}:{1})".format(ip, port) except requests.exceptions.ConnectionError: msg = "connection error ({0}:{1})".format(ip, port) except Exception as error: msg = "{0} ({1}:{2})".format(str(error), ip, port) print("{0} swift_proxy_healthcheck - {1}".format(status, msg)) if __name__ == '__main__': main()
d34dad32170e53f49e14611f5bfbfcb4eb7b8d4d
extra/create_rand_host.py
extra/create_rand_host.py
import requests import json import random import uuid login="test" password="P@ssword" with open("dict", 'r', encoding="latin-1") as words : a = words.readlines() headers = { 'Content-type': 'application/json', 'Accept': 'application/json' } def gen_ip(): a = random.randint(1, 254) b = random.randint(1, 254) c = random.randint(1, 254) d = random.randint(1, 254) #mask = random.randint(1, 32) ip_s = "{}.{}.{}.{}".format(a,b,c,d) return {'ip' : ip_s} def gen_interface(): name = random.choice(a).replace('\n','') n_int = random.randint(1, 2) n_ip = random.randint(1, 3) interfaces = [] for i in range(n_int): ips = [] for y in range(n_ip): ips.append(gen_ip()) mac = ':'.join(("%12x" % random.randint(0, 0xFFFFFFFFFFFF))[i:i+2] for i in range(0, 12, 2)) interfaces.append({ "ip_address": ips, "name": "{}{}".format(name,i), "mac_address": mac, #"host": null }) return interfaces while(1): name = random.choice(a).replace('\n','') #name = random.choice(a).replace('\n','' snow_id = random.randint(100000, 999999) machine_uuid = str(uuid.uuid4()) interfaces = gen_interface() data = { "interfaces": interfaces, "name": name, "snow_id": snow_id, "uuid": machine_uuid, "machine_id": machine_uuid, "local_name": name, "dns_name": "{}.foo.bar".format(name), "vm_name": name, #"geo": null, #"team": null, #"domain": null, #"hardware_type": null, #"parent_host": null, #"os_family": null, #"os_distribution": null, #"os_distribution_version": null, #"system_team": null, #"application_team": null } r = requests.post( 'http://localhost:8081/api/v1/inventory/hosts/', auth=(login, password), #headers=headers, json=data, ) print(r.text)
Add script to generate fake data
Add script to generate fake data
Python
mit
H0neyBadger/cmdb,H0neyBadger/cmdb
Add script to generate fake data
import requests import json import random import uuid login="test" password="P@ssword" with open("dict", 'r', encoding="latin-1") as words : a = words.readlines() headers = { 'Content-type': 'application/json', 'Accept': 'application/json' } def gen_ip(): a = random.randint(1, 254) b = random.randint(1, 254) c = random.randint(1, 254) d = random.randint(1, 254) #mask = random.randint(1, 32) ip_s = "{}.{}.{}.{}".format(a,b,c,d) return {'ip' : ip_s} def gen_interface(): name = random.choice(a).replace('\n','') n_int = random.randint(1, 2) n_ip = random.randint(1, 3) interfaces = [] for i in range(n_int): ips = [] for y in range(n_ip): ips.append(gen_ip()) mac = ':'.join(("%12x" % random.randint(0, 0xFFFFFFFFFFFF))[i:i+2] for i in range(0, 12, 2)) interfaces.append({ "ip_address": ips, "name": "{}{}".format(name,i), "mac_address": mac, #"host": null }) return interfaces while(1): name = random.choice(a).replace('\n','') #name = random.choice(a).replace('\n','' snow_id = random.randint(100000, 999999) machine_uuid = str(uuid.uuid4()) interfaces = gen_interface() data = { "interfaces": interfaces, "name": name, "snow_id": snow_id, "uuid": machine_uuid, "machine_id": machine_uuid, "local_name": name, "dns_name": "{}.foo.bar".format(name), "vm_name": name, #"geo": null, #"team": null, #"domain": null, #"hardware_type": null, #"parent_host": null, #"os_family": null, #"os_distribution": null, #"os_distribution_version": null, #"system_team": null, #"application_team": null } r = requests.post( 'http://localhost:8081/api/v1/inventory/hosts/', auth=(login, password), #headers=headers, json=data, ) print(r.text)
<commit_before><commit_msg>Add script to generate fake data<commit_after>
import requests import json import random import uuid login="test" password="P@ssword" with open("dict", 'r', encoding="latin-1") as words : a = words.readlines() headers = { 'Content-type': 'application/json', 'Accept': 'application/json' } def gen_ip(): a = random.randint(1, 254) b = random.randint(1, 254) c = random.randint(1, 254) d = random.randint(1, 254) #mask = random.randint(1, 32) ip_s = "{}.{}.{}.{}".format(a,b,c,d) return {'ip' : ip_s} def gen_interface(): name = random.choice(a).replace('\n','') n_int = random.randint(1, 2) n_ip = random.randint(1, 3) interfaces = [] for i in range(n_int): ips = [] for y in range(n_ip): ips.append(gen_ip()) mac = ':'.join(("%12x" % random.randint(0, 0xFFFFFFFFFFFF))[i:i+2] for i in range(0, 12, 2)) interfaces.append({ "ip_address": ips, "name": "{}{}".format(name,i), "mac_address": mac, #"host": null }) return interfaces while(1): name = random.choice(a).replace('\n','') #name = random.choice(a).replace('\n','' snow_id = random.randint(100000, 999999) machine_uuid = str(uuid.uuid4()) interfaces = gen_interface() data = { "interfaces": interfaces, "name": name, "snow_id": snow_id, "uuid": machine_uuid, "machine_id": machine_uuid, "local_name": name, "dns_name": "{}.foo.bar".format(name), "vm_name": name, #"geo": null, #"team": null, #"domain": null, #"hardware_type": null, #"parent_host": null, #"os_family": null, #"os_distribution": null, #"os_distribution_version": null, #"system_team": null, #"application_team": null } r = requests.post( 'http://localhost:8081/api/v1/inventory/hosts/', auth=(login, password), #headers=headers, json=data, ) print(r.text)
Add script to generate fake dataimport requests import json import random import uuid login="test" password="P@ssword" with open("dict", 'r', encoding="latin-1") as words : a = words.readlines() headers = { 'Content-type': 'application/json', 'Accept': 'application/json' } def gen_ip(): a = random.randint(1, 254) b = random.randint(1, 254) c = random.randint(1, 254) d = random.randint(1, 254) #mask = random.randint(1, 32) ip_s = "{}.{}.{}.{}".format(a,b,c,d) return {'ip' : ip_s} def gen_interface(): name = random.choice(a).replace('\n','') n_int = random.randint(1, 2) n_ip = random.randint(1, 3) interfaces = [] for i in range(n_int): ips = [] for y in range(n_ip): ips.append(gen_ip()) mac = ':'.join(("%12x" % random.randint(0, 0xFFFFFFFFFFFF))[i:i+2] for i in range(0, 12, 2)) interfaces.append({ "ip_address": ips, "name": "{}{}".format(name,i), "mac_address": mac, #"host": null }) return interfaces while(1): name = random.choice(a).replace('\n','') #name = random.choice(a).replace('\n','' snow_id = random.randint(100000, 999999) machine_uuid = str(uuid.uuid4()) interfaces = gen_interface() data = { "interfaces": interfaces, "name": name, "snow_id": snow_id, "uuid": machine_uuid, "machine_id": machine_uuid, "local_name": name, "dns_name": "{}.foo.bar".format(name), "vm_name": name, #"geo": null, #"team": null, #"domain": null, #"hardware_type": null, #"parent_host": null, #"os_family": null, #"os_distribution": null, #"os_distribution_version": null, #"system_team": null, #"application_team": null } r = requests.post( 'http://localhost:8081/api/v1/inventory/hosts/', auth=(login, password), #headers=headers, json=data, ) print(r.text)
<commit_before><commit_msg>Add script to generate fake data<commit_after>import requests import json import random import uuid login="test" password="P@ssword" with open("dict", 'r', encoding="latin-1") as words : a = words.readlines() headers = { 'Content-type': 'application/json', 'Accept': 'application/json' } def gen_ip(): a = random.randint(1, 254) b = random.randint(1, 254) c = random.randint(1, 254) d = random.randint(1, 254) #mask = random.randint(1, 32) ip_s = "{}.{}.{}.{}".format(a,b,c,d) return {'ip' : ip_s} def gen_interface(): name = random.choice(a).replace('\n','') n_int = random.randint(1, 2) n_ip = random.randint(1, 3) interfaces = [] for i in range(n_int): ips = [] for y in range(n_ip): ips.append(gen_ip()) mac = ':'.join(("%12x" % random.randint(0, 0xFFFFFFFFFFFF))[i:i+2] for i in range(0, 12, 2)) interfaces.append({ "ip_address": ips, "name": "{}{}".format(name,i), "mac_address": mac, #"host": null }) return interfaces while(1): name = random.choice(a).replace('\n','') #name = random.choice(a).replace('\n','' snow_id = random.randint(100000, 999999) machine_uuid = str(uuid.uuid4()) interfaces = gen_interface() data = { "interfaces": interfaces, "name": name, "snow_id": snow_id, "uuid": machine_uuid, "machine_id": machine_uuid, "local_name": name, "dns_name": "{}.foo.bar".format(name), "vm_name": name, #"geo": null, #"team": null, #"domain": null, #"hardware_type": null, #"parent_host": null, #"os_family": null, #"os_distribution": null, #"os_distribution_version": null, #"system_team": null, #"application_team": null } r = requests.post( 'http://localhost:8081/api/v1/inventory/hosts/', auth=(login, password), #headers=headers, json=data, ) print(r.text)
0f2ef5a0eae40a24e50c10187606aa884faff728
TrailingString.py
TrailingString.py
# Trailing String # # https://www.codeeval.com/open_challenges/32/ # # Challenge Description: There are two strings: A and B. Print 1 if string B # occurs at the end of string A. Otherwise, print 0. import sys def is_trailing_string(x, y): return x[-len(y):] == y if __name__ == '__main__': input_file = sys.argv[1] with open(input_file, 'r') as test_cases: for case in test_cases: strings = case.rstrip().split(',') if (is_trailing_string(strings[0], strings[1])): print(1) else: print(0)
Solve the Trailing String challenge
Solve the Trailing String challenge
Python
mit
TommyN94/CodeEvalSolutions,TommyN94/CodeEvalSolutions
Solve the Trailing String challenge
# Trailing String # # https://www.codeeval.com/open_challenges/32/ # # Challenge Description: There are two strings: A and B. Print 1 if string B # occurs at the end of string A. Otherwise, print 0. import sys def is_trailing_string(x, y): return x[-len(y):] == y if __name__ == '__main__': input_file = sys.argv[1] with open(input_file, 'r') as test_cases: for case in test_cases: strings = case.rstrip().split(',') if (is_trailing_string(strings[0], strings[1])): print(1) else: print(0)
<commit_before><commit_msg>Solve the Trailing String challenge<commit_after>
# Trailing String # # https://www.codeeval.com/open_challenges/32/ # # Challenge Description: There are two strings: A and B. Print 1 if string B # occurs at the end of string A. Otherwise, print 0. import sys def is_trailing_string(x, y): return x[-len(y):] == y if __name__ == '__main__': input_file = sys.argv[1] with open(input_file, 'r') as test_cases: for case in test_cases: strings = case.rstrip().split(',') if (is_trailing_string(strings[0], strings[1])): print(1) else: print(0)
Solve the Trailing String challenge# Trailing String # # https://www.codeeval.com/open_challenges/32/ # # Challenge Description: There are two strings: A and B. Print 1 if string B # occurs at the end of string A. Otherwise, print 0. import sys def is_trailing_string(x, y): return x[-len(y):] == y if __name__ == '__main__': input_file = sys.argv[1] with open(input_file, 'r') as test_cases: for case in test_cases: strings = case.rstrip().split(',') if (is_trailing_string(strings[0], strings[1])): print(1) else: print(0)
<commit_before><commit_msg>Solve the Trailing String challenge<commit_after># Trailing String # # https://www.codeeval.com/open_challenges/32/ # # Challenge Description: There are two strings: A and B. Print 1 if string B # occurs at the end of string A. Otherwise, print 0. import sys def is_trailing_string(x, y): return x[-len(y):] == y if __name__ == '__main__': input_file = sys.argv[1] with open(input_file, 'r') as test_cases: for case in test_cases: strings = case.rstrip().split(',') if (is_trailing_string(strings[0], strings[1])): print(1) else: print(0)
2986ca3ebd7562d9cb84f33598e17b3b9627f3a2
landlab/grid/tests/test_diagonals.py
landlab/grid/tests/test_diagonals.py
#! /usr/bin/env python import numpy as np from nose.tools import assert_is, assert_is_instance from numpy.testing import assert_array_equal from landlab.grid.diagonals import create_nodes_at_diagonal def test_nodes_at_diagonal(): """Test tail and head nodes of diagonals.""" diagonals = create_nodes_at_diagonal((2, 2)) assert_array_equal(diagonals, [[0, 3], [1, 2]]) diagonals = create_nodes_at_diagonal((4, 3)) assert_is_instance(diagonals, np.ndarray) assert_array_equal(diagonals, [[0, 4], [1, 3], [1, 5], [2, 4], [3, 7], [4, 6], [4, 8], [5, 7], [6, 10], [7, 9], [7, 11], [8, 10]]) def test_nodes_at_diagonal_1d(): """Test nodes at diagonals for 1d grid.""" diagonals = create_nodes_at_diagonal((1, 2)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) diagonals = create_nodes_at_diagonal((4, 1)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) def test_nodes_at_diagonal_out_keyword(): """Test out keyword for nodes_at_diagonal.""" buffer = np.empty((4, 2), dtype=int) diagonals = create_nodes_at_diagonal((3, 2), out=buffer) assert_is(buffer, diagonals) assert_array_equal(diagonals, [[0, 3], [1, 2], [2, 5], [3, 4]])
Add some unit tests for the diagonals.
Add some unit tests for the diagonals.
Python
mit
amandersillinois/landlab,landlab/landlab,cmshobe/landlab,cmshobe/landlab,cmshobe/landlab,landlab/landlab,amandersillinois/landlab,landlab/landlab
Add some unit tests for the diagonals.
#! /usr/bin/env python import numpy as np from nose.tools import assert_is, assert_is_instance from numpy.testing import assert_array_equal from landlab.grid.diagonals import create_nodes_at_diagonal def test_nodes_at_diagonal(): """Test tail and head nodes of diagonals.""" diagonals = create_nodes_at_diagonal((2, 2)) assert_array_equal(diagonals, [[0, 3], [1, 2]]) diagonals = create_nodes_at_diagonal((4, 3)) assert_is_instance(diagonals, np.ndarray) assert_array_equal(diagonals, [[0, 4], [1, 3], [1, 5], [2, 4], [3, 7], [4, 6], [4, 8], [5, 7], [6, 10], [7, 9], [7, 11], [8, 10]]) def test_nodes_at_diagonal_1d(): """Test nodes at diagonals for 1d grid.""" diagonals = create_nodes_at_diagonal((1, 2)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) diagonals = create_nodes_at_diagonal((4, 1)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) def test_nodes_at_diagonal_out_keyword(): """Test out keyword for nodes_at_diagonal.""" buffer = np.empty((4, 2), dtype=int) diagonals = create_nodes_at_diagonal((3, 2), out=buffer) assert_is(buffer, diagonals) assert_array_equal(diagonals, [[0, 3], [1, 2], [2, 5], [3, 4]])
<commit_before><commit_msg>Add some unit tests for the diagonals.<commit_after>
#! /usr/bin/env python import numpy as np from nose.tools import assert_is, assert_is_instance from numpy.testing import assert_array_equal from landlab.grid.diagonals import create_nodes_at_diagonal def test_nodes_at_diagonal(): """Test tail and head nodes of diagonals.""" diagonals = create_nodes_at_diagonal((2, 2)) assert_array_equal(diagonals, [[0, 3], [1, 2]]) diagonals = create_nodes_at_diagonal((4, 3)) assert_is_instance(diagonals, np.ndarray) assert_array_equal(diagonals, [[0, 4], [1, 3], [1, 5], [2, 4], [3, 7], [4, 6], [4, 8], [5, 7], [6, 10], [7, 9], [7, 11], [8, 10]]) def test_nodes_at_diagonal_1d(): """Test nodes at diagonals for 1d grid.""" diagonals = create_nodes_at_diagonal((1, 2)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) diagonals = create_nodes_at_diagonal((4, 1)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) def test_nodes_at_diagonal_out_keyword(): """Test out keyword for nodes_at_diagonal.""" buffer = np.empty((4, 2), dtype=int) diagonals = create_nodes_at_diagonal((3, 2), out=buffer) assert_is(buffer, diagonals) assert_array_equal(diagonals, [[0, 3], [1, 2], [2, 5], [3, 4]])
Add some unit tests for the diagonals.#! /usr/bin/env python import numpy as np from nose.tools import assert_is, assert_is_instance from numpy.testing import assert_array_equal from landlab.grid.diagonals import create_nodes_at_diagonal def test_nodes_at_diagonal(): """Test tail and head nodes of diagonals.""" diagonals = create_nodes_at_diagonal((2, 2)) assert_array_equal(diagonals, [[0, 3], [1, 2]]) diagonals = create_nodes_at_diagonal((4, 3)) assert_is_instance(diagonals, np.ndarray) assert_array_equal(diagonals, [[0, 4], [1, 3], [1, 5], [2, 4], [3, 7], [4, 6], [4, 8], [5, 7], [6, 10], [7, 9], [7, 11], [8, 10]]) def test_nodes_at_diagonal_1d(): """Test nodes at diagonals for 1d grid.""" diagonals = create_nodes_at_diagonal((1, 2)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) diagonals = create_nodes_at_diagonal((4, 1)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) def test_nodes_at_diagonal_out_keyword(): """Test out keyword for nodes_at_diagonal.""" buffer = np.empty((4, 2), dtype=int) diagonals = create_nodes_at_diagonal((3, 2), out=buffer) assert_is(buffer, diagonals) assert_array_equal(diagonals, [[0, 3], [1, 2], [2, 5], [3, 4]])
<commit_before><commit_msg>Add some unit tests for the diagonals.<commit_after>#! /usr/bin/env python import numpy as np from nose.tools import assert_is, assert_is_instance from numpy.testing import assert_array_equal from landlab.grid.diagonals import create_nodes_at_diagonal def test_nodes_at_diagonal(): """Test tail and head nodes of diagonals.""" diagonals = create_nodes_at_diagonal((2, 2)) assert_array_equal(diagonals, [[0, 3], [1, 2]]) diagonals = create_nodes_at_diagonal((4, 3)) assert_is_instance(diagonals, np.ndarray) assert_array_equal(diagonals, [[0, 4], [1, 3], [1, 5], [2, 4], [3, 7], [4, 6], [4, 8], [5, 7], [6, 10], [7, 9], [7, 11], [8, 10]]) def test_nodes_at_diagonal_1d(): """Test nodes at diagonals for 1d grid.""" diagonals = create_nodes_at_diagonal((1, 2)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) diagonals = create_nodes_at_diagonal((4, 1)) assert_array_equal(diagonals, np.array([], dtype=int).reshape((0, 2))) def test_nodes_at_diagonal_out_keyword(): """Test out keyword for nodes_at_diagonal.""" buffer = np.empty((4, 2), dtype=int) diagonals = create_nodes_at_diagonal((3, 2), out=buffer) assert_is(buffer, diagonals) assert_array_equal(diagonals, [[0, 3], [1, 2], [2, 5], [3, 4]])
9f2d4c6052089706f9398be3234eb1fc63de95bf
tests/test_comparisons.py
tests/test_comparisons.py
""" Test ChatterBot's statement comparison algorithms. """ from unittest import TestCase, SkipTest from chatterbot.conversation import Statement from chatterbot import comparisons class LevenshteinDistanceTestCase(TestCase): def test_levenshtein_distance_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('') other_statement = Statement('Hello') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_levenshtein_distance_other_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('Hello') other_statement = Statement('') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 1) class SynsetDistanceTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.') class SentimentComparisonTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.sentiment_comparison(statement, other_statement) self.assertEqual(value, 1) class JaccardSimilarityTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.')
Add a few statement comparison tests
Add a few statement comparison tests
Python
bsd-3-clause
Gustavo6046/ChatterBot,davizucon/ChatterBot,vkosuri/ChatterBot,Reinaesaya/OUIRL-ChatBot,maclogan/VirtualPenPal,gunthercox/ChatterBot,Reinaesaya/OUIRL-ChatBot
Add a few statement comparison tests
""" Test ChatterBot's statement comparison algorithms. """ from unittest import TestCase, SkipTest from chatterbot.conversation import Statement from chatterbot import comparisons class LevenshteinDistanceTestCase(TestCase): def test_levenshtein_distance_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('') other_statement = Statement('Hello') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_levenshtein_distance_other_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('Hello') other_statement = Statement('') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 1) class SynsetDistanceTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.') class SentimentComparisonTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.sentiment_comparison(statement, other_statement) self.assertEqual(value, 1) class JaccardSimilarityTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.')
<commit_before><commit_msg>Add a few statement comparison tests<commit_after>
""" Test ChatterBot's statement comparison algorithms. """ from unittest import TestCase, SkipTest from chatterbot.conversation import Statement from chatterbot import comparisons class LevenshteinDistanceTestCase(TestCase): def test_levenshtein_distance_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('') other_statement = Statement('Hello') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_levenshtein_distance_other_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('Hello') other_statement = Statement('') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 1) class SynsetDistanceTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.') class SentimentComparisonTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.sentiment_comparison(statement, other_statement) self.assertEqual(value, 1) class JaccardSimilarityTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.')
Add a few statement comparison tests""" Test ChatterBot's statement comparison algorithms. """ from unittest import TestCase, SkipTest from chatterbot.conversation import Statement from chatterbot import comparisons class LevenshteinDistanceTestCase(TestCase): def test_levenshtein_distance_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('') other_statement = Statement('Hello') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_levenshtein_distance_other_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('Hello') other_statement = Statement('') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 1) class SynsetDistanceTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.') class SentimentComparisonTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.sentiment_comparison(statement, other_statement) self.assertEqual(value, 1) class JaccardSimilarityTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.')
<commit_before><commit_msg>Add a few statement comparison tests<commit_after>""" Test ChatterBot's statement comparison algorithms. """ from unittest import TestCase, SkipTest from chatterbot.conversation import Statement from chatterbot import comparisons class LevenshteinDistanceTestCase(TestCase): def test_levenshtein_distance_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('') other_statement = Statement('Hello') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_levenshtein_distance_other_statement_false(self): """ Falsy values should match by zero. """ statement = Statement('Hello') other_statement = Statement('') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 0) def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.levenshtein_distance(statement, other_statement) self.assertEqual(value, 1) class SynsetDistanceTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.') class SentimentComparisonTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ statement = Statement('Hi HoW ArE yOu?') other_statement = Statement('hI hOw are YoU?') value = comparisons.sentiment_comparison(statement, other_statement) self.assertEqual(value, 1) class JaccardSimilarityTestCase(TestCase): def test_exact_match_different_capitalization(self): """ Test that text capitalization is ignored. """ raise SkipTest('This test needs to be created.')
145d90259715e826ac88f86715e5a7215f3800ac
app/jobs/create_civic_vcf.py
app/jobs/create_civic_vcf.py
class CreateCivicVcf < ActiveJob::Base def perform system("python3 #{script_path} #{vcf_path}") self.class.set(wait_until: Date.tomorrow.midnight).perform_later end def script_path File.join(Rails.root, 'misc_scripts', 'create_civic_vcf.py') end private def civicpy_cache_file_location File.join(Rails.root, 'public', 'downloads', 'civic.vcf') end end
Add job to create CIViC VCF
Add job to create CIViC VCF
Python
mit
genome/civic-server,genome/civic-server,genome/civic-server,genome/civic-server,genome/civic-server
Add job to create CIViC VCF
class CreateCivicVcf < ActiveJob::Base def perform system("python3 #{script_path} #{vcf_path}") self.class.set(wait_until: Date.tomorrow.midnight).perform_later end def script_path File.join(Rails.root, 'misc_scripts', 'create_civic_vcf.py') end private def civicpy_cache_file_location File.join(Rails.root, 'public', 'downloads', 'civic.vcf') end end
<commit_before><commit_msg>Add job to create CIViC VCF<commit_after>
class CreateCivicVcf < ActiveJob::Base def perform system("python3 #{script_path} #{vcf_path}") self.class.set(wait_until: Date.tomorrow.midnight).perform_later end def script_path File.join(Rails.root, 'misc_scripts', 'create_civic_vcf.py') end private def civicpy_cache_file_location File.join(Rails.root, 'public', 'downloads', 'civic.vcf') end end
Add job to create CIViC VCFclass CreateCivicVcf < ActiveJob::Base def perform system("python3 #{script_path} #{vcf_path}") self.class.set(wait_until: Date.tomorrow.midnight).perform_later end def script_path File.join(Rails.root, 'misc_scripts', 'create_civic_vcf.py') end private def civicpy_cache_file_location File.join(Rails.root, 'public', 'downloads', 'civic.vcf') end end
<commit_before><commit_msg>Add job to create CIViC VCF<commit_after>class CreateCivicVcf < ActiveJob::Base def perform system("python3 #{script_path} #{vcf_path}") self.class.set(wait_until: Date.tomorrow.midnight).perform_later end def script_path File.join(Rails.root, 'misc_scripts', 'create_civic_vcf.py') end private def civicpy_cache_file_location File.join(Rails.root, 'public', 'downloads', 'civic.vcf') end end
8fac2cf0cb4d7b530670797ef1c2731238dfa655
frappe/patches/v12_0/delete_duplicate_indexes.py
frappe/patches/v12_0/delete_duplicate_indexes.py
import frappe # This patch deletes all the duplicate indexes created for same column # The patch only checks for indexes with UNIQUE constraints def execute(): all_tables = frappe.db.get_tables() final_deletion_map = frappe._dict() for table in all_tables: indexes_to_keep_map = frappe._dict() indexes_to_delete = [] index_info = frappe.db.sql(""" SELECT column_name, index_name, non_unique FROM information_schema.STATISTICS WHERE table_name=%s AND non_unique=0 ORDER BY index_name; """, table, as_dict=1) for index in index_info: if not indexes_to_keep_map.get(index.column_name): indexes_to_keep_map[index.column_name] = index else: indexes_to_delete.append(index.index_name) if indexes_to_delete: final_deletion_map[table] = indexes_to_delete # build drop index query for (table_name, index_list) in final_deletion_map.items(): query = "ALTER TABLE `{}` ".format(table_name) query_parts = [] for index in index_list: query_parts.append("DROP INDEX `{}`".format(index)) query = query + ', '.join(query_parts) frappe.db.sql(query)
Add patch to delete duplicate issues
fix: Add patch to delete duplicate issues
Python
mit
yashodhank/frappe,vjFaLk/frappe,StrellaGroup/frappe,frappe/frappe,StrellaGroup/frappe,adityahase/frappe,mhbu50/frappe,saurabh6790/frappe,StrellaGroup/frappe,vjFaLk/frappe,almeidapaulopt/frappe,frappe/frappe,almeidapaulopt/frappe,saurabh6790/frappe,yashodhank/frappe,saurabh6790/frappe,yashodhank/frappe,yashodhank/frappe,adityahase/frappe,mhbu50/frappe,mhbu50/frappe,almeidapaulopt/frappe,saurabh6790/frappe,vjFaLk/frappe,adityahase/frappe,adityahase/frappe,vjFaLk/frappe,almeidapaulopt/frappe,mhbu50/frappe,frappe/frappe
fix: Add patch to delete duplicate issues
import frappe # This patch deletes all the duplicate indexes created for same column # The patch only checks for indexes with UNIQUE constraints def execute(): all_tables = frappe.db.get_tables() final_deletion_map = frappe._dict() for table in all_tables: indexes_to_keep_map = frappe._dict() indexes_to_delete = [] index_info = frappe.db.sql(""" SELECT column_name, index_name, non_unique FROM information_schema.STATISTICS WHERE table_name=%s AND non_unique=0 ORDER BY index_name; """, table, as_dict=1) for index in index_info: if not indexes_to_keep_map.get(index.column_name): indexes_to_keep_map[index.column_name] = index else: indexes_to_delete.append(index.index_name) if indexes_to_delete: final_deletion_map[table] = indexes_to_delete # build drop index query for (table_name, index_list) in final_deletion_map.items(): query = "ALTER TABLE `{}` ".format(table_name) query_parts = [] for index in index_list: query_parts.append("DROP INDEX `{}`".format(index)) query = query + ', '.join(query_parts) frappe.db.sql(query)
<commit_before><commit_msg>fix: Add patch to delete duplicate issues<commit_after>
import frappe # This patch deletes all the duplicate indexes created for same column # The patch only checks for indexes with UNIQUE constraints def execute(): all_tables = frappe.db.get_tables() final_deletion_map = frappe._dict() for table in all_tables: indexes_to_keep_map = frappe._dict() indexes_to_delete = [] index_info = frappe.db.sql(""" SELECT column_name, index_name, non_unique FROM information_schema.STATISTICS WHERE table_name=%s AND non_unique=0 ORDER BY index_name; """, table, as_dict=1) for index in index_info: if not indexes_to_keep_map.get(index.column_name): indexes_to_keep_map[index.column_name] = index else: indexes_to_delete.append(index.index_name) if indexes_to_delete: final_deletion_map[table] = indexes_to_delete # build drop index query for (table_name, index_list) in final_deletion_map.items(): query = "ALTER TABLE `{}` ".format(table_name) query_parts = [] for index in index_list: query_parts.append("DROP INDEX `{}`".format(index)) query = query + ', '.join(query_parts) frappe.db.sql(query)
fix: Add patch to delete duplicate issuesimport frappe # This patch deletes all the duplicate indexes created for same column # The patch only checks for indexes with UNIQUE constraints def execute(): all_tables = frappe.db.get_tables() final_deletion_map = frappe._dict() for table in all_tables: indexes_to_keep_map = frappe._dict() indexes_to_delete = [] index_info = frappe.db.sql(""" SELECT column_name, index_name, non_unique FROM information_schema.STATISTICS WHERE table_name=%s AND non_unique=0 ORDER BY index_name; """, table, as_dict=1) for index in index_info: if not indexes_to_keep_map.get(index.column_name): indexes_to_keep_map[index.column_name] = index else: indexes_to_delete.append(index.index_name) if indexes_to_delete: final_deletion_map[table] = indexes_to_delete # build drop index query for (table_name, index_list) in final_deletion_map.items(): query = "ALTER TABLE `{}` ".format(table_name) query_parts = [] for index in index_list: query_parts.append("DROP INDEX `{}`".format(index)) query = query + ', '.join(query_parts) frappe.db.sql(query)
<commit_before><commit_msg>fix: Add patch to delete duplicate issues<commit_after>import frappe # This patch deletes all the duplicate indexes created for same column # The patch only checks for indexes with UNIQUE constraints def execute(): all_tables = frappe.db.get_tables() final_deletion_map = frappe._dict() for table in all_tables: indexes_to_keep_map = frappe._dict() indexes_to_delete = [] index_info = frappe.db.sql(""" SELECT column_name, index_name, non_unique FROM information_schema.STATISTICS WHERE table_name=%s AND non_unique=0 ORDER BY index_name; """, table, as_dict=1) for index in index_info: if not indexes_to_keep_map.get(index.column_name): indexes_to_keep_map[index.column_name] = index else: indexes_to_delete.append(index.index_name) if indexes_to_delete: final_deletion_map[table] = indexes_to_delete # build drop index query for (table_name, index_list) in final_deletion_map.items(): query = "ALTER TABLE `{}` ".format(table_name) query_parts = [] for index in index_list: query_parts.append("DROP INDEX `{}`".format(index)) query = query + ', '.join(query_parts) frappe.db.sql(query)
cb26f56c47e8125f9ac09ddfcd952fc42a9c3669
quickly/families/migrations/0002_familymember_name.py
quickly/families/migrations/0002_familymember_name.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-05-17 16:49 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('families', '0001_initial'), ] operations = [ migrations.AddField( model_name='familymember', name='name', field=models.CharField(blank=True, max_length=255), ), ]
Add name to family member
Add name to family member
Python
mit
wearespindle/quickly.press,wearespindle/quickly.press,wearespindle/quickly.press
Add name to family member
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-05-17 16:49 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('families', '0001_initial'), ] operations = [ migrations.AddField( model_name='familymember', name='name', field=models.CharField(blank=True, max_length=255), ), ]
<commit_before><commit_msg>Add name to family member<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-05-17 16:49 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('families', '0001_initial'), ] operations = [ migrations.AddField( model_name='familymember', name='name', field=models.CharField(blank=True, max_length=255), ), ]
Add name to family member# -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-05-17 16:49 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('families', '0001_initial'), ] operations = [ migrations.AddField( model_name='familymember', name='name', field=models.CharField(blank=True, max_length=255), ), ]
<commit_before><commit_msg>Add name to family member<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.10.2 on 2017-05-17 16:49 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('families', '0001_initial'), ] operations = [ migrations.AddField( model_name='familymember', name='name', field=models.CharField(blank=True, max_length=255), ), ]
fee9c862334bfd290a2b9bd1e07b93badfb66ae5
letsencrypt-postfix/TestPostfixConfigGenerator.py
letsencrypt-postfix/TestPostfixConfigGenerator.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import io import logging import unittest import Config import PostfixConfigGenerator as pcg logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) # Fake Postfix Configs names_only_config = """myhostname = mail.fubard.org mydomain = fubard.org myorigin = fubard.org""" def GetFakeOpen(fake_file_contents): fake_file = io.StringIO() # cast this to unicode for py2 fake_file.write(fake_file_contents) fake_file.seek(0) def FakeOpen(_): return fake_file return FakeOpen class TestPostfixConfigGenerator(unittest.TestCase): def setUp(self): self.fopen_names_only_config = GetFakeOpen(names_only_config) #self.config = Config.Config() self.config = None self.postfix_dir = 'tests/' def tearDown(self): pass def testGetAllNames(self): sorted_names = ('fubard.org', 'mail.fubard.org') postfix_config_gen = pcg.PostfixConfigGenerator( self.config, self.postfix_dir, fixup=True, fopen=self.fopen_names_only_config ) self.assertEqual(sorted_names, postfix_config_gen.get_all_names()) if __name__ == '__main__': unittest.main()
Add failing test for get_all_names.
Add failing test for get_all_names.
Python
apache-2.0
letsencrypt/letsencrypt,lmcro/letsencrypt,stweil/letsencrypt,lmcro/letsencrypt,letsencrypt/letsencrypt,stweil/letsencrypt
Add failing test for get_all_names.
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import io import logging import unittest import Config import PostfixConfigGenerator as pcg logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) # Fake Postfix Configs names_only_config = """myhostname = mail.fubard.org mydomain = fubard.org myorigin = fubard.org""" def GetFakeOpen(fake_file_contents): fake_file = io.StringIO() # cast this to unicode for py2 fake_file.write(fake_file_contents) fake_file.seek(0) def FakeOpen(_): return fake_file return FakeOpen class TestPostfixConfigGenerator(unittest.TestCase): def setUp(self): self.fopen_names_only_config = GetFakeOpen(names_only_config) #self.config = Config.Config() self.config = None self.postfix_dir = 'tests/' def tearDown(self): pass def testGetAllNames(self): sorted_names = ('fubard.org', 'mail.fubard.org') postfix_config_gen = pcg.PostfixConfigGenerator( self.config, self.postfix_dir, fixup=True, fopen=self.fopen_names_only_config ) self.assertEqual(sorted_names, postfix_config_gen.get_all_names()) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add failing test for get_all_names.<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import io import logging import unittest import Config import PostfixConfigGenerator as pcg logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) # Fake Postfix Configs names_only_config = """myhostname = mail.fubard.org mydomain = fubard.org myorigin = fubard.org""" def GetFakeOpen(fake_file_contents): fake_file = io.StringIO() # cast this to unicode for py2 fake_file.write(fake_file_contents) fake_file.seek(0) def FakeOpen(_): return fake_file return FakeOpen class TestPostfixConfigGenerator(unittest.TestCase): def setUp(self): self.fopen_names_only_config = GetFakeOpen(names_only_config) #self.config = Config.Config() self.config = None self.postfix_dir = 'tests/' def tearDown(self): pass def testGetAllNames(self): sorted_names = ('fubard.org', 'mail.fubard.org') postfix_config_gen = pcg.PostfixConfigGenerator( self.config, self.postfix_dir, fixup=True, fopen=self.fopen_names_only_config ) self.assertEqual(sorted_names, postfix_config_gen.get_all_names()) if __name__ == '__main__': unittest.main()
Add failing test for get_all_names.#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import io import logging import unittest import Config import PostfixConfigGenerator as pcg logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) # Fake Postfix Configs names_only_config = """myhostname = mail.fubard.org mydomain = fubard.org myorigin = fubard.org""" def GetFakeOpen(fake_file_contents): fake_file = io.StringIO() # cast this to unicode for py2 fake_file.write(fake_file_contents) fake_file.seek(0) def FakeOpen(_): return fake_file return FakeOpen class TestPostfixConfigGenerator(unittest.TestCase): def setUp(self): self.fopen_names_only_config = GetFakeOpen(names_only_config) #self.config = Config.Config() self.config = None self.postfix_dir = 'tests/' def tearDown(self): pass def testGetAllNames(self): sorted_names = ('fubard.org', 'mail.fubard.org') postfix_config_gen = pcg.PostfixConfigGenerator( self.config, self.postfix_dir, fixup=True, fopen=self.fopen_names_only_config ) self.assertEqual(sorted_names, postfix_config_gen.get_all_names()) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add failing test for get_all_names.<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import io import logging import unittest import Config import PostfixConfigGenerator as pcg logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler()) # Fake Postfix Configs names_only_config = """myhostname = mail.fubard.org mydomain = fubard.org myorigin = fubard.org""" def GetFakeOpen(fake_file_contents): fake_file = io.StringIO() # cast this to unicode for py2 fake_file.write(fake_file_contents) fake_file.seek(0) def FakeOpen(_): return fake_file return FakeOpen class TestPostfixConfigGenerator(unittest.TestCase): def setUp(self): self.fopen_names_only_config = GetFakeOpen(names_only_config) #self.config = Config.Config() self.config = None self.postfix_dir = 'tests/' def tearDown(self): pass def testGetAllNames(self): sorted_names = ('fubard.org', 'mail.fubard.org') postfix_config_gen = pcg.PostfixConfigGenerator( self.config, self.postfix_dir, fixup=True, fopen=self.fopen_names_only_config ) self.assertEqual(sorted_names, postfix_config_gen.get_all_names()) if __name__ == '__main__': unittest.main()
4d531a4ae2036785dffbb67d96fe013c7ca44784
localtv/management/commands/send_welcome_email.py
localtv/management/commands/send_welcome_email.py
# Copyright 2011 - Participatory Culture Foundation # # This file is part of Miro Community. # # Miro Community is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Miro Community is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Miro Community. If not, see <http://www.gnu.org/licenses/>. from django.core.management.base import BaseCommand import localtv.tiers import localtv.models class Command(BaseCommand): def handle(self, *args, **options): # We send this email to the person who owns the site. So we use # the tiers system's ability to send email. site_location = localtv.models.SiteLocation.objects.get_current() if site_location.already_sent_welcome_email: return # If we haven't sent it, prepare the email # Now send the sucker subject = "%s: Welcome to Miro Community" template = 'localtv/admin/ti mark a note in the SiteLocation to indicate we have sent it site_location.already_sent_welcome_email = True site_location.save() localtv.tiers.send_tiers_related_email(" for site_location_column in localtv.tiers.nightly_warnings(): # Save a note saying we sent the notice sitelocation = localtv.models.SiteLocation.objects.get_current() setattr(sitelocation, site_location_column, True) sitelocation.save() template_name, subject = column2template[site_location_column] localtv.tiers.send_tiers_related_email(subject, template_name, sitelocation)
Add management command to send welcome email
Add management command to send welcome email
Python
agpl-3.0
pculture/mirocommunity,pculture/mirocommunity,pculture/mirocommunity,pculture/mirocommunity
Add management command to send welcome email
# Copyright 2011 - Participatory Culture Foundation # # This file is part of Miro Community. # # Miro Community is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Miro Community is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Miro Community. If not, see <http://www.gnu.org/licenses/>. from django.core.management.base import BaseCommand import localtv.tiers import localtv.models class Command(BaseCommand): def handle(self, *args, **options): # We send this email to the person who owns the site. So we use # the tiers system's ability to send email. site_location = localtv.models.SiteLocation.objects.get_current() if site_location.already_sent_welcome_email: return # If we haven't sent it, prepare the email # Now send the sucker subject = "%s: Welcome to Miro Community" template = 'localtv/admin/ti mark a note in the SiteLocation to indicate we have sent it site_location.already_sent_welcome_email = True site_location.save() localtv.tiers.send_tiers_related_email(" for site_location_column in localtv.tiers.nightly_warnings(): # Save a note saying we sent the notice sitelocation = localtv.models.SiteLocation.objects.get_current() setattr(sitelocation, site_location_column, True) sitelocation.save() template_name, subject = column2template[site_location_column] localtv.tiers.send_tiers_related_email(subject, template_name, sitelocation)
<commit_before><commit_msg>Add management command to send welcome email<commit_after>
# Copyright 2011 - Participatory Culture Foundation # # This file is part of Miro Community. # # Miro Community is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Miro Community is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Miro Community. If not, see <http://www.gnu.org/licenses/>. from django.core.management.base import BaseCommand import localtv.tiers import localtv.models class Command(BaseCommand): def handle(self, *args, **options): # We send this email to the person who owns the site. So we use # the tiers system's ability to send email. site_location = localtv.models.SiteLocation.objects.get_current() if site_location.already_sent_welcome_email: return # If we haven't sent it, prepare the email # Now send the sucker subject = "%s: Welcome to Miro Community" template = 'localtv/admin/ti mark a note in the SiteLocation to indicate we have sent it site_location.already_sent_welcome_email = True site_location.save() localtv.tiers.send_tiers_related_email(" for site_location_column in localtv.tiers.nightly_warnings(): # Save a note saying we sent the notice sitelocation = localtv.models.SiteLocation.objects.get_current() setattr(sitelocation, site_location_column, True) sitelocation.save() template_name, subject = column2template[site_location_column] localtv.tiers.send_tiers_related_email(subject, template_name, sitelocation)
Add management command to send welcome email# Copyright 2011 - Participatory Culture Foundation # # This file is part of Miro Community. # # Miro Community is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Miro Community is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Miro Community. If not, see <http://www.gnu.org/licenses/>. from django.core.management.base import BaseCommand import localtv.tiers import localtv.models class Command(BaseCommand): def handle(self, *args, **options): # We send this email to the person who owns the site. So we use # the tiers system's ability to send email. site_location = localtv.models.SiteLocation.objects.get_current() if site_location.already_sent_welcome_email: return # If we haven't sent it, prepare the email # Now send the sucker subject = "%s: Welcome to Miro Community" template = 'localtv/admin/ti mark a note in the SiteLocation to indicate we have sent it site_location.already_sent_welcome_email = True site_location.save() localtv.tiers.send_tiers_related_email(" for site_location_column in localtv.tiers.nightly_warnings(): # Save a note saying we sent the notice sitelocation = localtv.models.SiteLocation.objects.get_current() setattr(sitelocation, site_location_column, True) sitelocation.save() template_name, subject = column2template[site_location_column] localtv.tiers.send_tiers_related_email(subject, template_name, sitelocation)
<commit_before><commit_msg>Add management command to send welcome email<commit_after># Copyright 2011 - Participatory Culture Foundation # # This file is part of Miro Community. # # Miro Community is free software: you can redistribute it and/or modify it # under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or (at your # option) any later version. # # Miro Community is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with Miro Community. If not, see <http://www.gnu.org/licenses/>. from django.core.management.base import BaseCommand import localtv.tiers import localtv.models class Command(BaseCommand): def handle(self, *args, **options): # We send this email to the person who owns the site. So we use # the tiers system's ability to send email. site_location = localtv.models.SiteLocation.objects.get_current() if site_location.already_sent_welcome_email: return # If we haven't sent it, prepare the email # Now send the sucker subject = "%s: Welcome to Miro Community" template = 'localtv/admin/ti mark a note in the SiteLocation to indicate we have sent it site_location.already_sent_welcome_email = True site_location.save() localtv.tiers.send_tiers_related_email(" for site_location_column in localtv.tiers.nightly_warnings(): # Save a note saying we sent the notice sitelocation = localtv.models.SiteLocation.objects.get_current() setattr(sitelocation, site_location_column, True) sitelocation.save() template_name, subject = column2template[site_location_column] localtv.tiers.send_tiers_related_email(subject, template_name, sitelocation)
518dd9401b41fc02c815ee6e0ca3b6b113d87f43
mica/stats/tests/test_guide_stats.py
mica/stats/tests/test_guide_stats.py
import tempfile import os from .. import guide_stats def test_calc_stats(): guide_stats.calc_stats(17210) def test_make_gui_stats(): """ Save the guide stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) guide_stats.TABLE_FILE = fn obsid = 20001 obsid_info, gui, star_info, catalog, temp = guide_stats.calc_stats(obsid) t = guide_stats.table_gui_stats(obsid_info, gui, star_info, catalog, temp) guide_stats._save_gui_stats(t) os.unlink(fn)
Add a test that makes a new guide stats database
Add a test that makes a new guide stats database
Python
bsd-3-clause
sot/mica,sot/mica
Add a test that makes a new guide stats database
import tempfile import os from .. import guide_stats def test_calc_stats(): guide_stats.calc_stats(17210) def test_make_gui_stats(): """ Save the guide stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) guide_stats.TABLE_FILE = fn obsid = 20001 obsid_info, gui, star_info, catalog, temp = guide_stats.calc_stats(obsid) t = guide_stats.table_gui_stats(obsid_info, gui, star_info, catalog, temp) guide_stats._save_gui_stats(t) os.unlink(fn)
<commit_before><commit_msg>Add a test that makes a new guide stats database<commit_after>
import tempfile import os from .. import guide_stats def test_calc_stats(): guide_stats.calc_stats(17210) def test_make_gui_stats(): """ Save the guide stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) guide_stats.TABLE_FILE = fn obsid = 20001 obsid_info, gui, star_info, catalog, temp = guide_stats.calc_stats(obsid) t = guide_stats.table_gui_stats(obsid_info, gui, star_info, catalog, temp) guide_stats._save_gui_stats(t) os.unlink(fn)
Add a test that makes a new guide stats databaseimport tempfile import os from .. import guide_stats def test_calc_stats(): guide_stats.calc_stats(17210) def test_make_gui_stats(): """ Save the guide stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) guide_stats.TABLE_FILE = fn obsid = 20001 obsid_info, gui, star_info, catalog, temp = guide_stats.calc_stats(obsid) t = guide_stats.table_gui_stats(obsid_info, gui, star_info, catalog, temp) guide_stats._save_gui_stats(t) os.unlink(fn)
<commit_before><commit_msg>Add a test that makes a new guide stats database<commit_after>import tempfile import os from .. import guide_stats def test_calc_stats(): guide_stats.calc_stats(17210) def test_make_gui_stats(): """ Save the guide stats for one obsid into a newly-created table """ # Get a temporary file, but then delete it, because _save_acq_stats will only # make a new table if the supplied file doesn't exist fh, fn = tempfile.mkstemp(suffix='.h5') os.unlink(fn) guide_stats.TABLE_FILE = fn obsid = 20001 obsid_info, gui, star_info, catalog, temp = guide_stats.calc_stats(obsid) t = guide_stats.table_gui_stats(obsid_info, gui, star_info, catalog, temp) guide_stats._save_gui_stats(t) os.unlink(fn)
0f5de6aeae62337797993697307335276fd1d11a
python/005_smallest_multiple/smallest_multiple.py
python/005_smallest_multiple/smallest_multiple.py
from collections import Counter from functools import reduce from operator import mul from typing import Generator _range = tuple(range(2, 21)) def _factor_generator(number: int) -> Generator: while number % 2 == 0: number = number / 2 yield 2 cursor = 3 while cursor <= number: if number % cursor == 0: yield cursor number = number / cursor else: cursor = cursor + 2 factors = tuple(map(tuple, map(_factor_generator, _range))) counters = tuple(map(Counter, factors)) final_count = reduce(lambda x, y: x | y, counters) smallest_multiple = reduce(mul, [prime ** power for prime, power in final_count.items()])
Add solution to problme 5
Add solution to problme 5
Python
bsd-3-clause
gidj/euler,gidj/euler
Add solution to problme 5
from collections import Counter from functools import reduce from operator import mul from typing import Generator _range = tuple(range(2, 21)) def _factor_generator(number: int) -> Generator: while number % 2 == 0: number = number / 2 yield 2 cursor = 3 while cursor <= number: if number % cursor == 0: yield cursor number = number / cursor else: cursor = cursor + 2 factors = tuple(map(tuple, map(_factor_generator, _range))) counters = tuple(map(Counter, factors)) final_count = reduce(lambda x, y: x | y, counters) smallest_multiple = reduce(mul, [prime ** power for prime, power in final_count.items()])
<commit_before><commit_msg>Add solution to problme 5<commit_after>
from collections import Counter from functools import reduce from operator import mul from typing import Generator _range = tuple(range(2, 21)) def _factor_generator(number: int) -> Generator: while number % 2 == 0: number = number / 2 yield 2 cursor = 3 while cursor <= number: if number % cursor == 0: yield cursor number = number / cursor else: cursor = cursor + 2 factors = tuple(map(tuple, map(_factor_generator, _range))) counters = tuple(map(Counter, factors)) final_count = reduce(lambda x, y: x | y, counters) smallest_multiple = reduce(mul, [prime ** power for prime, power in final_count.items()])
Add solution to problme 5from collections import Counter from functools import reduce from operator import mul from typing import Generator _range = tuple(range(2, 21)) def _factor_generator(number: int) -> Generator: while number % 2 == 0: number = number / 2 yield 2 cursor = 3 while cursor <= number: if number % cursor == 0: yield cursor number = number / cursor else: cursor = cursor + 2 factors = tuple(map(tuple, map(_factor_generator, _range))) counters = tuple(map(Counter, factors)) final_count = reduce(lambda x, y: x | y, counters) smallest_multiple = reduce(mul, [prime ** power for prime, power in final_count.items()])
<commit_before><commit_msg>Add solution to problme 5<commit_after>from collections import Counter from functools import reduce from operator import mul from typing import Generator _range = tuple(range(2, 21)) def _factor_generator(number: int) -> Generator: while number % 2 == 0: number = number / 2 yield 2 cursor = 3 while cursor <= number: if number % cursor == 0: yield cursor number = number / cursor else: cursor = cursor + 2 factors = tuple(map(tuple, map(_factor_generator, _range))) counters = tuple(map(Counter, factors)) final_count = reduce(lambda x, y: x | y, counters) smallest_multiple = reduce(mul, [prime ** power for prime, power in final_count.items()])
4b125b0d6f1c3710499be585b08ecf70663415c3
work/migrations/0002_auto_20170810_1630.py
work/migrations/0002_auto_20170810_1630.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-08-10 14:30 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('work', '0001_initial'), ] operations = [ migrations.AlterField( model_name='work', name='status', field=models.CharField(choices=[('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')], default='d', max_length=1), ), ]
Set post stat on draft by default
Set post stat on draft by default
Python
mit
oktomus/website,oktomus/website,oktomus/website
Set post stat on draft by default
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-08-10 14:30 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('work', '0001_initial'), ] operations = [ migrations.AlterField( model_name='work', name='status', field=models.CharField(choices=[('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')], default='d', max_length=1), ), ]
<commit_before><commit_msg>Set post stat on draft by default<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-08-10 14:30 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('work', '0001_initial'), ] operations = [ migrations.AlterField( model_name='work', name='status', field=models.CharField(choices=[('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')], default='d', max_length=1), ), ]
Set post stat on draft by default# -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-08-10 14:30 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('work', '0001_initial'), ] operations = [ migrations.AlterField( model_name='work', name='status', field=models.CharField(choices=[('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')], default='d', max_length=1), ), ]
<commit_before><commit_msg>Set post stat on draft by default<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.11.2 on 2017-08-10 14:30 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('work', '0001_initial'), ] operations = [ migrations.AlterField( model_name='work', name='status', field=models.CharField(choices=[('d', 'Draft'), ('p', 'Published'), ('w', 'Withdrawn')], default='d', max_length=1), ), ]
1d6c17e5adc3df4de86636ef77fc0a406bf065e9
scrapi/harvesters/dryad.py
scrapi/harvesters/dryad.py
''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True
''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from lxml import etree import logging from scrapi.base import OAIHarvester logger = logging.getLogger(__name__) class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True def normalize(self, raw_doc): str_result = raw_doc.get('doc') result = etree.XML(str_result) status = (result.xpath('ns0:header/@status', namespaces=self.namespaces) or [''])[0] if status.lower() == 'deleted' or status.lower() == 'item is not available': logger.info('Not normalizing record with ID {}, status {}'.format(raw_doc['docID'], status)) return None return super(OAIHarvester, self).normalize(raw_doc)
Add custom normaluze function that will not get non existant items
Add custom normaluze function that will not get non existant items
Python
apache-2.0
icereval/scrapi,CenterForOpenScience/scrapi,fabianvf/scrapi,mehanig/scrapi,alexgarciac/scrapi,erinspace/scrapi,felliott/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,ostwald/scrapi,fabianvf/scrapi,jeffreyliu3230/scrapi,mehanig/scrapi
''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True Add custom normaluze function that will not get non existant items
''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from lxml import etree import logging from scrapi.base import OAIHarvester logger = logging.getLogger(__name__) class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True def normalize(self, raw_doc): str_result = raw_doc.get('doc') result = etree.XML(str_result) status = (result.xpath('ns0:header/@status', namespaces=self.namespaces) or [''])[0] if status.lower() == 'deleted' or status.lower() == 'item is not available': logger.info('Not normalizing record with ID {}, status {}'.format(raw_doc['docID'], status)) return None return super(OAIHarvester, self).normalize(raw_doc)
<commit_before>''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True <commit_msg>Add custom normaluze function that will not get non existant items<commit_after>
''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from lxml import etree import logging from scrapi.base import OAIHarvester logger = logging.getLogger(__name__) class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True def normalize(self, raw_doc): str_result = raw_doc.get('doc') result = etree.XML(str_result) status = (result.xpath('ns0:header/@status', namespaces=self.namespaces) or [''])[0] if status.lower() == 'deleted' or status.lower() == 'item is not available': logger.info('Not normalizing record with ID {}, status {}'.format(raw_doc['docID'], status)) return None return super(OAIHarvester, self).normalize(raw_doc)
''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True Add custom normaluze function that will not get non existant items''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from lxml import etree import logging from scrapi.base import OAIHarvester logger = logging.getLogger(__name__) class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True def normalize(self, raw_doc): str_result = raw_doc.get('doc') result = etree.XML(str_result) status = (result.xpath('ns0:header/@status', namespaces=self.namespaces) or [''])[0] if status.lower() == 'deleted' or status.lower() == 'item is not available': logger.info('Not normalizing record with ID {}, status {}'.format(raw_doc['docID'], status)) return None return super(OAIHarvester, self).normalize(raw_doc)
<commit_before>''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from scrapi.base import OAIHarvester class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True <commit_msg>Add custom normaluze function that will not get non existant items<commit_after>''' Harvester for Dryad for the SHARE project Example API call: http://www.datadryad.org/oai/request?verb=ListRecords&metadataPrefix=oai_dc ''' from __future__ import unicode_literals from lxml import etree import logging from scrapi.base import OAIHarvester logger = logging.getLogger(__name__) class DryadHarvester(OAIHarvester): short_name = 'dryad' long_name = 'Dryad Data Repository' url = 'http://www.datadryad.org/oai/request' base_url = 'http://www.datadryad.org/oai/request' property_list = ['rights', 'format', 'relation', 'date', 'identifier', 'type', 'setSpec'] timezone_granularity = True def normalize(self, raw_doc): str_result = raw_doc.get('doc') result = etree.XML(str_result) status = (result.xpath('ns0:header/@status', namespaces=self.namespaces) or [''])[0] if status.lower() == 'deleted' or status.lower() == 'item is not available': logger.info('Not normalizing record with ID {}, status {}'.format(raw_doc['docID'], status)) return None return super(OAIHarvester, self).normalize(raw_doc)
339438e97f91958b1fc0cb1c2d85b3c6d97a1efa
lib/allosmod/config/__init__.py
lib/allosmod/config/__init__.py
# Dummy config; should not be installed. This is here simply so that the # test suite works. import os datadir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', '..', 'data') local_scratch='/tmp' global_scratch='/scrapp'
Add dummy config file so that tests work.
Add dummy config file so that tests work.
Python
lgpl-2.1
salilab/allosmod-lib,salilab/allosmod-lib,salilab/allosmod-lib,salilab/allosmod-lib,salilab/allosmod-lib
Add dummy config file so that tests work.
# Dummy config; should not be installed. This is here simply so that the # test suite works. import os datadir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', '..', 'data') local_scratch='/tmp' global_scratch='/scrapp'
<commit_before><commit_msg>Add dummy config file so that tests work.<commit_after>
# Dummy config; should not be installed. This is here simply so that the # test suite works. import os datadir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', '..', 'data') local_scratch='/tmp' global_scratch='/scrapp'
Add dummy config file so that tests work.# Dummy config; should not be installed. This is here simply so that the # test suite works. import os datadir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', '..', 'data') local_scratch='/tmp' global_scratch='/scrapp'
<commit_before><commit_msg>Add dummy config file so that tests work.<commit_after># Dummy config; should not be installed. This is here simply so that the # test suite works. import os datadir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', '..', '..', 'data') local_scratch='/tmp' global_scratch='/scrapp'
60c99b91cede21a509030f276684bb2ff706ec3c
wdom/tests/test_window.py
wdom/tests/test_window.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from time import sleep from unittest.mock import MagicMock from wdom.document import get_document from wdom.server import _tornado from wdom.server.handler import event_handler from wdom.testing import TestCase from wdom.window import customElements class TestWindow(TestCase): def setUp(self): super().setUp() self.doc = get_document() self.win = self.doc.defaultView self.conn_mock = MagicMock() _tornado.connections.append(self.conn_mock) def tearDown(self): _tornado.connections.remove(self.conn_mock) def test_custom_elements_registory(self): self.assertIs(self.win.customElements, customElements) def test_document(self): self.assertIs(self.win.document, self.doc) self.assertIs(self.win, self.doc.defaultView) def test_rimo_id(self): self.assertEqual(self.win.rimo_id, 'window') def test_add_eventlistener(self): mock = MagicMock(_is_coroutine=False) self.win.js_exec = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) self.win.js_exec.assert_called_once_with('addEventListener', 'click') msg = { 'type': 'click', 'currentTarget': {'id': 'window'}, 'target': {'id': 'window'}, } e = event_handler(msg) mock.assert_called_once_with(e) def test_add_event_handler_doc(self): mock = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) msg = { 'type': 'click', 'currentTarget': {'id': 'document'}, 'target': {'id': 'document'}, } event_handler(msg) sleep(0.1) mock.assert_not_called()
Add test for window module
Add test for window module
Python
mit
miyakogi/wdom,miyakogi/wdom,miyakogi/wdom
Add test for window module
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from time import sleep from unittest.mock import MagicMock from wdom.document import get_document from wdom.server import _tornado from wdom.server.handler import event_handler from wdom.testing import TestCase from wdom.window import customElements class TestWindow(TestCase): def setUp(self): super().setUp() self.doc = get_document() self.win = self.doc.defaultView self.conn_mock = MagicMock() _tornado.connections.append(self.conn_mock) def tearDown(self): _tornado.connections.remove(self.conn_mock) def test_custom_elements_registory(self): self.assertIs(self.win.customElements, customElements) def test_document(self): self.assertIs(self.win.document, self.doc) self.assertIs(self.win, self.doc.defaultView) def test_rimo_id(self): self.assertEqual(self.win.rimo_id, 'window') def test_add_eventlistener(self): mock = MagicMock(_is_coroutine=False) self.win.js_exec = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) self.win.js_exec.assert_called_once_with('addEventListener', 'click') msg = { 'type': 'click', 'currentTarget': {'id': 'window'}, 'target': {'id': 'window'}, } e = event_handler(msg) mock.assert_called_once_with(e) def test_add_event_handler_doc(self): mock = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) msg = { 'type': 'click', 'currentTarget': {'id': 'document'}, 'target': {'id': 'document'}, } event_handler(msg) sleep(0.1) mock.assert_not_called()
<commit_before><commit_msg>Add test for window module<commit_after>
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from time import sleep from unittest.mock import MagicMock from wdom.document import get_document from wdom.server import _tornado from wdom.server.handler import event_handler from wdom.testing import TestCase from wdom.window import customElements class TestWindow(TestCase): def setUp(self): super().setUp() self.doc = get_document() self.win = self.doc.defaultView self.conn_mock = MagicMock() _tornado.connections.append(self.conn_mock) def tearDown(self): _tornado.connections.remove(self.conn_mock) def test_custom_elements_registory(self): self.assertIs(self.win.customElements, customElements) def test_document(self): self.assertIs(self.win.document, self.doc) self.assertIs(self.win, self.doc.defaultView) def test_rimo_id(self): self.assertEqual(self.win.rimo_id, 'window') def test_add_eventlistener(self): mock = MagicMock(_is_coroutine=False) self.win.js_exec = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) self.win.js_exec.assert_called_once_with('addEventListener', 'click') msg = { 'type': 'click', 'currentTarget': {'id': 'window'}, 'target': {'id': 'window'}, } e = event_handler(msg) mock.assert_called_once_with(e) def test_add_event_handler_doc(self): mock = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) msg = { 'type': 'click', 'currentTarget': {'id': 'document'}, 'target': {'id': 'document'}, } event_handler(msg) sleep(0.1) mock.assert_not_called()
Add test for window module#!/usr/bin/env python3 # -*- coding: utf-8 -*- from time import sleep from unittest.mock import MagicMock from wdom.document import get_document from wdom.server import _tornado from wdom.server.handler import event_handler from wdom.testing import TestCase from wdom.window import customElements class TestWindow(TestCase): def setUp(self): super().setUp() self.doc = get_document() self.win = self.doc.defaultView self.conn_mock = MagicMock() _tornado.connections.append(self.conn_mock) def tearDown(self): _tornado.connections.remove(self.conn_mock) def test_custom_elements_registory(self): self.assertIs(self.win.customElements, customElements) def test_document(self): self.assertIs(self.win.document, self.doc) self.assertIs(self.win, self.doc.defaultView) def test_rimo_id(self): self.assertEqual(self.win.rimo_id, 'window') def test_add_eventlistener(self): mock = MagicMock(_is_coroutine=False) self.win.js_exec = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) self.win.js_exec.assert_called_once_with('addEventListener', 'click') msg = { 'type': 'click', 'currentTarget': {'id': 'window'}, 'target': {'id': 'window'}, } e = event_handler(msg) mock.assert_called_once_with(e) def test_add_event_handler_doc(self): mock = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) msg = { 'type': 'click', 'currentTarget': {'id': 'document'}, 'target': {'id': 'document'}, } event_handler(msg) sleep(0.1) mock.assert_not_called()
<commit_before><commit_msg>Add test for window module<commit_after>#!/usr/bin/env python3 # -*- coding: utf-8 -*- from time import sleep from unittest.mock import MagicMock from wdom.document import get_document from wdom.server import _tornado from wdom.server.handler import event_handler from wdom.testing import TestCase from wdom.window import customElements class TestWindow(TestCase): def setUp(self): super().setUp() self.doc = get_document() self.win = self.doc.defaultView self.conn_mock = MagicMock() _tornado.connections.append(self.conn_mock) def tearDown(self): _tornado.connections.remove(self.conn_mock) def test_custom_elements_registory(self): self.assertIs(self.win.customElements, customElements) def test_document(self): self.assertIs(self.win.document, self.doc) self.assertIs(self.win, self.doc.defaultView) def test_rimo_id(self): self.assertEqual(self.win.rimo_id, 'window') def test_add_eventlistener(self): mock = MagicMock(_is_coroutine=False) self.win.js_exec = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) self.win.js_exec.assert_called_once_with('addEventListener', 'click') msg = { 'type': 'click', 'currentTarget': {'id': 'window'}, 'target': {'id': 'window'}, } e = event_handler(msg) mock.assert_called_once_with(e) def test_add_event_handler_doc(self): mock = MagicMock(_is_coroutine=False) self.win.addEventListener('click', mock) msg = { 'type': 'click', 'currentTarget': {'id': 'document'}, 'target': {'id': 'document'}, } event_handler(msg) sleep(0.1) mock.assert_not_called()
00b35fc6373ec290d92b4b6aa71972b1f9d94105
test/python3_regression.py
test/python3_regression.py
from __future__ import absolute_import, division, print_function import os def find_new_python3_incompatible_code(module_under_test): ''' Check source code to see if any files violate Python 3 syntax that previously did not. Example call: def test_find_python3_violations(): import xia2 import pytest import dials.test.python3_regression as py3test result = py3test.find_new_python3_incompatible_code(xia2) if result is None: pytest.skip('No python3 interpreter available') elif result: pytest.fail(result) Known violations are kept in file .known-python3-violations in the module directory. ''' # File containing list of excluded files allowed_broken_files_list = '.known-python3-violations' # Mask all *PYTHON* variables from environment - Python3 will not like cctbx python settings environ_override = { k: '' for k in list(os.environ) if 'PYTHON' in k } from dials.util.procrunner import run_process module_path = module_under_test.__path__[0] try: result = run_process(['python3', '-m', 'compileall', '-x', '\.git', '-q', module_path], environ=environ_override, print_stdout=False) except OSError as e: if e.errno == 2: return None raise if result['stderr']: return 'Python3 compilation exited with unexpected STDERR output' if not result['exitcode']: # No compilation errors return False errors = map(lambda x: x.replace(module_path + os.path.sep, '').strip(), result['stdout'].split('***')) errors = filter(lambda x: "'" in x, errors) broken_files = { error.split("'")[1]: error for error in errors } exclusion_file = os.path.join(module_path, allowed_broken_files_list) with open(exclusion_file + '.log', 'w') as fh: fh.write("\n".join(sorted(broken_files))) if os.path.exists(exclusion_file): with open(exclusion_file, 'r') as fh: excluded_files = fh.read().splitlines() broken_files = { filename: broken_files[filename] for filename in broken_files if filename not in excluded_files } if not broken_files: # No syntax violations in new files return False for filename in sorted(broken_files): print(broken_files[filename], end="\n\n") return "{} file[s] contain newly introduced Python3 syntax errors".format(len(broken_files))
Add Jenkins Python3 monster to dials, but don't enable it here yet.
Add Jenkins Python3 monster to dials, but don't enable it here yet.
Python
bsd-3-clause
dials/dials,dials/dials,dials/dials,dials/dials,dials/dials
Add Jenkins Python3 monster to dials, but don't enable it here yet.
from __future__ import absolute_import, division, print_function import os def find_new_python3_incompatible_code(module_under_test): ''' Check source code to see if any files violate Python 3 syntax that previously did not. Example call: def test_find_python3_violations(): import xia2 import pytest import dials.test.python3_regression as py3test result = py3test.find_new_python3_incompatible_code(xia2) if result is None: pytest.skip('No python3 interpreter available') elif result: pytest.fail(result) Known violations are kept in file .known-python3-violations in the module directory. ''' # File containing list of excluded files allowed_broken_files_list = '.known-python3-violations' # Mask all *PYTHON* variables from environment - Python3 will not like cctbx python settings environ_override = { k: '' for k in list(os.environ) if 'PYTHON' in k } from dials.util.procrunner import run_process module_path = module_under_test.__path__[0] try: result = run_process(['python3', '-m', 'compileall', '-x', '\.git', '-q', module_path], environ=environ_override, print_stdout=False) except OSError as e: if e.errno == 2: return None raise if result['stderr']: return 'Python3 compilation exited with unexpected STDERR output' if not result['exitcode']: # No compilation errors return False errors = map(lambda x: x.replace(module_path + os.path.sep, '').strip(), result['stdout'].split('***')) errors = filter(lambda x: "'" in x, errors) broken_files = { error.split("'")[1]: error for error in errors } exclusion_file = os.path.join(module_path, allowed_broken_files_list) with open(exclusion_file + '.log', 'w') as fh: fh.write("\n".join(sorted(broken_files))) if os.path.exists(exclusion_file): with open(exclusion_file, 'r') as fh: excluded_files = fh.read().splitlines() broken_files = { filename: broken_files[filename] for filename in broken_files if filename not in excluded_files } if not broken_files: # No syntax violations in new files return False for filename in sorted(broken_files): print(broken_files[filename], end="\n\n") return "{} file[s] contain newly introduced Python3 syntax errors".format(len(broken_files))
<commit_before><commit_msg>Add Jenkins Python3 monster to dials, but don't enable it here yet.<commit_after>
from __future__ import absolute_import, division, print_function import os def find_new_python3_incompatible_code(module_under_test): ''' Check source code to see if any files violate Python 3 syntax that previously did not. Example call: def test_find_python3_violations(): import xia2 import pytest import dials.test.python3_regression as py3test result = py3test.find_new_python3_incompatible_code(xia2) if result is None: pytest.skip('No python3 interpreter available') elif result: pytest.fail(result) Known violations are kept in file .known-python3-violations in the module directory. ''' # File containing list of excluded files allowed_broken_files_list = '.known-python3-violations' # Mask all *PYTHON* variables from environment - Python3 will not like cctbx python settings environ_override = { k: '' for k in list(os.environ) if 'PYTHON' in k } from dials.util.procrunner import run_process module_path = module_under_test.__path__[0] try: result = run_process(['python3', '-m', 'compileall', '-x', '\.git', '-q', module_path], environ=environ_override, print_stdout=False) except OSError as e: if e.errno == 2: return None raise if result['stderr']: return 'Python3 compilation exited with unexpected STDERR output' if not result['exitcode']: # No compilation errors return False errors = map(lambda x: x.replace(module_path + os.path.sep, '').strip(), result['stdout'].split('***')) errors = filter(lambda x: "'" in x, errors) broken_files = { error.split("'")[1]: error for error in errors } exclusion_file = os.path.join(module_path, allowed_broken_files_list) with open(exclusion_file + '.log', 'w') as fh: fh.write("\n".join(sorted(broken_files))) if os.path.exists(exclusion_file): with open(exclusion_file, 'r') as fh: excluded_files = fh.read().splitlines() broken_files = { filename: broken_files[filename] for filename in broken_files if filename not in excluded_files } if not broken_files: # No syntax violations in new files return False for filename in sorted(broken_files): print(broken_files[filename], end="\n\n") return "{} file[s] contain newly introduced Python3 syntax errors".format(len(broken_files))
Add Jenkins Python3 monster to dials, but don't enable it here yet.from __future__ import absolute_import, division, print_function import os def find_new_python3_incompatible_code(module_under_test): ''' Check source code to see if any files violate Python 3 syntax that previously did not. Example call: def test_find_python3_violations(): import xia2 import pytest import dials.test.python3_regression as py3test result = py3test.find_new_python3_incompatible_code(xia2) if result is None: pytest.skip('No python3 interpreter available') elif result: pytest.fail(result) Known violations are kept in file .known-python3-violations in the module directory. ''' # File containing list of excluded files allowed_broken_files_list = '.known-python3-violations' # Mask all *PYTHON* variables from environment - Python3 will not like cctbx python settings environ_override = { k: '' for k in list(os.environ) if 'PYTHON' in k } from dials.util.procrunner import run_process module_path = module_under_test.__path__[0] try: result = run_process(['python3', '-m', 'compileall', '-x', '\.git', '-q', module_path], environ=environ_override, print_stdout=False) except OSError as e: if e.errno == 2: return None raise if result['stderr']: return 'Python3 compilation exited with unexpected STDERR output' if not result['exitcode']: # No compilation errors return False errors = map(lambda x: x.replace(module_path + os.path.sep, '').strip(), result['stdout'].split('***')) errors = filter(lambda x: "'" in x, errors) broken_files = { error.split("'")[1]: error for error in errors } exclusion_file = os.path.join(module_path, allowed_broken_files_list) with open(exclusion_file + '.log', 'w') as fh: fh.write("\n".join(sorted(broken_files))) if os.path.exists(exclusion_file): with open(exclusion_file, 'r') as fh: excluded_files = fh.read().splitlines() broken_files = { filename: broken_files[filename] for filename in broken_files if filename not in excluded_files } if not broken_files: # No syntax violations in new files return False for filename in sorted(broken_files): print(broken_files[filename], end="\n\n") return "{} file[s] contain newly introduced Python3 syntax errors".format(len(broken_files))
<commit_before><commit_msg>Add Jenkins Python3 monster to dials, but don't enable it here yet.<commit_after>from __future__ import absolute_import, division, print_function import os def find_new_python3_incompatible_code(module_under_test): ''' Check source code to see if any files violate Python 3 syntax that previously did not. Example call: def test_find_python3_violations(): import xia2 import pytest import dials.test.python3_regression as py3test result = py3test.find_new_python3_incompatible_code(xia2) if result is None: pytest.skip('No python3 interpreter available') elif result: pytest.fail(result) Known violations are kept in file .known-python3-violations in the module directory. ''' # File containing list of excluded files allowed_broken_files_list = '.known-python3-violations' # Mask all *PYTHON* variables from environment - Python3 will not like cctbx python settings environ_override = { k: '' for k in list(os.environ) if 'PYTHON' in k } from dials.util.procrunner import run_process module_path = module_under_test.__path__[0] try: result = run_process(['python3', '-m', 'compileall', '-x', '\.git', '-q', module_path], environ=environ_override, print_stdout=False) except OSError as e: if e.errno == 2: return None raise if result['stderr']: return 'Python3 compilation exited with unexpected STDERR output' if not result['exitcode']: # No compilation errors return False errors = map(lambda x: x.replace(module_path + os.path.sep, '').strip(), result['stdout'].split('***')) errors = filter(lambda x: "'" in x, errors) broken_files = { error.split("'")[1]: error for error in errors } exclusion_file = os.path.join(module_path, allowed_broken_files_list) with open(exclusion_file + '.log', 'w') as fh: fh.write("\n".join(sorted(broken_files))) if os.path.exists(exclusion_file): with open(exclusion_file, 'r') as fh: excluded_files = fh.read().splitlines() broken_files = { filename: broken_files[filename] for filename in broken_files if filename not in excluded_files } if not broken_files: # No syntax violations in new files return False for filename in sorted(broken_files): print(broken_files[filename], end="\n\n") return "{} file[s] contain newly introduced Python3 syntax errors".format(len(broken_files))
49b8df3387e6839bc2e0025ec8d2454ab61654a2
dataactbroker/helpers/pandas_helper.py
dataactbroker/helpers/pandas_helper.py
import pandas as pd from dataactcore.interfaces.db import GlobalDB def check_dataframe_diff(new_data, model, id_col, sort_cols, lambda_funcs=None): """ Checks if 2 dataframes (the new data and the existing data for a model) are different. Args: new_data: dataframe containing the new data to compare model: The model to get the existing data from id_col: A string containing the name of the ID column to delete from the existing data sort_cols: An array containing the columns to sort on lambda_funcs: A dict with the column to update as the key and the lambda function to be executed as the value. As of now, it must take exactly 1 argument Returns: True if there are differences between the two dataframes, false otherwise """ if not lambda_funcs: lambda_funcs = {} new_data_copy = new_data.copy(deep=True) # Drop the created_at and updated_at columns from the new data so they don't cause differences new_data_copy.drop(['created_at', 'updated_at'], axis=1, inplace=True) sess = GlobalDB.db().session current_data = pd.read_sql_table(model.__table__.name, sess.connection(), coerce_float=False) # Drop the created_at and updated_at for the same reason as above, also drop the pk ID column for this table current_data.drop([id_col, 'created_at', 'updated_at'], axis=1, inplace=True) # pandas comparison requires everything to be in the same order new_data_copy.sort_values(by=sort_cols, inplace=True) current_data.sort_values(by=sort_cols, inplace=True) # Columns have to be in order too cols = new_data_copy.columns.tolist() cols.sort() new_data_copy = new_data_copy[cols] cols = current_data.columns.tolist() cols.sort() current_data = current_data[cols] # Reset indexes after sorting, so that they match new_data_copy.reset_index(drop=True, inplace=True) current_data.reset_index(drop=True, inplace=True) # Apply any lambda functions provided to update values if needed for col_name, lambda_func in lambda_funcs.items(): current_data[col_name] = current_data[col_name].apply(lambda x: lambda_func(x)) return not new_data_copy.equals(current_data)
Create a helper to check for differences in dataframes
Create a helper to check for differences in dataframes
Python
cc0-1.0
fedspendingtransparency/data-act-broker-backend,fedspendingtransparency/data-act-broker-backend
Create a helper to check for differences in dataframes
import pandas as pd from dataactcore.interfaces.db import GlobalDB def check_dataframe_diff(new_data, model, id_col, sort_cols, lambda_funcs=None): """ Checks if 2 dataframes (the new data and the existing data for a model) are different. Args: new_data: dataframe containing the new data to compare model: The model to get the existing data from id_col: A string containing the name of the ID column to delete from the existing data sort_cols: An array containing the columns to sort on lambda_funcs: A dict with the column to update as the key and the lambda function to be executed as the value. As of now, it must take exactly 1 argument Returns: True if there are differences between the two dataframes, false otherwise """ if not lambda_funcs: lambda_funcs = {} new_data_copy = new_data.copy(deep=True) # Drop the created_at and updated_at columns from the new data so they don't cause differences new_data_copy.drop(['created_at', 'updated_at'], axis=1, inplace=True) sess = GlobalDB.db().session current_data = pd.read_sql_table(model.__table__.name, sess.connection(), coerce_float=False) # Drop the created_at and updated_at for the same reason as above, also drop the pk ID column for this table current_data.drop([id_col, 'created_at', 'updated_at'], axis=1, inplace=True) # pandas comparison requires everything to be in the same order new_data_copy.sort_values(by=sort_cols, inplace=True) current_data.sort_values(by=sort_cols, inplace=True) # Columns have to be in order too cols = new_data_copy.columns.tolist() cols.sort() new_data_copy = new_data_copy[cols] cols = current_data.columns.tolist() cols.sort() current_data = current_data[cols] # Reset indexes after sorting, so that they match new_data_copy.reset_index(drop=True, inplace=True) current_data.reset_index(drop=True, inplace=True) # Apply any lambda functions provided to update values if needed for col_name, lambda_func in lambda_funcs.items(): current_data[col_name] = current_data[col_name].apply(lambda x: lambda_func(x)) return not new_data_copy.equals(current_data)
<commit_before><commit_msg>Create a helper to check for differences in dataframes<commit_after>
import pandas as pd from dataactcore.interfaces.db import GlobalDB def check_dataframe_diff(new_data, model, id_col, sort_cols, lambda_funcs=None): """ Checks if 2 dataframes (the new data and the existing data for a model) are different. Args: new_data: dataframe containing the new data to compare model: The model to get the existing data from id_col: A string containing the name of the ID column to delete from the existing data sort_cols: An array containing the columns to sort on lambda_funcs: A dict with the column to update as the key and the lambda function to be executed as the value. As of now, it must take exactly 1 argument Returns: True if there are differences between the two dataframes, false otherwise """ if not lambda_funcs: lambda_funcs = {} new_data_copy = new_data.copy(deep=True) # Drop the created_at and updated_at columns from the new data so they don't cause differences new_data_copy.drop(['created_at', 'updated_at'], axis=1, inplace=True) sess = GlobalDB.db().session current_data = pd.read_sql_table(model.__table__.name, sess.connection(), coerce_float=False) # Drop the created_at and updated_at for the same reason as above, also drop the pk ID column for this table current_data.drop([id_col, 'created_at', 'updated_at'], axis=1, inplace=True) # pandas comparison requires everything to be in the same order new_data_copy.sort_values(by=sort_cols, inplace=True) current_data.sort_values(by=sort_cols, inplace=True) # Columns have to be in order too cols = new_data_copy.columns.tolist() cols.sort() new_data_copy = new_data_copy[cols] cols = current_data.columns.tolist() cols.sort() current_data = current_data[cols] # Reset indexes after sorting, so that they match new_data_copy.reset_index(drop=True, inplace=True) current_data.reset_index(drop=True, inplace=True) # Apply any lambda functions provided to update values if needed for col_name, lambda_func in lambda_funcs.items(): current_data[col_name] = current_data[col_name].apply(lambda x: lambda_func(x)) return not new_data_copy.equals(current_data)
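A minimal sketch of how the helper above might be called — the CountryCode model, its country_code_id primary key, and the column names are hypothetical stand-ins, not part of the commit:

import pandas as pd

# Hypothetical incoming data; the helper itself drops created_at/updated_at,
# so those columns must be present in the frame being compared.
new_data = pd.DataFrame([
    {"country_code": "USA", "country_name": "United States", "created_at": None, "updated_at": None},
    {"country_code": "CAN", "country_name": "Canada", "created_at": None, "updated_at": None},
])

# CountryCode is an assumed SQLAlchemy model mapped to the table being diffed against.
changed = check_dataframe_diff(
    new_data,
    CountryCode,
    id_col="country_code_id",
    sort_cols=["country_code"],
    lambda_funcs={"country_name": lambda name: name.strip()},
)

if changed:
    print("table contents differ from the incoming data, reload needed")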
0d1ce8eaf0329ab6efb6e1bbbb0edca2e78ce649
python/qidoc/test/test_qidoc_list.py
python/qidoc/test/test_qidoc_list.py
## Copyright (c) 2012-2015 Aldebaran Robotics. All rights reserved.
## Use of this source code is governed by a BSD-style license that can be
## found in the COPYING file.

def test_simple(qidoc_action, record_messages):
    qidoc_action.add_test_project("world")
    qidoc_action.add_test_project("libworld")
    qidoc_action("list")
    assert record_messages.find("\*\s+world")
    assert record_messages.find("\*\s+libworld")
Add a test for `qidoc list`
Add a test for `qidoc list` Change-Id: I0fcd9fa17c2d69c5d90ec61eca872ca9a23f92b3 Reviewed-on: http://gerrit.aldebaran.lan/62795 Tested-by: gerrit Reviewed-by: vbarbaresi <371b46c96c99af52f4f920034e4fcb63ece5bdb5@aldebaran-robotics.com>
Python
bsd-3-clause
aldebaran/qibuild,aldebaran/qibuild,aldebaran/qibuild,aldebaran/qibuild
88dd94982afa76c3e450c9b186fa8054979779d3
InformationScripting/test/scripts/countChanges.py
InformationScripting/test/scripts/countChanges.py
# script $<"<changes>" | "<countChanges>" | "<heatmap>">$

# assume single input
input = inputs[0]

commits = input.take("commit")

nodeCounts = {}
for commit in commits:
    sha1 = commit.commit
    for change in input.take(sha1):
        key = getattr(change, sha1)
        nodeCounts[key] = nodeCounts.get(key, 0) + 1

result = TupleSet()
for node, count in nodeCounts.items():
    result.add(Tuple([NamedProperty("count", count), NamedProperty("ast", node)]))

results = [result]
Add a script to count changes per node.
Add a script to count changes per node. This makes it possible to show a heatmap of recently changed nodes.
Python
bsd-3-clause
Vaishal-shah/Envision,Vaishal-shah/Envision,mgalbier/Envision,Vaishal-shah/Envision,mgalbier/Envision,Vaishal-shah/Envision,lukedirtwalker/Envision,lukedirtwalker/Envision,lukedirtwalker/Envision,mgalbier/Envision,dimitar-asenov/Envision,mgalbier/Envision,Vaishal-shah/Envision,mgalbier/Envision,lukedirtwalker/Envision,mgalbier/Envision,lukedirtwalker/Envision,dimitar-asenov/Envision,lukedirtwalker/Envision,dimitar-asenov/Envision,dimitar-asenov/Envision,dimitar-asenov/Envision,Vaishal-shah/Envision,dimitar-asenov/Envision
52fbd3f92454c103a484c03ae6cbea63dc1329ce
cinder/tests/tempest/api/volume/test_volume_unicode.py
cinder/tests/tempest/api/volume/test_volume_unicode.py
# -*- coding: utf-8 -*-
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tempest.api.volume import base as volume_base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config

CONF = config.CONF


class CinderUnicodeTest(volume_base.BaseVolumeTest):

    @classmethod
    def resource_setup(cls):
        super(CinderUnicodeTest, cls).resource_setup()

        # Stick to three-byte unicode here, since four+ byte
        # chars require utf8mb4 database support which may not
        # be configured.
        cls.volume_name = u"CinderUnicodeTest塵㼗‽"
        cls.volume = cls.create_volume_with_args(name=cls.volume_name)

    @classmethod
    def create_volume_with_args(cls, **kwargs):
        name = kwargs['name'] or data_utils.rand_name('Volume')
        name_field = cls.special_fields['name_field']
        kwargs[name_field] = name

        volume = cls.volumes_client.create_volume(**kwargs)['volume']
        cls.volumes.append(volume)
        waiters.wait_for_volume_status(cls.volumes_client, volume['id'], 'available')

        return volume

    def test_create_delete_unicode_volume_name(self):
        """Create a volume with a unicode name and view it."""
        result = self.volumes_client.show_volume(self.volumes[0]['id'])
        fetched_volume = result['volume']
        self.assertEqual(fetched_volume[self.special_fields['name_field']], self.volume_name)
Add Unicode volume name test
tempest: Add Unicode volume name test Create and view a volume with Unicode characters in the name. Change-Id: I0c49814a98921d9743f74a6fc0b0b0044473c398
Python
apache-2.0
Datera/cinder,bswartz/cinder,cloudbase/cinder,bswartz/cinder,phenoxim/cinder,Nexenta/cinder,eharney/cinder,eharney/cinder,mahak/cinder,Nexenta/cinder,Hybrid-Cloud/cinder,ge0rgi/cinder,j-griffith/cinder,openstack/cinder,NetApp/cinder,phenoxim/cinder,j-griffith/cinder,Datera/cinder,openstack/cinder,Hybrid-Cloud/cinder,NetApp/cinder,mahak/cinder,cloudbase/cinder
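A side note on the "three-byte unicode" comment in the test above: each character appended to the volume name fits in three UTF-8 bytes, which is why a database configured with plain utf8 (no utf8mb4) can still store it. A quick standalone check, not part of the test itself:

# Byte lengths of the characters used in the volume name above.
for ch in u"塵㼗‽":
    print(len(ch.encode("utf-8")))  # prints 3 for every character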
1263bf8dd374afe04735948e767bb31813d99077
test/on_yubikey/cli_piv/test_read_write_object.py
test/on_yubikey/cli_piv/test_read_write_object.py
import os
import unittest

from ..framework import cli_test_suite
from ykman.piv import OBJ
from .util import DEFAULT_MANAGEMENT_KEY


@cli_test_suite
def additional_tests(ykman_cli):

    class ReadWriteObject(unittest.TestCase):

        def setUp(cls):
            ykman_cli('piv', 'reset', '-f')
            pass

        @classmethod
        def tearDownClass(cls):
            ykman_cli('piv', 'reset', '-f')
            pass

        def test_read_write_read_is_noop(self):
            data = os.urandom(32)
            ykman_cli('piv', 'write-object', hex(OBJ.AUTHENTICATION), '-', '-m', DEFAULT_MANAGEMENT_KEY, input=data)

            output1 = ykman_cli.with_bytes_output('piv', 'read-object', hex(OBJ.AUTHENTICATION))
            self.assertEqual(output1, data)

            ykman_cli('piv', 'write-object', hex(OBJ.AUTHENTICATION), '-', '-m', DEFAULT_MANAGEMENT_KEY, input=output1)

            output2 = ykman_cli.with_bytes_output('piv', 'read-object', hex(OBJ.AUTHENTICATION))
            self.assertEqual(output2, data)

    return [ReadWriteObject]
Add test of PIV read/write object cycle
Add test of PIV read/write object cycle
Python
bsd-2-clause
Yubico/yubikey-manager,Yubico/yubikey-manager
e419ae979fc59af758c5fe93bde7add47dcf21df
UM/Settings/ProfileWriter.py
UM/Settings/ProfileWriter.py
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.

from UM.PluginObject import PluginObject


## Base class for profile writer plugins.
#
# This class defines a write() function to write profiles to files with.
class ProfileWriter(PluginObject):
    ## Initialises the profile writer.
    #
    # This currently doesn't do anything since the writer is basically static.
    def __init__(self):
        super().__init__()

    ## Writes a profile to the specified stream.
    #
    # For example, the stream could be a file stream. The profile writer then
    # writes its own file format to the specified file.
    #
    # \param stream \type{IOStream} The stream to output to.
    # \param profile \type{Profile} The profile to write to the stream.
    def write(self, stream, node):
        raise NotImplementedError("Profile writer plugin was not correctly implemented. No write was specified.")
Add base class for profile writer plugins
Add base class for profile writer plugins This class defines the write() function to write profiles with, that all profile writer plugins should implement. Contributes to issue CURA-34.
Python
agpl-3.0
onitake/Uranium,onitake/Uranium
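A hedged sketch of what a concrete writer plugin built on this base class could look like; the class name and the serialisation are invented for illustration, and a real plugin would use whatever accessors the Profile object actually exposes — only the write(stream, profile) contract comes from the base class above:

from UM.Settings.ProfileWriter import ProfileWriter


## Hypothetical plugin that serialises a profile as plain text.
class ExampleProfileWriter(ProfileWriter):
    def write(self, stream, profile):
        # Stand-in serialisation; repr() is used only to keep the sketch
        # free of assumptions about the real Profile API.
        stream.write("# example profile export\n")
        stream.write(repr(profile))
        return True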
a61d86d235d3968898911596df594f93fe96d7e5
Sketches/MPS/Synchronous.py
Sketches/MPS/Synchronous.py
#!/usr/bin/python

import Axon
import time
from Kamaelia.Chassis.Pipeline import Pipeline

class MySender(Axon.Component.component):
    def main(self):
        while 1:
            yield 1
            sent = False
            print "-----------------------------------------------------------------------"
            while not sent:
                try:
                    self.send(time.time(), "outbox")
                    sent = True
                except Axon.AxonExceptions.noSpaceInBox:
                    print " Gah1"
                    # self.pause()
                    yield 1
                else:
                    print "NO Gah1"
            sent = False
            while not sent:
                try:
                    self.send(time.time(), "outbox")
                    sent = True
                except Axon.AxonExceptions.noSpaceInBox:
                    print " Gah2"
                    # self.pause()
                    yield 1
                else:
                    print "NO Gah2"
            sent = False
            while not sent:
                try:
                    self.send(time.time(), "outbox")
                    sent = True
                except Axon.AxonExceptions.noSpaceInBox:
                    print " Gah3"
                    # self.pause()
                    yield 1
                else:
                    print "NO Gah3"

class MyReceiver(Axon.Component.component):
    def main(self):
        self.setInboxSize("inbox", 1)
        t = time.time()
        while 1:
            while time.time()-t <0.1:
                yield 1
            t = time.time()
            if self.dataReady("inbox"):
                print " ",self.recv()
            yield 1

Pipeline(
    MySender(),
    MyReceiver(),
).run()
Test suite used for testing the Component level API for fixed box sizes
Test suite used for testing the Component level API for fixed box sizes Michael
Python
apache-2.0
sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia,sparkslabs/kamaelia
1663cb8557de85b1f3ccf5822fa01758e679ccd7
TestScript/multi_client.py
TestScript/multi_client.py
# -*- coding: utf-8 -*-
__author__ = 'sm9'

import asyncore, socket
import string, random
import struct, time

HOST = '192.168.0.11'
PORT = 9001

PKT_CS_LOGIN = 1
PKT_SC_LOGIN = 2
PKT_CS_CHAT = 3
PKT_SC_CHAT = 4

def str_generator(size=128, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for x in range(size))

class PacketDecoder(object):
    def __init__(self):
        self._stream = ''

    def feed(self, buff):
        self._stream += buff

    def decode(self):
        while len(self._stream) > 4:
            pkt_size = struct.unpack('h', self._stream[:2])[0]
            if pkt_size > len(self._stream):
                break
            packet = self._stream[:pkt_size]
            yield packet
            self._stream = self._stream[pkt_size:]

class Client(asyncore.dispatcher):
    def __init__(self, pid):
        asyncore.dispatcher.__init__(self)
        self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        self.connect( (HOST, PORT) )
        self.decoder = PacketDecoder()
        self.islogin = False
        self.pid = pid
        self.buffer = struct.pack('hhi', 8, PKT_CS_LOGIN, pid)

    def handle_connect(self):
        pass

    def handle_close(self):
        self.close()

    def handle_read(self):
        data = self.recv(8192)
        if not data:
            #print 'NOT DATA'
            self.close()
        else:
            self.decoder.feed(data)
            for packet in self.decoder.decode():
                self.process(packet)
            if self.islogin:
                self.buffer = struct.pack('<hhi1024s', 1032, PKT_CS_CHAT, self.pid, str_generator(1000))

    def writable(self):
        return (len(self.buffer) > 0)

    def handle_write(self):
        sent = self.send(self.buffer)
        self.buffer = self.buffer[sent:]

    def process(self, packet):
        pkt_len, pkt_type = struct.unpack('hh', packet[:4])
        if pkt_type == PKT_SC_LOGIN:
            self.islogin = True
            playerId, posX, posY, posZ, playerName = struct.unpack('<iddd30s', packet[4:])
            print playerId, posX, posY, posZ, playerName
        elif pkt_type == PKT_SC_CHAT:
            playerId, playerName, chat = struct.unpack('<i30s1024s', packet[4:])
            #print playerId, playerName, chat
        else:
            print 'PKT_TYPE ERROR'

for pid in range(1000, 1100):
    client = Client(pid)
    time.sleep(0.02)

asyncore.loop()
Test script for the stress test
*added: Test script for the stress test
Python
mit
zrma/EasyGameServer,zeliard/EasyGameServer,zeliard/EasyGameServer,zrma/EasyGameServer,Lt-Red/EasyGameServer,Lt-Red/EasyGameServer
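The length-prefixed framing that PacketDecoder expects can be exercised without a running server; this standalone sketch, using only the class and constants defined above, feeds two login packets split across uneven chunks, the way a TCP socket might deliver them:

import struct

decoder = PacketDecoder()

# Two login packets, each framed as <size:int16><type:int16><pid:int32> = 8 bytes.
stream = struct.pack('hhi', 8, PKT_CS_LOGIN, 1000) + struct.pack('hhi', 8, PKT_CS_LOGIN, 1001)

decoder.feed(stream[:5])
print list(decoder.decode())   # [] -- the first packet is still incomplete

decoder.feed(stream[5:])
for packet in decoder.decode():
    size, ptype, pid = struct.unpack('hhi', packet)
    print size, ptype, pid     # 8 1 1000, then 8 1 1001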
3a3f529498522dada5e6af6765cd537f47d368d8
Tools/compiler/stacktest.py
Tools/compiler/stacktest.py
import compiler
import dis
import types

def extract_code_objects(co):
    l = [co]
    for const in co.co_consts:
        if type(const) == types.CodeType:
            l.append(const)
    return l

def compare(a, b):
    if not (a.co_name == "?" or a.co_name.startswith('<lambda')):
        assert a.co_name == b.co_name, (a, b)
    if a.co_stacksize != b.co_stacksize:
        print "stack mismatch %s: %d vs. %d" % (a.co_name, a.co_stacksize, b.co_stacksize)
        if a.co_stacksize > b.co_stacksize:
            print "good code"
            dis.dis(a)
            print "bad code"
            dis.dis(b)
            assert 0

def main(files):
    for file in files:
        print file
        buf = open(file).read()
        try:
            co1 = compile(buf, file, "exec")
        except SyntaxError:
            print "skipped"
            continue
        co2 = compiler.compile(buf, file, "exec")
        co1l = extract_code_objects(co1)
        co2l = extract_code_objects(co2)
        for a, b in zip(co1l, co2l):
            compare(a, b)

if __name__ == "__main__":
    import sys
    main(sys.argv[1:])
Test utility to look for bad stacksize calculations.
Test utility to look for bad stacksize calculations.
Python
mit
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
63709e388ed86892e9771b86daad4a9ec3c0bd44
ibei/__init__.py
ibei/__init__.py
# -*- coding: utf-8 -*-
"""
Base Library (:mod:`ibei`)
==========================

.. currentmodule:: ibei
"""

from .version import __version__
from .uibei import uibei
from .sqsolarcell import SQSolarcell
from .devossolarcell import DeVosSolarcell
# -*- coding: utf-8 -*-
"""
Base Library (:mod:`ibei`)
==========================

.. currentmodule:: ibei
"""

from .version import __version__
from .uibei import uibei
Remove import of specific models
Remove import of specific models
Python
mit
jrsmith3/ibei
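For callers that relied on the re-exports removed here, the before/after import surface would look roughly like this (assuming the sqsolarcell and devossolarcell submodules themselves stay in the package):

# Still available from the package root after the change:
from ibei import uibei, __version__

# The model classes would now be imported from their own submodules instead of
# from the package root (assumption based on the relative imports removed above):
from ibei.sqsolarcell import SQSolarcell
from ibei.devossolarcell import DeVosSolarcell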
3ef6a9d7d40dd66c89fd3e5c309ad76b5d4e836c
heat/tests/test_fault_middleware.py
heat/tests/test_fault_middleware.py
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat.common import exception as heat_exc
from heat.openstack.common.rpc import common as rpc_common
from heat.tests.common import HeatTestCase
from oslo.config import cfg

import heat.api.middleware.fault as fault


class FaultMiddlewareTest(HeatTestCase):

    def test_openstack_exception_with_kwargs(self):
        wrapper = fault.FaultWrapper(None)
        msg = wrapper._error(heat_exc.StackNotFound(stack_name='a'))
        expected = {'code': 404,
                    'error': {'message': 'The Stack (a) could not be found.',
                              'traceback': 'None\n',
                              'type': 'StackNotFound'},
                    'explanation': 'The resource could not be found.',
                    'title': 'Not Found'}
        self.assertEqual(msg, expected)

    def test_openstack_exception_without_kwargs(self):
        wrapper = fault.FaultWrapper(None)
        msg = wrapper._error(heat_exc.NoServiceEndpoint())
        expected = {'code': 500,
                    'error': {'message': 'Response from Keystone does ' 'not contain a Heat endpoint.',
                              'traceback': 'None\n',
                              'type': 'NoServiceEndpoint'},
                    'explanation': 'The server has either erred or is '
                                   'incapable of performing the requested '
                                   'operation.',
                    'title': 'Internal Server Error'}
        self.assertEqual(msg, expected)

    def test_remote_exception(self):
        error = heat_exc.StackNotFound(stack_name='a')
        exc_info = (type(error), error, None)
        serialized = rpc_common.serialize_remote_exception(exc_info)
        remote_error = rpc_common.deserialize_remote_exception(cfg.CONF, serialized)
        wrapper = fault.FaultWrapper(None)
        msg = wrapper._error(remote_error)
        expected_message, expected_traceback = str(remote_error).split('\n', 1)
        expected = {'code': 404,
                    'error': {'message': expected_message,
                              'traceback': expected_traceback,
                              'type': 'StackNotFound'},
                    'explanation': 'The resource could not be found.',
                    'title': 'Not Found'}
        self.assertEqual(msg, expected)
Add unittests for faultwrap middleware
Add unittests for faultwrap middleware Partially implements bp exception-formatting Change-Id: Ib57ce625775e222e5b42b22c10687bce7b63d26d
Python
apache-2.0
miguelgrinberg/heat,JioCloud/heat,takeshineshiro/heat,pshchelo/heat,jasondunsmore/heat,dragorosson/heat,rh-s/heat,maestro-hybrid-cloud/heat,pratikmallya/heat,miguelgrinberg/heat,rickerc/heat_audit,NeCTAR-RC/heat,cwolferh/heat-scratch,cryptickp/heat,openstack/heat,steveb/heat,gonzolino/heat,rh-s/heat,citrix-openstack-build/heat,rickerc/heat_audit,noironetworks/heat,redhat-openstack/heat,JioCloud/heat,maestro-hybrid-cloud/heat,redhat-openstack/heat,noironetworks/heat,NeCTAR-RC/heat,rdo-management/heat,citrix-openstack-build/heat,ntt-sic/heat,ntt-sic/heat,cwolferh/heat-scratch,takeshineshiro/heat,steveb/heat,dragorosson/heat,openstack/heat,jasondunsmore/heat,varunarya10/heat,gonzolino/heat,srznew/heat,srznew/heat,dims/heat,pshchelo/heat,dims/heat,rdo-management/heat,varunarya10/heat,pratikmallya/heat,cryptickp/heat
Add unittests for faultwrap middleware Partially implements bp exception-formatting Change-Id: Ib57ce625775e222e5b42b22c10687bce7b63d26d
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.common import exception as heat_exc from heat.openstack.common.rpc import common as rpc_common from heat.tests.common import HeatTestCase from oslo.config import cfg import heat.api.middleware.fault as fault class FaultMiddlewareTest(HeatTestCase): def test_openstack_exception_with_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.StackNotFound(stack_name='a')) expected = {'code': 404, 'error': {'message': 'The Stack (a) could not be found.', 'traceback': 'None\n', 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected) def test_openstack_exception_without_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.NoServiceEndpoint()) expected = {'code': 500, 'error': {'message': 'Response from Keystone does ' 'not contain a Heat endpoint.', 'traceback': 'None\n', 'type': 'NoServiceEndpoint'}, 'explanation': 'The server has either erred or is ' 'incapable of performing the requested ' 'operation.', 'title': 'Internal Server Error'} self.assertEqual(msg, expected) def test_remote_exception(self): error = heat_exc.StackNotFound(stack_name='a') exc_info = (type(error), error, None) serialized = rpc_common.serialize_remote_exception(exc_info) remote_error = rpc_common.deserialize_remote_exception(cfg.CONF, serialized) wrapper = fault.FaultWrapper(None) msg = wrapper._error(remote_error) expected_message, expected_traceback = str(remote_error).split('\n', 1) expected = {'code': 404, 'error': {'message': expected_message, 'traceback': expected_traceback, 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected)
<commit_before><commit_msg>Add unittests for faultwrap middleware Partially implements bp exception-formatting Change-Id: Ib57ce625775e222e5b42b22c10687bce7b63d26d<commit_after>
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.common import exception as heat_exc from heat.openstack.common.rpc import common as rpc_common from heat.tests.common import HeatTestCase from oslo.config import cfg import heat.api.middleware.fault as fault class FaultMiddlewareTest(HeatTestCase): def test_openstack_exception_with_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.StackNotFound(stack_name='a')) expected = {'code': 404, 'error': {'message': 'The Stack (a) could not be found.', 'traceback': 'None\n', 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected) def test_openstack_exception_without_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.NoServiceEndpoint()) expected = {'code': 500, 'error': {'message': 'Response from Keystone does ' 'not contain a Heat endpoint.', 'traceback': 'None\n', 'type': 'NoServiceEndpoint'}, 'explanation': 'The server has either erred or is ' 'incapable of performing the requested ' 'operation.', 'title': 'Internal Server Error'} self.assertEqual(msg, expected) def test_remote_exception(self): error = heat_exc.StackNotFound(stack_name='a') exc_info = (type(error), error, None) serialized = rpc_common.serialize_remote_exception(exc_info) remote_error = rpc_common.deserialize_remote_exception(cfg.CONF, serialized) wrapper = fault.FaultWrapper(None) msg = wrapper._error(remote_error) expected_message, expected_traceback = str(remote_error).split('\n', 1) expected = {'code': 404, 'error': {'message': expected_message, 'traceback': expected_traceback, 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected)
Add unittests for faultwrap middleware Partially implements bp exception-formatting Change-Id: Ib57ce625775e222e5b42b22c10687bce7b63d26d# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.common import exception as heat_exc from heat.openstack.common.rpc import common as rpc_common from heat.tests.common import HeatTestCase from oslo.config import cfg import heat.api.middleware.fault as fault class FaultMiddlewareTest(HeatTestCase): def test_openstack_exception_with_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.StackNotFound(stack_name='a')) expected = {'code': 404, 'error': {'message': 'The Stack (a) could not be found.', 'traceback': 'None\n', 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected) def test_openstack_exception_without_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.NoServiceEndpoint()) expected = {'code': 500, 'error': {'message': 'Response from Keystone does ' 'not contain a Heat endpoint.', 'traceback': 'None\n', 'type': 'NoServiceEndpoint'}, 'explanation': 'The server has either erred or is ' 'incapable of performing the requested ' 'operation.', 'title': 'Internal Server Error'} self.assertEqual(msg, expected) def test_remote_exception(self): error = heat_exc.StackNotFound(stack_name='a') exc_info = (type(error), error, None) serialized = rpc_common.serialize_remote_exception(exc_info) remote_error = rpc_common.deserialize_remote_exception(cfg.CONF, serialized) wrapper = fault.FaultWrapper(None) msg = wrapper._error(remote_error) expected_message, expected_traceback = str(remote_error).split('\n', 1) expected = {'code': 404, 'error': {'message': expected_message, 'traceback': expected_traceback, 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected)
<commit_before><commit_msg>Add unittests for faultwrap middleware Partially implements bp exception-formatting Change-Id: Ib57ce625775e222e5b42b22c10687bce7b63d26d<commit_after># vim: tabstop=4 shiftwidth=4 softtabstop=4 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.common import exception as heat_exc from heat.openstack.common.rpc import common as rpc_common from heat.tests.common import HeatTestCase from oslo.config import cfg import heat.api.middleware.fault as fault class FaultMiddlewareTest(HeatTestCase): def test_openstack_exception_with_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.StackNotFound(stack_name='a')) expected = {'code': 404, 'error': {'message': 'The Stack (a) could not be found.', 'traceback': 'None\n', 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected) def test_openstack_exception_without_kwargs(self): wrapper = fault.FaultWrapper(None) msg = wrapper._error(heat_exc.NoServiceEndpoint()) expected = {'code': 500, 'error': {'message': 'Response from Keystone does ' 'not contain a Heat endpoint.', 'traceback': 'None\n', 'type': 'NoServiceEndpoint'}, 'explanation': 'The server has either erred or is ' 'incapable of performing the requested ' 'operation.', 'title': 'Internal Server Error'} self.assertEqual(msg, expected) def test_remote_exception(self): error = heat_exc.StackNotFound(stack_name='a') exc_info = (type(error), error, None) serialized = rpc_common.serialize_remote_exception(exc_info) remote_error = rpc_common.deserialize_remote_exception(cfg.CONF, serialized) wrapper = fault.FaultWrapper(None) msg = wrapper._error(remote_error) expected_message, expected_traceback = str(remote_error).split('\n', 1) expected = {'code': 404, 'error': {'message': expected_message, 'traceback': expected_traceback, 'type': 'StackNotFound'}, 'explanation': 'The resource could not be found.', 'title': 'Not Found'} self.assertEqual(msg, expected)
5a2e3da28fa2471d97b5d0e1b3cae76602dc9c79
snippets/list_holidays.py
snippets/list_holidays.py
import pandas as pd from datetime import date import holidays def process_holidays(df): year = df['year'] user_date = date(year, df['month'], df['day']) hd = holidays.US(years=year) for holiday_date, name in hd.iteritems(): days = (holiday_date - user_date).days # Get the real data for the new year if days < 0: days = days + 364 name = "".join([c for c in name if c.isalpha() or c.isdigit() or c==' ']).rstrip().lower().replace(" ", "_") df['days_to_' + name] = days return df df = pd.DataFrame() df['year'] = pd.Series(range(2010, 2015)) df['day'] = pd.Series(range(11, 27, 3)) df['month'] = pd.Series(range(2, 12, 2)) print df.apply(process_holidays, axis=1)
Create small script to compute distance to some US holidays
Create small script to compute distance to some US holidays
Python
mit
davidgasquez/kaggle-airbnb
Create small script to compute distance to some US holidays
import pandas as pd from datetime import date import holidays def process_holidays(df): year = df['year'] user_date = date(year, df['month'], df['day']) hd = holidays.US(years=year) for holiday_date, name in hd.iteritems(): days = (holiday_date - user_date).days # Get the real data for the new year if days < 0: days = days + 364 name = "".join([c for c in name if c.isalpha() or c.isdigit() or c==' ']).rstrip().lower().replace(" ", "_") df['days_to_' + name] = days return df df = pd.DataFrame() df['year'] = pd.Series(range(2010, 2015)) df['day'] = pd.Series(range(11, 27, 3)) df['month'] = pd.Series(range(2, 12, 2)) print df.apply(process_holidays, axis=1)
<commit_before><commit_msg>Create small script to compute distance to some US holidays<commit_after>
import pandas as pd from datetime import date import holidays def process_holidays(df): year = df['year'] user_date = date(year, df['month'], df['day']) hd = holidays.US(years=year) for holiday_date, name in hd.iteritems(): days = (holiday_date - user_date).days # Get the real data for the new year if days < 0: days = days + 364 name = "".join([c for c in name if c.isalpha() or c.isdigit() or c==' ']).rstrip().lower().replace(" ", "_") df['days_to_' + name] = days return df df = pd.DataFrame() df['year'] = pd.Series(range(2010, 2015)) df['day'] = pd.Series(range(11, 27, 3)) df['month'] = pd.Series(range(2, 12, 2)) print df.apply(process_holidays, axis=1)
Create small script to compute distance to some US holidaysimport pandas as pd from datetime import date import holidays def process_holidays(df): year = df['year'] user_date = date(year, df['month'], df['day']) hd = holidays.US(years=year) for holiday_date, name in hd.iteritems(): days = (holiday_date - user_date).days # Get the real data for the new year if days < 0: days = days + 364 name = "".join([c for c in name if c.isalpha() or c.isdigit() or c==' ']).rstrip().lower().replace(" ", "_") df['days_to_' + name] = days return df df = pd.DataFrame() df['year'] = pd.Series(range(2010, 2015)) df['day'] = pd.Series(range(11, 27, 3)) df['month'] = pd.Series(range(2, 12, 2)) print df.apply(process_holidays, axis=1)
<commit_before><commit_msg>Create small script to compute distance to some US holidays<commit_after>import pandas as pd from datetime import date import holidays def process_holidays(df): year = df['year'] user_date = date(year, df['month'], df['day']) hd = holidays.US(years=year) for holiday_date, name in hd.iteritems(): days = (holiday_date - user_date).days # Get the real data for the new year if days < 0: days = days + 364 name = "".join([c for c in name if c.isalpha() or c.isdigit() or c==' ']).rstrip().lower().replace(" ", "_") df['days_to_' + name] = days return df df = pd.DataFrame() df['year'] = pd.Series(range(2010, 2015)) df['day'] = pd.Series(range(11, 27, 3)) df['month'] = pd.Series(range(2, 12, 2)) print df.apply(process_holidays, axis=1)
e6c79dc248d51b38da2d6fa0e92468176b0b21bc
migrations/versions/1010_remove_unused_fields.py
migrations/versions/1010_remove_unused_fields.py
"""Remove unused fields Revision ID: 1010 Revises: 1000 Create Date: 2017-10-04 15:14:48.532073 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '1010' down_revision = '1000' def upgrade(): op.drop_column('contact_information', 'address2') op.drop_column('contact_information', 'country') op.drop_column('contact_information', 'website') op.drop_column('suppliers', 'clients') op.drop_column('suppliers', 'esourcing_id') def downgrade(): op.add_column('suppliers', sa.Column('esourcing_id', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('suppliers', sa.Column('clients', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('website', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('country', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('address2', sa.VARCHAR(), autoincrement=False, nullable=True))
Remove unused fields from model and drop columns from db
Remove unused fields from model and drop columns from db
Python
mit
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
Remove unused fields from model and drop columns from db
"""Remove unused fields Revision ID: 1010 Revises: 1000 Create Date: 2017-10-04 15:14:48.532073 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '1010' down_revision = '1000' def upgrade(): op.drop_column('contact_information', 'address2') op.drop_column('contact_information', 'country') op.drop_column('contact_information', 'website') op.drop_column('suppliers', 'clients') op.drop_column('suppliers', 'esourcing_id') def downgrade(): op.add_column('suppliers', sa.Column('esourcing_id', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('suppliers', sa.Column('clients', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('website', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('country', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('address2', sa.VARCHAR(), autoincrement=False, nullable=True))
<commit_before><commit_msg>Remove unused fields from model and drop columns from db<commit_after>
"""Remove unused fields Revision ID: 1010 Revises: 1000 Create Date: 2017-10-04 15:14:48.532073 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '1010' down_revision = '1000' def upgrade(): op.drop_column('contact_information', 'address2') op.drop_column('contact_information', 'country') op.drop_column('contact_information', 'website') op.drop_column('suppliers', 'clients') op.drop_column('suppliers', 'esourcing_id') def downgrade(): op.add_column('suppliers', sa.Column('esourcing_id', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('suppliers', sa.Column('clients', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('website', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('country', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('address2', sa.VARCHAR(), autoincrement=False, nullable=True))
Remove unused fields from model and drop columns from db"""Remove unused fields Revision ID: 1010 Revises: 1000 Create Date: 2017-10-04 15:14:48.532073 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '1010' down_revision = '1000' def upgrade(): op.drop_column('contact_information', 'address2') op.drop_column('contact_information', 'country') op.drop_column('contact_information', 'website') op.drop_column('suppliers', 'clients') op.drop_column('suppliers', 'esourcing_id') def downgrade(): op.add_column('suppliers', sa.Column('esourcing_id', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('suppliers', sa.Column('clients', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('website', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('country', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('address2', sa.VARCHAR(), autoincrement=False, nullable=True))
<commit_before><commit_msg>Remove unused fields from model and drop columns from db<commit_after>"""Remove unused fields Revision ID: 1010 Revises: 1000 Create Date: 2017-10-04 15:14:48.532073 """ from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '1010' down_revision = '1000' def upgrade(): op.drop_column('contact_information', 'address2') op.drop_column('contact_information', 'country') op.drop_column('contact_information', 'website') op.drop_column('suppliers', 'clients') op.drop_column('suppliers', 'esourcing_id') def downgrade(): op.add_column('suppliers', sa.Column('esourcing_id', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('suppliers', sa.Column('clients', postgresql.JSON(astext_type=sa.Text()), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('website', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('country', sa.VARCHAR(), autoincrement=False, nullable=True)) op.add_column('contact_information', sa.Column('address2', sa.VARCHAR(), autoincrement=False, nullable=True))
14a1e5fb2afe9c372646ce26a9b2b92c517067b3
minecraft-server/minecraftforge_spider.py
minecraft-server/minecraftforge_spider.py
# -*- coding: utf-8 -*- import scrapy from scrapy.contrib.spiders import CrawlSpider, Rule from scrapy.contrib.linkextractors import LinkExtractor from scrapy.selector import Selector import re class Forge(scrapy.Item): versions = scrapy.Field() latest = scrapy.Field() class ForgeVersions(scrapy.Item): id = scrapy.Field() minecraft = scrapy.Field() type = scrapy.Field() time = scrapy.Field() url = scrapy.Field() class ForgeLatest(scrapy.Item): forge_latest = scrapy.Field() forge_recommended = scrapy.Field() class ForgeSpider(CrawlSpider): name = "ForgeSpider" allowed_domains = ["minecraftforge.net"] start_urls = ['http://files.minecraftforge.net'] def parse(self, response): forge = Forge() forge['versions'] = [] forge['latest'] = ForgeLatest() selector = Selector(response) rows = selector.xpath('//table[@id="promotions_table"]//tr') header = rows.pop(0) for row in rows: cells = row.xpath('td') id = cells[1].xpath('text()').extract() minecraft = cells[2].xpath('text()').extract() type = cells[0].xpath('text()') time = cells[3].xpath('text()') url = cells[4].xpath('a[text()="Installer"]/@href') #if has version has_version = re.match('(.+)\-.+', ''.join(type.extract())) if has_version: download = ForgeVersions() download['id'] = id download['minecraft'] = minecraft download['type'] = 'forge_' + ''.join(type.re('([a-zA-Z]+)')).lower() download['time'] = time.extract() download['url'] = url.re('http://adf.ly/\d+/(.+)') forge['versions'].append(download) else: is_recommended = re.match('Recommended', ''.join(type.extract())) if is_recommended: download = ForgeLatest() forge['latest']['forge_recommended'] = id else: download = ForgeLatest() forge['latest']['forge_latest'] = id return forge
Add initial version of forge download scraper
Add initial version of forge download scraper
Python
apache-2.0
jbeda/itzg-dockerfiles,zerocoolys/dockerfiles-1,sekka1/dockerfiles,zerocoolys/dockerfiles-1,jbeda/itzg-dockerfiles,jbeda/itzg-dockerfiles,ajthemacboy/dockerfiles,itzg/dockerfiles,sekka1/dockerfiles,sekka1/dockerfiles,zerocoolys/dockerfiles-1
Add initial version of forge download scraper
# -*- coding: utf-8 -*- import scrapy from scrapy.contrib.spiders import CrawlSpider, Rule from scrapy.contrib.linkextractors import LinkExtractor from scrapy.selector import Selector import re class Forge(scrapy.Item): versions = scrapy.Field() latest = scrapy.Field() class ForgeVersions(scrapy.Item): id = scrapy.Field() minecraft = scrapy.Field() type = scrapy.Field() time = scrapy.Field() url = scrapy.Field() class ForgeLatest(scrapy.Item): forge_latest = scrapy.Field() forge_recommended = scrapy.Field() class ForgeSpider(CrawlSpider): name = "ForgeSpider" allowed_domains = ["minecraftforge.net"] start_urls = ['http://files.minecraftforge.net'] def parse(self, response): forge = Forge() forge['versions'] = [] forge['latest'] = ForgeLatest() selector = Selector(response) rows = selector.xpath('//table[@id="promotions_table"]//tr') header = rows.pop(0) for row in rows: cells = row.xpath('td') id = cells[1].xpath('text()').extract() minecraft = cells[2].xpath('text()').extract() type = cells[0].xpath('text()') time = cells[3].xpath('text()') url = cells[4].xpath('a[text()="Installer"]/@href') #if has version has_version = re.match('(.+)\-.+', ''.join(type.extract())) if has_version: download = ForgeVersions() download['id'] = id download['minecraft'] = minecraft download['type'] = 'forge_' + ''.join(type.re('([a-zA-Z]+)')).lower() download['time'] = time.extract() download['url'] = url.re('http://adf.ly/\d+/(.+)') forge['versions'].append(download) else: is_recommended = re.match('Recommended', ''.join(type.extract())) if is_recommended: download = ForgeLatest() forge['latest']['forge_recommended'] = id else: download = ForgeLatest() forge['latest']['forge_latest'] = id return forge
<commit_before><commit_msg>Add initial version of forge download scraper<commit_after>
# -*- coding: utf-8 -*- import scrapy from scrapy.contrib.spiders import CrawlSpider, Rule from scrapy.contrib.linkextractors import LinkExtractor from scrapy.selector import Selector import re class Forge(scrapy.Item): versions = scrapy.Field() latest = scrapy.Field() class ForgeVersions(scrapy.Item): id = scrapy.Field() minecraft = scrapy.Field() type = scrapy.Field() time = scrapy.Field() url = scrapy.Field() class ForgeLatest(scrapy.Item): forge_latest = scrapy.Field() forge_recommended = scrapy.Field() class ForgeSpider(CrawlSpider): name = "ForgeSpider" allowed_domains = ["minecraftforge.net"] start_urls = ['http://files.minecraftforge.net'] def parse(self, response): forge = Forge() forge['versions'] = [] forge['latest'] = ForgeLatest() selector = Selector(response) rows = selector.xpath('//table[@id="promotions_table"]//tr') header = rows.pop(0) for row in rows: cells = row.xpath('td') id = cells[1].xpath('text()').extract() minecraft = cells[2].xpath('text()').extract() type = cells[0].xpath('text()') time = cells[3].xpath('text()') url = cells[4].xpath('a[text()="Installer"]/@href') #if has version has_version = re.match('(.+)\-.+', ''.join(type.extract())) if has_version: download = ForgeVersions() download['id'] = id download['minecraft'] = minecraft download['type'] = 'forge_' + ''.join(type.re('([a-zA-Z]+)')).lower() download['time'] = time.extract() download['url'] = url.re('http://adf.ly/\d+/(.+)') forge['versions'].append(download) else: is_recommended = re.match('Recommended', ''.join(type.extract())) if is_recommended: download = ForgeLatest() forge['latest']['forge_recommended'] = id else: download = ForgeLatest() forge['latest']['forge_latest'] = id return forge
Add initial version of forge download scraper# -*- coding: utf-8 -*- import scrapy from scrapy.contrib.spiders import CrawlSpider, Rule from scrapy.contrib.linkextractors import LinkExtractor from scrapy.selector import Selector import re class Forge(scrapy.Item): versions = scrapy.Field() latest = scrapy.Field() class ForgeVersions(scrapy.Item): id = scrapy.Field() minecraft = scrapy.Field() type = scrapy.Field() time = scrapy.Field() url = scrapy.Field() class ForgeLatest(scrapy.Item): forge_latest = scrapy.Field() forge_recommended = scrapy.Field() class ForgeSpider(CrawlSpider): name = "ForgeSpider" allowed_domains = ["minecraftforge.net"] start_urls = ['http://files.minecraftforge.net'] def parse(self, response): forge = Forge() forge['versions'] = [] forge['latest'] = ForgeLatest() selector = Selector(response) rows = selector.xpath('//table[@id="promotions_table"]//tr') header = rows.pop(0) for row in rows: cells = row.xpath('td') id = cells[1].xpath('text()').extract() minecraft = cells[2].xpath('text()').extract() type = cells[0].xpath('text()') time = cells[3].xpath('text()') url = cells[4].xpath('a[text()="Installer"]/@href') #if has version has_version = re.match('(.+)\-.+', ''.join(type.extract())) if has_version: download = ForgeVersions() download['id'] = id download['minecraft'] = minecraft download['type'] = 'forge_' + ''.join(type.re('([a-zA-Z]+)')).lower() download['time'] = time.extract() download['url'] = url.re('http://adf.ly/\d+/(.+)') forge['versions'].append(download) else: is_recommended = re.match('Recommended', ''.join(type.extract())) if is_recommended: download = ForgeLatest() forge['latest']['forge_recommended'] = id else: download = ForgeLatest() forge['latest']['forge_latest'] = id return forge
<commit_before><commit_msg>Add initial version of forge download scraper<commit_after># -*- coding: utf-8 -*- import scrapy from scrapy.contrib.spiders import CrawlSpider, Rule from scrapy.contrib.linkextractors import LinkExtractor from scrapy.selector import Selector import re class Forge(scrapy.Item): versions = scrapy.Field() latest = scrapy.Field() class ForgeVersions(scrapy.Item): id = scrapy.Field() minecraft = scrapy.Field() type = scrapy.Field() time = scrapy.Field() url = scrapy.Field() class ForgeLatest(scrapy.Item): forge_latest = scrapy.Field() forge_recommended = scrapy.Field() class ForgeSpider(CrawlSpider): name = "ForgeSpider" allowed_domains = ["minecraftforge.net"] start_urls = ['http://files.minecraftforge.net'] def parse(self, response): forge = Forge() forge['versions'] = [] forge['latest'] = ForgeLatest() selector = Selector(response) rows = selector.xpath('//table[@id="promotions_table"]//tr') header = rows.pop(0) for row in rows: cells = row.xpath('td') id = cells[1].xpath('text()').extract() minecraft = cells[2].xpath('text()').extract() type = cells[0].xpath('text()') time = cells[3].xpath('text()') url = cells[4].xpath('a[text()="Installer"]/@href') #if has version has_version = re.match('(.+)\-.+', ''.join(type.extract())) if has_version: download = ForgeVersions() download['id'] = id download['minecraft'] = minecraft download['type'] = 'forge_' + ''.join(type.re('([a-zA-Z]+)')).lower() download['time'] = time.extract() download['url'] = url.re('http://adf.ly/\d+/(.+)') forge['versions'].append(download) else: is_recommended = re.match('Recommended', ''.join(type.extract())) if is_recommended: download = ForgeLatest() forge['latest']['forge_recommended'] = id else: download = ForgeLatest() forge['latest']['forge_latest'] = id return forge
027aa7152e3873c3bfa96c04e4d2ea0b6c345020
snippet/example/python/sqlalchemy-orm-example.py
snippet/example/python/sqlalchemy-orm-example.py
# encoding: utf8 from __future__ import print_function from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine, Column, Integer, String from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) def __repr__(self): return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password) engine = create_engine('sqlite:///:memory:', echo=True) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() ed_user = User(name='ed', fullname='Ed Jones', password='edspassword') session.add(ed_user) session.add_all([ User(name='wendy', fullname='Wendy Williams', password='foobar'), User(name='mary', fullname='Mary Contrary', password='xxg527'), User(name='fred', fullname='Fred Flinstone', password='blah')]) session.commit() print(session.query(User).offset(1).limit(2).all()) # SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, \ # users.password AS users_password FROM users LIMIT 2 OFFSET 1 for row in session.query(User, User.name).all(): print(row.User, row.name) #ed_user.name = "ed2" #session.commit() with session.begin(subtransactions=True): ed_user.name = "ed2" session.delete(ed_user)
Add the usage example of sqlalchemy ORM.
Add the usage example of sqlalchemy ORM.
Python
mit
xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet,xgfone/snippet
Add the usage example of sqlalchemy ORM.
# encoding: utf8 from __future__ import print_function from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine, Column, Integer, String from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) def __repr__(self): return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password) engine = create_engine('sqlite:///:memory:', echo=True) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() ed_user = User(name='ed', fullname='Ed Jones', password='edspassword') session.add(ed_user) session.add_all([ User(name='wendy', fullname='Wendy Williams', password='foobar'), User(name='mary', fullname='Mary Contrary', password='xxg527'), User(name='fred', fullname='Fred Flinstone', password='blah')]) session.commit() print(session.query(User).offset(1).limit(2).all()) # SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, \ # users.password AS users_password FROM users LIMIT 2 OFFSET 1 for row in session.query(User, User.name).all(): print(row.User, row.name) #ed_user.name = "ed2" #session.commit() with session.begin(subtransactions=True): ed_user.name = "ed2" session.delete(ed_user)
<commit_before><commit_msg>Add the usage example of sqlalchemy ORM.<commit_after>
# encoding: utf8 from __future__ import print_function from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine, Column, Integer, String from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) def __repr__(self): return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password) engine = create_engine('sqlite:///:memory:', echo=True) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() ed_user = User(name='ed', fullname='Ed Jones', password='edspassword') session.add(ed_user) session.add_all([ User(name='wendy', fullname='Wendy Williams', password='foobar'), User(name='mary', fullname='Mary Contrary', password='xxg527'), User(name='fred', fullname='Fred Flinstone', password='blah')]) session.commit() print(session.query(User).offset(1).limit(2).all()) # SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, \ # users.password AS users_password FROM users LIMIT 2 OFFSET 1 for row in session.query(User, User.name).all(): print(row.User, row.name) #ed_user.name = "ed2" #session.commit() with session.begin(subtransactions=True): ed_user.name = "ed2" session.delete(ed_user)
Add the usage example of sqlalchemy ORM.# encoding: utf8 from __future__ import print_function from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine, Column, Integer, String from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) def __repr__(self): return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password) engine = create_engine('sqlite:///:memory:', echo=True) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() ed_user = User(name='ed', fullname='Ed Jones', password='edspassword') session.add(ed_user) session.add_all([ User(name='wendy', fullname='Wendy Williams', password='foobar'), User(name='mary', fullname='Mary Contrary', password='xxg527'), User(name='fred', fullname='Fred Flinstone', password='blah')]) session.commit() print(session.query(User).offset(1).limit(2).all()) # SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, \ # users.password AS users_password FROM users LIMIT 2 OFFSET 1 for row in session.query(User, User.name).all(): print(row.User, row.name) #ed_user.name = "ed2" #session.commit() with session.begin(subtransactions=True): ed_user.name = "ed2" session.delete(ed_user)
<commit_before><commit_msg>Add the usage example of sqlalchemy ORM.<commit_after># encoding: utf8 from __future__ import print_function from sqlalchemy.orm import sessionmaker from sqlalchemy import create_engine, Column, Integer, String from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class User(Base): __tablename__ = 'users' id = Column(Integer, primary_key=True) name = Column(String) fullname = Column(String) password = Column(String) def __repr__(self): return "<User(name='%s', fullname='%s', password='%s')>" % (self.name, self.fullname, self.password) engine = create_engine('sqlite:///:memory:', echo=True) Base.metadata.create_all(engine) Session = sessionmaker(bind=engine) session = Session() ed_user = User(name='ed', fullname='Ed Jones', password='edspassword') session.add(ed_user) session.add_all([ User(name='wendy', fullname='Wendy Williams', password='foobar'), User(name='mary', fullname='Mary Contrary', password='xxg527'), User(name='fred', fullname='Fred Flinstone', password='blah')]) session.commit() print(session.query(User).offset(1).limit(2).all()) # SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, \ # users.password AS users_password FROM users LIMIT 2 OFFSET 1 for row in session.query(User, User.name).all(): print(row.User, row.name) #ed_user.name = "ed2" #session.commit() with session.begin(subtransactions=True): ed_user.name = "ed2" session.delete(ed_user)
c58a30b5b410595b76aebc8db7025a49b231b75b
scripts/most_recent.py
scripts/most_recent.py
from datetime import datetime from optparse import OptionParser from urllib2 import urlopen from BeautifulSoup import BeautifulSoup if __name__ == '__main__': usage = "%prog <USERNAME> <WEB SERVER>" parser = OptionParser(usage=usage) opts,args = parser.parse_args() if len(args) != 2: parser.error("All arguments are required.") username,server = args if server[:7] != "http://": server = "http://%s" % (server,) soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks' % (server, username))) gobbles_list = soup.find('ul', {'class': 'gobbles'}) timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0] print datetime.fromtimestamp(float(timestamp))
Add script to print time of most recent gobble.
Add script to print time of most recent gobble.
Python
agpl-3.0
foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm,foocorp/gnu-fm
Add script to print time of most recent gobble.
from datetime import datetime from optparse import OptionParser from urllib2 import urlopen from BeautifulSoup import BeautifulSoup if __name__ == '__main__': usage = "%prog <USERNAME> <WEB SERVER>" parser = OptionParser(usage=usage) opts,args = parser.parse_args() if len(args) != 2: parser.error("All arguments are required.") username,server = args if server[:7] != "http://": server = "http://%s" % (server,) soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks' % (server, username))) gobbles_list = soup.find('ul', {'class': 'gobbles'}) timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0] print datetime.fromtimestamp(float(timestamp))
<commit_before><commit_msg>Add script to print time of most recent gobble.<commit_after>
from datetime import datetime from optparse import OptionParser from urllib2 import urlopen from BeautifulSoup import BeautifulSoup if __name__ == '__main__': usage = "%prog <USERNAME> <WEB SERVER>" parser = OptionParser(usage=usage) opts,args = parser.parse_args() if len(args) != 2: parser.error("All arguments are required.") username,server = args if server[:7] != "http://": server = "http://%s" % (server,) soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks' % (server, username))) gobbles_list = soup.find('ul', {'class': 'gobbles'}) timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0] print datetime.fromtimestamp(float(timestamp))
Add script to print time of most recent gobble.from datetime import datetime from optparse import OptionParser from urllib2 import urlopen from BeautifulSoup import BeautifulSoup if __name__ == '__main__': usage = "%prog <USERNAME> <WEB SERVER>" parser = OptionParser(usage=usage) opts,args = parser.parse_args() if len(args) != 2: parser.error("All arguments are required.") username,server = args if server[:7] != "http://": server = "http://%s" % (server,) soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks' % (server, username))) gobbles_list = soup.find('ul', {'class': 'gobbles'}) timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0] print datetime.fromtimestamp(float(timestamp))
<commit_before><commit_msg>Add script to print time of most recent gobble.<commit_after>from datetime import datetime from optparse import OptionParser from urllib2 import urlopen from BeautifulSoup import BeautifulSoup if __name__ == '__main__': usage = "%prog <USERNAME> <WEB SERVER>" parser = OptionParser(usage=usage) opts,args = parser.parse_args() if len(args) != 2: parser.error("All arguments are required.") username,server = args if server[:7] != "http://": server = "http://%s" % (server,) soup = BeautifulSoup(urlopen('%s/user/%s/recent-tracks' % (server, username))) gobbles_list = soup.find('ul', {'class': 'gobbles'}) timestamp = gobbles_list.find('li')['about'].split('#')[1].split('.')[0] print datetime.fromtimestamp(float(timestamp))
febfaffb2bdbb9529cbb7df004b62ae8a2a07ad0
tests/test_jeffress_big.py
tests/test_jeffress_big.py
""" In this test we create a bigger Jeffress model. - It consists of 180 output neurons. - The axonal delay between the input neurons and the inter-layer neurons are sampled from 0ms to 100ms. - We create the same spiketrain with 50 ms difference between the two input neurons. One can see that the better the weights are adjusted, the more precise the outcome of the net is. """ import numpy as np from neurons import spiking, plotting, tools def test_jeffress(output_weight): neurons = 180 * 3 + 2 timesteps = 300 # Set the axonal delays ax_delays = np.zeros(neurons) ax_delays[1:181] = np.linspace(0, 100, 180) ax_delays[182:362] = np.linspace(100, 0, 180) model = spiking.SRM_X(neurons=neurons, threshold=np.array([1]*neurons), t_current=np.array([45]*neurons), t_membrane=np.array([50]*neurons), eta_reset=np.array([5.0]*neurons), ax_delay=ax_delays, simulation_window_size=500) weights = np.zeros((neurons, neurons)) # Connect to input layer weights[0, 1:181] = 1 weights[181, 182:361] = 1 # Connect inter-layer to output layer weights[1:181, 362:542] = output_weight * np.eye(180) weights[182:362, 362:542] = output_weight * np.eye(180) # Spiketrain: Same spiketrain on both input neurons, but one is sligthly delayed spiketrain = np.zeros((neurons, timesteps), dtype=bool) spiketrain[0, (50, 55, 60)] = 1 spiketrain[181, (0, 5, 10)] = 1 for t in range(timesteps): current = model.check_spikes(spiketrain, weights, t) print("output weight", output_weight) print("sum over the spikes in the output layer\n", np.sum(spiketrain, axis=1)[362:]) if __name__ == "__main__": output_weights = [2.5, 1.6, 1.5, 1.4, 1.3, 1.295] for output_weight in output_weights: test_jeffress(output_weight)
Add example, Jeffress net with 180 neurons
Add example, Jeffress net with 180 neurons
Python
bsd-2-clause
timqian/neurons,johannesmik/neurons
Add example, Jeffress net with 180 neurons
""" In this test we create a bigger Jeffress model. - It consists of 180 output neurons. - The axonal delay between the input neurons and the inter-layer neurons are sampled from 0ms to 100ms. - We create the same spiketrain with 50 ms difference between the two input neurons. One can see that the better the weights are adjusted, the more precise the outcome of the net is. """ import numpy as np from neurons import spiking, plotting, tools def test_jeffress(output_weight): neurons = 180 * 3 + 2 timesteps = 300 # Set the axonal delays ax_delays = np.zeros(neurons) ax_delays[1:181] = np.linspace(0, 100, 180) ax_delays[182:362] = np.linspace(100, 0, 180) model = spiking.SRM_X(neurons=neurons, threshold=np.array([1]*neurons), t_current=np.array([45]*neurons), t_membrane=np.array([50]*neurons), eta_reset=np.array([5.0]*neurons), ax_delay=ax_delays, simulation_window_size=500) weights = np.zeros((neurons, neurons)) # Connect to input layer weights[0, 1:181] = 1 weights[181, 182:361] = 1 # Connect inter-layer to output layer weights[1:181, 362:542] = output_weight * np.eye(180) weights[182:362, 362:542] = output_weight * np.eye(180) # Spiketrain: Same spiketrain on both input neurons, but one is sligthly delayed spiketrain = np.zeros((neurons, timesteps), dtype=bool) spiketrain[0, (50, 55, 60)] = 1 spiketrain[181, (0, 5, 10)] = 1 for t in range(timesteps): current = model.check_spikes(spiketrain, weights, t) print("output weight", output_weight) print("sum over the spikes in the output layer\n", np.sum(spiketrain, axis=1)[362:]) if __name__ == "__main__": output_weights = [2.5, 1.6, 1.5, 1.4, 1.3, 1.295] for output_weight in output_weights: test_jeffress(output_weight)
<commit_before><commit_msg>Add example, Jeffress net with 180 neurons<commit_after>
""" In this test we create a bigger Jeffress model. - It consists of 180 output neurons. - The axonal delay between the input neurons and the inter-layer neurons are sampled from 0ms to 100ms. - We create the same spiketrain with 50 ms difference between the two input neurons. One can see that the better the weights are adjusted, the more precise the outcome of the net is. """ import numpy as np from neurons import spiking, plotting, tools def test_jeffress(output_weight): neurons = 180 * 3 + 2 timesteps = 300 # Set the axonal delays ax_delays = np.zeros(neurons) ax_delays[1:181] = np.linspace(0, 100, 180) ax_delays[182:362] = np.linspace(100, 0, 180) model = spiking.SRM_X(neurons=neurons, threshold=np.array([1]*neurons), t_current=np.array([45]*neurons), t_membrane=np.array([50]*neurons), eta_reset=np.array([5.0]*neurons), ax_delay=ax_delays, simulation_window_size=500) weights = np.zeros((neurons, neurons)) # Connect to input layer weights[0, 1:181] = 1 weights[181, 182:361] = 1 # Connect inter-layer to output layer weights[1:181, 362:542] = output_weight * np.eye(180) weights[182:362, 362:542] = output_weight * np.eye(180) # Spiketrain: Same spiketrain on both input neurons, but one is sligthly delayed spiketrain = np.zeros((neurons, timesteps), dtype=bool) spiketrain[0, (50, 55, 60)] = 1 spiketrain[181, (0, 5, 10)] = 1 for t in range(timesteps): current = model.check_spikes(spiketrain, weights, t) print("output weight", output_weight) print("sum over the spikes in the output layer\n", np.sum(spiketrain, axis=1)[362:]) if __name__ == "__main__": output_weights = [2.5, 1.6, 1.5, 1.4, 1.3, 1.295] for output_weight in output_weights: test_jeffress(output_weight)
Add example, Jeffress net with 180 neurons""" In this test we create a bigger Jeffress model. - It consists of 180 output neurons. - The axonal delay between the input neurons and the inter-layer neurons are sampled from 0ms to 100ms. - We create the same spiketrain with 50 ms difference between the two input neurons. One can see that the better the weights are adjusted, the more precise the outcome of the net is. """ import numpy as np from neurons import spiking, plotting, tools def test_jeffress(output_weight): neurons = 180 * 3 + 2 timesteps = 300 # Set the axonal delays ax_delays = np.zeros(neurons) ax_delays[1:181] = np.linspace(0, 100, 180) ax_delays[182:362] = np.linspace(100, 0, 180) model = spiking.SRM_X(neurons=neurons, threshold=np.array([1]*neurons), t_current=np.array([45]*neurons), t_membrane=np.array([50]*neurons), eta_reset=np.array([5.0]*neurons), ax_delay=ax_delays, simulation_window_size=500) weights = np.zeros((neurons, neurons)) # Connect to input layer weights[0, 1:181] = 1 weights[181, 182:361] = 1 # Connect inter-layer to output layer weights[1:181, 362:542] = output_weight * np.eye(180) weights[182:362, 362:542] = output_weight * np.eye(180) # Spiketrain: Same spiketrain on both input neurons, but one is sligthly delayed spiketrain = np.zeros((neurons, timesteps), dtype=bool) spiketrain[0, (50, 55, 60)] = 1 spiketrain[181, (0, 5, 10)] = 1 for t in range(timesteps): current = model.check_spikes(spiketrain, weights, t) print("output weight", output_weight) print("sum over the spikes in the output layer\n", np.sum(spiketrain, axis=1)[362:]) if __name__ == "__main__": output_weights = [2.5, 1.6, 1.5, 1.4, 1.3, 1.295] for output_weight in output_weights: test_jeffress(output_weight)
<commit_before><commit_msg>Add example, Jeffress net with 180 neurons<commit_after>""" In this test we create a bigger Jeffress model. - It consists of 180 output neurons. - The axonal delay between the input neurons and the inter-layer neurons are sampled from 0ms to 100ms. - We create the same spiketrain with 50 ms difference between the two input neurons. One can see that the better the weights are adjusted, the more precise the outcome of the net is. """ import numpy as np from neurons import spiking, plotting, tools def test_jeffress(output_weight): neurons = 180 * 3 + 2 timesteps = 300 # Set the axonal delays ax_delays = np.zeros(neurons) ax_delays[1:181] = np.linspace(0, 100, 180) ax_delays[182:362] = np.linspace(100, 0, 180) model = spiking.SRM_X(neurons=neurons, threshold=np.array([1]*neurons), t_current=np.array([45]*neurons), t_membrane=np.array([50]*neurons), eta_reset=np.array([5.0]*neurons), ax_delay=ax_delays, simulation_window_size=500) weights = np.zeros((neurons, neurons)) # Connect to input layer weights[0, 1:181] = 1 weights[181, 182:361] = 1 # Connect inter-layer to output layer weights[1:181, 362:542] = output_weight * np.eye(180) weights[182:362, 362:542] = output_weight * np.eye(180) # Spiketrain: Same spiketrain on both input neurons, but one is sligthly delayed spiketrain = np.zeros((neurons, timesteps), dtype=bool) spiketrain[0, (50, 55, 60)] = 1 spiketrain[181, (0, 5, 10)] = 1 for t in range(timesteps): current = model.check_spikes(spiketrain, weights, t) print("output weight", output_weight) print("sum over the spikes in the output layer\n", np.sum(spiketrain, axis=1)[362:]) if __name__ == "__main__": output_weights = [2.5, 1.6, 1.5, 1.4, 1.3, 1.295] for output_weight in output_weights: test_jeffress(output_weight)
2a51077d78b7b30a1ca751595b6872b9532b070c
zthread/refactor/rename.py
zthread/refactor/rename.py
#!/usr/bin/env python import re,glob,os,sys def rename(dir_path, search_pattern): for file_path in glob.iglob(os.path.join(dir_path, search_pattern)): title, ext = os.path.splitext(os.path.basename(file_path)) res = "_".join([s.group(0).lower() for s in re.finditer(r"[A-Z][a-z]+", title)]) print title print ext print res if res: print "ddd" if ext == ".cxx": os.rename(file_path, os.path.join(dir_path, res + ".cc")) elif ext == ".h": print "psss me" os.rename(file_path, os.path.join(dir_path, res + ".h")) dir_path = sys.argv[1] rename(dir_path, "*.cxx") rename(dir_path, "*.h") # re.finditer(
Add a script to help to refactor source code.
Add a script to help to refactor source code.
Python
mit
YanShenChun/cppthread,YanShenChun/cppthread,YanShenChun/cppthread
Add a script to help to refactor source code.
#!/usr/bin/env python import re,glob,os,sys def rename(dir_path, search_pattern): for file_path in glob.iglob(os.path.join(dir_path, search_pattern)): title, ext = os.path.splitext(os.path.basename(file_path)) res = "_".join([s.group(0).lower() for s in re.finditer(r"[A-Z][a-z]+", title)]) print title print ext print res if res: print "ddd" if ext == ".cxx": os.rename(file_path, os.path.join(dir_path, res + ".cc")) elif ext == ".h": print "psss me" os.rename(file_path, os.path.join(dir_path, res + ".h")) dir_path = sys.argv[1] rename(dir_path, "*.cxx") rename(dir_path, "*.h") # re.finditer(
<commit_before><commit_msg>Add a script to help to refactor source code.<commit_after>
#!/usr/bin/env python import re,glob,os,sys def rename(dir_path, search_pattern): for file_path in glob.iglob(os.path.join(dir_path, search_pattern)): title, ext = os.path.splitext(os.path.basename(file_path)) res = "_".join([s.group(0).lower() for s in re.finditer(r"[A-Z][a-z]+", title)]) print title print ext print res if res: print "ddd" if ext == ".cxx": os.rename(file_path, os.path.join(dir_path, res + ".cc")) elif ext == ".h": print "psss me" os.rename(file_path, os.path.join(dir_path, res + ".h")) dir_path = sys.argv[1] rename(dir_path, "*.cxx") rename(dir_path, "*.h") # re.finditer(
Add a script to help to refactor source code.#!/usr/bin/env python import re,glob,os,sys def rename(dir_path, search_pattern): for file_path in glob.iglob(os.path.join(dir_path, search_pattern)): title, ext = os.path.splitext(os.path.basename(file_path)) res = "_".join([s.group(0).lower() for s in re.finditer(r"[A-Z][a-z]+", title)]) print title print ext print res if res: print "ddd" if ext == ".cxx": os.rename(file_path, os.path.join(dir_path, res + ".cc")) elif ext == ".h": print "psss me" os.rename(file_path, os.path.join(dir_path, res + ".h")) dir_path = sys.argv[1] rename(dir_path, "*.cxx") rename(dir_path, "*.h") # re.finditer(
<commit_before><commit_msg>Add a script to help to refactor source code.<commit_after>#!/usr/bin/env python import re,glob,os,sys def rename(dir_path, search_pattern): for file_path in glob.iglob(os.path.join(dir_path, search_pattern)): title, ext = os.path.splitext(os.path.basename(file_path)) res = "_".join([s.group(0).lower() for s in re.finditer(r"[A-Z][a-z]+", title)]) print title print ext print res if res: print "ddd" if ext == ".cxx": os.rename(file_path, os.path.join(dir_path, res + ".cc")) elif ext == ".h": print "psss me" os.rename(file_path, os.path.join(dir_path, res + ".h")) dir_path = sys.argv[1] rename(dir_path, "*.cxx") rename(dir_path, "*.h") # re.finditer(
52ca0942faaa52138805f966cb0b242009516a06
py/coin-change.py
py/coin-change.py
class Solution(object): def coinChange(self, coins, amount): """ :type coins: List[int] :type amount: int :rtype: int """ min_count = [0] * (amount + 1) for c in coins: if c <= amount: min_count[c] = 1 for i in xrange(min(coins), amount + 1): next_min = None for c in coins: if i == c or (i > c and min_count[i - c] > 0): t = min_count[i - c] + 1 if next_min is None or t < next_min: next_min = t if next_min is not None: min_count[i] = next_min if min_count[amount] == 0 and amount > 0: return -1 else: return min_count[amount]
Add py solution for 322. Coin Change
Add py solution for 322. Coin Change 322. Coin Change: https://leetcode.com/problems/coin-change/
Python
apache-2.0
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
Add py solution for 322. Coin Change 322. Coin Change: https://leetcode.com/problems/coin-change/
class Solution(object): def coinChange(self, coins, amount): """ :type coins: List[int] :type amount: int :rtype: int """ min_count = [0] * (amount + 1) for c in coins: if c <= amount: min_count[c] = 1 for i in xrange(min(coins), amount + 1): next_min = None for c in coins: if i == c or (i > c and min_count[i - c] > 0): t = min_count[i - c] + 1 if next_min is None or t < next_min: next_min = t if next_min is not None: min_count[i] = next_min if min_count[amount] == 0 and amount > 0: return -1 else: return min_count[amount]
<commit_before><commit_msg>Add py solution for 322. Coin Change 322. Coin Change: https://leetcode.com/problems/coin-change/<commit_after>
class Solution(object): def coinChange(self, coins, amount): """ :type coins: List[int] :type amount: int :rtype: int """ min_count = [0] * (amount + 1) for c in coins: if c <= amount: min_count[c] = 1 for i in xrange(min(coins), amount + 1): next_min = None for c in coins: if i == c or (i > c and min_count[i - c] > 0): t = min_count[i - c] + 1 if next_min is None or t < next_min: next_min = t if next_min is not None: min_count[i] = next_min if min_count[amount] == 0 and amount > 0: return -1 else: return min_count[amount]
Add py solution for 322. Coin Change 322. Coin Change: https://leetcode.com/problems/coin-change/class Solution(object): def coinChange(self, coins, amount): """ :type coins: List[int] :type amount: int :rtype: int """ min_count = [0] * (amount + 1) for c in coins: if c <= amount: min_count[c] = 1 for i in xrange(min(coins), amount + 1): next_min = None for c in coins: if i == c or (i > c and min_count[i - c] > 0): t = min_count[i - c] + 1 if next_min is None or t < next_min: next_min = t if next_min is not None: min_count[i] = next_min if min_count[amount] == 0 and amount > 0: return -1 else: return min_count[amount]
<commit_before><commit_msg>Add py solution for 322. Coin Change 322. Coin Change: https://leetcode.com/problems/coin-change/<commit_after>class Solution(object): def coinChange(self, coins, amount): """ :type coins: List[int] :type amount: int :rtype: int """ min_count = [0] * (amount + 1) for c in coins: if c <= amount: min_count[c] = 1 for i in xrange(min(coins), amount + 1): next_min = None for c in coins: if i == c or (i > c and min_count[i - c] > 0): t = min_count[i - c] + 1 if next_min is None or t < next_min: next_min = t if next_min is not None: min_count[i] = next_min if min_count[amount] == 0 and amount > 0: return -1 else: return min_count[amount]
8ddda76d25fd2572eccd24353d2347242af063fa
benchexec/tools/cbmc-path.py
benchexec/tools/cbmc-path.py
""" BenchExec is a framework for reliable benchmarking. This file is part of BenchExec. Copyright (C) 2007-2018 Dirk Beyer All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import benchexec.util as util from . import cbmc class Tool(cbmc.Tool): """ Tool info for CBMC Path (http://www.cprover.org/cbmc/). It always adds --xml-ui to the command-line arguments for easier parsing of the output, unless a propertyfile is passed -- in which case running under SV-COMP conditions is assumed. """ def name(self): return 'CBMC Path'
Add tool-info module for CBMC Path
Add tool-info module for CBMC Path This module inherits from CBMC's tool-info module, except that it returns "CBMC Path" as the tool name. The --paths switch is specified in the wrapper script `cbmc`, which invokes `cbmc-binary`.
Python
apache-2.0
dbeyer/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,dbeyer/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,dbeyer/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,dbeyer/benchexec,sosy-lab/benchexec
Add tool-info module for CBMC Path This module inherits from CBMC's tool-info module, except that it returns "CBMC Path" as the tool name. The --paths switch is specified in the wrapper script `cbmc`, which invokes `cbmc-binary`.
""" BenchExec is a framework for reliable benchmarking. This file is part of BenchExec. Copyright (C) 2007-2018 Dirk Beyer All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import benchexec.util as util from . import cbmc class Tool(cbmc.Tool): """ Tool info for CBMC Path (http://www.cprover.org/cbmc/). It always adds --xml-ui to the command-line arguments for easier parsing of the output, unless a propertyfile is passed -- in which case running under SV-COMP conditions is assumed. """ def name(self): return 'CBMC Path'
<commit_before><commit_msg>Add tool-info module for CBMC Path This module inherits from CBMC's tool-info module, except that it returns "CBMC Path" as the tool name. The --paths switch is specified in the wrapper script `cbmc`, which invokes `cbmc-binary`.<commit_after>
""" BenchExec is a framework for reliable benchmarking. This file is part of BenchExec. Copyright (C) 2007-2018 Dirk Beyer All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import benchexec.util as util from . import cbmc class Tool(cbmc.Tool): """ Tool info for CBMC Path (http://www.cprover.org/cbmc/). It always adds --xml-ui to the command-line arguments for easier parsing of the output, unless a propertyfile is passed -- in which case running under SV-COMP conditions is assumed. """ def name(self): return 'CBMC Path'
Add tool-info module for CBMC Path This module inherits from CBMC's tool-info module, except that it returns "CBMC Path" as the tool name. The --paths switch is specified in the wrapper script `cbmc`, which invokes `cbmc-binary`.""" BenchExec is a framework for reliable benchmarking. This file is part of BenchExec. Copyright (C) 2007-2018 Dirk Beyer All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import benchexec.util as util from . import cbmc class Tool(cbmc.Tool): """ Tool info for CBMC Path (http://www.cprover.org/cbmc/). It always adds --xml-ui to the command-line arguments for easier parsing of the output, unless a propertyfile is passed -- in which case running under SV-COMP conditions is assumed. """ def name(self): return 'CBMC Path'
<commit_before><commit_msg>Add tool-info module for CBMC Path This module inherits from CBMC's tool-info module, except that it returns "CBMC Path" as the tool name. The --paths switch is specified in the wrapper script `cbmc`, which invokes `cbmc-binary`.<commit_after>""" BenchExec is a framework for reliable benchmarking. This file is part of BenchExec. Copyright (C) 2007-2018 Dirk Beyer All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import benchexec.util as util from . import cbmc class Tool(cbmc.Tool): """ Tool info for CBMC Path (http://www.cprover.org/cbmc/). It always adds --xml-ui to the command-line arguments for easier parsing of the output, unless a propertyfile is passed -- in which case running under SV-COMP conditions is assumed. """ def name(self): return 'CBMC Path'
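The tool-info module in this record shows the minimal inheritance pattern: reuse cbmc.Tool and override only name(), with the actual command-line switch handled by the wrapper script. A hypothetical sibling module for another wrapper-script variant would follow the same shape; the module and class below are illustrative only and not part of BenchExec:

    # benchexec/tools/<variant>.py -- illustrative sketch, not a real BenchExec module
    from . import cbmc

    class Tool(cbmc.Tool):
        """Tool info for a CBMC variant driven by a wrapper script."""

        def name(self):
            return 'CBMC Variant'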
8aaa76b3adb7f7c661eba8d1f4c50e8b0f03fee4
s3_to_azure/move_files.py
s3_to_azure/move_files.py
import sys import time from azure.storage import BlobService from azure import WindowsAzureMissingResourceError from CREDENTIALS import account_name, account_key db = BlobService(account_name=account_name, account_key=account_key) ### bucket = 'crawl-data' in_progress = set() # existing = set([x.name for x in db.list_blobs(bucket, 'common-crawl/crawl-data/CC-MAIN-2014-23/')]) todo = [x.rstrip() for x in sys.stdin.readlines()] todo = [x for x in todo if x not in existing] ### while todo or in_progress: new_progress = set() for path in in_progress: props = db.get_blob_properties(bucket, path) if props['x-ms-copy-status'] == 'success': print '\t%s completed' % path continue new_progress.add(path) time.sleep(0.25) print 'Task queue length is %d' % len(new_progress) print 'TODO queue length is %d' % len(todo) # Populate the queue while todo and len(new_progress) < 256: path = todo.pop() # If it exists, skip it -- only add if it's missing try: resp = db.get_blob_properties(bucket, path) except WindowsAzureMissingResourceError: db.copy_blob(bucket, path, 'https://aws-publicdatasets.s3.amazonaws.com/' + path) new_progress.add(path) in_progress = new_progress time.sleep(300)
Add a script that moves files to Azure storage using Copy Blob API
Add a script that moves files to Azure storage using Copy Blob API
Python
mit
Smerity/cc-quick-scripts,Smerity/cc-quick-scripts
Add a script that moves files to Azure storage using Copy Blob API
import sys import time from azure.storage import BlobService from azure import WindowsAzureMissingResourceError from CREDENTIALS import account_name, account_key db = BlobService(account_name=account_name, account_key=account_key) ### bucket = 'crawl-data' in_progress = set() # existing = set([x.name for x in db.list_blobs(bucket, 'common-crawl/crawl-data/CC-MAIN-2014-23/')]) todo = [x.rstrip() for x in sys.stdin.readlines()] todo = [x for x in todo if x not in existing] ### while todo or in_progress: new_progress = set() for path in in_progress: props = db.get_blob_properties(bucket, path) if props['x-ms-copy-status'] == 'success': print '\t%s completed' % path continue new_progress.add(path) time.sleep(0.25) print 'Task queue length is %d' % len(new_progress) print 'TODO queue length is %d' % len(todo) # Populate the queue while todo and len(new_progress) < 256: path = todo.pop() # If it exists, skip it -- only add if it's missing try: resp = db.get_blob_properties(bucket, path) except WindowsAzureMissingResourceError: db.copy_blob(bucket, path, 'https://aws-publicdatasets.s3.amazonaws.com/' + path) new_progress.add(path) in_progress = new_progress time.sleep(300)
<commit_before><commit_msg>Add a script that moves files to Azure storage using Copy Blob API<commit_after>
import sys import time from azure.storage import BlobService from azure import WindowsAzureMissingResourceError from CREDENTIALS import account_name, account_key db = BlobService(account_name=account_name, account_key=account_key) ### bucket = 'crawl-data' in_progress = set() # existing = set([x.name for x in db.list_blobs(bucket, 'common-crawl/crawl-data/CC-MAIN-2014-23/')]) todo = [x.rstrip() for x in sys.stdin.readlines()] todo = [x for x in todo if x not in existing] ### while todo or in_progress: new_progress = set() for path in in_progress: props = db.get_blob_properties(bucket, path) if props['x-ms-copy-status'] == 'success': print '\t%s completed' % path continue new_progress.add(path) time.sleep(0.25) print 'Task queue length is %d' % len(new_progress) print 'TODO queue length is %d' % len(todo) # Populate the queue while todo and len(new_progress) < 256: path = todo.pop() # If it exists, skip it -- only add if it's missing try: resp = db.get_blob_properties(bucket, path) except WindowsAzureMissingResourceError: db.copy_blob(bucket, path, 'https://aws-publicdatasets.s3.amazonaws.com/' + path) new_progress.add(path) in_progress = new_progress time.sleep(300)
Add a script that moves files to Azure storage using Copy Blob APIimport sys import time from azure.storage import BlobService from azure import WindowsAzureMissingResourceError from CREDENTIALS import account_name, account_key db = BlobService(account_name=account_name, account_key=account_key) ### bucket = 'crawl-data' in_progress = set() # existing = set([x.name for x in db.list_blobs(bucket, 'common-crawl/crawl-data/CC-MAIN-2014-23/')]) todo = [x.rstrip() for x in sys.stdin.readlines()] todo = [x for x in todo if x not in existing] ### while todo or in_progress: new_progress = set() for path in in_progress: props = db.get_blob_properties(bucket, path) if props['x-ms-copy-status'] == 'success': print '\t%s completed' % path continue new_progress.add(path) time.sleep(0.25) print 'Task queue length is %d' % len(new_progress) print 'TODO queue length is %d' % len(todo) # Populate the queue while todo and len(new_progress) < 256: path = todo.pop() # If it exists, skip it -- only add if it's missing try: resp = db.get_blob_properties(bucket, path) except WindowsAzureMissingResourceError: db.copy_blob(bucket, path, 'https://aws-publicdatasets.s3.amazonaws.com/' + path) new_progress.add(path) in_progress = new_progress time.sleep(300)
<commit_before><commit_msg>Add a script that moves files to Azure storage using Copy Blob API<commit_after>import sys import time from azure.storage import BlobService from azure import WindowsAzureMissingResourceError from CREDENTIALS import account_name, account_key db = BlobService(account_name=account_name, account_key=account_key) ### bucket = 'crawl-data' in_progress = set() # existing = set([x.name for x in db.list_blobs(bucket, 'common-crawl/crawl-data/CC-MAIN-2014-23/')]) todo = [x.rstrip() for x in sys.stdin.readlines()] todo = [x for x in todo if x not in existing] ### while todo or in_progress: new_progress = set() for path in in_progress: props = db.get_blob_properties(bucket, path) if props['x-ms-copy-status'] == 'success': print '\t%s completed' % path continue new_progress.add(path) time.sleep(0.25) print 'Task queue length is %d' % len(new_progress) print 'TODO queue length is %d' % len(todo) # Populate the queue while todo and len(new_progress) < 256: path = todo.pop() # If it exists, skip it -- only add if it's missing try: resp = db.get_blob_properties(bucket, path) except WindowsAzureMissingResourceError: db.copy_blob(bucket, path, 'https://aws-publicdatasets.s3.amazonaws.com/' + path) new_progress.add(path) in_progress = new_progress time.sleep(300)
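Usage note for the mover script above: it reads one S3 key per line from stdin and needs a CREDENTIALS.py providing account_name and account_key, as imported at the top of the script. A minimal sketch for preparing that input list — the key shown is a placeholder, not a real path:

    # Build the newline-separated key list the script reads from stdin.
    paths = [
        "common-crawl/crawl-data/CC-MAIN-2014-23/segments/PLACEHOLDER.warc.gz",  # placeholder key
    ]
    with open("todo_paths.txt", "w") as fh:
        fh.write("\n".join(paths))
    # then: python move_files.py < todo_paths.txt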
c3ec949a888896cc2457c391f797c504538bcf51
junction/tickets/management/commands/fill_data.py
junction/tickets/management/commands/fill_data.py
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import os import optparse # Third Party Stuff from django.core.management.base import BaseCommand, CommandError from django.db import transaction # Junction Stuff from junction.tickets.models import Ticket class Command(BaseCommand): """ Read a csv file containing ticket numbers and fill all the details for it. """ option_list = BaseCommand.option_list + ( optparse.make_option( "-i", "--infile", dest = "filename", help = "specify import file", metavar = "FILE" ), ) option_list = option_list + ( optparse.make_option( "-o", "--outfile", dest = "category", help = "specify output file name", metavar = "FILE" ), ) @transaction.atomic def handle(self, *args, **options): if len(args) != 2: raise CommandError('Usage: python manage.py fill_data <in_file> <out_file>') in_file, out_file = args ticket_nums = [line.rstrip('\n') for line in open(in_file).readlines()] fh = open(out_file, 'w') header = ','.join(('ticket_num', 'name', 'email', 'address', '\n')) fh.write(header) for ticket_num in ticket_nums: ticket = Ticket.objects.get(ticket_no=ticket_num) details = ticket.others for attendee in details['attendee']: if attendee['ticketNo'] == ticket_num: attendee = attendee break else: attendee = {} if not ticket.address: ticket.address = '' data = data = ','.join((ticket_num, ticket.name, attendee['email'], ticket.address, '\n')) fh.write(data)
Add management command to fill data for given ticket nums
Add management command to fill data for given ticket nums `fill_data` management command takes input file which has a list of ticket numbers and outputs a csv with all details of given ticket numbers
Python
mit
nava45/junction,nava45/junction,ChillarAnand/junction,pythonindia/junction,pythonindia/junction,pythonindia/junction,pythonindia/junction,ChillarAnand/junction,ChillarAnand/junction,nava45/junction,nava45/junction,ChillarAnand/junction
Add management command to fill data for given ticket nums `fill_data` management command takes input file which has a list of ticket numbers and outputs a csv with all details of given ticket numbers
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import os import optparse # Third Party Stuff from django.core.management.base import BaseCommand, CommandError from django.db import transaction # Junction Stuff from junction.tickets.models import Ticket class Command(BaseCommand): """ Read a csv file containing ticket numbers and fill all the details for it. """ option_list = BaseCommand.option_list + ( optparse.make_option( "-i", "--infile", dest = "filename", help = "specify import file", metavar = "FILE" ), ) option_list = option_list + ( optparse.make_option( "-o", "--outfile", dest = "category", help = "specify output file name", metavar = "FILE" ), ) @transaction.atomic def handle(self, *args, **options): if len(args) != 2: raise CommandError('Usage: python manage.py fill_data <in_file> <out_file>') in_file, out_file = args ticket_nums = [line.rstrip('\n') for line in open(in_file).readlines()] fh = open(out_file, 'w') header = ','.join(('ticket_num', 'name', 'email', 'address', '\n')) fh.write(header) for ticket_num in ticket_nums: ticket = Ticket.objects.get(ticket_no=ticket_num) details = ticket.others for attendee in details['attendee']: if attendee['ticketNo'] == ticket_num: attendee = attendee break else: attendee = {} if not ticket.address: ticket.address = '' data = data = ','.join((ticket_num, ticket.name, attendee['email'], ticket.address, '\n')) fh.write(data)
<commit_before><commit_msg>Add management command to fill data for given ticket nums `fill_data` management command takes input file which has a list of ticket numbers and outputs a csv with all details of given ticket numbers<commit_after>
# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import os import optparse # Third Party Stuff from django.core.management.base import BaseCommand, CommandError from django.db import transaction # Junction Stuff from junction.tickets.models import Ticket class Command(BaseCommand): """ Read a csv file containing ticket numbers and fill all the details for it. """ option_list = BaseCommand.option_list + ( optparse.make_option( "-i", "--infile", dest = "filename", help = "specify import file", metavar = "FILE" ), ) option_list = option_list + ( optparse.make_option( "-o", "--outfile", dest = "category", help = "specify output file name", metavar = "FILE" ), ) @transaction.atomic def handle(self, *args, **options): if len(args) != 2: raise CommandError('Usage: python manage.py fill_data <in_file> <out_file>') in_file, out_file = args ticket_nums = [line.rstrip('\n') for line in open(in_file).readlines()] fh = open(out_file, 'w') header = ','.join(('ticket_num', 'name', 'email', 'address', '\n')) fh.write(header) for ticket_num in ticket_nums: ticket = Ticket.objects.get(ticket_no=ticket_num) details = ticket.others for attendee in details['attendee']: if attendee['ticketNo'] == ticket_num: attendee = attendee break else: attendee = {} if not ticket.address: ticket.address = '' data = data = ','.join((ticket_num, ticket.name, attendee['email'], ticket.address, '\n')) fh.write(data)
Add management command to fill data for given ticket nums `fill_data` management command takes input file which has a list of ticket numbers and outputs a csv with all details of given ticket numbers# -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import os import optparse # Third Party Stuff from django.core.management.base import BaseCommand, CommandError from django.db import transaction # Junction Stuff from junction.tickets.models import Ticket class Command(BaseCommand): """ Read a csv file containing ticket numbers and fill all the details for it. """ option_list = BaseCommand.option_list + ( optparse.make_option( "-i", "--infile", dest = "filename", help = "specify import file", metavar = "FILE" ), ) option_list = option_list + ( optparse.make_option( "-o", "--outfile", dest = "category", help = "specify output file name", metavar = "FILE" ), ) @transaction.atomic def handle(self, *args, **options): if len(args) != 2: raise CommandError('Usage: python manage.py fill_data <in_file> <out_file>') in_file, out_file = args ticket_nums = [line.rstrip('\n') for line in open(in_file).readlines()] fh = open(out_file, 'w') header = ','.join(('ticket_num', 'name', 'email', 'address', '\n')) fh.write(header) for ticket_num in ticket_nums: ticket = Ticket.objects.get(ticket_no=ticket_num) details = ticket.others for attendee in details['attendee']: if attendee['ticketNo'] == ticket_num: attendee = attendee break else: attendee = {} if not ticket.address: ticket.address = '' data = data = ','.join((ticket_num, ticket.name, attendee['email'], ticket.address, '\n')) fh.write(data)
<commit_before><commit_msg>Add management command to fill data for given ticket nums `fill_data` management command takes input file which has a list of ticket numbers and outputs a csv with all details of given ticket numbers<commit_after># -*- coding: utf-8 -*- from __future__ import absolute_import, print_function, unicode_literals import os import optparse # Third Party Stuff from django.core.management.base import BaseCommand, CommandError from django.db import transaction # Junction Stuff from junction.tickets.models import Ticket class Command(BaseCommand): """ Read a csv file containing ticket numbers and fill all the details for it. """ option_list = BaseCommand.option_list + ( optparse.make_option( "-i", "--infile", dest = "filename", help = "specify import file", metavar = "FILE" ), ) option_list = option_list + ( optparse.make_option( "-o", "--outfile", dest = "category", help = "specify output file name", metavar = "FILE" ), ) @transaction.atomic def handle(self, *args, **options): if len(args) != 2: raise CommandError('Usage: python manage.py fill_data <in_file> <out_file>') in_file, out_file = args ticket_nums = [line.rstrip('\n') for line in open(in_file).readlines()] fh = open(out_file, 'w') header = ','.join(('ticket_num', 'name', 'email', 'address', '\n')) fh.write(header) for ticket_num in ticket_nums: ticket = Ticket.objects.get(ticket_no=ticket_num) details = ticket.others for attendee in details['attendee']: if attendee['ticketNo'] == ticket_num: attendee = attendee break else: attendee = {} if not ticket.address: ticket.address = '' data = data = ','.join((ticket_num, ticket.name, attendee['email'], ticket.address, '\n')) fh.write(data)
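Usage sketch for the fill_data command above, matching its own usage string ("python manage.py fill_data <in_file> <out_file>"); the file names here are hypothetical:

    # Shell: python manage.py fill_data tickets.txt details.csv
    # Or from Python via Django's management API (positional args, old-style command):
    from django.core.management import call_command
    call_command('fill_data', 'tickets.txt', 'details.csv')
    # details.csv then contains the columns: ticket_num, name, email, address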
e86c87f4d97677f18522321ca06f79373722a610
senlin/tests/middleware/test_trust_middleware.py
senlin/tests/middleware/test_trust_middleware.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from senlin.api.middleware import trust from senlin.db import api as db_api from senlin.drivers.openstack import sdk from senlin.tests.common import base from senlin.tests.common import utils class TestTrustMiddleware(base.SenlinTestCase): def setUp(self): super(TestTrustMiddleware, self).setUp() self.context = utils.dummy_context() self.middleware = trust.TrustMiddleware(None) @mock.patch('senlin.db.api') def test_get_trust_already_exists(self, mock_db_api): res = mock.MagicMock() res.cred = {} res.cred['openstack'] = {} res.cred['openstack']['trust'] = 'FAKE_TRUST_ID' db_api.cred_get = mock.MagicMock(return_value=res) trust_id = self.middleware._get_trust(self.context) self.assertEqual(res.cred['openstack']['trust'], trust_id) self.assertTrue(db_api.cred_get.called) @mock.patch('senlin.drivers.openstack.sdk') @mock.patch('senlin.db.api') def test_get_trust_not_exists(self, mock_db_api, mock_sdk): db_api.cred_get = mock.MagicMock(return_value=None) sdk.get_service_user_id = \ mock.MagicMock(return_value='FAKE_ADMIN_ID') client = mock.MagicMock() client.trust_get_by_trustor = mock.MagicMock(return_value=None) test_trust = mock.MagicMock() test_trust.id = "FAKE_TRUST_ID" client.trust_create = mock.MagicMock(return_value=test_trust) db_api.cred_create = mock.MagicMock() with mock.patch( 'senlin.drivers.openstack.keystone_v3.KeystoneClient', return_value=client): trust_id = self.middleware._get_trust(self.context) self.assertEqual(trust_id, test_trust.id) self.assertTrue(db_api.cred_get.called) self.assertTrue(sdk.get_service_user_id.called) self.assertTrue(client.trust_get_by_trustor.called) self.assertTrue(client.trust_create.called) self.assertTrue(db_api.cred_create.called)
Add test case for Trust Middleware
Add test case for Trust Middleware This patch adds a test case for Trust Middleware in order to verify its correctness. Change-Id: I26a65d8ac617e3011dc2be104138e9d177c98c80 Closes-Bug: #1469947
Python
apache-2.0
stackforge/senlin,openstack/senlin,Alzon/senlin,openstack/senlin,tengqm/senlin-container,openstack/senlin,tengqm/senlin-container,Alzon/senlin,stackforge/senlin
Add test case for Trust Middleware This patch adds a test case for Trust Middleware in order to verify its correctness. Change-Id: I26a65d8ac617e3011dc2be104138e9d177c98c80 Closes-Bug: #1469947
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from senlin.api.middleware import trust from senlin.db import api as db_api from senlin.drivers.openstack import sdk from senlin.tests.common import base from senlin.tests.common import utils class TestTrustMiddleware(base.SenlinTestCase): def setUp(self): super(TestTrustMiddleware, self).setUp() self.context = utils.dummy_context() self.middleware = trust.TrustMiddleware(None) @mock.patch('senlin.db.api') def test_get_trust_already_exists(self, mock_db_api): res = mock.MagicMock() res.cred = {} res.cred['openstack'] = {} res.cred['openstack']['trust'] = 'FAKE_TRUST_ID' db_api.cred_get = mock.MagicMock(return_value=res) trust_id = self.middleware._get_trust(self.context) self.assertEqual(res.cred['openstack']['trust'], trust_id) self.assertTrue(db_api.cred_get.called) @mock.patch('senlin.drivers.openstack.sdk') @mock.patch('senlin.db.api') def test_get_trust_not_exists(self, mock_db_api, mock_sdk): db_api.cred_get = mock.MagicMock(return_value=None) sdk.get_service_user_id = \ mock.MagicMock(return_value='FAKE_ADMIN_ID') client = mock.MagicMock() client.trust_get_by_trustor = mock.MagicMock(return_value=None) test_trust = mock.MagicMock() test_trust.id = "FAKE_TRUST_ID" client.trust_create = mock.MagicMock(return_value=test_trust) db_api.cred_create = mock.MagicMock() with mock.patch( 'senlin.drivers.openstack.keystone_v3.KeystoneClient', return_value=client): trust_id = self.middleware._get_trust(self.context) self.assertEqual(trust_id, test_trust.id) self.assertTrue(db_api.cred_get.called) self.assertTrue(sdk.get_service_user_id.called) self.assertTrue(client.trust_get_by_trustor.called) self.assertTrue(client.trust_create.called) self.assertTrue(db_api.cred_create.called)
<commit_before><commit_msg>Add test case for Trust Middleware This patch adds a test case for Trust Middleware in order to verify its correctness. Change-Id: I26a65d8ac617e3011dc2be104138e9d177c98c80 Closes-Bug: #1469947<commit_after>
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from senlin.api.middleware import trust from senlin.db import api as db_api from senlin.drivers.openstack import sdk from senlin.tests.common import base from senlin.tests.common import utils class TestTrustMiddleware(base.SenlinTestCase): def setUp(self): super(TestTrustMiddleware, self).setUp() self.context = utils.dummy_context() self.middleware = trust.TrustMiddleware(None) @mock.patch('senlin.db.api') def test_get_trust_already_exists(self, mock_db_api): res = mock.MagicMock() res.cred = {} res.cred['openstack'] = {} res.cred['openstack']['trust'] = 'FAKE_TRUST_ID' db_api.cred_get = mock.MagicMock(return_value=res) trust_id = self.middleware._get_trust(self.context) self.assertEqual(res.cred['openstack']['trust'], trust_id) self.assertTrue(db_api.cred_get.called) @mock.patch('senlin.drivers.openstack.sdk') @mock.patch('senlin.db.api') def test_get_trust_not_exists(self, mock_db_api, mock_sdk): db_api.cred_get = mock.MagicMock(return_value=None) sdk.get_service_user_id = \ mock.MagicMock(return_value='FAKE_ADMIN_ID') client = mock.MagicMock() client.trust_get_by_trustor = mock.MagicMock(return_value=None) test_trust = mock.MagicMock() test_trust.id = "FAKE_TRUST_ID" client.trust_create = mock.MagicMock(return_value=test_trust) db_api.cred_create = mock.MagicMock() with mock.patch( 'senlin.drivers.openstack.keystone_v3.KeystoneClient', return_value=client): trust_id = self.middleware._get_trust(self.context) self.assertEqual(trust_id, test_trust.id) self.assertTrue(db_api.cred_get.called) self.assertTrue(sdk.get_service_user_id.called) self.assertTrue(client.trust_get_by_trustor.called) self.assertTrue(client.trust_create.called) self.assertTrue(db_api.cred_create.called)
Add test case for Trust Middleware This patch adds a test case for Trust Middleware in order to verify its correctness. Change-Id: I26a65d8ac617e3011dc2be104138e9d177c98c80 Closes-Bug: #1469947# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from senlin.api.middleware import trust from senlin.db import api as db_api from senlin.drivers.openstack import sdk from senlin.tests.common import base from senlin.tests.common import utils class TestTrustMiddleware(base.SenlinTestCase): def setUp(self): super(TestTrustMiddleware, self).setUp() self.context = utils.dummy_context() self.middleware = trust.TrustMiddleware(None) @mock.patch('senlin.db.api') def test_get_trust_already_exists(self, mock_db_api): res = mock.MagicMock() res.cred = {} res.cred['openstack'] = {} res.cred['openstack']['trust'] = 'FAKE_TRUST_ID' db_api.cred_get = mock.MagicMock(return_value=res) trust_id = self.middleware._get_trust(self.context) self.assertEqual(res.cred['openstack']['trust'], trust_id) self.assertTrue(db_api.cred_get.called) @mock.patch('senlin.drivers.openstack.sdk') @mock.patch('senlin.db.api') def test_get_trust_not_exists(self, mock_db_api, mock_sdk): db_api.cred_get = mock.MagicMock(return_value=None) sdk.get_service_user_id = \ mock.MagicMock(return_value='FAKE_ADMIN_ID') client = mock.MagicMock() client.trust_get_by_trustor = mock.MagicMock(return_value=None) test_trust = mock.MagicMock() test_trust.id = "FAKE_TRUST_ID" client.trust_create = mock.MagicMock(return_value=test_trust) db_api.cred_create = mock.MagicMock() with mock.patch( 'senlin.drivers.openstack.keystone_v3.KeystoneClient', return_value=client): trust_id = self.middleware._get_trust(self.context) self.assertEqual(trust_id, test_trust.id) self.assertTrue(db_api.cred_get.called) self.assertTrue(sdk.get_service_user_id.called) self.assertTrue(client.trust_get_by_trustor.called) self.assertTrue(client.trust_create.called) self.assertTrue(db_api.cred_create.called)
<commit_before><commit_msg>Add test case for Trust Middleware This patch adds a test case for Trust Middleware in order to verify its correctness. Change-Id: I26a65d8ac617e3011dc2be104138e9d177c98c80 Closes-Bug: #1469947<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from senlin.api.middleware import trust from senlin.db import api as db_api from senlin.drivers.openstack import sdk from senlin.tests.common import base from senlin.tests.common import utils class TestTrustMiddleware(base.SenlinTestCase): def setUp(self): super(TestTrustMiddleware, self).setUp() self.context = utils.dummy_context() self.middleware = trust.TrustMiddleware(None) @mock.patch('senlin.db.api') def test_get_trust_already_exists(self, mock_db_api): res = mock.MagicMock() res.cred = {} res.cred['openstack'] = {} res.cred['openstack']['trust'] = 'FAKE_TRUST_ID' db_api.cred_get = mock.MagicMock(return_value=res) trust_id = self.middleware._get_trust(self.context) self.assertEqual(res.cred['openstack']['trust'], trust_id) self.assertTrue(db_api.cred_get.called) @mock.patch('senlin.drivers.openstack.sdk') @mock.patch('senlin.db.api') def test_get_trust_not_exists(self, mock_db_api, mock_sdk): db_api.cred_get = mock.MagicMock(return_value=None) sdk.get_service_user_id = \ mock.MagicMock(return_value='FAKE_ADMIN_ID') client = mock.MagicMock() client.trust_get_by_trustor = mock.MagicMock(return_value=None) test_trust = mock.MagicMock() test_trust.id = "FAKE_TRUST_ID" client.trust_create = mock.MagicMock(return_value=test_trust) db_api.cred_create = mock.MagicMock() with mock.patch( 'senlin.drivers.openstack.keystone_v3.KeystoneClient', return_value=client): trust_id = self.middleware._get_trust(self.context) self.assertEqual(trust_id, test_trust.id) self.assertTrue(db_api.cred_get.called) self.assertTrue(sdk.get_service_user_id.called) self.assertTrue(client.trust_get_by_trustor.called) self.assertTrue(client.trust_create.called) self.assertTrue(db_api.cred_create.called)
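For reference, the credential layout the first test above relies on; _get_trust() returns cred['openstack']['trust'] directly when a stored credential exists:

    stored_cred = {
        'openstack': {
            'trust': 'FAKE_TRUST_ID',  # value returned by _get_trust()
        }
    }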
ddbda12b1cb83886b8edab6e50bdde8a6a2d2672
compass/tests/test_views.py
compass/tests/test_views.py
from django.test import TestCase, Client from ..models import Category, Book class CompassTest(TestCase): @classmethod def setUpClass(cls): cls.client = Client() super(CompassTest, cls).setUpClass() def test_can_view_search_page(self): response = self.client.get('/') self.assertContains(response, '<input name="title" type="text"/>') def test_can_view_categories_page(self): response = self.client.get('/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_books_page(self): response = self.client.get('/categories/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_category_page(self): Category.create(title="Mock Category") response = self.client.get('/categories/mock-category-2016-08-22') self.assertContains(response, '<title>Mock Category</title>') def test_can_view_book_page(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.get( '/categories/mock-category-2016-08-22/books/mock-book-2016-08-22') self.assertContains(response, '<title>Mock Book</title>') def test_can_search_book_using_category_and_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "Mock category", }) self.assertContains(response, 'Mock book') def test_can_search_book_using_only_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "", }) self.assertContains(response, 'Mock book') def test_can_search_using_only_category(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "", "category": "Mock Category", }) self.assertContains(response, 'Mock book')
Test views return response for searched query
Test views return response for searched query
Python
mit
andela-osule/bookworm,andela-osule/bookworm
Test views return response for searched query
from django.test import TestCase, Client from ..models import Category, Book class CompassTest(TestCase): @classmethod def setUpClass(cls): cls.client = Client() super(CompassTest, cls).setUpClass() def test_can_view_search_page(self): response = self.client.get('/') self.assertContains(response, '<input name="title" type="text"/>') def test_can_view_categories_page(self): response = self.client.get('/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_books_page(self): response = self.client.get('/categories/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_category_page(self): Category.create(title="Mock Category") response = self.client.get('/categories/mock-category-2016-08-22') self.assertContains(response, '<title>Mock Category</title>') def test_can_view_book_page(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.get( '/categories/mock-category-2016-08-22/books/mock-book-2016-08-22') self.assertContains(response, '<title>Mock Book</title>') def test_can_search_book_using_category_and_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "Mock category", }) self.assertContains(response, 'Mock book') def test_can_search_book_using_only_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "", }) self.assertContains(response, 'Mock book') def test_can_search_using_only_category(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "", "category": "Mock Category", }) self.assertContains(response, 'Mock book')
<commit_before><commit_msg>Test views return response for searched query<commit_after>
from django.test import TestCase, Client from ..models import Category, Book class CompassTest(TestCase): @classmethod def setUpClass(cls): cls.client = Client() super(CompassTest, cls).setUpClass() def test_can_view_search_page(self): response = self.client.get('/') self.assertContains(response, '<input name="title" type="text"/>') def test_can_view_categories_page(self): response = self.client.get('/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_books_page(self): response = self.client.get('/categories/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_category_page(self): Category.create(title="Mock Category") response = self.client.get('/categories/mock-category-2016-08-22') self.assertContains(response, '<title>Mock Category</title>') def test_can_view_book_page(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.get( '/categories/mock-category-2016-08-22/books/mock-book-2016-08-22') self.assertContains(response, '<title>Mock Book</title>') def test_can_search_book_using_category_and_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "Mock category", }) self.assertContains(response, 'Mock book') def test_can_search_book_using_only_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "", }) self.assertContains(response, 'Mock book') def test_can_search_using_only_category(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "", "category": "Mock Category", }) self.assertContains(response, 'Mock book')
Test views return response for searched queryfrom django.test import TestCase, Client from ..models import Category, Book class CompassTest(TestCase): @classmethod def setUpClass(cls): cls.client = Client() super(CompassTest, cls).setUpClass() def test_can_view_search_page(self): response = self.client.get('/') self.assertContains(response, '<input name="title" type="text"/>') def test_can_view_categories_page(self): response = self.client.get('/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_books_page(self): response = self.client.get('/categories/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_category_page(self): Category.create(title="Mock Category") response = self.client.get('/categories/mock-category-2016-08-22') self.assertContains(response, '<title>Mock Category</title>') def test_can_view_book_page(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.get( '/categories/mock-category-2016-08-22/books/mock-book-2016-08-22') self.assertContains(response, '<title>Mock Book</title>') def test_can_search_book_using_category_and_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "Mock category", }) self.assertContains(response, 'Mock book') def test_can_search_book_using_only_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "", }) self.assertContains(response, 'Mock book') def test_can_search_using_only_category(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "", "category": "Mock Category", }) self.assertContains(response, 'Mock book')
<commit_before><commit_msg>Test views return response for searched query<commit_after>from django.test import TestCase, Client from ..models import Category, Book class CompassTest(TestCase): @classmethod def setUpClass(cls): cls.client = Client() super(CompassTest, cls).setUpClass() def test_can_view_search_page(self): response = self.client.get('/') self.assertContains(response, '<input name="title" type="text"/>') def test_can_view_categories_page(self): response = self.client.get('/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_books_page(self): response = self.client.get('/categories/') self.assertContains(response, '<title>Book Categories</title>') def test_can_view_category_page(self): Category.create(title="Mock Category") response = self.client.get('/categories/mock-category-2016-08-22') self.assertContains(response, '<title>Mock Category</title>') def test_can_view_book_page(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.get( '/categories/mock-category-2016-08-22/books/mock-book-2016-08-22') self.assertContains(response, '<title>Mock Book</title>') def test_can_search_book_using_category_and_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "Mock category", }) self.assertContains(response, 'Mock book') def test_can_search_book_using_only_title(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "Mock book", "category": "", }) self.assertContains(response, 'Mock book') def test_can_search_using_only_category(self): category = Category.create(title="Mock Category") Book.create(title="Mock book", category=category) response = self.client.post('/search', { "title": "", "category": "Mock Category", }) self.assertContains(response, 'Mock book')
e6bfcede8cb01fa38a5401315422a13d28983182
tests/test_agent.py
tests/test_agent.py
import json import pytest import zmq from zephyrus.addresses import Participants from zephyrus.agent import Agent from zephyrus.message import Message from zephyrus.tester import TesterMessenger as ZTesterMessenger @pytest.fixture def DummyAgent(): class Dummy(Agent): def act(self, perceived): return Message('agent', 'RESULT', perceived[::-1]) def perceive(self, perceived_data): return super().perceive(perceived_data) def mainloop(self): msg = Message.from_string(self.socket_receive.recv_string()) action = self.perceive(msg.content) self.socket_send.send_string(str(action)) return Dummy @pytest.fixture def address_config_file(tmpdir_factory): path = tmpdir_factory.mktemp('config').join('addresses.json') data = { "simulation": "tcp://127.0.0.1:7000", "strategy": "tcp://127.0.0.1:5000", "tester": "tcp://127.0.0.1:6600", "tester_par": "tcp://127.0.0.1:6601", "tester_est": "tcp://127.0.0.1:6605", "monitor": "tcp://127.0.0.1:6500", "environment": "tcp://127.0.0.1:6000", "agent": "tcp://127.0.0.1:6001" } json.dump(data, open(str(path), 'w')) return path def test_agent_hello(DummyAgent, address_config_file): ag = DummyAgent(1, str(address_config_file)) ag.start() messenger = ZTesterMessenger('tester') participants = Participants(str(address_config_file)) ctx = zmq.Context() ssend = ctx.socket(zmq.PUSH) ssend.connect(participants.address('agent')) srecv = ctx.socket(zmq.PULL) srecv.bind(participants.address('monitor')) ssend.send_string(str(messenger.build_start_message())) content = list(range(10)) ssend.send_string(json.dumps({'sender': 'oi', 'type': 'bumba', 'content': content})) msg = Message.from_json(srecv.recv_json()) assert msg.sender == 'agent' assert msg.type == 'RESULT' assert msg.content == content[::-1] ssend.send_string(str(messenger.build_stop_message())) ag.join()
Add tests to agent module. Just sunny day case for now
Add tests to agent module. Just sunny day case for now
Python
mit
wairton/zephyrus-mas
Add tests to agent module. Just sunny day case for now
import json import pytest import zmq from zephyrus.addresses import Participants from zephyrus.agent import Agent from zephyrus.message import Message from zephyrus.tester import TesterMessenger as ZTesterMessenger @pytest.fixture def DummyAgent(): class Dummy(Agent): def act(self, perceived): return Message('agent', 'RESULT', perceived[::-1]) def perceive(self, perceived_data): return super().perceive(perceived_data) def mainloop(self): msg = Message.from_string(self.socket_receive.recv_string()) action = self.perceive(msg.content) self.socket_send.send_string(str(action)) return Dummy @pytest.fixture def address_config_file(tmpdir_factory): path = tmpdir_factory.mktemp('config').join('addresses.json') data = { "simulation": "tcp://127.0.0.1:7000", "strategy": "tcp://127.0.0.1:5000", "tester": "tcp://127.0.0.1:6600", "tester_par": "tcp://127.0.0.1:6601", "tester_est": "tcp://127.0.0.1:6605", "monitor": "tcp://127.0.0.1:6500", "environment": "tcp://127.0.0.1:6000", "agent": "tcp://127.0.0.1:6001" } json.dump(data, open(str(path), 'w')) return path def test_agent_hello(DummyAgent, address_config_file): ag = DummyAgent(1, str(address_config_file)) ag.start() messenger = ZTesterMessenger('tester') participants = Participants(str(address_config_file)) ctx = zmq.Context() ssend = ctx.socket(zmq.PUSH) ssend.connect(participants.address('agent')) srecv = ctx.socket(zmq.PULL) srecv.bind(participants.address('monitor')) ssend.send_string(str(messenger.build_start_message())) content = list(range(10)) ssend.send_string(json.dumps({'sender': 'oi', 'type': 'bumba', 'content': content})) msg = Message.from_json(srecv.recv_json()) assert msg.sender == 'agent' assert msg.type == 'RESULT' assert msg.content == content[::-1] ssend.send_string(str(messenger.build_stop_message())) ag.join()
<commit_before><commit_msg>Add tests to agent module. Just sunny day case for now<commit_after>
import json import pytest import zmq from zephyrus.addresses import Participants from zephyrus.agent import Agent from zephyrus.message import Message from zephyrus.tester import TesterMessenger as ZTesterMessenger @pytest.fixture def DummyAgent(): class Dummy(Agent): def act(self, perceived): return Message('agent', 'RESULT', perceived[::-1]) def perceive(self, perceived_data): return super().perceive(perceived_data) def mainloop(self): msg = Message.from_string(self.socket_receive.recv_string()) action = self.perceive(msg.content) self.socket_send.send_string(str(action)) return Dummy @pytest.fixture def address_config_file(tmpdir_factory): path = tmpdir_factory.mktemp('config').join('addresses.json') data = { "simulation": "tcp://127.0.0.1:7000", "strategy": "tcp://127.0.0.1:5000", "tester": "tcp://127.0.0.1:6600", "tester_par": "tcp://127.0.0.1:6601", "tester_est": "tcp://127.0.0.1:6605", "monitor": "tcp://127.0.0.1:6500", "environment": "tcp://127.0.0.1:6000", "agent": "tcp://127.0.0.1:6001" } json.dump(data, open(str(path), 'w')) return path def test_agent_hello(DummyAgent, address_config_file): ag = DummyAgent(1, str(address_config_file)) ag.start() messenger = ZTesterMessenger('tester') participants = Participants(str(address_config_file)) ctx = zmq.Context() ssend = ctx.socket(zmq.PUSH) ssend.connect(participants.address('agent')) srecv = ctx.socket(zmq.PULL) srecv.bind(participants.address('monitor')) ssend.send_string(str(messenger.build_start_message())) content = list(range(10)) ssend.send_string(json.dumps({'sender': 'oi', 'type': 'bumba', 'content': content})) msg = Message.from_json(srecv.recv_json()) assert msg.sender == 'agent' assert msg.type == 'RESULT' assert msg.content == content[::-1] ssend.send_string(str(messenger.build_stop_message())) ag.join()
Add tests to agent module. Just sunny day case for nowimport json import pytest import zmq from zephyrus.addresses import Participants from zephyrus.agent import Agent from zephyrus.message import Message from zephyrus.tester import TesterMessenger as ZTesterMessenger @pytest.fixture def DummyAgent(): class Dummy(Agent): def act(self, perceived): return Message('agent', 'RESULT', perceived[::-1]) def perceive(self, perceived_data): return super().perceive(perceived_data) def mainloop(self): msg = Message.from_string(self.socket_receive.recv_string()) action = self.perceive(msg.content) self.socket_send.send_string(str(action)) return Dummy @pytest.fixture def address_config_file(tmpdir_factory): path = tmpdir_factory.mktemp('config').join('addresses.json') data = { "simulation": "tcp://127.0.0.1:7000", "strategy": "tcp://127.0.0.1:5000", "tester": "tcp://127.0.0.1:6600", "tester_par": "tcp://127.0.0.1:6601", "tester_est": "tcp://127.0.0.1:6605", "monitor": "tcp://127.0.0.1:6500", "environment": "tcp://127.0.0.1:6000", "agent": "tcp://127.0.0.1:6001" } json.dump(data, open(str(path), 'w')) return path def test_agent_hello(DummyAgent, address_config_file): ag = DummyAgent(1, str(address_config_file)) ag.start() messenger = ZTesterMessenger('tester') participants = Participants(str(address_config_file)) ctx = zmq.Context() ssend = ctx.socket(zmq.PUSH) ssend.connect(participants.address('agent')) srecv = ctx.socket(zmq.PULL) srecv.bind(participants.address('monitor')) ssend.send_string(str(messenger.build_start_message())) content = list(range(10)) ssend.send_string(json.dumps({'sender': 'oi', 'type': 'bumba', 'content': content})) msg = Message.from_json(srecv.recv_json()) assert msg.sender == 'agent' assert msg.type == 'RESULT' assert msg.content == content[::-1] ssend.send_string(str(messenger.build_stop_message())) ag.join()
<commit_before><commit_msg>Add tests to agent module. Just sunny day case for now<commit_after>import json import pytest import zmq from zephyrus.addresses import Participants from zephyrus.agent import Agent from zephyrus.message import Message from zephyrus.tester import TesterMessenger as ZTesterMessenger @pytest.fixture def DummyAgent(): class Dummy(Agent): def act(self, perceived): return Message('agent', 'RESULT', perceived[::-1]) def perceive(self, perceived_data): return super().perceive(perceived_data) def mainloop(self): msg = Message.from_string(self.socket_receive.recv_string()) action = self.perceive(msg.content) self.socket_send.send_string(str(action)) return Dummy @pytest.fixture def address_config_file(tmpdir_factory): path = tmpdir_factory.mktemp('config').join('addresses.json') data = { "simulation": "tcp://127.0.0.1:7000", "strategy": "tcp://127.0.0.1:5000", "tester": "tcp://127.0.0.1:6600", "tester_par": "tcp://127.0.0.1:6601", "tester_est": "tcp://127.0.0.1:6605", "monitor": "tcp://127.0.0.1:6500", "environment": "tcp://127.0.0.1:6000", "agent": "tcp://127.0.0.1:6001" } json.dump(data, open(str(path), 'w')) return path def test_agent_hello(DummyAgent, address_config_file): ag = DummyAgent(1, str(address_config_file)) ag.start() messenger = ZTesterMessenger('tester') participants = Participants(str(address_config_file)) ctx = zmq.Context() ssend = ctx.socket(zmq.PUSH) ssend.connect(participants.address('agent')) srecv = ctx.socket(zmq.PULL) srecv.bind(participants.address('monitor')) ssend.send_string(str(messenger.build_start_message())) content = list(range(10)) ssend.send_string(json.dumps({'sender': 'oi', 'type': 'bumba', 'content': content})) msg = Message.from_json(srecv.recv_json()) assert msg.sender == 'agent' assert msg.type == 'RESULT' assert msg.content == content[::-1] ssend.send_string(str(messenger.build_stop_message())) ag.join()
797599802c4702d4a4452aafa276ceedd829e27f
tests/test_tools.py
tests/test_tools.py
from windpowerlib.tools import smallest_difference import collections class TestTools: @classmethod def setup_class(self): self.return_tuple = collections.namedtuple('selected_values', ['closest_value', 'corresp_value']) def test_smallest_difference(self): # value_1 closer to comparative value expected_output = self.return_tuple(30, 5.0) assert smallest_difference(30, 10, 100, 5.0, 6.0) == expected_output # value_1 = comparative value expected_output = self.return_tuple(100, 5.0) assert smallest_difference(100, 10, 100, 5.0, 6.0) == expected_output # value_2 closer to comparative value expected_output = self.return_tuple(30, 6.0) assert smallest_difference(10, 30, 100, 5.0, 6.0) == expected_output # value_2 = comparative value expected_output = self.return_tuple(100, 6.0) assert smallest_difference(10, 100, 100, 5.0, 6.0) == expected_output # value_2 is None expected_output = self.return_tuple(10, 5.0) assert smallest_difference(10, None, 100, 5.0, 6.0) == expected_output
Add test module for tools module
Add test module for tools module
Python
mit
wind-python/windpowerlib
Add test module for tools module
from windpowerlib.tools import smallest_difference import collections class TestTools: @classmethod def setup_class(self): self.return_tuple = collections.namedtuple('selected_values', ['closest_value', 'corresp_value']) def test_smallest_difference(self): # value_1 closer to comparative value expected_output = self.return_tuple(30, 5.0) assert smallest_difference(30, 10, 100, 5.0, 6.0) == expected_output # value_1 = comparative value expected_output = self.return_tuple(100, 5.0) assert smallest_difference(100, 10, 100, 5.0, 6.0) == expected_output # value_2 closer to comparative value expected_output = self.return_tuple(30, 6.0) assert smallest_difference(10, 30, 100, 5.0, 6.0) == expected_output # value_2 = comparative value expected_output = self.return_tuple(100, 6.0) assert smallest_difference(10, 100, 100, 5.0, 6.0) == expected_output # value_2 is None expected_output = self.return_tuple(10, 5.0) assert smallest_difference(10, None, 100, 5.0, 6.0) == expected_output
<commit_before><commit_msg>Add test module for tools module<commit_after>
from windpowerlib.tools import smallest_difference import collections class TestTools: @classmethod def setup_class(self): self.return_tuple = collections.namedtuple('selected_values', ['closest_value', 'corresp_value']) def test_smallest_difference(self): # value_1 closer to comparative value expected_output = self.return_tuple(30, 5.0) assert smallest_difference(30, 10, 100, 5.0, 6.0) == expected_output # value_1 = comparative value expected_output = self.return_tuple(100, 5.0) assert smallest_difference(100, 10, 100, 5.0, 6.0) == expected_output # value_2 closer to comparative value expected_output = self.return_tuple(30, 6.0) assert smallest_difference(10, 30, 100, 5.0, 6.0) == expected_output # value_2 = comparative value expected_output = self.return_tuple(100, 6.0) assert smallest_difference(10, 100, 100, 5.0, 6.0) == expected_output # value_2 is None expected_output = self.return_tuple(10, 5.0) assert smallest_difference(10, None, 100, 5.0, 6.0) == expected_output
Add test module for tools modulefrom windpowerlib.tools import smallest_difference import collections class TestTools: @classmethod def setup_class(self): self.return_tuple = collections.namedtuple('selected_values', ['closest_value', 'corresp_value']) def test_smallest_difference(self): # value_1 closer to comparative value expected_output = self.return_tuple(30, 5.0) assert smallest_difference(30, 10, 100, 5.0, 6.0) == expected_output # value_1 = comparative value expected_output = self.return_tuple(100, 5.0) assert smallest_difference(100, 10, 100, 5.0, 6.0) == expected_output # value_2 closer to comparative value expected_output = self.return_tuple(30, 6.0) assert smallest_difference(10, 30, 100, 5.0, 6.0) == expected_output # value_2 = comparative value expected_output = self.return_tuple(100, 6.0) assert smallest_difference(10, 100, 100, 5.0, 6.0) == expected_output # value_2 is None expected_output = self.return_tuple(10, 5.0) assert smallest_difference(10, None, 100, 5.0, 6.0) == expected_output
<commit_before><commit_msg>Add test module for tools module<commit_after>from windpowerlib.tools import smallest_difference import collections class TestTools: @classmethod def setup_class(self): self.return_tuple = collections.namedtuple('selected_values', ['closest_value', 'corresp_value']) def test_smallest_difference(self): # value_1 closer to comparative value expected_output = self.return_tuple(30, 5.0) assert smallest_difference(30, 10, 100, 5.0, 6.0) == expected_output # value_1 = comparative value expected_output = self.return_tuple(100, 5.0) assert smallest_difference(100, 10, 100, 5.0, 6.0) == expected_output # value_2 closer to comparative value expected_output = self.return_tuple(30, 6.0) assert smallest_difference(10, 30, 100, 5.0, 6.0) == expected_output # value_2 = comparative value expected_output = self.return_tuple(100, 6.0) assert smallest_difference(10, 100, 100, 5.0, 6.0) == expected_output # value_2 is None expected_output = self.return_tuple(10, 5.0) assert smallest_difference(10, None, 100, 5.0, 6.0) == expected_output
2d557b68fd9b7e0e900215afeb5185e466907a49
cyhdfs3/tests/test_avro.py
cyhdfs3/tests/test_avro.py
import posixpath import subprocess import numpy as np import pandas as pd import pandas.util.testing as pdt import cyavro from utils import * avroschema = """ {"type": "record", "name": "from_bytes_test", "fields":[ {"name": "id", "type": "int"}, {"name": "name", "type": "string"} ] } """ def test_avro_move_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Move file to hdfs subprocess.call("hadoop fs -put {} {}".format(local_path, hdfs_path), shell=True) # Read avro and compare data with hdfs.open(hdfs_path, 'r') as f: reader = f.read_avro() reader.init_buffers() df_read = pd.DataFrame(reader.read_chunk()) pdt.assert_frame_equal(df_write, df_read) reader.close() def test_avro_write_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Read avro file bytes from localfile and write them to hdfs data = b'' with open(local_path, 'r') as f: data = f.read() with hdfs.open(hdfs_path, 'w') as f: f.write(data) # Read avro file bytes from hdfs and compare with hdfs.open(hdfs_path, 'r') as f: read_data = f.read() assert len(data) == len(read_data) assert data == read_data
Add test for reading and writing avro bytes to hdfs
Add test for reading and writing avro bytes to hdfs
Python
apache-2.0
danielfrg/libhdfs3.py,danielfrg/libhdfs3.py,danielfrg/cyhdfs3,danielfrg/cyhdfs3
Add test for reading and writing avro bytes to hdfs
import posixpath import subprocess import numpy as np import pandas as pd import pandas.util.testing as pdt import cyavro from utils import * avroschema = """ {"type": "record", "name": "from_bytes_test", "fields":[ {"name": "id", "type": "int"}, {"name": "name", "type": "string"} ] } """ def test_avro_move_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Move file to hdfs subprocess.call("hadoop fs -put {} {}".format(local_path, hdfs_path), shell=True) # Read avro and compare data with hdfs.open(hdfs_path, 'r') as f: reader = f.read_avro() reader.init_buffers() df_read = pd.DataFrame(reader.read_chunk()) pdt.assert_frame_equal(df_write, df_read) reader.close() def test_avro_write_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Read avro file bytes from localfile and write them to hdfs data = b'' with open(local_path, 'r') as f: data = f.read() with hdfs.open(hdfs_path, 'w') as f: f.write(data) # Read avro file bytes from hdfs and compare with hdfs.open(hdfs_path, 'r') as f: read_data = f.read() assert len(data) == len(read_data) assert data == read_data
<commit_before><commit_msg>Add test for reading and writing avro bytes to hdfs<commit_after>
import posixpath import subprocess import numpy as np import pandas as pd import pandas.util.testing as pdt import cyavro from utils import * avroschema = """ {"type": "record", "name": "from_bytes_test", "fields":[ {"name": "id", "type": "int"}, {"name": "name", "type": "string"} ] } """ def test_avro_move_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Move file to hdfs subprocess.call("hadoop fs -put {} {}".format(local_path, hdfs_path), shell=True) # Read avro and compare data with hdfs.open(hdfs_path, 'r') as f: reader = f.read_avro() reader.init_buffers() df_read = pd.DataFrame(reader.read_chunk()) pdt.assert_frame_equal(df_write, df_read) reader.close() def test_avro_write_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Read avro file bytes from localfile and write them to hdfs data = b'' with open(local_path, 'r') as f: data = f.read() with hdfs.open(hdfs_path, 'w') as f: f.write(data) # Read avro file bytes from hdfs and compare with hdfs.open(hdfs_path, 'r') as f: read_data = f.read() assert len(data) == len(read_data) assert data == read_data
Add test for reading and writing avro bytes to hdfsimport posixpath import subprocess import numpy as np import pandas as pd import pandas.util.testing as pdt import cyavro from utils import * avroschema = """ {"type": "record", "name": "from_bytes_test", "fields":[ {"name": "id", "type": "int"}, {"name": "name", "type": "string"} ] } """ def test_avro_move_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Move file to hdfs subprocess.call("hadoop fs -put {} {}".format(local_path, hdfs_path), shell=True) # Read avro and compare data with hdfs.open(hdfs_path, 'r') as f: reader = f.read_avro() reader.init_buffers() df_read = pd.DataFrame(reader.read_chunk()) pdt.assert_frame_equal(df_write, df_read) reader.close() def test_avro_write_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Read avro file bytes from localfile and write them to hdfs data = b'' with open(local_path, 'r') as f: data = f.read() with hdfs.open(hdfs_path, 'w') as f: f.write(data) # Read avro file bytes from hdfs and compare with hdfs.open(hdfs_path, 'r') as f: read_data = f.read() assert len(data) == len(read_data) assert data == read_data
<commit_before><commit_msg>Add test for reading and writing avro bytes to hdfs<commit_after>import posixpath import subprocess import numpy as np import pandas as pd import pandas.util.testing as pdt import cyavro from utils import * avroschema = """ {"type": "record", "name": "from_bytes_test", "fields":[ {"name": "id", "type": "int"}, {"name": "name", "type": "string"} ] } """ def test_avro_move_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Move file to hdfs subprocess.call("hadoop fs -put {} {}".format(local_path, hdfs_path), shell=True) # Read avro and compare data with hdfs.open(hdfs_path, 'r') as f: reader = f.read_avro() reader.init_buffers() df_read = pd.DataFrame(reader.read_chunk()) pdt.assert_frame_equal(df_write, df_read) reader.close() def test_avro_write_read(hdfs, tmpdir, request): testname = request.node.name hdfs_path = posixpath.join(TEST_DIR, testname) local_path = tmpdir.join(testname + '.avro').realpath().strpath # Create an avrofile writer = cyavro.AvroWriter(local_path, 'null', avroschema) ids = np.random.randint(100, size=10) ids = np.arange(10) names = pdt.rands_array(10, 10) df_write = pd.DataFrame({"id": ids, "name": names}) df_write = cyavro.prepare_pandas_df_for_write(df_write, avroschema, copy=False) writer.write(df_write) writer.close() # Read avro file bytes from localfile and write them to hdfs data = b'' with open(local_path, 'r') as f: data = f.read() with hdfs.open(hdfs_path, 'w') as f: f.write(data) # Read avro file bytes from hdfs and compare with hdfs.open(hdfs_path, 'r') as f: read_data = f.read() assert len(data) == len(read_data) assert data == read_data
5b619029441261659bf0f326f784e5322a952096
coursera-crypto-i/w4/pbdkf2_hmac_sha256.py
coursera-crypto-i/w4/pbdkf2_hmac_sha256.py
import binascii, hashlib, hmac, os, time def scramble_with_kdf(passw, salt, iters): return hashlib.pbkdf2_hmac('sha256', passw, salt, iters, 32) def scramble_with_sha256(passw, salt, iters): passw = salt + passw for i in range(iters): passw = hashlib.sha256(passw).digest() return passw def scramble_with_hmac(passw, salt, iters): hmac_obj = hmac.new(salt, passw, hashlib.sha256) for i in range(iters): hmac_obj.update(passw) return hmac_obj.digest() iters = 10000000 passw = "hello world".encode() salt = os.urandom(256) start1 = time.time() print(binascii.hexlify(scramble_with_kdf(passw, salt, iters))) end1 = time.time() start2 = time.time() print(binascii.hexlify(scramble_with_sha256(passw, salt, iters))) end2 = time.time() start3 = time.time() print(binascii.hexlify(scramble_with_hmac(passw, salt, iters))) end3 = time.time() print("scramble_with_kdf:\t{}".format(end1 - start1)) print("scramble_with_sha256:\t{}".format(end2 - start2)) print("scramble_with_hmac:\t{}".format(end3 - start3))
Add timing test of password scrambling with pbkdf2, sha-256 and hmac(sha-256)
Add timing test of password scrambling with pbkdf2, sha-256 and hmac(sha-256)
Python
mit
reider-roque/crypto-challenges
Add timing test of password scrambling with pbkdf2, sha-256 and hmac(sha-256)
import binascii, hashlib, hmac, os, time def scramble_with_kdf(passw, salt, iters): return hashlib.pbkdf2_hmac('sha256', passw, salt, iters, 32) def scramble_with_sha256(passw, salt, iters): passw = salt + passw for i in range(iters): passw = hashlib.sha256(passw).digest() return passw def scramble_with_hmac(passw, salt, iters): hmac_obj = hmac.new(salt, passw, hashlib.sha256) for i in range(iters): hmac_obj.update(passw) return hmac_obj.digest() iters = 10000000 passw = "hello world".encode() salt = os.urandom(256) start1 = time.time() print(binascii.hexlify(scramble_with_kdf(passw, salt, iters))) end1 = time.time() start2 = time.time() print(binascii.hexlify(scramble_with_sha256(passw, salt, iters))) end2 = time.time() start3 = time.time() print(binascii.hexlify(scramble_with_hmac(passw, salt, iters))) end3 = time.time() print("scramble_with_kdf:\t{}".format(end1 - start1)) print("scramble_with_sha256:\t{}".format(end2 - start2)) print("scramble_with_hmac:\t{}".format(end3 - start3))
<commit_before><commit_msg>Add timing test of password scrambling with pbkdf2, sha-256 and hmac(sha-256)<commit_after>
import binascii, hashlib, hmac, os, time def scramble_with_kdf(passw, salt, iters): return hashlib.pbkdf2_hmac('sha256', passw, salt, iters, 32) def scramble_with_sha256(passw, salt, iters): passw = salt + passw for i in range(iters): passw = hashlib.sha256(passw).digest() return passw def scramble_with_hmac(passw, salt, iters): hmac_obj = hmac.new(salt, passw, hashlib.sha256) for i in range(iters): hmac_obj.update(passw) return hmac_obj.digest() iters = 10000000 passw = "hello world".encode() salt = os.urandom(256) start1 = time.time() print(binascii.hexlify(scramble_with_kdf(passw, salt, iters))) end1 = time.time() start2 = time.time() print(binascii.hexlify(scramble_with_sha256(passw, salt, iters))) end2 = time.time() start3 = time.time() print(binascii.hexlify(scramble_with_hmac(passw, salt, iters))) end3 = time.time() print("scramble_with_kdf:\t{}".format(end1 - start1)) print("scramble_with_sha256:\t{}".format(end2 - start2)) print("scramble_with_hmac:\t{}".format(end3 - start3))
Add timing test of password scrambling with pbkdf2, sha-256 and hmac(sha-256)import binascii, hashlib, hmac, os, time def scramble_with_kdf(passw, salt, iters): return hashlib.pbkdf2_hmac('sha256', passw, salt, iters, 32) def scramble_with_sha256(passw, salt, iters): passw = salt + passw for i in range(iters): passw = hashlib.sha256(passw).digest() return passw def scramble_with_hmac(passw, salt, iters): hmac_obj = hmac.new(salt, passw, hashlib.sha256) for i in range(iters): hmac_obj.update(passw) return hmac_obj.digest() iters = 10000000 passw = "hello world".encode() salt = os.urandom(256) start1 = time.time() print(binascii.hexlify(scramble_with_kdf(passw, salt, iters))) end1 = time.time() start2 = time.time() print(binascii.hexlify(scramble_with_sha256(passw, salt, iters))) end2 = time.time() start3 = time.time() print(binascii.hexlify(scramble_with_hmac(passw, salt, iters))) end3 = time.time() print("scramble_with_kdf:\t{}".format(end1 - start1)) print("scramble_with_sha256:\t{}".format(end2 - start2)) print("scramble_with_hmac:\t{}".format(end3 - start3))
<commit_before><commit_msg>Add timing test of password scrambling with pbkdf2, sha-256 and hmac(sha-256)<commit_after>import binascii, hashlib, hmac, os, time def scramble_with_kdf(passw, salt, iters): return hashlib.pbkdf2_hmac('sha256', passw, salt, iters, 32) def scramble_with_sha256(passw, salt, iters): passw = salt + passw for i in range(iters): passw = hashlib.sha256(passw).digest() return passw def scramble_with_hmac(passw, salt, iters): hmac_obj = hmac.new(salt, passw, hashlib.sha256) for i in range(iters): hmac_obj.update(passw) return hmac_obj.digest() iters = 10000000 passw = "hello world".encode() salt = os.urandom(256) start1 = time.time() print(binascii.hexlify(scramble_with_kdf(passw, salt, iters))) end1 = time.time() start2 = time.time() print(binascii.hexlify(scramble_with_sha256(passw, salt, iters))) end2 = time.time() start3 = time.time() print(binascii.hexlify(scramble_with_hmac(passw, salt, iters))) end3 = time.time() print("scramble_with_kdf:\t{}".format(end1 - start1)) print("scramble_with_sha256:\t{}".format(end2 - start2)) print("scramble_with_hmac:\t{}".format(end3 - start3))
a8f152e9a6a2db98305ee84dfb5b3be3cee91a84
us_ignite/apps/management/commands/app_import.py
us_ignite/apps/management/commands/app_import.py
import requests from django.core.management.base import BaseCommand, CommandError from us_ignite.apps import importer class Command(BaseCommand): help = 'Import the given JSON file.' def handle(self, url, *args, **options): response = requests.get(url) if not response.status_code == 200: raise CommandError('Issue getting the file %s', response.content) result = importer.digest_payload(response.json()) print u'%s apps have been imported.' % len(result) print u'Done!'
Implement importer as a management command.
Implement importer as a management command. Heroku has limitations on the ammount of time that a request should take. By using a management command the application can workaround the time it takes to perform the import.
Python
bsd-3-clause
us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite
Implement importer as a management command. Heroku has limitations on the ammount of time that a request should take. By using a management command the application can workaround the time it takes to perform the import.
import requests from django.core.management.base import BaseCommand, CommandError from us_ignite.apps import importer class Command(BaseCommand): help = 'Import the given JSON file.' def handle(self, url, *args, **options): response = requests.get(url) if not response.status_code == 200: raise CommandError('Issue getting the file %s', response.content) result = importer.digest_payload(response.json()) print u'%s apps have been imported.' % len(result) print u'Done!'
<commit_before><commit_msg>Implement importer as a management command. Heroku has limitations on the ammount of time that a request should take. By using a management command the application can workaround the time it takes to perform the import.<commit_after>
import requests from django.core.management.base import BaseCommand, CommandError from us_ignite.apps import importer class Command(BaseCommand): help = 'Import the given JSON file.' def handle(self, url, *args, **options): response = requests.get(url) if not response.status_code == 200: raise CommandError('Issue getting the file %s', response.content) result = importer.digest_payload(response.json()) print u'%s apps have been imported.' % len(result) print u'Done!'
Implement importer as a management command. Heroku has limitations on the ammount of time that a request should take. By using a management command the application can workaround the time it takes to perform the import.import requests from django.core.management.base import BaseCommand, CommandError from us_ignite.apps import importer class Command(BaseCommand): help = 'Import the given JSON file.' def handle(self, url, *args, **options): response = requests.get(url) if not response.status_code == 200: raise CommandError('Issue getting the file %s', response.content) result = importer.digest_payload(response.json()) print u'%s apps have been imported.' % len(result) print u'Done!'
<commit_before><commit_msg>Implement importer as a management command. Heroku has limitations on the ammount of time that a request should take. By using a management command the application can workaround the time it takes to perform the import.<commit_after>import requests from django.core.management.base import BaseCommand, CommandError from us_ignite.apps import importer class Command(BaseCommand): help = 'Import the given JSON file.' def handle(self, url, *args, **options): response = requests.get(url) if not response.status_code == 200: raise CommandError('Issue getting the file %s', response.content) result = importer.digest_payload(response.json()) print u'%s apps have been imported.' % len(result) print u'Done!'
3ed70bcc0c699744fd4dc3259ca2f0b6ee7e5d6a
swampdragon/pubsub_providers/redis_sub_provider.py
swampdragon/pubsub_providers/redis_sub_provider.py
import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break
import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db, get_redis_password class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), password=get_redis_password(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break
Add auth step to baseprovider for Redis connection pools
Add auth step to baseprovider for Redis connection pools
Python
bsd-3-clause
denizs/swampdragon,jonashagstedt/swampdragon,jonashagstedt/swampdragon,denizs/swampdragon,denizs/swampdragon,jonashagstedt/swampdragon
import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break Add auth step to baseprovider for Redis connection pools
import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db, get_redis_password class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), password=get_redis_password(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break
<commit_before>import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break <commit_msg>Add auth step to baseprovider for Redis connection pools<commit_after>
import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db, get_redis_password class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), password=get_redis_password(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break
import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break Add auth step to baseprovider for Redis connection poolsimport json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db, get_redis_password class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), password=get_redis_password(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break
<commit_before>import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break <commit_msg>Add auth step to baseprovider for Redis connection pools<commit_after>import json import tornadoredis.pubsub import tornadoredis from .base_provider import BaseProvider from .redis_settings import get_redis_host, get_redis_port, get_redis_db, get_redis_password class RedisSubProvider(BaseProvider): def __init__(self): self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client( host=get_redis_host(), port=get_redis_port(), password=get_redis_password(), selected_db=get_redis_db() )) def close(self, broadcaster): for channel in self._subscriber.subscribers: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.unsubscribe(channel, broadcaster) def get_channel(self, base_channel, **channel_filter): return self._construct_channel(base_channel, **channel_filter) def subscribe(self, channels, broadcaster): self._subscriber.subscribe(channels, broadcaster) def unsubscribe(self, channels, broadcaster): for channel in channels: if broadcaster in self._subscriber.subscribers[channel]: self._subscriber.subscribers[channel].pop(broadcaster) def publish(self, channel, data): if isinstance(data, dict): data = json.dumps(data) broadcasters = list(self._subscriber.subscribers[channel].keys()) if broadcasters: for bc in broadcasters: if not bc.session.is_closed: bc.broadcast(broadcasters, data) break
0a74878997b2df53b1e482a61a36c6e9f35b248a
examples/uevents.py
examples/uevents.py
import time from examples.common import print_devices import blivet from blivet.events.manager import event_manager from blivet.util import set_up_logging def print_changes(event, changes): print("***", event) for change in changes: print("***", change) print("***") print() set_up_logging(console_logs=["blivet.event"]) b = blivet.Blivet() # create an instance of Blivet b.reset() # detect system storage configuration print_devices(b) event_manager.notify_cb = print_changes event_manager.enable() while True: time.sleep(0.5)
Add an example of event monitoring.
Add an example of event monitoring.
Python
lgpl-2.1
rvykydal/blivet,AdamWill/blivet,jkonecny12/blivet,rhinstaller/blivet,jkonecny12/blivet,rhinstaller/blivet,rvykydal/blivet,AdamWill/blivet,vpodzime/blivet,vojtechtrefny/blivet,vpodzime/blivet,vojtechtrefny/blivet
Add an example of event monitoring.
import time from examples.common import print_devices import blivet from blivet.events.manager import event_manager from blivet.util import set_up_logging def print_changes(event, changes): print("***", event) for change in changes: print("***", change) print("***") print() set_up_logging(console_logs=["blivet.event"]) b = blivet.Blivet() # create an instance of Blivet b.reset() # detect system storage configuration print_devices(b) event_manager.notify_cb = print_changes event_manager.enable() while True: time.sleep(0.5)
<commit_before><commit_msg>Add an example of event monitoring.<commit_after>
import time from examples.common import print_devices import blivet from blivet.events.manager import event_manager from blivet.util import set_up_logging def print_changes(event, changes): print("***", event) for change in changes: print("***", change) print("***") print() set_up_logging(console_logs=["blivet.event"]) b = blivet.Blivet() # create an instance of Blivet b.reset() # detect system storage configuration print_devices(b) event_manager.notify_cb = print_changes event_manager.enable() while True: time.sleep(0.5)
Add an example of event monitoring.import time from examples.common import print_devices import blivet from blivet.events.manager import event_manager from blivet.util import set_up_logging def print_changes(event, changes): print("***", event) for change in changes: print("***", change) print("***") print() set_up_logging(console_logs=["blivet.event"]) b = blivet.Blivet() # create an instance of Blivet b.reset() # detect system storage configuration print_devices(b) event_manager.notify_cb = print_changes event_manager.enable() while True: time.sleep(0.5)
<commit_before><commit_msg>Add an example of event monitoring.<commit_after>import time from examples.common import print_devices import blivet from blivet.events.manager import event_manager from blivet.util import set_up_logging def print_changes(event, changes): print("***", event) for change in changes: print("***", change) print("***") print() set_up_logging(console_logs=["blivet.event"]) b = blivet.Blivet() # create an instance of Blivet b.reset() # detect system storage configuration print_devices(b) event_manager.notify_cb = print_changes event_manager.enable() while True: time.sleep(0.5)
49b916ce00d919a73ddc8923f62e0b4c6115b608
src/coranking_test.py
src/coranking_test.py
import unittest import nose.tools from sklearn import manifold, datasets from mia.coranking import trustworthiness, continuity, LCMC, coranking_matrix from mia.utils import * class CorankingTest(unittest.TestCase): def setUp(self): self._high_data, color \ = datasets.samples_generator.make_swiss_roll(n_samples=300, random_state=1) isomap = manifold.Isomap(n_neighbors=12, n_components=2) self._low_data = isomap.fit_transform(self._high_data) def test_coranking_matrix(self): Q = coranking_matrix(self._high_data, self._low_data) n, _ = self._high_data.shape nose.tools.assert_equal(Q.shape, (n-1, n-1)) def test_trustworthiness(self): Q = coranking_matrix(self._high_data, self._low_data) t = trustworthiness(Q, 5) nose.tools.assert_true(isinstance(t, float)) nose.tools.assert_almost_equal(t, 0.895582191) def test_continuity(self): Q = coranking_matrix(self._high_data, self._low_data) c = continuity(Q, 5) nose.tools.assert_true(isinstance(c, float)) nose.tools.assert_almost_equal(c, 0.982385844) c2 = trustworthiness(Q, 5) nose.tools.assert_true(c, c2) def test_LCMC(self): Q = coranking_matrix(self._high_data, self._low_data) l = LCMC(Q, 5) nose.tools.assert_true(isinstance(l, float)) nose.tools.assert_almost_equal(l, 0.474110925)
Add test to source folder.
Add test to source folder.
Python
mit
samueljackson92/coranking
Add test to source folder.
import unittest import nose.tools from sklearn import manifold, datasets from mia.coranking import trustworthiness, continuity, LCMC, coranking_matrix from mia.utils import * class CorankingTest(unittest.TestCase): def setUp(self): self._high_data, color \ = datasets.samples_generator.make_swiss_roll(n_samples=300, random_state=1) isomap = manifold.Isomap(n_neighbors=12, n_components=2) self._low_data = isomap.fit_transform(self._high_data) def test_coranking_matrix(self): Q = coranking_matrix(self._high_data, self._low_data) n, _ = self._high_data.shape nose.tools.assert_equal(Q.shape, (n-1, n-1)) def test_trustworthiness(self): Q = coranking_matrix(self._high_data, self._low_data) t = trustworthiness(Q, 5) nose.tools.assert_true(isinstance(t, float)) nose.tools.assert_almost_equal(t, 0.895582191) def test_continuity(self): Q = coranking_matrix(self._high_data, self._low_data) c = continuity(Q, 5) nose.tools.assert_true(isinstance(c, float)) nose.tools.assert_almost_equal(c, 0.982385844) c2 = trustworthiness(Q, 5) nose.tools.assert_true(c, c2) def test_LCMC(self): Q = coranking_matrix(self._high_data, self._low_data) l = LCMC(Q, 5) nose.tools.assert_true(isinstance(l, float)) nose.tools.assert_almost_equal(l, 0.474110925)
<commit_before><commit_msg>Add test to source folder.<commit_after>
import unittest import nose.tools from sklearn import manifold, datasets from mia.coranking import trustworthiness, continuity, LCMC, coranking_matrix from mia.utils import * class CorankingTest(unittest.TestCase): def setUp(self): self._high_data, color \ = datasets.samples_generator.make_swiss_roll(n_samples=300, random_state=1) isomap = manifold.Isomap(n_neighbors=12, n_components=2) self._low_data = isomap.fit_transform(self._high_data) def test_coranking_matrix(self): Q = coranking_matrix(self._high_data, self._low_data) n, _ = self._high_data.shape nose.tools.assert_equal(Q.shape, (n-1, n-1)) def test_trustworthiness(self): Q = coranking_matrix(self._high_data, self._low_data) t = trustworthiness(Q, 5) nose.tools.assert_true(isinstance(t, float)) nose.tools.assert_almost_equal(t, 0.895582191) def test_continuity(self): Q = coranking_matrix(self._high_data, self._low_data) c = continuity(Q, 5) nose.tools.assert_true(isinstance(c, float)) nose.tools.assert_almost_equal(c, 0.982385844) c2 = trustworthiness(Q, 5) nose.tools.assert_true(c, c2) def test_LCMC(self): Q = coranking_matrix(self._high_data, self._low_data) l = LCMC(Q, 5) nose.tools.assert_true(isinstance(l, float)) nose.tools.assert_almost_equal(l, 0.474110925)
Add test to source folder.import unittest import nose.tools from sklearn import manifold, datasets from mia.coranking import trustworthiness, continuity, LCMC, coranking_matrix from mia.utils import * class CorankingTest(unittest.TestCase): def setUp(self): self._high_data, color \ = datasets.samples_generator.make_swiss_roll(n_samples=300, random_state=1) isomap = manifold.Isomap(n_neighbors=12, n_components=2) self._low_data = isomap.fit_transform(self._high_data) def test_coranking_matrix(self): Q = coranking_matrix(self._high_data, self._low_data) n, _ = self._high_data.shape nose.tools.assert_equal(Q.shape, (n-1, n-1)) def test_trustworthiness(self): Q = coranking_matrix(self._high_data, self._low_data) t = trustworthiness(Q, 5) nose.tools.assert_true(isinstance(t, float)) nose.tools.assert_almost_equal(t, 0.895582191) def test_continuity(self): Q = coranking_matrix(self._high_data, self._low_data) c = continuity(Q, 5) nose.tools.assert_true(isinstance(c, float)) nose.tools.assert_almost_equal(c, 0.982385844) c2 = trustworthiness(Q, 5) nose.tools.assert_true(c, c2) def test_LCMC(self): Q = coranking_matrix(self._high_data, self._low_data) l = LCMC(Q, 5) nose.tools.assert_true(isinstance(l, float)) nose.tools.assert_almost_equal(l, 0.474110925)
<commit_before><commit_msg>Add test to source folder.<commit_after>import unittest import nose.tools from sklearn import manifold, datasets from mia.coranking import trustworthiness, continuity, LCMC, coranking_matrix from mia.utils import * class CorankingTest(unittest.TestCase): def setUp(self): self._high_data, color \ = datasets.samples_generator.make_swiss_roll(n_samples=300, random_state=1) isomap = manifold.Isomap(n_neighbors=12, n_components=2) self._low_data = isomap.fit_transform(self._high_data) def test_coranking_matrix(self): Q = coranking_matrix(self._high_data, self._low_data) n, _ = self._high_data.shape nose.tools.assert_equal(Q.shape, (n-1, n-1)) def test_trustworthiness(self): Q = coranking_matrix(self._high_data, self._low_data) t = trustworthiness(Q, 5) nose.tools.assert_true(isinstance(t, float)) nose.tools.assert_almost_equal(t, 0.895582191) def test_continuity(self): Q = coranking_matrix(self._high_data, self._low_data) c = continuity(Q, 5) nose.tools.assert_true(isinstance(c, float)) nose.tools.assert_almost_equal(c, 0.982385844) c2 = trustworthiness(Q, 5) nose.tools.assert_true(c, c2) def test_LCMC(self): Q = coranking_matrix(self._high_data, self._low_data) l = LCMC(Q, 5) nose.tools.assert_true(isinstance(l, float)) nose.tools.assert_almost_equal(l, 0.474110925)
9c4cb69b60b7d91a5ed07f2871174e276db80071
loadcontent.py
loadcontent.py
from os import listdir, path basedir = '../../website.content/' for app in listdir(basedir): module = __import__(app,{},{},fromlist=['load']) if 'loaddir' in dir(module.load): module.load.loaddir(path.join(basedir, app), clear=True) elif 'loadfile' in dir(module.load): module.load.clear() for item in listdir(path.join(basedir,app)): module.load.loadfile(path.join(basedir,app,item))
Load content from a given directory.
Load content from a given directory. Currently, the directory is hard-coded.
Python
agpl-3.0
luispedro/django-gitcms,luispedro/django-gitcms
Load content from a given directory. Currently, the directory is hard-coded.
from os import listdir, path basedir = '../../website.content/' for app in listdir(basedir): module = __import__(app,{},{},fromlist=['load']) if 'loaddir' in dir(module.load): module.load.loaddir(path.join(basedir, app), clear=True) elif 'loadfile' in dir(module.load): module.load.clear() for item in listdir(path.join(basedir,app)): module.load.loadfile(path.join(basedir,app,item))
<commit_before><commit_msg>Load content from a given directory. Currently, the directory is hard-coded.<commit_after>
from os import listdir, path basedir = '../../website.content/' for app in listdir(basedir): module = __import__(app,{},{},fromlist=['load']) if 'loaddir' in dir(module.load): module.load.loaddir(path.join(basedir, app), clear=True) elif 'loadfile' in dir(module.load): module.load.clear() for item in listdir(path.join(basedir,app)): module.load.loadfile(path.join(basedir,app,item))
Load content from a given directory. Currently, the directory is hard-coded.from os import listdir, path basedir = '../../website.content/' for app in listdir(basedir): module = __import__(app,{},{},fromlist=['load']) if 'loaddir' in dir(module.load): module.load.loaddir(path.join(basedir, app), clear=True) elif 'loadfile' in dir(module.load): module.load.clear() for item in listdir(path.join(basedir,app)): module.load.loadfile(path.join(basedir,app,item))
<commit_before><commit_msg>Load content from a given directory. Currently, the directory is hard-coded.<commit_after>from os import listdir, path basedir = '../../website.content/' for app in listdir(basedir): module = __import__(app,{},{},fromlist=['load']) if 'loaddir' in dir(module.load): module.load.loaddir(path.join(basedir, app), clear=True) elif 'loadfile' in dir(module.load): module.load.clear() for item in listdir(path.join(basedir,app)): module.load.loadfile(path.join(basedir,app,item))
cd0e3ec8359eeec31ad383310c2bab4588dc095a
IcedID_Downloader/ztrak_downloader_strings_ida.py
IcedID_Downloader/ztrak_downloader_strings_ida.py
def gen_key(k): return(((k << 0x1d) | (k >> 3)) & 0xffffffff) #Unpacked of 32a683ac11d966d73fedf4e249573022891ac902086167e4d20b18be28bd2c1d for addr in XrefsTo(0x40233e, flags=0): addr = addr.frm #print(hex(addr)) addr = idc.PrevHead(addr) while GetMnem(addr) != "push": addr = idc.PrevHead(addr) print(hex(addr)) #Get first param pushed which is address of domain data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out) def other_decode(data, key, l): out = "" for i in range(l): key = ror(key, 1) key = ~key key = ror(key,1) key -= 0x120 key = rol(key, 1) key = ~key key -= 0x9101 out += chr(Byte(data+i) ^ (key & 0xff)) return out def decode(data_addr): #data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out)
Add IcedID downloader ida string decoding
Add IcedID downloader ida string decoding
Python
mit
sysopfb/Malware_Scripts
Add IcedID downloader ida string decoding
def gen_key(k): return(((k << 0x1d) | (k >> 3)) & 0xffffffff) #Unpacked of 32a683ac11d966d73fedf4e249573022891ac902086167e4d20b18be28bd2c1d for addr in XrefsTo(0x40233e, flags=0): addr = addr.frm #print(hex(addr)) addr = idc.PrevHead(addr) while GetMnem(addr) != "push": addr = idc.PrevHead(addr) print(hex(addr)) #Get first param pushed which is address of domain data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out) def other_decode(data, key, l): out = "" for i in range(l): key = ror(key, 1) key = ~key key = ror(key,1) key -= 0x120 key = rol(key, 1) key = ~key key -= 0x9101 out += chr(Byte(data+i) ^ (key & 0xff)) return out def decode(data_addr): #data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out)
<commit_before><commit_msg>Add IcedID downloader ida string decoding<commit_after>
def gen_key(k): return(((k << 0x1d) | (k >> 3)) & 0xffffffff) #Unpacked of 32a683ac11d966d73fedf4e249573022891ac902086167e4d20b18be28bd2c1d for addr in XrefsTo(0x40233e, flags=0): addr = addr.frm #print(hex(addr)) addr = idc.PrevHead(addr) while GetMnem(addr) != "push": addr = idc.PrevHead(addr) print(hex(addr)) #Get first param pushed which is address of domain data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out) def other_decode(data, key, l): out = "" for i in range(l): key = ror(key, 1) key = ~key key = ror(key,1) key -= 0x120 key = rol(key, 1) key = ~key key -= 0x9101 out += chr(Byte(data+i) ^ (key & 0xff)) return out def decode(data_addr): #data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out)
Add IcedID downloader ida string decodingdef gen_key(k): return(((k << 0x1d) | (k >> 3)) & 0xffffffff) #Unpacked of 32a683ac11d966d73fedf4e249573022891ac902086167e4d20b18be28bd2c1d for addr in XrefsTo(0x40233e, flags=0): addr = addr.frm #print(hex(addr)) addr = idc.PrevHead(addr) while GetMnem(addr) != "push": addr = idc.PrevHead(addr) print(hex(addr)) #Get first param pushed which is address of domain data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out) def other_decode(data, key, l): out = "" for i in range(l): key = ror(key, 1) key = ~key key = ror(key,1) key -= 0x120 key = rol(key, 1) key = ~key key -= 0x9101 out += chr(Byte(data+i) ^ (key & 0xff)) return out def decode(data_addr): #data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out)
<commit_before><commit_msg>Add IcedID downloader ida string decoding<commit_after>def gen_key(k): return(((k << 0x1d) | (k >> 3)) & 0xffffffff) #Unpacked of 32a683ac11d966d73fedf4e249573022891ac902086167e4d20b18be28bd2c1d for addr in XrefsTo(0x40233e, flags=0): addr = addr.frm #print(hex(addr)) addr = idc.PrevHead(addr) while GetMnem(addr) != "push": addr = idc.PrevHead(addr) print(hex(addr)) #Get first param pushed which is address of domain data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out) def other_decode(data, key, l): out = "" for i in range(l): key = ror(key, 1) key = ~key key = ror(key,1) key -= 0x120 key = rol(key, 1) key = ~key key -= 0x9101 out += chr(Byte(data+i) ^ (key & 0xff)) return out def decode(data_addr): #data_addr = GetOperandValue(addr,0) xork_init = Dword(data_addr) data_addr += 4 length_delta = Word(data_addr) data_addr += 2 length = (xork_init ^ length_delta) & 0xffff out = "" xork = xork_init for i in range(length): xork = gen_key(xork) xork += i out += chr((Byte(data_addr) ^ (xork & 0xFF)) & 0xFF) data_addr += 1 if out[-2:] == '\x00\x00': print(out.decode('utf16')) else: print(out)
c4b83a909eb7149ac3da33b90e912d1275a8dc16
tests/compiler/test_access_compilation.py
tests/compiler/test_access_compilation.py
from tests.compiler import compile_local, A_ID, LST_ID, SELF_ID, VAL1_ID from thinglang.compiler.opcodes import OpcodePushLocal, OpcodePushIndexImmediate, OpcodePushIndex, OpcodePushMember def test_local_list_immediate_index(): assert compile_local('lst[123]') == [OpcodePushLocal(LST_ID), OpcodePushIndexImmediate(123)] def test_local_list_non_immediate_index(): assert compile_local('lst[a]') == [OpcodePushLocal(LST_ID), OpcodePushLocal(A_ID), OpcodePushIndex()] assert compile_local('lst[self.val1]') == [OpcodePushLocal(LST_ID), OpcodePushMember(SELF_ID, VAL1_ID), OpcodePushIndex()]
Add tests for indexed access compilation
Add tests for indexed access compilation
Python
mit
ytanay/thinglang,ytanay/thinglang,ytanay/thinglang,ytanay/thinglang
Add tests for indexed access compilation
from tests.compiler import compile_local, A_ID, LST_ID, SELF_ID, VAL1_ID from thinglang.compiler.opcodes import OpcodePushLocal, OpcodePushIndexImmediate, OpcodePushIndex, OpcodePushMember def test_local_list_immediate_index(): assert compile_local('lst[123]') == [OpcodePushLocal(LST_ID), OpcodePushIndexImmediate(123)] def test_local_list_non_immediate_index(): assert compile_local('lst[a]') == [OpcodePushLocal(LST_ID), OpcodePushLocal(A_ID), OpcodePushIndex()] assert compile_local('lst[self.val1]') == [OpcodePushLocal(LST_ID), OpcodePushMember(SELF_ID, VAL1_ID), OpcodePushIndex()]
<commit_before><commit_msg>Add tests for indexed access compilation<commit_after>
from tests.compiler import compile_local, A_ID, LST_ID, SELF_ID, VAL1_ID from thinglang.compiler.opcodes import OpcodePushLocal, OpcodePushIndexImmediate, OpcodePushIndex, OpcodePushMember def test_local_list_immediate_index(): assert compile_local('lst[123]') == [OpcodePushLocal(LST_ID), OpcodePushIndexImmediate(123)] def test_local_list_non_immediate_index(): assert compile_local('lst[a]') == [OpcodePushLocal(LST_ID), OpcodePushLocal(A_ID), OpcodePushIndex()] assert compile_local('lst[self.val1]') == [OpcodePushLocal(LST_ID), OpcodePushMember(SELF_ID, VAL1_ID), OpcodePushIndex()]
Add tests for indexed access compilationfrom tests.compiler import compile_local, A_ID, LST_ID, SELF_ID, VAL1_ID from thinglang.compiler.opcodes import OpcodePushLocal, OpcodePushIndexImmediate, OpcodePushIndex, OpcodePushMember def test_local_list_immediate_index(): assert compile_local('lst[123]') == [OpcodePushLocal(LST_ID), OpcodePushIndexImmediate(123)] def test_local_list_non_immediate_index(): assert compile_local('lst[a]') == [OpcodePushLocal(LST_ID), OpcodePushLocal(A_ID), OpcodePushIndex()] assert compile_local('lst[self.val1]') == [OpcodePushLocal(LST_ID), OpcodePushMember(SELF_ID, VAL1_ID), OpcodePushIndex()]
<commit_before><commit_msg>Add tests for indexed access compilation<commit_after>from tests.compiler import compile_local, A_ID, LST_ID, SELF_ID, VAL1_ID from thinglang.compiler.opcodes import OpcodePushLocal, OpcodePushIndexImmediate, OpcodePushIndex, OpcodePushMember def test_local_list_immediate_index(): assert compile_local('lst[123]') == [OpcodePushLocal(LST_ID), OpcodePushIndexImmediate(123)] def test_local_list_non_immediate_index(): assert compile_local('lst[a]') == [OpcodePushLocal(LST_ID), OpcodePushLocal(A_ID), OpcodePushIndex()] assert compile_local('lst[self.val1]') == [OpcodePushLocal(LST_ID), OpcodePushMember(SELF_ID, VAL1_ID), OpcodePushIndex()]
ca109b6cd0f7ce5818abdf413abcac51fc5f8b0d
odl/contrib/param_opt/test/test_param_opt.py
odl/contrib/param_opt/test/test_param_opt.py
import pytest import odl import odl.contrib.fom import odl.contrib.param_opt from odl.util.testutils import simple_fixture space = simple_fixture('space', [odl.rn(3), odl.uniform_discr([0, 0], [1, 1], [9, 11]), odl.uniform_discr(0, 1, 10)]) def test_optimal_parameters(space): """Tests if optimal_parameters works for some simple examples.""" fom = odl.contrib.fom.mean_squared_error mynoise = odl.phantom.white_noise(space) phantoms = [mynoise] data = [mynoise] def reconstruction(data, lam): """Perturbs the data by adding lam to it.""" return data + lam result = odl.contrib.param_opt.optimal_parameters(reconstruction, fom, phantoms, data, 1) assert result == pytest.approx(0) if __name__ == '__main__': odl.util.test_file(__file__)
Add initial test for the optimal_parameters function.
TST: Add initial test for the optimal_parameters function.
Python
mpl-2.0
odlgroup/odl,kohr-h/odl,odlgroup/odl,kohr-h/odl
TST: Add initial test for the optimal_parameters function.
import pytest import odl import odl.contrib.fom import odl.contrib.param_opt from odl.util.testutils import simple_fixture space = simple_fixture('space', [odl.rn(3), odl.uniform_discr([0, 0], [1, 1], [9, 11]), odl.uniform_discr(0, 1, 10)]) def test_optimal_parameters(space): """Tests if optimal_parameters works for some simple examples.""" fom = odl.contrib.fom.mean_squared_error mynoise = odl.phantom.white_noise(space) phantoms = [mynoise] data = [mynoise] def reconstruction(data, lam): """Perturbs the data by adding lam to it.""" return data + lam result = odl.contrib.param_opt.optimal_parameters(reconstruction, fom, phantoms, data, 1) assert result == pytest.approx(0) if __name__ == '__main__': odl.util.test_file(__file__)
<commit_before><commit_msg>TST: Add initial test for the optimal_parameters function.<commit_after>
import pytest import odl import odl.contrib.fom import odl.contrib.param_opt from odl.util.testutils import simple_fixture space = simple_fixture('space', [odl.rn(3), odl.uniform_discr([0, 0], [1, 1], [9, 11]), odl.uniform_discr(0, 1, 10)]) def test_optimal_parameters(space): """Tests if optimal_parameters works for some simple examples.""" fom = odl.contrib.fom.mean_squared_error mynoise = odl.phantom.white_noise(space) phantoms = [mynoise] data = [mynoise] def reconstruction(data, lam): """Perturbs the data by adding lam to it.""" return data + lam result = odl.contrib.param_opt.optimal_parameters(reconstruction, fom, phantoms, data, 1) assert result == pytest.approx(0) if __name__ == '__main__': odl.util.test_file(__file__)
TST: Add initial test for the optimal_parameters function.import pytest import odl import odl.contrib.fom import odl.contrib.param_opt from odl.util.testutils import simple_fixture space = simple_fixture('space', [odl.rn(3), odl.uniform_discr([0, 0], [1, 1], [9, 11]), odl.uniform_discr(0, 1, 10)]) def test_optimal_parameters(space): """Tests if optimal_parameters works for some simple examples.""" fom = odl.contrib.fom.mean_squared_error mynoise = odl.phantom.white_noise(space) phantoms = [mynoise] data = [mynoise] def reconstruction(data, lam): """Perturbs the data by adding lam to it.""" return data + lam result = odl.contrib.param_opt.optimal_parameters(reconstruction, fom, phantoms, data, 1) assert result == pytest.approx(0) if __name__ == '__main__': odl.util.test_file(__file__)
<commit_before><commit_msg>TST: Add initial test for the optimal_parameters function.<commit_after>import pytest import odl import odl.contrib.fom import odl.contrib.param_opt from odl.util.testutils import simple_fixture space = simple_fixture('space', [odl.rn(3), odl.uniform_discr([0, 0], [1, 1], [9, 11]), odl.uniform_discr(0, 1, 10)]) def test_optimal_parameters(space): """Tests if optimal_parameters works for some simple examples.""" fom = odl.contrib.fom.mean_squared_error mynoise = odl.phantom.white_noise(space) phantoms = [mynoise] data = [mynoise] def reconstruction(data, lam): """Perturbs the data by adding lam to it.""" return data + lam result = odl.contrib.param_opt.optimal_parameters(reconstruction, fom, phantoms, data, 1) assert result == pytest.approx(0) if __name__ == '__main__': odl.util.test_file(__file__)
9ade5a79e74281f8503c798b06f6b568122b0594
addons/zotero/migrations/0005_auto_20180216_0849.py
addons/zotero/migrations/0005_auto_20180216_0849.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-16 14:49 from __future__ import unicode_literals from bulk_update.helper import bulk_update from django.db import migrations def reverse_func(state, schema): print 'Starting reverse zotero library migration' modify_node_settings(state, None) modify_user_settings(state, False, None) def modify_node_settings(state, library_name): """ Updates the library_id for all ZoteroNodeSettings """ ZoteroNodeSettings = state.get_model('addons_zotero', 'NodeSettings') ZoteroNodeSettings.objects.all().update(library_id=library_name) print 'NodeSettings Updated' def modify_user_settings(state, add, library_name): """ For all zotero user settings, :params state: app_state :params library_name: library name to add or remove from user settings oauth metadata :params add: True for adding library, False for removing it. """ ZoteroUserSettings = state.get_model('addons_zotero', 'UserSettings') user_settings_count = ZoteroUserSettings.objects.count() current_count = 0 user_settings_pending_save = [] for user_setting in ZoteroUserSettings.objects.all(): current_count += 1 for node, ext_accounts in user_setting.oauth_grants.iteritems(): for ext_account in ext_accounts.keys(): if add: user_setting.oauth_grants[node][ext_account]['library'] = library_name else: user_setting.oauth_grants[node][ext_account].pop('library', None) user_settings_pending_save.append(user_setting) print 'UserSettings {}/{} updated'.format(current_count, user_settings_count) print 'Bulk saving UserSettings...' bulk_update(user_settings_pending_save) print 'Done' def migrate_zotero_libraries(state, schema): """ 1) For all zotero NodeSettings, mark library_id as 'personal', which has been the only option prior to zotero group libraries being added 2) For all zotero usersettings, add 'personal' library value to the nodes that have been given permission to use zotero external accounts. """ print 'Starting zotero library migration' modify_node_settings(state, 'personal') modify_user_settings(state, True, 'personal') class Migration(migrations.Migration): dependencies = [ ('addons_zotero', '0004_merge_20180112_0836'), ] operations = [ migrations.RunPython(migrate_zotero_libraries, reverse_func) ]
Add data migration for existing zotero user and node settings models - "personal" library is the only library that was previously available, but now group libraries are accessible.
Add data migration for existing zotero user and node settings models - "personal" library is the only library that was previously available, but now group libraries are accessible.
Python
apache-2.0
chennan47/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,icereval/osf.io,erinspace/osf.io,chennan47/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,brianjgeiger/osf.io,icereval/osf.io,felliott/osf.io,erinspace/osf.io,sloria/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,mfraezz/osf.io,mattclark/osf.io,binoculars/osf.io,Johnetordoff/osf.io,aaxelb/osf.io,icereval/osf.io,sloria/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,baylee-d/osf.io,mattclark/osf.io,caseyrollins/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,felliott/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,binoculars/osf.io,adlius/osf.io,Johnetordoff/osf.io,saradbowman/osf.io,cslzchen/osf.io,mfraezz/osf.io,mfraezz/osf.io,caseyrollins/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,sloria/osf.io,caseyrollins/osf.io,HalcyonChimera/osf.io,adlius/osf.io,felliott/osf.io,pattisdr/osf.io,chennan47/osf.io,felliott/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,adlius/osf.io,erinspace/osf.io,aaxelb/osf.io,mattclark/osf.io,adlius/osf.io
Add data migration for existing zotero user and node settings models - "personal" library is the only library that was previously available, but now group libraries are accessible.
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-16 14:49 from __future__ import unicode_literals from bulk_update.helper import bulk_update from django.db import migrations def reverse_func(state, schema): print 'Starting reverse zotero library migration' modify_node_settings(state, None) modify_user_settings(state, False, None) def modify_node_settings(state, library_name): """ Updates the library_id for all ZoteroNodeSettings """ ZoteroNodeSettings = state.get_model('addons_zotero', 'NodeSettings') ZoteroNodeSettings.objects.all().update(library_id=library_name) print 'NodeSettings Updated' def modify_user_settings(state, add, library_name): """ For all zotero user settings, :params state: app_state :params library_name: library name to add or remove from user settings oauth metadata :params add: True for adding library, False for removing it. """ ZoteroUserSettings = state.get_model('addons_zotero', 'UserSettings') user_settings_count = ZoteroUserSettings.objects.count() current_count = 0 user_settings_pending_save = [] for user_setting in ZoteroUserSettings.objects.all(): current_count += 1 for node, ext_accounts in user_setting.oauth_grants.iteritems(): for ext_account in ext_accounts.keys(): if add: user_setting.oauth_grants[node][ext_account]['library'] = library_name else: user_setting.oauth_grants[node][ext_account].pop('library', None) user_settings_pending_save.append(user_setting) print 'UserSettings {}/{} updated'.format(current_count, user_settings_count) print 'Bulk saving UserSettings...' bulk_update(user_settings_pending_save) print 'Done' def migrate_zotero_libraries(state, schema): """ 1) For all zotero NodeSettings, mark library_id as 'personal', which has been the only option prior to zotero group libraries being added 2) For all zotero usersettings, add 'personal' library value to the nodes that have been given permission to use zotero external accounts. """ print 'Starting zotero library migration' modify_node_settings(state, 'personal') modify_user_settings(state, True, 'personal') class Migration(migrations.Migration): dependencies = [ ('addons_zotero', '0004_merge_20180112_0836'), ] operations = [ migrations.RunPython(migrate_zotero_libraries, reverse_func) ]
<commit_before><commit_msg>Add data migration for existing zotero user and node settings models - "personal" library is the only library that was previously available, but now group libraries are accessible.<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-16 14:49 from __future__ import unicode_literals from bulk_update.helper import bulk_update from django.db import migrations def reverse_func(state, schema): print 'Starting reverse zotero library migration' modify_node_settings(state, None) modify_user_settings(state, False, None) def modify_node_settings(state, library_name): """ Updates the library_id for all ZoteroNodeSettings """ ZoteroNodeSettings = state.get_model('addons_zotero', 'NodeSettings') ZoteroNodeSettings.objects.all().update(library_id=library_name) print 'NodeSettings Updated' def modify_user_settings(state, add, library_name): """ For all zotero user settings, :params state: app_state :params library_name: library name to add or remove from user settings oauth metadata :params add: True for adding library, False for removing it. """ ZoteroUserSettings = state.get_model('addons_zotero', 'UserSettings') user_settings_count = ZoteroUserSettings.objects.count() current_count = 0 user_settings_pending_save = [] for user_setting in ZoteroUserSettings.objects.all(): current_count += 1 for node, ext_accounts in user_setting.oauth_grants.iteritems(): for ext_account in ext_accounts.keys(): if add: user_setting.oauth_grants[node][ext_account]['library'] = library_name else: user_setting.oauth_grants[node][ext_account].pop('library', None) user_settings_pending_save.append(user_setting) print 'UserSettings {}/{} updated'.format(current_count, user_settings_count) print 'Bulk saving UserSettings...' bulk_update(user_settings_pending_save) print 'Done' def migrate_zotero_libraries(state, schema): """ 1) For all zotero NodeSettings, mark library_id as 'personal', which has been the only option prior to zotero group libraries being added 2) For all zotero usersettings, add 'personal' library value to the nodes that have been given permission to use zotero external accounts. """ print 'Starting zotero library migration' modify_node_settings(state, 'personal') modify_user_settings(state, True, 'personal') class Migration(migrations.Migration): dependencies = [ ('addons_zotero', '0004_merge_20180112_0836'), ] operations = [ migrations.RunPython(migrate_zotero_libraries, reverse_func) ]
Add data migration for existing zotero user and node settings models - "personal" library is the only library that was previously available, but now group libraries are accessible.# -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-16 14:49 from __future__ import unicode_literals from bulk_update.helper import bulk_update from django.db import migrations def reverse_func(state, schema): print 'Starting reverse zotero library migration' modify_node_settings(state, None) modify_user_settings(state, False, None) def modify_node_settings(state, library_name): """ Updates the library_id for all ZoteroNodeSettings """ ZoteroNodeSettings = state.get_model('addons_zotero', 'NodeSettings') ZoteroNodeSettings.objects.all().update(library_id=library_name) print 'NodeSettings Updated' def modify_user_settings(state, add, library_name): """ For all zotero user settings, :params state: app_state :params library_name: library name to add or remove from user settings oauth metadata :params add: True for adding library, False for removing it. """ ZoteroUserSettings = state.get_model('addons_zotero', 'UserSettings') user_settings_count = ZoteroUserSettings.objects.count() current_count = 0 user_settings_pending_save = [] for user_setting in ZoteroUserSettings.objects.all(): current_count += 1 for node, ext_accounts in user_setting.oauth_grants.iteritems(): for ext_account in ext_accounts.keys(): if add: user_setting.oauth_grants[node][ext_account]['library'] = library_name else: user_setting.oauth_grants[node][ext_account].pop('library', None) user_settings_pending_save.append(user_setting) print 'UserSettings {}/{} updated'.format(current_count, user_settings_count) print 'Bulk saving UserSettings...' bulk_update(user_settings_pending_save) print 'Done' def migrate_zotero_libraries(state, schema): """ 1) For all zotero NodeSettings, mark library_id as 'personal', which has been the only option prior to zotero group libraries being added 2) For all zotero usersettings, add 'personal' library value to the nodes that have been given permission to use zotero external accounts. """ print 'Starting zotero library migration' modify_node_settings(state, 'personal') modify_user_settings(state, True, 'personal') class Migration(migrations.Migration): dependencies = [ ('addons_zotero', '0004_merge_20180112_0836'), ] operations = [ migrations.RunPython(migrate_zotero_libraries, reverse_func) ]
<commit_before><commit_msg>Add data migration for existing zotero user and node settings models - "personal" library is the only library that was previously available, but now group libraries are accessible.<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.11.9 on 2018-02-16 14:49 from __future__ import unicode_literals from bulk_update.helper import bulk_update from django.db import migrations def reverse_func(state, schema): print 'Starting reverse zotero library migration' modify_node_settings(state, None) modify_user_settings(state, False, None) def modify_node_settings(state, library_name): """ Updates the library_id for all ZoteroNodeSettings """ ZoteroNodeSettings = state.get_model('addons_zotero', 'NodeSettings') ZoteroNodeSettings.objects.all().update(library_id=library_name) print 'NodeSettings Updated' def modify_user_settings(state, add, library_name): """ For all zotero user settings, :params state: app_state :params library_name: library name to add or remove from user settings oauth metadata :params add: True for adding library, False for removing it. """ ZoteroUserSettings = state.get_model('addons_zotero', 'UserSettings') user_settings_count = ZoteroUserSettings.objects.count() current_count = 0 user_settings_pending_save = [] for user_setting in ZoteroUserSettings.objects.all(): current_count += 1 for node, ext_accounts in user_setting.oauth_grants.iteritems(): for ext_account in ext_accounts.keys(): if add: user_setting.oauth_grants[node][ext_account]['library'] = library_name else: user_setting.oauth_grants[node][ext_account].pop('library', None) user_settings_pending_save.append(user_setting) print 'UserSettings {}/{} updated'.format(current_count, user_settings_count) print 'Bulk saving UserSettings...' bulk_update(user_settings_pending_save) print 'Done' def migrate_zotero_libraries(state, schema): """ 1) For all zotero NodeSettings, mark library_id as 'personal', which has been the only option prior to zotero group libraries being added 2) For all zotero usersettings, add 'personal' library value to the nodes that have been given permission to use zotero external accounts. """ print 'Starting zotero library migration' modify_node_settings(state, 'personal') modify_user_settings(state, True, 'personal') class Migration(migrations.Migration): dependencies = [ ('addons_zotero', '0004_merge_20180112_0836'), ] operations = [ migrations.RunPython(migrate_zotero_libraries, reverse_func) ]
7578cef2d5006af632b45ce6b279d54253db3b5b
pi_approach/Libraries/serverxclient.py
pi_approach/Libraries/serverxclient.py
# Server and Client Abstraction Library import socket HOST = "userinterface.local" PORT = 12345 class Server(object): """A server-serving class""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def setup_server(self): try: Server.s.bind((HOST,PORT)) print "Bind success" except socket.error: return "Bind failure" def socket_reception(self): Server.s.listen(5) (connection, address) = Server.s.accept() print str(connection)+ " : " + str(address) return (connection, address) def receive_data(self, connection): data = connection.recv(4096) return data def send_data(self, connection, data): connection.send(data) def close_connection(self, connection): connection.close() class Client(object): """A socket-enabled client class that connects to a server""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def socket_connection(self): Client.s.connect((HOST,PORT)) def receive_data(self): data = Client.s.recv(4096) return data def send_data(self, data): Client.s.send(data)
Add abstracted server and client library
Add abstracted server and client library
Python
mit
the-raspberry-pi-guy/lidar
Add abstracted server and client library
# Server and Client Abstraction Library import socket HOST = "userinterface.local" PORT = 12345 class Server(object): """A server-serving class""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def setup_server(self): try: Server.s.bind((HOST,PORT)) print "Bind success" except socket.error: return "Bind failure" def socket_reception(self): Server.s.listen(5) (connection, address) = Server.s.accept() print str(connection)+ " : " + str(address) return (connection, address) def receive_data(self, connection): data = connection.recv(4096) return data def send_data(self, connection, data): connection.send(data) def close_connection(self, connection): connection.close() class Client(object): """A socket-enabled client class that connects to a server""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def socket_connection(self): Client.s.connect((HOST,PORT)) def receive_data(self): data = Client.s.recv(4096) return data def send_data(self, data): Client.s.send(data)
<commit_before><commit_msg>Add abstracted server and client library<commit_after>
# Server and Client Abstraction Library import socket HOST = "userinterface.local" PORT = 12345 class Server(object): """A server-serving class""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def setup_server(self): try: Server.s.bind((HOST,PORT)) print "Bind success" except socket.error: return "Bind failure" def socket_reception(self): Server.s.listen(5) (connection, address) = Server.s.accept() print str(connection)+ " : " + str(address) return (connection, address) def receive_data(self, connection): data = connection.recv(4096) return data def send_data(self, connection, data): connection.send(data) def close_connection(self, connection): connection.close() class Client(object): """A socket-enabled client class that connects to a server""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def socket_connection(self): Client.s.connect((HOST,PORT)) def receive_data(self): data = Client.s.recv(4096) return data def send_data(self, data): Client.s.send(data)
Add abstracted server and client library# Server and Client Abstraction Library import socket HOST = "userinterface.local" PORT = 12345 class Server(object): """A server-serving class""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def setup_server(self): try: Server.s.bind((HOST,PORT)) print "Bind success" except socket.error: return "Bind failure" def socket_reception(self): Server.s.listen(5) (connection, address) = Server.s.accept() print str(connection)+ " : " + str(address) return (connection, address) def receive_data(self, connection): data = connection.recv(4096) return data def send_data(self, connection, data): connection.send(data) def close_connection(self, connection): connection.close() class Client(object): """A socket-enabled client class that connects to a server""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def socket_connection(self): Client.s.connect((HOST,PORT)) def receive_data(self): data = Client.s.recv(4096) return data def send_data(self, data): Client.s.send(data)
<commit_before><commit_msg>Add abstracted server and client library<commit_after># Server and Client Abstraction Library import socket HOST = "userinterface.local" PORT = 12345 class Server(object): """A server-serving class""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def setup_server(self): try: Server.s.bind((HOST,PORT)) print "Bind success" except socket.error: return "Bind failure" def socket_reception(self): Server.s.listen(5) (connection, address) = Server.s.accept() print str(connection)+ " : " + str(address) return (connection, address) def receive_data(self, connection): data = connection.recv(4096) return data def send_data(self, connection, data): connection.send(data) def close_connection(self, connection): connection.close() class Client(object): """A socket-enabled client class that connects to a server""" s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def socket_connection(self): Client.s.connect((HOST,PORT)) def receive_data(self): data = Client.s.recv(4096) return data def send_data(self, data): Client.s.send(data)
a9efd01f22a4fe311b97bb6ef4f14e3abe1a5dc1
analysis/sort-by.py
analysis/sort-by.py
#!/usr/bin/env python """ Sort a result list by a particular top level key. The key must have a total order (e.g. strings, ints, floats) """ import argparse import os import logging import pprint import sys import yaml from br_util import FinalResultType, classifyResult # HACK _brPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _brPath) from BoogieRunner import ProgramListLoader try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): resultTypes = [ r.name for r in list(FinalResultType)] # Get list of ResultTypes as strings logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() parser.add_argument('key', type=str) parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin') parser.add_argument('-r', '--reverse', default=False, action='store_true') pargs = parser.parse_args(args) logging.info('Loading YAML') results = yaml.load(pargs.result_yml, Loader=Loader) logging.info('Finished loading YAML') assert isinstance(results, list) assert len(results) > 0 if not pargs.key in results[0]: logging.info('Results do not have the key "{}"'.format(pargs.key)) return 1 results.sort(key= lambda r: r[pargs.key], reverse=pargs.reverse) try: print(yaml.dump(results, default_flow_style=False, Dumper=Dumper)) except BrokenPipeError as e: pass if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
Add script to sort results. This is useful for sorting by the "total_time" key.
Add script to sort results. This is useful for sorting by the "total_time" key.
Python
bsd-3-clause
symbooglix/boogie-runner,symbooglix/boogie-runner
Add script to sort results. This is useful for sorting by the "total_time" key.
#!/usr/bin/env python """ Sort a result list by a particular top level key. The key must have a total order (e.g. strings, ints, floats) """ import argparse import os import logging import pprint import sys import yaml from br_util import FinalResultType, classifyResult # HACK _brPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _brPath) from BoogieRunner import ProgramListLoader try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): resultTypes = [ r.name for r in list(FinalResultType)] # Get list of ResultTypes as strings logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() parser.add_argument('key', type=str) parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin') parser.add_argument('-r', '--reverse', default=False, action='store_true') pargs = parser.parse_args(args) logging.info('Loading YAML') results = yaml.load(pargs.result_yml, Loader=Loader) logging.info('Finished loading YAML') assert isinstance(results, list) assert len(results) > 0 if not pargs.key in results[0]: logging.info('Results do not have the key "{}"'.format(pargs.key)) return 1 results.sort(key= lambda r: r[pargs.key], reverse=pargs.reverse) try: print(yaml.dump(results, default_flow_style=False, Dumper=Dumper)) except BrokenPipeError as e: pass if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
<commit_before><commit_msg>Add script to sort results. This is useful for sorting by the "total_time" key.<commit_after>
#!/usr/bin/env python """ Sort a result list by a particular top level key. The key must have a total order (e.g. strings, ints, floats) """ import argparse import os import logging import pprint import sys import yaml from br_util import FinalResultType, classifyResult # HACK _brPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _brPath) from BoogieRunner import ProgramListLoader try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): resultTypes = [ r.name for r in list(FinalResultType)] # Get list of ResultTypes as strings logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() parser.add_argument('key', type=str) parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin') parser.add_argument('-r', '--reverse', default=False, action='store_true') pargs = parser.parse_args(args) logging.info('Loading YAML') results = yaml.load(pargs.result_yml, Loader=Loader) logging.info('Finished loading YAML') assert isinstance(results, list) assert len(results) > 0 if not pargs.key in results[0]: logging.info('Results do not have the key "{}"'.format(pargs.key)) return 1 results.sort(key= lambda r: r[pargs.key], reverse=pargs.reverse) try: print(yaml.dump(results, default_flow_style=False, Dumper=Dumper)) except BrokenPipeError as e: pass if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
Add script to sort results. This is useful for sorting by the "total_time" key.#!/usr/bin/env python """ Sort a result list by a particular top level key. The key must have a total order (e.g. strings, ints, floats) """ import argparse import os import logging import pprint import sys import yaml from br_util import FinalResultType, classifyResult # HACK _brPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _brPath) from BoogieRunner import ProgramListLoader try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): resultTypes = [ r.name for r in list(FinalResultType)] # Get list of ResultTypes as strings logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() parser.add_argument('key', type=str) parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin') parser.add_argument('-r', '--reverse', default=False, action='store_true') pargs = parser.parse_args(args) logging.info('Loading YAML') results = yaml.load(pargs.result_yml, Loader=Loader) logging.info('Finished loading YAML') assert isinstance(results, list) assert len(results) > 0 if not pargs.key in results[0]: logging.info('Results do not have the key "{}"'.format(pargs.key)) return 1 results.sort(key= lambda r: r[pargs.key], reverse=pargs.reverse) try: print(yaml.dump(results, default_flow_style=False, Dumper=Dumper)) except BrokenPipeError as e: pass if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
<commit_before><commit_msg>Add script to sort results. This is useful for sorting by the "total_time" key.<commit_after>#!/usr/bin/env python """ Sort a result list by a particular top level key. The key must have a total order (e.g. strings, ints, floats) """ import argparse import os import logging import pprint import sys import yaml from br_util import FinalResultType, classifyResult # HACK _brPath = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, _brPath) from BoogieRunner import ProgramListLoader try: # Try to use libyaml which is faster from yaml import CLoader as Loader, CDumper as Dumper except ImportError: # fall back on python implementation from yaml import Loader, Dumper def main(args): resultTypes = [ r.name for r in list(FinalResultType)] # Get list of ResultTypes as strings logging.basicConfig(level=logging.DEBUG) parser = argparse.ArgumentParser() parser.add_argument('key', type=str) parser.add_argument('result_yml', type=argparse.FileType('r'), help='File to open, if \'-\' then use stdin') parser.add_argument('-r', '--reverse', default=False, action='store_true') pargs = parser.parse_args(args) logging.info('Loading YAML') results = yaml.load(pargs.result_yml, Loader=Loader) logging.info('Finished loading YAML') assert isinstance(results, list) assert len(results) > 0 if not pargs.key in results[0]: logging.info('Results do not have the key "{}"'.format(pargs.key)) return 1 results.sort(key= lambda r: r[pargs.key], reverse=pargs.reverse) try: print(yaml.dump(results, default_flow_style=False, Dumper=Dumper)) except BrokenPipeError as e: pass if __name__ == '__main__': sys.exit(main(sys.argv[1:]))
2ad3cb462469a2d4f3f457e1e520979871410d20
tests/test/xie/graphics/shape.py
tests/test/xie/graphics/shape.py
import unittest import copy from xie.graphics.shape import Pane class PaneTestCase(unittest.TestCase): def setUp(self): self.generateTestData() def tearDown(self): pass def generateTestData(self): self.pane_1=Pane(38, 32, 101, 50) self.pane_2=Pane(22, 50, 122, 89) self.pane_3=Pane(5, 20, 31, 32) self.pane_4=Pane(2, 1, 5, 88) self.point_1=(47, 51) self.point_2=(77, 43) def testPaneEquality(self): self.assertEqual(self.pane_1, copy.deepcopy(self.pane_1)) self.assertEqual(self.pane_2, copy.deepcopy(self.pane_2)) self.assertEqual(self.pane_3, copy.deepcopy(self.pane_3)) self.assertEqual(self.pane_4, copy.deepcopy(self.pane_4)) def test_transformRelativePointByTargetPane(self): transformedPoint_1=self.pane_1.transformRelativePointByTargetPane(self.point_1, self.pane_4) self.assertEqual(transformedPoint_1, (2, 89)) transformedPoint_2=self.pane_1.transformRelativePointByTargetPane(self.point_2, self.pane_4) self.assertEqual(transformedPoint_2, (4, 51)) def test_transformRelativePaneByTargetPane(self): transformedPane_1=self.pane_1.transformRelativePaneByTargetPane(self.pane_2, self.pane_4) self.assertEqual(transformedPane_1, Pane(1, 84, 7, 265)) transformedPane_2=self.pane_1.transformRelativePaneByTargetPane(self.pane_3, self.pane_4) self.assertEqual(transformedPane_2, Pane(0, -54, 2, 1))
Add test cases for Pane.
Add test cases for Pane.
Python
apache-2.0
xrloong/Xie
Add test cases for Pane.
import unittest import copy from xie.graphics.shape import Pane class PaneTestCase(unittest.TestCase): def setUp(self): self.generateTestData() def tearDown(self): pass def generateTestData(self): self.pane_1=Pane(38, 32, 101, 50) self.pane_2=Pane(22, 50, 122, 89) self.pane_3=Pane(5, 20, 31, 32) self.pane_4=Pane(2, 1, 5, 88) self.point_1=(47, 51) self.point_2=(77, 43) def testPaneEquality(self): self.assertEqual(self.pane_1, copy.deepcopy(self.pane_1)) self.assertEqual(self.pane_2, copy.deepcopy(self.pane_2)) self.assertEqual(self.pane_3, copy.deepcopy(self.pane_3)) self.assertEqual(self.pane_4, copy.deepcopy(self.pane_4)) def test_transformRelativePointByTargetPane(self): transformedPoint_1=self.pane_1.transformRelativePointByTargetPane(self.point_1, self.pane_4) self.assertEqual(transformedPoint_1, (2, 89)) transformedPoint_2=self.pane_1.transformRelativePointByTargetPane(self.point_2, self.pane_4) self.assertEqual(transformedPoint_2, (4, 51)) def test_transformRelativePaneByTargetPane(self): transformedPane_1=self.pane_1.transformRelativePaneByTargetPane(self.pane_2, self.pane_4) self.assertEqual(transformedPane_1, Pane(1, 84, 7, 265)) transformedPane_2=self.pane_1.transformRelativePaneByTargetPane(self.pane_3, self.pane_4) self.assertEqual(transformedPane_2, Pane(0, -54, 2, 1))
<commit_before><commit_msg>Add test cases for Pane.<commit_after>
import unittest import copy from xie.graphics.shape import Pane class PaneTestCase(unittest.TestCase): def setUp(self): self.generateTestData() def tearDown(self): pass def generateTestData(self): self.pane_1=Pane(38, 32, 101, 50) self.pane_2=Pane(22, 50, 122, 89) self.pane_3=Pane(5, 20, 31, 32) self.pane_4=Pane(2, 1, 5, 88) self.point_1=(47, 51) self.point_2=(77, 43) def testPaneEquality(self): self.assertEqual(self.pane_1, copy.deepcopy(self.pane_1)) self.assertEqual(self.pane_2, copy.deepcopy(self.pane_2)) self.assertEqual(self.pane_3, copy.deepcopy(self.pane_3)) self.assertEqual(self.pane_4, copy.deepcopy(self.pane_4)) def test_transformRelativePointByTargetPane(self): transformedPoint_1=self.pane_1.transformRelativePointByTargetPane(self.point_1, self.pane_4) self.assertEqual(transformedPoint_1, (2, 89)) transformedPoint_2=self.pane_1.transformRelativePointByTargetPane(self.point_2, self.pane_4) self.assertEqual(transformedPoint_2, (4, 51)) def test_transformRelativePaneByTargetPane(self): transformedPane_1=self.pane_1.transformRelativePaneByTargetPane(self.pane_2, self.pane_4) self.assertEqual(transformedPane_1, Pane(1, 84, 7, 265)) transformedPane_2=self.pane_1.transformRelativePaneByTargetPane(self.pane_3, self.pane_4) self.assertEqual(transformedPane_2, Pane(0, -54, 2, 1))
Add test cases for Pane.import unittest import copy from xie.graphics.shape import Pane class PaneTestCase(unittest.TestCase): def setUp(self): self.generateTestData() def tearDown(self): pass def generateTestData(self): self.pane_1=Pane(38, 32, 101, 50) self.pane_2=Pane(22, 50, 122, 89) self.pane_3=Pane(5, 20, 31, 32) self.pane_4=Pane(2, 1, 5, 88) self.point_1=(47, 51) self.point_2=(77, 43) def testPaneEquality(self): self.assertEqual(self.pane_1, copy.deepcopy(self.pane_1)) self.assertEqual(self.pane_2, copy.deepcopy(self.pane_2)) self.assertEqual(self.pane_3, copy.deepcopy(self.pane_3)) self.assertEqual(self.pane_4, copy.deepcopy(self.pane_4)) def test_transformRelativePointByTargetPane(self): transformedPoint_1=self.pane_1.transformRelativePointByTargetPane(self.point_1, self.pane_4) self.assertEqual(transformedPoint_1, (2, 89)) transformedPoint_2=self.pane_1.transformRelativePointByTargetPane(self.point_2, self.pane_4) self.assertEqual(transformedPoint_2, (4, 51)) def test_transformRelativePaneByTargetPane(self): transformedPane_1=self.pane_1.transformRelativePaneByTargetPane(self.pane_2, self.pane_4) self.assertEqual(transformedPane_1, Pane(1, 84, 7, 265)) transformedPane_2=self.pane_1.transformRelativePaneByTargetPane(self.pane_3, self.pane_4) self.assertEqual(transformedPane_2, Pane(0, -54, 2, 1))
<commit_before><commit_msg>Add test cases for Pane.<commit_after>import unittest import copy from xie.graphics.shape import Pane class PaneTestCase(unittest.TestCase): def setUp(self): self.generateTestData() def tearDown(self): pass def generateTestData(self): self.pane_1=Pane(38, 32, 101, 50) self.pane_2=Pane(22, 50, 122, 89) self.pane_3=Pane(5, 20, 31, 32) self.pane_4=Pane(2, 1, 5, 88) self.point_1=(47, 51) self.point_2=(77, 43) def testPaneEquality(self): self.assertEqual(self.pane_1, copy.deepcopy(self.pane_1)) self.assertEqual(self.pane_2, copy.deepcopy(self.pane_2)) self.assertEqual(self.pane_3, copy.deepcopy(self.pane_3)) self.assertEqual(self.pane_4, copy.deepcopy(self.pane_4)) def test_transformRelativePointByTargetPane(self): transformedPoint_1=self.pane_1.transformRelativePointByTargetPane(self.point_1, self.pane_4) self.assertEqual(transformedPoint_1, (2, 89)) transformedPoint_2=self.pane_1.transformRelativePointByTargetPane(self.point_2, self.pane_4) self.assertEqual(transformedPoint_2, (4, 51)) def test_transformRelativePaneByTargetPane(self): transformedPane_1=self.pane_1.transformRelativePaneByTargetPane(self.pane_2, self.pane_4) self.assertEqual(transformedPane_1, Pane(1, 84, 7, 265)) transformedPane_2=self.pane_1.transformRelativePaneByTargetPane(self.pane_3, self.pane_4) self.assertEqual(transformedPane_2, Pane(0, -54, 2, 1))
efdb0a8c8857ca04614108769e898cc409615b18
functions.py
functions.py
import numpy as np from key import Key def remove_duplicates(values): output = [] seen = [] for value in values: if not np.any([np.all(value == x) for x in seen]): output.append(value) seen.append(value) return output def key_proliferation(input_keys): dt = np.dtype(object) key_list = [x.array_swap() for x in input_keys] map_list = [] for list in key_list: for key in list: map_list.append(key.map) filtered_map_list = remove_duplicates(map_list) filtered_key_list = [Key(x) for x in filtered_map_list] return filtered_key_list
Develop key_proliferate function to create new keys.
Develop key_proliferate function to create new keys.
Python
apache-2.0
paulsbrookes/subcipher
Develop key_proliferate function to create new keys.
import numpy as np from key import Key def remove_duplicates(values): output = [] seen = [] for value in values: if not np.any([np.all(value == x) for x in seen]): output.append(value) seen.append(value) return output def key_proliferation(input_keys): dt = np.dtype(object) key_list = [x.array_swap() for x in input_keys] map_list = [] for list in key_list: for key in list: map_list.append(key.map) filtered_map_list = remove_duplicates(map_list) filtered_key_list = [Key(x) for x in filtered_map_list] return filtered_key_list
<commit_before><commit_msg>Develop key_proliferate function to create new keys.<commit_after>
import numpy as np from key import Key def remove_duplicates(values): output = [] seen = [] for value in values: if not np.any([np.all(value == x) for x in seen]): output.append(value) seen.append(value) return output def key_proliferation(input_keys): dt = np.dtype(object) key_list = [x.array_swap() for x in input_keys] map_list = [] for list in key_list: for key in list: map_list.append(key.map) filtered_map_list = remove_duplicates(map_list) filtered_key_list = [Key(x) for x in filtered_map_list] return filtered_key_list
Develop key_proliferate function to create new keys.import numpy as np from key import Key def remove_duplicates(values): output = [] seen = [] for value in values: if not np.any([np.all(value == x) for x in seen]): output.append(value) seen.append(value) return output def key_proliferation(input_keys): dt = np.dtype(object) key_list = [x.array_swap() for x in input_keys] map_list = [] for list in key_list: for key in list: map_list.append(key.map) filtered_map_list = remove_duplicates(map_list) filtered_key_list = [Key(x) for x in filtered_map_list] return filtered_key_list
<commit_before><commit_msg>Develop key_proliferate function to create new keys.<commit_after>import numpy as np from key import Key def remove_duplicates(values): output = [] seen = [] for value in values: if not np.any([np.all(value == x) for x in seen]): output.append(value) seen.append(value) return output def key_proliferation(input_keys): dt = np.dtype(object) key_list = [x.array_swap() for x in input_keys] map_list = [] for list in key_list: for key in list: map_list.append(key.map) filtered_map_list = remove_duplicates(map_list) filtered_key_list = [Key(x) for x in filtered_map_list] return filtered_key_list
ba388774ac7326efae78cfc69768985e71c988f4
test/test_ev3_battery_percentage.py
test/test_ev3_battery_percentage.py
import ev3.ev3dev import unittest from util import get_input class TestI2CS(unittest.TestCase): def test_battery(self): print(ev3.ev3dev.get_battery_percentage()) if __name__ == '__main__': unittest.main()
Move get_battery_percentage to module method Add test
Move get_battery_percentage to module method Add test
Python
apache-2.0
MaxNoe/python-ev3,topikachu/python-ev3,topikachu/python-ev3,evz/python-ev3,MaxNoe/python-ev3,evz/python-ev3
Move get_battery_percentage to module method Add test
import ev3.ev3dev import unittest from util import get_input class TestI2CS(unittest.TestCase): def test_battery(self): print(ev3.ev3dev.get_battery_percentage()) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Move get_battery_percentage to module method Add test<commit_after>
import ev3.ev3dev import unittest from util import get_input class TestI2CS(unittest.TestCase): def test_battery(self): print(ev3.ev3dev.get_battery_percentage()) if __name__ == '__main__': unittest.main()
Move get_battery_percentage to module method Add testimport ev3.ev3dev import unittest from util import get_input class TestI2CS(unittest.TestCase): def test_battery(self): print(ev3.ev3dev.get_battery_percentage()) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Move get_battery_percentage to module method Add test<commit_after>import ev3.ev3dev import unittest from util import get_input class TestI2CS(unittest.TestCase): def test_battery(self): print(ev3.ev3dev.get_battery_percentage()) if __name__ == '__main__': unittest.main()
dc1b04727f21038c815119b522ad276ceb01de41
tests/test_issue_types.py
tests/test_issue_types.py
from taiga.requestmaker import RequestMaker, RequestMakerException from taiga.models.base import InstanceResource, ListResource from taiga.models.models import IssueType, IssueTypes from taiga import TaigaAPI import taiga.exceptions import json import requests import unittest from mock import patch from .tools import create_mock_json from .tools import MockResponse class TestIssueTypes(unittest.TestCase): @patch('taiga.models.base.ListResource._new_resource') def test_create_issue_type(self, mock_new_resource): rm = RequestMaker('/api/v1', 'fakehost', 'faketoken') mock_new_resource.return_value = IssueType(rm) it = IssueTypes(rm).create(1, 'IT 1') mock_new_resource.assert_called_with( payload={'project': 1, 'name': 'IT 1'} )
Add tests for issue types
Add tests for issue types
Python
mit
jespino/python-taiga,nephila/python-taiga,bameda/python-taiga,mlq/python-taiga,bameda/python-taiga,erikw/python-taiga,erikw/python-taiga,mlq/python-taiga,jespino/python-taiga
Add tests for issue types
from taiga.requestmaker import RequestMaker, RequestMakerException from taiga.models.base import InstanceResource, ListResource from taiga.models.models import IssueType, IssueTypes from taiga import TaigaAPI import taiga.exceptions import json import requests import unittest from mock import patch from .tools import create_mock_json from .tools import MockResponse class TestIssueTypes(unittest.TestCase): @patch('taiga.models.base.ListResource._new_resource') def test_create_issue_type(self, mock_new_resource): rm = RequestMaker('/api/v1', 'fakehost', 'faketoken') mock_new_resource.return_value = IssueType(rm) it = IssueTypes(rm).create(1, 'IT 1') mock_new_resource.assert_called_with( payload={'project': 1, 'name': 'IT 1'} )
<commit_before><commit_msg>Add tests for issue types<commit_after>
from taiga.requestmaker import RequestMaker, RequestMakerException from taiga.models.base import InstanceResource, ListResource from taiga.models.models import IssueType, IssueTypes from taiga import TaigaAPI import taiga.exceptions import json import requests import unittest from mock import patch from .tools import create_mock_json from .tools import MockResponse class TestIssueTypes(unittest.TestCase): @patch('taiga.models.base.ListResource._new_resource') def test_create_issue_type(self, mock_new_resource): rm = RequestMaker('/api/v1', 'fakehost', 'faketoken') mock_new_resource.return_value = IssueType(rm) it = IssueTypes(rm).create(1, 'IT 1') mock_new_resource.assert_called_with( payload={'project': 1, 'name': 'IT 1'} )
Add tests for issue typesfrom taiga.requestmaker import RequestMaker, RequestMakerException from taiga.models.base import InstanceResource, ListResource from taiga.models.models import IssueType, IssueTypes from taiga import TaigaAPI import taiga.exceptions import json import requests import unittest from mock import patch from .tools import create_mock_json from .tools import MockResponse class TestIssueTypes(unittest.TestCase): @patch('taiga.models.base.ListResource._new_resource') def test_create_issue_type(self, mock_new_resource): rm = RequestMaker('/api/v1', 'fakehost', 'faketoken') mock_new_resource.return_value = IssueType(rm) it = IssueTypes(rm).create(1, 'IT 1') mock_new_resource.assert_called_with( payload={'project': 1, 'name': 'IT 1'} )
<commit_before><commit_msg>Add tests for issue types<commit_after>from taiga.requestmaker import RequestMaker, RequestMakerException from taiga.models.base import InstanceResource, ListResource from taiga.models.models import IssueType, IssueTypes from taiga import TaigaAPI import taiga.exceptions import json import requests import unittest from mock import patch from .tools import create_mock_json from .tools import MockResponse class TestIssueTypes(unittest.TestCase): @patch('taiga.models.base.ListResource._new_resource') def test_create_issue_type(self, mock_new_resource): rm = RequestMaker('/api/v1', 'fakehost', 'faketoken') mock_new_resource.return_value = IssueType(rm) it = IssueTypes(rm).create(1, 'IT 1') mock_new_resource.assert_called_with( payload={'project': 1, 'name': 'IT 1'} )
16b6c70884f870461cd3fa7d18daf61faa5c25d0
selfdrive/car/tests/test_car_params.py
selfdrive/car/tests/test_car_params.py
#!/usr/bin/env python3 import unittest from selfdrive.car.fingerprints import all_known_cars from selfdrive.car.car_helpers import interfaces from selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS class TestCarParam(unittest.TestCase): def test_creating_car_params(self): all_cars = all_known_cars() for car in all_cars: fingerprint = FINGERPRINTS[car][0] CarInterface, CarController, CarState = interfaces[car] fingerprints = { 0: fingerprint, 1: fingerprint, 2: fingerprint, } car_fw = [] for has_relay in [True, False]: car_params = CarInterface.get_params(car, fingerprints, has_relay, car_fw) car_interface = CarInterface(car_params, CarController, CarState), car_params assert car_params assert car_interface if __name__ == "__main__": unittest.main()
Add test that gets all the car params
Add test that gets all the car params
Python
mit
commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot
Add test that gets all the car params
#!/usr/bin/env python3 import unittest from selfdrive.car.fingerprints import all_known_cars from selfdrive.car.car_helpers import interfaces from selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS class TestCarParam(unittest.TestCase): def test_creating_car_params(self): all_cars = all_known_cars() for car in all_cars: fingerprint = FINGERPRINTS[car][0] CarInterface, CarController, CarState = interfaces[car] fingerprints = { 0: fingerprint, 1: fingerprint, 2: fingerprint, } car_fw = [] for has_relay in [True, False]: car_params = CarInterface.get_params(car, fingerprints, has_relay, car_fw) car_interface = CarInterface(car_params, CarController, CarState), car_params assert car_params assert car_interface if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add test that gets all the car params<commit_after>
#!/usr/bin/env python3 import unittest from selfdrive.car.fingerprints import all_known_cars from selfdrive.car.car_helpers import interfaces from selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS class TestCarParam(unittest.TestCase): def test_creating_car_params(self): all_cars = all_known_cars() for car in all_cars: fingerprint = FINGERPRINTS[car][0] CarInterface, CarController, CarState = interfaces[car] fingerprints = { 0: fingerprint, 1: fingerprint, 2: fingerprint, } car_fw = [] for has_relay in [True, False]: car_params = CarInterface.get_params(car, fingerprints, has_relay, car_fw) car_interface = CarInterface(car_params, CarController, CarState), car_params assert car_params assert car_interface if __name__ == "__main__": unittest.main()
Add test that gets all the car params#!/usr/bin/env python3 import unittest from selfdrive.car.fingerprints import all_known_cars from selfdrive.car.car_helpers import interfaces from selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS class TestCarParam(unittest.TestCase): def test_creating_car_params(self): all_cars = all_known_cars() for car in all_cars: fingerprint = FINGERPRINTS[car][0] CarInterface, CarController, CarState = interfaces[car] fingerprints = { 0: fingerprint, 1: fingerprint, 2: fingerprint, } car_fw = [] for has_relay in [True, False]: car_params = CarInterface.get_params(car, fingerprints, has_relay, car_fw) car_interface = CarInterface(car_params, CarController, CarState), car_params assert car_params assert car_interface if __name__ == "__main__": unittest.main()
<commit_before><commit_msg>Add test that gets all the car params<commit_after>#!/usr/bin/env python3 import unittest from selfdrive.car.fingerprints import all_known_cars from selfdrive.car.car_helpers import interfaces from selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS class TestCarParam(unittest.TestCase): def test_creating_car_params(self): all_cars = all_known_cars() for car in all_cars: fingerprint = FINGERPRINTS[car][0] CarInterface, CarController, CarState = interfaces[car] fingerprints = { 0: fingerprint, 1: fingerprint, 2: fingerprint, } car_fw = [] for has_relay in [True, False]: car_params = CarInterface.get_params(car, fingerprints, has_relay, car_fw) car_interface = CarInterface(car_params, CarController, CarState), car_params assert car_params assert car_interface if __name__ == "__main__": unittest.main()
2e208d7458a91f08a75bcee39a1cb9134565b857
DeepAlignmentNetwork/ImageDemo.py
DeepAlignmentNetwork/ImageDemo.py
from FaceAlignment import FaceAlignment import numpy as np import cv2 import utils model = FaceAlignment(112, 112, 1, 1, True) model.loadNetwork("../data/DAN-Menpo-tracking.npz") cascade = cv2.CascadeClassifier("../data/haarcascade_frontalface_alt.xml") color_img = cv2.imread("../data/jk.jpg") if len(color_img.shape) > 2: gray_img = np.mean(color_img, axis=2).astype(np.uint8) else: gray_img = color_img.astype(np.uint8) # reset = True landmarks = None # if reset: rects = cascade.detectMultiScale(gray_img, scaleFactor=1.2, minNeighbors=3, minSize=(50, 50)) for rect in rects: tl_x = rect[0] tl_y = rect[1] br_x = tl_x + rect[2] br_y = tl_y + rect[3] cv2.rectangle(color_img, (tl_x, tl_y), (br_x, br_y), (255, 0, 0)) initLandmarks = utils.bestFitRect(None, model.initLandmarks, [tl_x, tl_y, br_x, br_y]) if model.confidenceLayer: landmarks, confidence = model.processImg(gray_img[np.newaxis], initLandmarks) if confidence < 0.1: reset = True else: landmarks = model.processImg(gray_img[np.newaxis], initLandmarks) landmarks = landmarks.astype(np.int32) for i in range(landmarks.shape[0]): cv2.circle(color_img, (landmarks[i, 0], landmarks[i, 1]), 2, (0, 255, 0)) cv2.imshow("image", color_img) key = cv2.waitKey(0)
Add a sample code for still images
Add a sample code for still images
Python
mit
MarekKowalski/DeepAlignmentNetwork
Add a sample code for still images
from FaceAlignment import FaceAlignment import numpy as np import cv2 import utils model = FaceAlignment(112, 112, 1, 1, True) model.loadNetwork("../data/DAN-Menpo-tracking.npz") cascade = cv2.CascadeClassifier("../data/haarcascade_frontalface_alt.xml") color_img = cv2.imread("../data/jk.jpg") if len(color_img.shape) > 2: gray_img = np.mean(color_img, axis=2).astype(np.uint8) else: gray_img = color_img.astype(np.uint8) # reset = True landmarks = None # if reset: rects = cascade.detectMultiScale(gray_img, scaleFactor=1.2, minNeighbors=3, minSize=(50, 50)) for rect in rects: tl_x = rect[0] tl_y = rect[1] br_x = tl_x + rect[2] br_y = tl_y + rect[3] cv2.rectangle(color_img, (tl_x, tl_y), (br_x, br_y), (255, 0, 0)) initLandmarks = utils.bestFitRect(None, model.initLandmarks, [tl_x, tl_y, br_x, br_y]) if model.confidenceLayer: landmarks, confidence = model.processImg(gray_img[np.newaxis], initLandmarks) if confidence < 0.1: reset = True else: landmarks = model.processImg(gray_img[np.newaxis], initLandmarks) landmarks = landmarks.astype(np.int32) for i in range(landmarks.shape[0]): cv2.circle(color_img, (landmarks[i, 0], landmarks[i, 1]), 2, (0, 255, 0)) cv2.imshow("image", color_img) key = cv2.waitKey(0)
<commit_before><commit_msg>Add a sample code for still images<commit_after>
from FaceAlignment import FaceAlignment import numpy as np import cv2 import utils model = FaceAlignment(112, 112, 1, 1, True) model.loadNetwork("../data/DAN-Menpo-tracking.npz") cascade = cv2.CascadeClassifier("../data/haarcascade_frontalface_alt.xml") color_img = cv2.imread("../data/jk.jpg") if len(color_img.shape) > 2: gray_img = np.mean(color_img, axis=2).astype(np.uint8) else: gray_img = color_img.astype(np.uint8) # reset = True landmarks = None # if reset: rects = cascade.detectMultiScale(gray_img, scaleFactor=1.2, minNeighbors=3, minSize=(50, 50)) for rect in rects: tl_x = rect[0] tl_y = rect[1] br_x = tl_x + rect[2] br_y = tl_y + rect[3] cv2.rectangle(color_img, (tl_x, tl_y), (br_x, br_y), (255, 0, 0)) initLandmarks = utils.bestFitRect(None, model.initLandmarks, [tl_x, tl_y, br_x, br_y]) if model.confidenceLayer: landmarks, confidence = model.processImg(gray_img[np.newaxis], initLandmarks) if confidence < 0.1: reset = True else: landmarks = model.processImg(gray_img[np.newaxis], initLandmarks) landmarks = landmarks.astype(np.int32) for i in range(landmarks.shape[0]): cv2.circle(color_img, (landmarks[i, 0], landmarks[i, 1]), 2, (0, 255, 0)) cv2.imshow("image", color_img) key = cv2.waitKey(0)
Add a sample code for still imagesfrom FaceAlignment import FaceAlignment import numpy as np import cv2 import utils model = FaceAlignment(112, 112, 1, 1, True) model.loadNetwork("../data/DAN-Menpo-tracking.npz") cascade = cv2.CascadeClassifier("../data/haarcascade_frontalface_alt.xml") color_img = cv2.imread("../data/jk.jpg") if len(color_img.shape) > 2: gray_img = np.mean(color_img, axis=2).astype(np.uint8) else: gray_img = color_img.astype(np.uint8) # reset = True landmarks = None # if reset: rects = cascade.detectMultiScale(gray_img, scaleFactor=1.2, minNeighbors=3, minSize=(50, 50)) for rect in rects: tl_x = rect[0] tl_y = rect[1] br_x = tl_x + rect[2] br_y = tl_y + rect[3] cv2.rectangle(color_img, (tl_x, tl_y), (br_x, br_y), (255, 0, 0)) initLandmarks = utils.bestFitRect(None, model.initLandmarks, [tl_x, tl_y, br_x, br_y]) if model.confidenceLayer: landmarks, confidence = model.processImg(gray_img[np.newaxis], initLandmarks) if confidence < 0.1: reset = True else: landmarks = model.processImg(gray_img[np.newaxis], initLandmarks) landmarks = landmarks.astype(np.int32) for i in range(landmarks.shape[0]): cv2.circle(color_img, (landmarks[i, 0], landmarks[i, 1]), 2, (0, 255, 0)) cv2.imshow("image", color_img) key = cv2.waitKey(0)
<commit_before><commit_msg>Add a sample code for still images<commit_after>from FaceAlignment import FaceAlignment import numpy as np import cv2 import utils model = FaceAlignment(112, 112, 1, 1, True) model.loadNetwork("../data/DAN-Menpo-tracking.npz") cascade = cv2.CascadeClassifier("../data/haarcascade_frontalface_alt.xml") color_img = cv2.imread("../data/jk.jpg") if len(color_img.shape) > 2: gray_img = np.mean(color_img, axis=2).astype(np.uint8) else: gray_img = color_img.astype(np.uint8) # reset = True landmarks = None # if reset: rects = cascade.detectMultiScale(gray_img, scaleFactor=1.2, minNeighbors=3, minSize=(50, 50)) for rect in rects: tl_x = rect[0] tl_y = rect[1] br_x = tl_x + rect[2] br_y = tl_y + rect[3] cv2.rectangle(color_img, (tl_x, tl_y), (br_x, br_y), (255, 0, 0)) initLandmarks = utils.bestFitRect(None, model.initLandmarks, [tl_x, tl_y, br_x, br_y]) if model.confidenceLayer: landmarks, confidence = model.processImg(gray_img[np.newaxis], initLandmarks) if confidence < 0.1: reset = True else: landmarks = model.processImg(gray_img[np.newaxis], initLandmarks) landmarks = landmarks.astype(np.int32) for i in range(landmarks.shape[0]): cv2.circle(color_img, (landmarks[i, 0], landmarks[i, 1]), 2, (0, 255, 0)) cv2.imshow("image", color_img) key = cv2.waitKey(0)
164b386d97929816eaecbebe7a47164202acad8f
website/profile_run.py
website/profile_run.py
#!/usr/bin/env python3 from werkzeug.contrib.profiler import ProfilerMiddleware from app import app app.config['PROFILE'] = True app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30]) app.run(debug = True)
Add a script for profiling the app
Add a script for profiling the app
Python
lgpl-2.1
reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations
Add a script for profiling the app
#!/usr/bin/env python3 from werkzeug.contrib.profiler import ProfilerMiddleware from app import app app.config['PROFILE'] = True app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30]) app.run(debug = True)
<commit_before><commit_msg>Add a script for profiling the app<commit_after>
#!/usr/bin/env python3 from werkzeug.contrib.profiler import ProfilerMiddleware from app import app app.config['PROFILE'] = True app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30]) app.run(debug = True)
Add a script for profiling the app#!/usr/bin/env python3 from werkzeug.contrib.profiler import ProfilerMiddleware from app import app app.config['PROFILE'] = True app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30]) app.run(debug = True)
<commit_before><commit_msg>Add a script for profiling the app<commit_after>#!/usr/bin/env python3 from werkzeug.contrib.profiler import ProfilerMiddleware from app import app app.config['PROFILE'] = True app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30]) app.run(debug = True)
d1a9fc8e3e64e3eb4187f7db7c0176f05d68ccef
smaug/tests/fullstack/test_protectables.py
smaug/tests/fullstack/test_protectables.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from smaug.tests.fullstack import smaug_base import time class ProtectablesTest(smaug_base.SmaugBaseTest): """Test Protectables operation""" def create_volume(self, size): volume = self.cinder_client.volumes.create(size) time.sleep(5) return volume def delete_volume(self, volume_id): self.cinder_client.volumes.delete(volume_id) time.sleep(15) def test_protectables_list(self): res = self.smaug_client.protectables.list() self.assertTrue(len(res)) def test_protectables_get_with_project(self): protectable_type = 'OS::Keystone::Project' res = self.smaug_client.protectables.get(protectable_type) dependent_types = ['OS::Cinder::Volume', 'OS::Nova::Server'] self.assertEqual(dependent_types, res.dependent_types) def test_protectables_list_instances(self): res_list = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') before_num = len(res_list) volume_1 = self.create_volume(1) volume_2 = self.create_volume(1) res = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') after_num = len(res) self.assertEqual(2, after_num - before_num) self.delete_volume(volume_1.id) self.delete_volume(volume_2.id)
Add fullstack tests of the resource protectables RESTAPI
Add fullstack tests of the resource protectables RESTAPI Change-Id: Ibe660a07311544f82246e82538b565847488040b Closes-Bug: #1578888
Python
apache-2.0
openstack/smaug,openstack/smaug
Add fullstack tests of the resource protectables RESTAPI Change-Id: Ibe660a07311544f82246e82538b565847488040b Closes-Bug: #1578888
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from smaug.tests.fullstack import smaug_base import time class ProtectablesTest(smaug_base.SmaugBaseTest): """Test Protectables operation""" def create_volume(self, size): volume = self.cinder_client.volumes.create(size) time.sleep(5) return volume def delete_volume(self, volume_id): self.cinder_client.volumes.delete(volume_id) time.sleep(15) def test_protectables_list(self): res = self.smaug_client.protectables.list() self.assertTrue(len(res)) def test_protectables_get_with_project(self): protectable_type = 'OS::Keystone::Project' res = self.smaug_client.protectables.get(protectable_type) dependent_types = ['OS::Cinder::Volume', 'OS::Nova::Server'] self.assertEqual(dependent_types, res.dependent_types) def test_protectables_list_instances(self): res_list = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') before_num = len(res_list) volume_1 = self.create_volume(1) volume_2 = self.create_volume(1) res = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') after_num = len(res) self.assertEqual(2, after_num - before_num) self.delete_volume(volume_1.id) self.delete_volume(volume_2.id)
<commit_before><commit_msg>Add fullstack tests of the resource protectables RESTAPI Change-Id: Ibe660a07311544f82246e82538b565847488040b Closes-Bug: #1578888<commit_after>
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from smaug.tests.fullstack import smaug_base import time class ProtectablesTest(smaug_base.SmaugBaseTest): """Test Protectables operation""" def create_volume(self, size): volume = self.cinder_client.volumes.create(size) time.sleep(5) return volume def delete_volume(self, volume_id): self.cinder_client.volumes.delete(volume_id) time.sleep(15) def test_protectables_list(self): res = self.smaug_client.protectables.list() self.assertTrue(len(res)) def test_protectables_get_with_project(self): protectable_type = 'OS::Keystone::Project' res = self.smaug_client.protectables.get(protectable_type) dependent_types = ['OS::Cinder::Volume', 'OS::Nova::Server'] self.assertEqual(dependent_types, res.dependent_types) def test_protectables_list_instances(self): res_list = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') before_num = len(res_list) volume_1 = self.create_volume(1) volume_2 = self.create_volume(1) res = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') after_num = len(res) self.assertEqual(2, after_num - before_num) self.delete_volume(volume_1.id) self.delete_volume(volume_2.id)
Add fullstack tests of the resource protectables RESTAPI Change-Id: Ibe660a07311544f82246e82538b565847488040b Closes-Bug: #1578888# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from smaug.tests.fullstack import smaug_base import time class ProtectablesTest(smaug_base.SmaugBaseTest): """Test Protectables operation""" def create_volume(self, size): volume = self.cinder_client.volumes.create(size) time.sleep(5) return volume def delete_volume(self, volume_id): self.cinder_client.volumes.delete(volume_id) time.sleep(15) def test_protectables_list(self): res = self.smaug_client.protectables.list() self.assertTrue(len(res)) def test_protectables_get_with_project(self): protectable_type = 'OS::Keystone::Project' res = self.smaug_client.protectables.get(protectable_type) dependent_types = ['OS::Cinder::Volume', 'OS::Nova::Server'] self.assertEqual(dependent_types, res.dependent_types) def test_protectables_list_instances(self): res_list = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') before_num = len(res_list) volume_1 = self.create_volume(1) volume_2 = self.create_volume(1) res = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') after_num = len(res) self.assertEqual(2, after_num - before_num) self.delete_volume(volume_1.id) self.delete_volume(volume_2.id)
<commit_before><commit_msg>Add fullstack tests of the resource protectables RESTAPI Change-Id: Ibe660a07311544f82246e82538b565847488040b Closes-Bug: #1578888<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from smaug.tests.fullstack import smaug_base import time class ProtectablesTest(smaug_base.SmaugBaseTest): """Test Protectables operation""" def create_volume(self, size): volume = self.cinder_client.volumes.create(size) time.sleep(5) return volume def delete_volume(self, volume_id): self.cinder_client.volumes.delete(volume_id) time.sleep(15) def test_protectables_list(self): res = self.smaug_client.protectables.list() self.assertTrue(len(res)) def test_protectables_get_with_project(self): protectable_type = 'OS::Keystone::Project' res = self.smaug_client.protectables.get(protectable_type) dependent_types = ['OS::Cinder::Volume', 'OS::Nova::Server'] self.assertEqual(dependent_types, res.dependent_types) def test_protectables_list_instances(self): res_list = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') before_num = len(res_list) volume_1 = self.create_volume(1) volume_2 = self.create_volume(1) res = self.smaug_client.protectables.list_instances( 'OS::Cinder::Volume') after_num = len(res) self.assertEqual(2, after_num - before_num) self.delete_volume(volume_1.id) self.delete_volume(volume_2.id)
61d9f394f186b754580c11c40932754072194f72
playlist_updates.py
playlist_updates.py
#! /usr/bin/env python import os import sys import httplib2 from apiclient.discovery import build # pylint: disable=import-error from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser from oauth2client.tools import run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains # the OAuth 2.0 information for this application, including its client_id and # client_secret. You can acquire an OAuth 2.0 client ID and client secret from # the {{ Google Cloud Console }} at # {{ https://cloud.google.com/console }}. # Please ensure that you have enabled the YouTube Data API for your project. # For more information about using OAuth2 to access the YouTube Data API, see: # https://developers.google.com/youtube/v3/guides/authentication # For more information about the client_secrets.json file format, see: # https://developers.google.com/api-client-library/python/guide/aaa_client_secrets CLIENT_SECRETS_FILE = 'client_secrets.json' # This variable defines a message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = """ WARNING: Please configure OAuth 2.0 To make this sample run you will need to populate the client_secrets.json file found at: %s with information from the {{ Cloud Console }} {{ https://cloud.google.com/console }} For more information about the client_secrets.json file format, please visit: https://developers.google.com/api-client-library/python/guide/aaa_client_secrets """ % os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE)) # This OAuth 2.0 access scope allows for full read/write access to the # authenticated user's account. YOUTUBE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/youtube' YOUTUBE_API_SERVICE_NAME = 'youtube' YOUTUBE_API_VERSION = 'v3' def main(): flow = flow_from_clientsecrets( CLIENT_SECRETS_FILE, message=MISSING_CLIENT_SECRETS_MESSAGE, scope=YOUTUBE_READ_WRITE_SCOPE ) storage = Storage('%s-oauth2.json' % sys.argv[0]) credentials = storage.get() if credentials is None or credentials.invalid: flags = argparser.parse_args() credentials = run_flow(flow, storage, flags) youtube = build( YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=credentials.authorize(httplib2.Http()) ) playlists = youtube.playlists().list(part='snippet', mine=True).execute() print(len(playlists)) if __name__ == '__main__': exit(main())
Add boilerplate to access youtube api
Add boilerplate to access youtube api
Python
unlicense
ipwnponies/youtube-sort-playlist,ipwnponies/youtube-sort-playlist
Add boilerplate to access youtube api
#! /usr/bin/env python import os import sys import httplib2 from apiclient.discovery import build # pylint: disable=import-error from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser from oauth2client.tools import run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains # the OAuth 2.0 information for this application, including its client_id and # client_secret. You can acquire an OAuth 2.0 client ID and client secret from # the {{ Google Cloud Console }} at # {{ https://cloud.google.com/console }}. # Please ensure that you have enabled the YouTube Data API for your project. # For more information about using OAuth2 to access the YouTube Data API, see: # https://developers.google.com/youtube/v3/guides/authentication # For more information about the client_secrets.json file format, see: # https://developers.google.com/api-client-library/python/guide/aaa_client_secrets CLIENT_SECRETS_FILE = 'client_secrets.json' # This variable defines a message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = """ WARNING: Please configure OAuth 2.0 To make this sample run you will need to populate the client_secrets.json file found at: %s with information from the {{ Cloud Console }} {{ https://cloud.google.com/console }} For more information about the client_secrets.json file format, please visit: https://developers.google.com/api-client-library/python/guide/aaa_client_secrets """ % os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE)) # This OAuth 2.0 access scope allows for full read/write access to the # authenticated user's account. YOUTUBE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/youtube' YOUTUBE_API_SERVICE_NAME = 'youtube' YOUTUBE_API_VERSION = 'v3' def main(): flow = flow_from_clientsecrets( CLIENT_SECRETS_FILE, message=MISSING_CLIENT_SECRETS_MESSAGE, scope=YOUTUBE_READ_WRITE_SCOPE ) storage = Storage('%s-oauth2.json' % sys.argv[0]) credentials = storage.get() if credentials is None or credentials.invalid: flags = argparser.parse_args() credentials = run_flow(flow, storage, flags) youtube = build( YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=credentials.authorize(httplib2.Http()) ) playlists = youtube.playlists().list(part='snippet', mine=True).execute() print(len(playlists)) if __name__ == '__main__': exit(main())
<commit_before><commit_msg>Add boilerplate to access youtube api<commit_after>
#! /usr/bin/env python import os import sys import httplib2 from apiclient.discovery import build # pylint: disable=import-error from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser from oauth2client.tools import run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains # the OAuth 2.0 information for this application, including its client_id and # client_secret. You can acquire an OAuth 2.0 client ID and client secret from # the {{ Google Cloud Console }} at # {{ https://cloud.google.com/console }}. # Please ensure that you have enabled the YouTube Data API for your project. # For more information about using OAuth2 to access the YouTube Data API, see: # https://developers.google.com/youtube/v3/guides/authentication # For more information about the client_secrets.json file format, see: # https://developers.google.com/api-client-library/python/guide/aaa_client_secrets CLIENT_SECRETS_FILE = 'client_secrets.json' # This variable defines a message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = """ WARNING: Please configure OAuth 2.0 To make this sample run you will need to populate the client_secrets.json file found at: %s with information from the {{ Cloud Console }} {{ https://cloud.google.com/console }} For more information about the client_secrets.json file format, please visit: https://developers.google.com/api-client-library/python/guide/aaa_client_secrets """ % os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE)) # This OAuth 2.0 access scope allows for full read/write access to the # authenticated user's account. YOUTUBE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/youtube' YOUTUBE_API_SERVICE_NAME = 'youtube' YOUTUBE_API_VERSION = 'v3' def main(): flow = flow_from_clientsecrets( CLIENT_SECRETS_FILE, message=MISSING_CLIENT_SECRETS_MESSAGE, scope=YOUTUBE_READ_WRITE_SCOPE ) storage = Storage('%s-oauth2.json' % sys.argv[0]) credentials = storage.get() if credentials is None or credentials.invalid: flags = argparser.parse_args() credentials = run_flow(flow, storage, flags) youtube = build( YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=credentials.authorize(httplib2.Http()) ) playlists = youtube.playlists().list(part='snippet', mine=True).execute() print(len(playlists)) if __name__ == '__main__': exit(main())
Add boilerplate to access youtube api#! /usr/bin/env python import os import sys import httplib2 from apiclient.discovery import build # pylint: disable=import-error from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser from oauth2client.tools import run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains # the OAuth 2.0 information for this application, including its client_id and # client_secret. You can acquire an OAuth 2.0 client ID and client secret from # the {{ Google Cloud Console }} at # {{ https://cloud.google.com/console }}. # Please ensure that you have enabled the YouTube Data API for your project. # For more information about using OAuth2 to access the YouTube Data API, see: # https://developers.google.com/youtube/v3/guides/authentication # For more information about the client_secrets.json file format, see: # https://developers.google.com/api-client-library/python/guide/aaa_client_secrets CLIENT_SECRETS_FILE = 'client_secrets.json' # This variable defines a message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = """ WARNING: Please configure OAuth 2.0 To make this sample run you will need to populate the client_secrets.json file found at: %s with information from the {{ Cloud Console }} {{ https://cloud.google.com/console }} For more information about the client_secrets.json file format, please visit: https://developers.google.com/api-client-library/python/guide/aaa_client_secrets """ % os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE)) # This OAuth 2.0 access scope allows for full read/write access to the # authenticated user's account. YOUTUBE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/youtube' YOUTUBE_API_SERVICE_NAME = 'youtube' YOUTUBE_API_VERSION = 'v3' def main(): flow = flow_from_clientsecrets( CLIENT_SECRETS_FILE, message=MISSING_CLIENT_SECRETS_MESSAGE, scope=YOUTUBE_READ_WRITE_SCOPE ) storage = Storage('%s-oauth2.json' % sys.argv[0]) credentials = storage.get() if credentials is None or credentials.invalid: flags = argparser.parse_args() credentials = run_flow(flow, storage, flags) youtube = build( YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=credentials.authorize(httplib2.Http()) ) playlists = youtube.playlists().list(part='snippet', mine=True).execute() print(len(playlists)) if __name__ == '__main__': exit(main())
<commit_before><commit_msg>Add boilerplate to access youtube api<commit_after>#! /usr/bin/env python import os import sys import httplib2 from apiclient.discovery import build # pylint: disable=import-error from oauth2client.client import flow_from_clientsecrets from oauth2client.file import Storage from oauth2client.tools import argparser from oauth2client.tools import run_flow # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains # the OAuth 2.0 information for this application, including its client_id and # client_secret. You can acquire an OAuth 2.0 client ID and client secret from # the {{ Google Cloud Console }} at # {{ https://cloud.google.com/console }}. # Please ensure that you have enabled the YouTube Data API for your project. # For more information about using OAuth2 to access the YouTube Data API, see: # https://developers.google.com/youtube/v3/guides/authentication # For more information about the client_secrets.json file format, see: # https://developers.google.com/api-client-library/python/guide/aaa_client_secrets CLIENT_SECRETS_FILE = 'client_secrets.json' # This variable defines a message to display if the CLIENT_SECRETS_FILE is # missing. MISSING_CLIENT_SECRETS_MESSAGE = """ WARNING: Please configure OAuth 2.0 To make this sample run you will need to populate the client_secrets.json file found at: %s with information from the {{ Cloud Console }} {{ https://cloud.google.com/console }} For more information about the client_secrets.json file format, please visit: https://developers.google.com/api-client-library/python/guide/aaa_client_secrets """ % os.path.abspath(os.path.join(os.path.dirname(__file__), CLIENT_SECRETS_FILE)) # This OAuth 2.0 access scope allows for full read/write access to the # authenticated user's account. YOUTUBE_READ_WRITE_SCOPE = 'https://www.googleapis.com/auth/youtube' YOUTUBE_API_SERVICE_NAME = 'youtube' YOUTUBE_API_VERSION = 'v3' def main(): flow = flow_from_clientsecrets( CLIENT_SECRETS_FILE, message=MISSING_CLIENT_SECRETS_MESSAGE, scope=YOUTUBE_READ_WRITE_SCOPE ) storage = Storage('%s-oauth2.json' % sys.argv[0]) credentials = storage.get() if credentials is None or credentials.invalid: flags = argparser.parse_args() credentials = run_flow(flow, storage, flags) youtube = build( YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION, http=credentials.authorize(httplib2.Http()) ) playlists = youtube.playlists().list(part='snippet', mine=True).execute() print(len(playlists)) if __name__ == '__main__': exit(main())
67b1e01382b718bc8facbdb48e0c88f716e5a247
tests/integration/test_smoke.py
tests/integration/test_smoke.py
"""Smoke tests to verify basic communication to all AWS services.""" import botocore.session from nose.tools import assert_equals REGION = 'us-east-1' SMOKE_TESTS = { 'autoscaling': {'DescribeAccountLimits': {}, 'DescribeAdjustmentTypes': {}}, 'cloudformation': {'DescribeStacks': {}, 'ListStacks': {}}, 'cloudfront': {'ListDistributions': {}, 'ListStreamingDistributions': {}}, 'cloudsearch': {'DescribeDomains': {}, 'ListDomainNames': {}}, 'cloudtrail': {'DescribeTrails': {}}, 'cloudwatch': {'ListMetrics': {}}, 'cognito-identity': {'ListIdentityPools': {'maxResults': 1}}, 'cognito-sync': {'ListIdentityPoolUsage': {}}, 'datapipeline': {'ListPipelines': {}}, 'directconnect': {'DescribeConnections': {}}, 'dynamodb': {'ListTables': {}}, 'ec2': {'DescribeRegions': {}, 'DescribeInstances': {}}, 'elasticache': {'DescribeCacheClusters': {}}, 'elasticbeanstalk': {'DescribeApplications': {}}, 'elastictranscoder': {'ListPipelines': {}}, 'elb': {'DescribeLoadBalancers': {}}, 'emr': {'ListClusters': {}}, 'iam': {'ListUsers': {}}, 'importexport': {'ListJobs': {}}, 'kinesis': {'ListStreams': {}}, 'logs': {'DescribeLogGroups': {}}, 'opsworks': {'DescribeStacks': {}}, 'rds': {'DescribeDBInstances': {}}, 'redshift': {'DescribeClusters': {}}, 'route53': {'ListHostedZones': {}}, 's3': {'ListBuckets': {}}, 'sdb': {'ListDomains': {}}, 'ses': {'ListIdentities': {}}, 'sns': {'ListTopics': {}}, 'sqs': {'ListQueues': {}}, 'storagegateway': {'ListGateways': {}}, 'sts': {'GetSessionToken': {}}, # Subscription needed for support API calls. 'support': {}, 'swf': {'ListDomains': {'RegistrationStatus': 'REGISTERED'}}, } def test_can_make_request(): session = botocore.session.get_session() for service_name in SMOKE_TESTS: service = session.get_service(service_name) endpoint = service.get_endpoint(REGION) for operation_name in SMOKE_TESTS[service_name]: kwargs = SMOKE_TESTS[service_name][operation_name] yield _make_call, service, endpoint, operation_name, kwargs def _make_call(service, endpoint, operation_name, kwargs): operation = service.get_operation(operation_name) response, parsed = operation.call(endpoint, **kwargs) assert_equals(response.status_code, 200)
Add integ smoke tests to verify basic service calls
Add integ smoke tests to verify basic service calls Verify we can make a successful servie call to each supported service (except support). The point of these tests to verify nothing's majorly broken. This will make a Describe/List call to a service and verify we get a 200 response back.
Python
apache-2.0
pplu/botocore,boto/botocore
Add integ smoke tests to verify basic service calls Verify we can make a successful servie call to each supported service (except support). The point of these tests to verify nothing's majorly broken. This will make a Describe/List call to a service and verify we get a 200 response back.
"""Smoke tests to verify basic communication to all AWS services.""" import botocore.session from nose.tools import assert_equals REGION = 'us-east-1' SMOKE_TESTS = { 'autoscaling': {'DescribeAccountLimits': {}, 'DescribeAdjustmentTypes': {}}, 'cloudformation': {'DescribeStacks': {}, 'ListStacks': {}}, 'cloudfront': {'ListDistributions': {}, 'ListStreamingDistributions': {}}, 'cloudsearch': {'DescribeDomains': {}, 'ListDomainNames': {}}, 'cloudtrail': {'DescribeTrails': {}}, 'cloudwatch': {'ListMetrics': {}}, 'cognito-identity': {'ListIdentityPools': {'maxResults': 1}}, 'cognito-sync': {'ListIdentityPoolUsage': {}}, 'datapipeline': {'ListPipelines': {}}, 'directconnect': {'DescribeConnections': {}}, 'dynamodb': {'ListTables': {}}, 'ec2': {'DescribeRegions': {}, 'DescribeInstances': {}}, 'elasticache': {'DescribeCacheClusters': {}}, 'elasticbeanstalk': {'DescribeApplications': {}}, 'elastictranscoder': {'ListPipelines': {}}, 'elb': {'DescribeLoadBalancers': {}}, 'emr': {'ListClusters': {}}, 'iam': {'ListUsers': {}}, 'importexport': {'ListJobs': {}}, 'kinesis': {'ListStreams': {}}, 'logs': {'DescribeLogGroups': {}}, 'opsworks': {'DescribeStacks': {}}, 'rds': {'DescribeDBInstances': {}}, 'redshift': {'DescribeClusters': {}}, 'route53': {'ListHostedZones': {}}, 's3': {'ListBuckets': {}}, 'sdb': {'ListDomains': {}}, 'ses': {'ListIdentities': {}}, 'sns': {'ListTopics': {}}, 'sqs': {'ListQueues': {}}, 'storagegateway': {'ListGateways': {}}, 'sts': {'GetSessionToken': {}}, # Subscription needed for support API calls. 'support': {}, 'swf': {'ListDomains': {'RegistrationStatus': 'REGISTERED'}}, } def test_can_make_request(): session = botocore.session.get_session() for service_name in SMOKE_TESTS: service = session.get_service(service_name) endpoint = service.get_endpoint(REGION) for operation_name in SMOKE_TESTS[service_name]: kwargs = SMOKE_TESTS[service_name][operation_name] yield _make_call, service, endpoint, operation_name, kwargs def _make_call(service, endpoint, operation_name, kwargs): operation = service.get_operation(operation_name) response, parsed = operation.call(endpoint, **kwargs) assert_equals(response.status_code, 200)
<commit_before><commit_msg>Add integ smoke tests to verify basic service calls Verify we can make a successful servie call to each supported service (except support). The point of these tests to verify nothing's majorly broken. This will make a Describe/List call to a service and verify we get a 200 response back.<commit_after>
"""Smoke tests to verify basic communication to all AWS services.""" import botocore.session from nose.tools import assert_equals REGION = 'us-east-1' SMOKE_TESTS = { 'autoscaling': {'DescribeAccountLimits': {}, 'DescribeAdjustmentTypes': {}}, 'cloudformation': {'DescribeStacks': {}, 'ListStacks': {}}, 'cloudfront': {'ListDistributions': {}, 'ListStreamingDistributions': {}}, 'cloudsearch': {'DescribeDomains': {}, 'ListDomainNames': {}}, 'cloudtrail': {'DescribeTrails': {}}, 'cloudwatch': {'ListMetrics': {}}, 'cognito-identity': {'ListIdentityPools': {'maxResults': 1}}, 'cognito-sync': {'ListIdentityPoolUsage': {}}, 'datapipeline': {'ListPipelines': {}}, 'directconnect': {'DescribeConnections': {}}, 'dynamodb': {'ListTables': {}}, 'ec2': {'DescribeRegions': {}, 'DescribeInstances': {}}, 'elasticache': {'DescribeCacheClusters': {}}, 'elasticbeanstalk': {'DescribeApplications': {}}, 'elastictranscoder': {'ListPipelines': {}}, 'elb': {'DescribeLoadBalancers': {}}, 'emr': {'ListClusters': {}}, 'iam': {'ListUsers': {}}, 'importexport': {'ListJobs': {}}, 'kinesis': {'ListStreams': {}}, 'logs': {'DescribeLogGroups': {}}, 'opsworks': {'DescribeStacks': {}}, 'rds': {'DescribeDBInstances': {}}, 'redshift': {'DescribeClusters': {}}, 'route53': {'ListHostedZones': {}}, 's3': {'ListBuckets': {}}, 'sdb': {'ListDomains': {}}, 'ses': {'ListIdentities': {}}, 'sns': {'ListTopics': {}}, 'sqs': {'ListQueues': {}}, 'storagegateway': {'ListGateways': {}}, 'sts': {'GetSessionToken': {}}, # Subscription needed for support API calls. 'support': {}, 'swf': {'ListDomains': {'RegistrationStatus': 'REGISTERED'}}, } def test_can_make_request(): session = botocore.session.get_session() for service_name in SMOKE_TESTS: service = session.get_service(service_name) endpoint = service.get_endpoint(REGION) for operation_name in SMOKE_TESTS[service_name]: kwargs = SMOKE_TESTS[service_name][operation_name] yield _make_call, service, endpoint, operation_name, kwargs def _make_call(service, endpoint, operation_name, kwargs): operation = service.get_operation(operation_name) response, parsed = operation.call(endpoint, **kwargs) assert_equals(response.status_code, 200)
Add integ smoke tests to verify basic service calls Verify we can make a successful servie call to each supported service (except support). The point of these tests to verify nothing's majorly broken. This will make a Describe/List call to a service and verify we get a 200 response back."""Smoke tests to verify basic communication to all AWS services.""" import botocore.session from nose.tools import assert_equals REGION = 'us-east-1' SMOKE_TESTS = { 'autoscaling': {'DescribeAccountLimits': {}, 'DescribeAdjustmentTypes': {}}, 'cloudformation': {'DescribeStacks': {}, 'ListStacks': {}}, 'cloudfront': {'ListDistributions': {}, 'ListStreamingDistributions': {}}, 'cloudsearch': {'DescribeDomains': {}, 'ListDomainNames': {}}, 'cloudtrail': {'DescribeTrails': {}}, 'cloudwatch': {'ListMetrics': {}}, 'cognito-identity': {'ListIdentityPools': {'maxResults': 1}}, 'cognito-sync': {'ListIdentityPoolUsage': {}}, 'datapipeline': {'ListPipelines': {}}, 'directconnect': {'DescribeConnections': {}}, 'dynamodb': {'ListTables': {}}, 'ec2': {'DescribeRegions': {}, 'DescribeInstances': {}}, 'elasticache': {'DescribeCacheClusters': {}}, 'elasticbeanstalk': {'DescribeApplications': {}}, 'elastictranscoder': {'ListPipelines': {}}, 'elb': {'DescribeLoadBalancers': {}}, 'emr': {'ListClusters': {}}, 'iam': {'ListUsers': {}}, 'importexport': {'ListJobs': {}}, 'kinesis': {'ListStreams': {}}, 'logs': {'DescribeLogGroups': {}}, 'opsworks': {'DescribeStacks': {}}, 'rds': {'DescribeDBInstances': {}}, 'redshift': {'DescribeClusters': {}}, 'route53': {'ListHostedZones': {}}, 's3': {'ListBuckets': {}}, 'sdb': {'ListDomains': {}}, 'ses': {'ListIdentities': {}}, 'sns': {'ListTopics': {}}, 'sqs': {'ListQueues': {}}, 'storagegateway': {'ListGateways': {}}, 'sts': {'GetSessionToken': {}}, # Subscription needed for support API calls. 'support': {}, 'swf': {'ListDomains': {'RegistrationStatus': 'REGISTERED'}}, } def test_can_make_request(): session = botocore.session.get_session() for service_name in SMOKE_TESTS: service = session.get_service(service_name) endpoint = service.get_endpoint(REGION) for operation_name in SMOKE_TESTS[service_name]: kwargs = SMOKE_TESTS[service_name][operation_name] yield _make_call, service, endpoint, operation_name, kwargs def _make_call(service, endpoint, operation_name, kwargs): operation = service.get_operation(operation_name) response, parsed = operation.call(endpoint, **kwargs) assert_equals(response.status_code, 200)
<commit_before><commit_msg>Add integ smoke tests to verify basic service calls Verify we can make a successful servie call to each supported service (except support). The point of these tests to verify nothing's majorly broken. This will make a Describe/List call to a service and verify we get a 200 response back.<commit_after>"""Smoke tests to verify basic communication to all AWS services.""" import botocore.session from nose.tools import assert_equals REGION = 'us-east-1' SMOKE_TESTS = { 'autoscaling': {'DescribeAccountLimits': {}, 'DescribeAdjustmentTypes': {}}, 'cloudformation': {'DescribeStacks': {}, 'ListStacks': {}}, 'cloudfront': {'ListDistributions': {}, 'ListStreamingDistributions': {}}, 'cloudsearch': {'DescribeDomains': {}, 'ListDomainNames': {}}, 'cloudtrail': {'DescribeTrails': {}}, 'cloudwatch': {'ListMetrics': {}}, 'cognito-identity': {'ListIdentityPools': {'maxResults': 1}}, 'cognito-sync': {'ListIdentityPoolUsage': {}}, 'datapipeline': {'ListPipelines': {}}, 'directconnect': {'DescribeConnections': {}}, 'dynamodb': {'ListTables': {}}, 'ec2': {'DescribeRegions': {}, 'DescribeInstances': {}}, 'elasticache': {'DescribeCacheClusters': {}}, 'elasticbeanstalk': {'DescribeApplications': {}}, 'elastictranscoder': {'ListPipelines': {}}, 'elb': {'DescribeLoadBalancers': {}}, 'emr': {'ListClusters': {}}, 'iam': {'ListUsers': {}}, 'importexport': {'ListJobs': {}}, 'kinesis': {'ListStreams': {}}, 'logs': {'DescribeLogGroups': {}}, 'opsworks': {'DescribeStacks': {}}, 'rds': {'DescribeDBInstances': {}}, 'redshift': {'DescribeClusters': {}}, 'route53': {'ListHostedZones': {}}, 's3': {'ListBuckets': {}}, 'sdb': {'ListDomains': {}}, 'ses': {'ListIdentities': {}}, 'sns': {'ListTopics': {}}, 'sqs': {'ListQueues': {}}, 'storagegateway': {'ListGateways': {}}, 'sts': {'GetSessionToken': {}}, # Subscription needed for support API calls. 'support': {}, 'swf': {'ListDomains': {'RegistrationStatus': 'REGISTERED'}}, } def test_can_make_request(): session = botocore.session.get_session() for service_name in SMOKE_TESTS: service = session.get_service(service_name) endpoint = service.get_endpoint(REGION) for operation_name in SMOKE_TESTS[service_name]: kwargs = SMOKE_TESTS[service_name][operation_name] yield _make_call, service, endpoint, operation_name, kwargs def _make_call(service, endpoint, operation_name, kwargs): operation = service.get_operation(operation_name) response, parsed = operation.call(endpoint, **kwargs) assert_equals(response.status_code, 200)
d5b366c317e8fefaede6ae34cf25a67b5db1df28
core/migrations/0009_auto_20200825_0800.py
core/migrations/0009_auto_20200825_0800.py
# Generated by Django 2.2.14 on 2020-08-25 08:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0008_board_private'), ] operations = [ migrations.AlterField( model_name='pin', name='referer', field=models.CharField(blank=True, max_length=512, null=True), ), migrations.AlterField( model_name='pin', name='url', field=models.CharField(blank=True, max_length=512, null=True), ), ]
Add migrations for url and referer length
Feature: Add migrations for url and referer length
Python
bsd-2-clause
pinry/pinry,pinry/pinry,pinry/pinry,pinry/pinry
Feature: Add migrations for url and referer length
# Generated by Django 2.2.14 on 2020-08-25 08:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0008_board_private'), ] operations = [ migrations.AlterField( model_name='pin', name='referer', field=models.CharField(blank=True, max_length=512, null=True), ), migrations.AlterField( model_name='pin', name='url', field=models.CharField(blank=True, max_length=512, null=True), ), ]
<commit_before><commit_msg>Feature: Add migrations for url and referer length<commit_after>
# Generated by Django 2.2.14 on 2020-08-25 08:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0008_board_private'), ] operations = [ migrations.AlterField( model_name='pin', name='referer', field=models.CharField(blank=True, max_length=512, null=True), ), migrations.AlterField( model_name='pin', name='url', field=models.CharField(blank=True, max_length=512, null=True), ), ]
Feature: Add migrations for url and referer length# Generated by Django 2.2.14 on 2020-08-25 08:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0008_board_private'), ] operations = [ migrations.AlterField( model_name='pin', name='referer', field=models.CharField(blank=True, max_length=512, null=True), ), migrations.AlterField( model_name='pin', name='url', field=models.CharField(blank=True, max_length=512, null=True), ), ]
<commit_before><commit_msg>Feature: Add migrations for url and referer length<commit_after># Generated by Django 2.2.14 on 2020-08-25 08:00 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('core', '0008_board_private'), ] operations = [ migrations.AlterField( model_name='pin', name='referer', field=models.CharField(blank=True, max_length=512, null=True), ), migrations.AlterField( model_name='pin', name='url', field=models.CharField(blank=True, max_length=512, null=True), ), ]
d2bedbc07192a703f00a04e1fb262048da890f3d
example/roles/bsd_role.py
example/roles/bsd_role.py
from pyinfra.modules import server, pkg # OpenBSD packages? pkg.packages( ['py-pip', 'git'], sudo=True, op='core_packages' # this and above binds these three operations to run as one ) # add_pkg does not automatically do this server.shell( 'ln -sf /usr/local/bin/pip2.7 /usr/local/bin/pip', sudo=True )
Add role to example deploy.
Add role to example deploy.
Python
mit
Fizzadar/pyinfra,Fizzadar/pyinfra
Add role to example deploy.
from pyinfra.modules import server, pkg # OpenBSD packages? pkg.packages( ['py-pip', 'git'], sudo=True, op='core_packages' # this and above binds these three operations to run as one ) # add_pkg does not automatically do this server.shell( 'ln -sf /usr/local/bin/pip2.7 /usr/local/bin/pip', sudo=True )
<commit_before><commit_msg>Add role to example deploy.<commit_after>
from pyinfra.modules import server, pkg # OpenBSD packages? pkg.packages( ['py-pip', 'git'], sudo=True, op='core_packages' # this and above binds these three operations to run as one ) # add_pkg does not automatically do this server.shell( 'ln -sf /usr/local/bin/pip2.7 /usr/local/bin/pip', sudo=True )
Add role to example deploy.from pyinfra.modules import server, pkg # OpenBSD packages? pkg.packages( ['py-pip', 'git'], sudo=True, op='core_packages' # this and above binds these three operations to run as one ) # add_pkg does not automatically do this server.shell( 'ln -sf /usr/local/bin/pip2.7 /usr/local/bin/pip', sudo=True )
<commit_before><commit_msg>Add role to example deploy.<commit_after>from pyinfra.modules import server, pkg # OpenBSD packages? pkg.packages( ['py-pip', 'git'], sudo=True, op='core_packages' # this and above binds these three operations to run as one ) # add_pkg does not automatically do this server.shell( 'ln -sf /usr/local/bin/pip2.7 /usr/local/bin/pip', sudo=True )
aba72a7172dc0760441bd9a8bb9fef4a2c38c70b
Lib/test/test_pep263.py
Lib/test/test_pep263.py
#! -*- coding: koi8-r -*- assert u"Питон".encode("utf-8") == '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
Test whether a Cyrillic text correctly appears in a Unicode literal.
Test whether a Cyrillic text correctly appears in a Unicode literal.
Python
mit
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
Test whether a Cyrillic text correctly appears in a Unicode literal.
#! -*- coding: koi8-r -*- assert u"Питон".encode("utf-8") == '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
<commit_before><commit_msg>Test whether a Cyrillic text correctly appears in a Unicode literal.<commit_after>
#! -*- coding: koi8-r -*- assert u"Питон".encode("utf-8") == '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
Test whether a Cyrillic text correctly appears in a Unicode literal.#! -*- coding: koi8-r -*- assert u"Питон".encode("utf-8") == '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
<commit_before><commit_msg>Test whether a Cyrillic text correctly appears in a Unicode literal.<commit_after>#! -*- coding: koi8-r -*- assert u"Питон".encode("utf-8") == '\xd0\x9f\xd0\xb8\xd1\x82\xd0\xbe\xd0\xbd'
44c7cb73a309a02e907ab6d4ded5957e62ec39e3
lib/analyzer/pipe_server.py
lib/analyzer/pipe_server.py
'''Wraps a process to make it act as a pipe server. Takes care of supressing console windows under Windows and other housekeeping. ''' from subprocess import Popen from subprocess import PIPE import threading from Dart import PluginLogger from Dart.lib.plat import supress_window from Dart.lib.path import pushd _logger = PluginLogger(__name__) class PipeServer(object): '''Starts as process and communicates with it via pipes. ''' status_lock = threading.RLock() def __init__(self, args): self.proc = None self.args = args @property def is_running(self): '''Returns `True` if the server seems to be responsive. ''' try: with PipeServer.status_lock: return not self.proc.stdin.closed except AttributeError: _logger.debug('PipeServer not started yet') return def start(self, working_dir='.'): with PipeServer.status_lock: if self.is_running: _logger.debug( 'tried to start an already running PipeServer; aborting') return with pushd(working_dir): _logger.debug('starting PipeServer with args: %s', self.args) self.proc = Popen(self.args, stdout=PIPE, stdin=PIPE, stderr=PIPE, startupinfo=supress_window()) def stop(self): _logger.debug('stopping PipeServer...') self.proc.stdin.close() self.proc.stdout.close() self.proc.kill()
Add PipeServer class to manage a pipe server process
WIP: Add PipeServer class to manage a pipe server process
Python
bsd-3-clause
guillermooo-forks/dart-sublime-bundle,guillermooo/dart-sublime-bundle,guillermooo-forks/dart-sublime-bundle,guillermooo/dart-sublime-bundle,guillermooo/dart-sublime-bundle,guillermooo/dart-sublime-bundle,guillermooo-forks/dart-sublime-bundle,guillermooo-forks/dart-sublime-bundle
WIP: Add PipeServer class to manage a pipe server process
'''Wraps a process to make it act as a pipe server. Takes care of supressing console windows under Windows and other housekeeping. ''' from subprocess import Popen from subprocess import PIPE import threading from Dart import PluginLogger from Dart.lib.plat import supress_window from Dart.lib.path import pushd _logger = PluginLogger(__name__) class PipeServer(object): '''Starts as process and communicates with it via pipes. ''' status_lock = threading.RLock() def __init__(self, args): self.proc = None self.args = args @property def is_running(self): '''Returns `True` if the server seems to be responsive. ''' try: with PipeServer.status_lock: return not self.proc.stdin.closed except AttributeError: _logger.debug('PipeServer not started yet') return def start(self, working_dir='.'): with PipeServer.status_lock: if self.is_running: _logger.debug( 'tried to start an already running PipeServer; aborting') return with pushd(working_dir): _logger.debug('starting PipeServer with args: %s', self.args) self.proc = Popen(self.args, stdout=PIPE, stdin=PIPE, stderr=PIPE, startupinfo=supress_window()) def stop(self): _logger.debug('stopping PipeServer...') self.proc.stdin.close() self.proc.stdout.close() self.proc.kill()
<commit_before><commit_msg>WIP: Add PipeServer class to manage a pipe server process<commit_after>
'''Wraps a process to make it act as a pipe server. Takes care of supressing console windows under Windows and other housekeeping. ''' from subprocess import Popen from subprocess import PIPE import threading from Dart import PluginLogger from Dart.lib.plat import supress_window from Dart.lib.path import pushd _logger = PluginLogger(__name__) class PipeServer(object): '''Starts as process and communicates with it via pipes. ''' status_lock = threading.RLock() def __init__(self, args): self.proc = None self.args = args @property def is_running(self): '''Returns `True` if the server seems to be responsive. ''' try: with PipeServer.status_lock: return not self.proc.stdin.closed except AttributeError: _logger.debug('PipeServer not started yet') return def start(self, working_dir='.'): with PipeServer.status_lock: if self.is_running: _logger.debug( 'tried to start an already running PipeServer; aborting') return with pushd(working_dir): _logger.debug('starting PipeServer with args: %s', self.args) self.proc = Popen(self.args, stdout=PIPE, stdin=PIPE, stderr=PIPE, startupinfo=supress_window()) def stop(self): _logger.debug('stopping PipeServer...') self.proc.stdin.close() self.proc.stdout.close() self.proc.kill()
WIP: Add PipeServer class to manage a pipe server process'''Wraps a process to make it act as a pipe server. Takes care of supressing console windows under Windows and other housekeeping. ''' from subprocess import Popen from subprocess import PIPE import threading from Dart import PluginLogger from Dart.lib.plat import supress_window from Dart.lib.path import pushd _logger = PluginLogger(__name__) class PipeServer(object): '''Starts as process and communicates with it via pipes. ''' status_lock = threading.RLock() def __init__(self, args): self.proc = None self.args = args @property def is_running(self): '''Returns `True` if the server seems to be responsive. ''' try: with PipeServer.status_lock: return not self.proc.stdin.closed except AttributeError: _logger.debug('PipeServer not started yet') return def start(self, working_dir='.'): with PipeServer.status_lock: if self.is_running: _logger.debug( 'tried to start an already running PipeServer; aborting') return with pushd(working_dir): _logger.debug('starting PipeServer with args: %s', self.args) self.proc = Popen(self.args, stdout=PIPE, stdin=PIPE, stderr=PIPE, startupinfo=supress_window()) def stop(self): _logger.debug('stopping PipeServer...') self.proc.stdin.close() self.proc.stdout.close() self.proc.kill()
<commit_before><commit_msg>WIP: Add PipeServer class to manage a pipe server process<commit_after>'''Wraps a process to make it act as a pipe server. Takes care of supressing console windows under Windows and other housekeeping. ''' from subprocess import Popen from subprocess import PIPE import threading from Dart import PluginLogger from Dart.lib.plat import supress_window from Dart.lib.path import pushd _logger = PluginLogger(__name__) class PipeServer(object): '''Starts as process and communicates with it via pipes. ''' status_lock = threading.RLock() def __init__(self, args): self.proc = None self.args = args @property def is_running(self): '''Returns `True` if the server seems to be responsive. ''' try: with PipeServer.status_lock: return not self.proc.stdin.closed except AttributeError: _logger.debug('PipeServer not started yet') return def start(self, working_dir='.'): with PipeServer.status_lock: if self.is_running: _logger.debug( 'tried to start an already running PipeServer; aborting') return with pushd(working_dir): _logger.debug('starting PipeServer with args: %s', self.args) self.proc = Popen(self.args, stdout=PIPE, stdin=PIPE, stderr=PIPE, startupinfo=supress_window()) def stop(self): _logger.debug('stopping PipeServer...') self.proc.stdin.close() self.proc.stdout.close() self.proc.kill()
5b7697661c90e27ce36e6eae3b854470862f4191
tensorflow_io/image/python/ops/image_dataset_ops.py
tensorflow_io/image/python/ops/image_dataset_ops.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Image Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader image_ops = load_library.load_op_library( resource_loader.get_path_to_datafile('_image_ops.so')) class WebPDataset(dataset_ops.DatasetSource): """A WebP Image File Dataset that reads the WebP file.""" def __init__(self, filenames): """Create a `WebPDataset`. filenames: A `tf.string` tensor containing one or more filenames. """ super(WebPDataset, self).__init__() self._filenames = ops.convert_to_tensor( filenames, dtype=dtypes.string, name="filenames") def _as_variant_tensor(self): return image_ops.web_p_dataset( self._filenames) @property def output_classes(self): return ops.Tensor @property def output_shapes(self): return (tensor_shape.TensorShape([None, None, None])) @property def output_types(self): return dtypes.uint8
Add python wrapper for WebP image format support
Add python wrapper for WebP image format support Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com>
Python
apache-2.0
tensorflow/io,tensorflow/io,tensorflow/io,tensorflow/io,tensorflow/io,tensorflow/io,tensorflow/io
Add python wrapper for WebP image format support Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com>
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Image Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader image_ops = load_library.load_op_library( resource_loader.get_path_to_datafile('_image_ops.so')) class WebPDataset(dataset_ops.DatasetSource): """A WebP Image File Dataset that reads the WebP file.""" def __init__(self, filenames): """Create a `WebPDataset`. filenames: A `tf.string` tensor containing one or more filenames. """ super(WebPDataset, self).__init__() self._filenames = ops.convert_to_tensor( filenames, dtype=dtypes.string, name="filenames") def _as_variant_tensor(self): return image_ops.web_p_dataset( self._filenames) @property def output_classes(self): return ops.Tensor @property def output_shapes(self): return (tensor_shape.TensorShape([None, None, None])) @property def output_types(self): return dtypes.uint8
<commit_before><commit_msg>Add python wrapper for WebP image format support Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com><commit_after>
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Image Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader image_ops = load_library.load_op_library( resource_loader.get_path_to_datafile('_image_ops.so')) class WebPDataset(dataset_ops.DatasetSource): """A WebP Image File Dataset that reads the WebP file.""" def __init__(self, filenames): """Create a `WebPDataset`. filenames: A `tf.string` tensor containing one or more filenames. """ super(WebPDataset, self).__init__() self._filenames = ops.convert_to_tensor( filenames, dtype=dtypes.string, name="filenames") def _as_variant_tensor(self): return image_ops.web_p_dataset( self._filenames) @property def output_classes(self): return ops.Tensor @property def output_shapes(self): return (tensor_shape.TensorShape([None, None, None])) @property def output_types(self): return dtypes.uint8
Add python wrapper for WebP image format support Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com># Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Image Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader image_ops = load_library.load_op_library( resource_loader.get_path_to_datafile('_image_ops.so')) class WebPDataset(dataset_ops.DatasetSource): """A WebP Image File Dataset that reads the WebP file.""" def __init__(self, filenames): """Create a `WebPDataset`. filenames: A `tf.string` tensor containing one or more filenames. """ super(WebPDataset, self).__init__() self._filenames = ops.convert_to_tensor( filenames, dtype=dtypes.string, name="filenames") def _as_variant_tensor(self): return image_ops.web_p_dataset( self._filenames) @property def output_classes(self): return ops.Tensor @property def output_shapes(self): return (tensor_shape.TensorShape([None, None, None])) @property def output_types(self): return dtypes.uint8
<commit_before><commit_msg>Add python wrapper for WebP image format support Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com><commit_after># Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Image Dataset.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.util import nest from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import load_library from tensorflow.python.platform import resource_loader image_ops = load_library.load_op_library( resource_loader.get_path_to_datafile('_image_ops.so')) class WebPDataset(dataset_ops.DatasetSource): """A WebP Image File Dataset that reads the WebP file.""" def __init__(self, filenames): """Create a `WebPDataset`. filenames: A `tf.string` tensor containing one or more filenames. """ super(WebPDataset, self).__init__() self._filenames = ops.convert_to_tensor( filenames, dtype=dtypes.string, name="filenames") def _as_variant_tensor(self): return image_ops.web_p_dataset( self._filenames) @property def output_classes(self): return ops.Tensor @property def output_shapes(self): return (tensor_shape.TensorShape([None, None, None])) @property def output_types(self): return dtypes.uint8
dadeb85fdbe6a4fed129cbeffbc305b36cef36c4
{{cookiecutter.repo_name}}/tests/test_cli.py
{{cookiecutter.repo_name}}/tests/test_cli.py
import pytest from click.testing import CliRunner from cli import main @pytest.fixture def runner(): return CliRunner() @pytest.fixture(params=['fr', 'en', 'tlh']) def lang(request): return request.param @pytest.fixture(params=['-l', '--language']) def cli_param(request): return request.param @pytest.fixture def mock_app(mocker): return mocker.patch('{{cookiecutter.repo_name}}.{{cookiecutter.app_class_name}}') def test_language_to_app(runner, mock_app, cli_param, lang): result = runner.invoke(main, [cli_param,lang]) assert result.exit_code == 0 mock_app.assert_called_once_with(lang) def test_abort_with_invalid_lang(runner, mock_app): result = runner.invoke(main, ['-l', 'foobar']) assert result.exit_code != 0 assert not mock_app.called
Implement test for the cli
Implement test for the cli
Python
mit
hackebrot/cookiedozer,hackebrot/cookiedozer
Implement test for the cli
import pytest from click.testing import CliRunner from cli import main @pytest.fixture def runner(): return CliRunner() @pytest.fixture(params=['fr', 'en', 'tlh']) def lang(request): return request.param @pytest.fixture(params=['-l', '--language']) def cli_param(request): return request.param @pytest.fixture def mock_app(mocker): return mocker.patch('{{cookiecutter.repo_name}}.{{cookiecutter.app_class_name}}') def test_language_to_app(runner, mock_app, cli_param, lang): result = runner.invoke(main, [cli_param,lang]) assert result.exit_code == 0 mock_app.assert_called_once_with(lang) def test_abort_with_invalid_lang(runner, mock_app): result = runner.invoke(main, ['-l', 'foobar']) assert result.exit_code != 0 assert not mock_app.called
<commit_before><commit_msg>Implement test for the cli<commit_after>
import pytest from click.testing import CliRunner from cli import main @pytest.fixture def runner(): return CliRunner() @pytest.fixture(params=['fr', 'en', 'tlh']) def lang(request): return request.param @pytest.fixture(params=['-l', '--language']) def cli_param(request): return request.param @pytest.fixture def mock_app(mocker): return mocker.patch('{{cookiecutter.repo_name}}.{{cookiecutter.app_class_name}}') def test_language_to_app(runner, mock_app, cli_param, lang): result = runner.invoke(main, [cli_param,lang]) assert result.exit_code == 0 mock_app.assert_called_once_with(lang) def test_abort_with_invalid_lang(runner, mock_app): result = runner.invoke(main, ['-l', 'foobar']) assert result.exit_code != 0 assert not mock_app.called
Implement test for the cliimport pytest from click.testing import CliRunner from cli import main @pytest.fixture def runner(): return CliRunner() @pytest.fixture(params=['fr', 'en', 'tlh']) def lang(request): return request.param @pytest.fixture(params=['-l', '--language']) def cli_param(request): return request.param @pytest.fixture def mock_app(mocker): return mocker.patch('{{cookiecutter.repo_name}}.{{cookiecutter.app_class_name}}') def test_language_to_app(runner, mock_app, cli_param, lang): result = runner.invoke(main, [cli_param,lang]) assert result.exit_code == 0 mock_app.assert_called_once_with(lang) def test_abort_with_invalid_lang(runner, mock_app): result = runner.invoke(main, ['-l', 'foobar']) assert result.exit_code != 0 assert not mock_app.called
<commit_before><commit_msg>Implement test for the cli<commit_after>import pytest from click.testing import CliRunner from cli import main @pytest.fixture def runner(): return CliRunner() @pytest.fixture(params=['fr', 'en', 'tlh']) def lang(request): return request.param @pytest.fixture(params=['-l', '--language']) def cli_param(request): return request.param @pytest.fixture def mock_app(mocker): return mocker.patch('{{cookiecutter.repo_name}}.{{cookiecutter.app_class_name}}') def test_language_to_app(runner, mock_app, cli_param, lang): result = runner.invoke(main, [cli_param,lang]) assert result.exit_code == 0 mock_app.assert_called_once_with(lang) def test_abort_with_invalid_lang(runner, mock_app): result = runner.invoke(main, ['-l', 'foobar']) assert result.exit_code != 0 assert not mock_app.called
6af695bdceaabdd4144def6a11f930e9d02a2d5c
src/assistant_library_with_local_commands_demo.py
src/assistant_library_with_local_commands_demo.py
#!/usr/bin/env python3 # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run a recognizer using the Google Assistant Library. The Google Assistant Library has direct access to the audio API, so this Python code doesn't need to record audio. Hot word detection "OK, Google" is supported. The Google Assistant Library can be installed with: env/bin/pip install google-assistant-library==0.0.2 It is available for Raspberry Pi 2/3 only; Pi Zero is not supported. """ import logging import subprocess import sys import aiy.assistant.auth_helpers import aiy.audio import aiy.voicehat from google.assistant.library import Assistant from google.assistant.library.event import EventType logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s" ) def power_off_pi(): aiy.audio.say('Good bye!') subprocess.call('sudo shutdown now', shell=True) def reboot_pi(): aiy.audio.say('See you in a bit!') subprocess.call('sudo reboot', shell=True) def process_event(assistant, event): status_ui = aiy.voicehat.get_status_ui() if event.type == EventType.ON_START_FINISHED: status_ui.status('ready') if sys.stdout.isatty(): print('Say "OK, Google" then speak, or press Ctrl+C to quit...') elif event.type == EventType.ON_CONVERSATION_TURN_STARTED: status_ui.status('listening') elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args: text = event.args['text'] print('You said:', text) if text == 'power off': assistant.stop_conversation() power_off_pi() elif text == 'reboot': assistant.stop_conversation() reboot_pi() elif event.type == EventType.ON_END_OF_UTTERANCE: status_ui.status('thinking') elif event.type == EventType.ON_CONVERSATION_TURN_FINISHED: status_ui.status('ready') elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']: sys.exit(1) def main(): credentials = aiy.assistant.auth_helpers.get_assistant_credentials() with Assistant(credentials) as assistant: for event in assistant.start(): process_event(assistant, event) if __name__ == '__main__': main()
Add a demo to showcase how to handle local commands with Assistant Library.
Add a demo to showcase how to handle local commands with Assistant Library.
Python
apache-2.0
google/aiyprojects-raspbian,t1m0thyj/aiyprojects-raspbian,google/aiyprojects-raspbian,google/aiyprojects-raspbian,t1m0thyj/aiyprojects-raspbian,google/aiyprojects-raspbian,google/aiyprojects-raspbian
Add a demo to showcase how to handle local commands with Assistant Library.
#!/usr/bin/env python3 # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run a recognizer using the Google Assistant Library. The Google Assistant Library has direct access to the audio API, so this Python code doesn't need to record audio. Hot word detection "OK, Google" is supported. The Google Assistant Library can be installed with: env/bin/pip install google-assistant-library==0.0.2 It is available for Raspberry Pi 2/3 only; Pi Zero is not supported. """ import logging import subprocess import sys import aiy.assistant.auth_helpers import aiy.audio import aiy.voicehat from google.assistant.library import Assistant from google.assistant.library.event import EventType logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s" ) def power_off_pi(): aiy.audio.say('Good bye!') subprocess.call('sudo shutdown now', shell=True) def reboot_pi(): aiy.audio.say('See you in a bit!') subprocess.call('sudo reboot', shell=True) def process_event(assistant, event): status_ui = aiy.voicehat.get_status_ui() if event.type == EventType.ON_START_FINISHED: status_ui.status('ready') if sys.stdout.isatty(): print('Say "OK, Google" then speak, or press Ctrl+C to quit...') elif event.type == EventType.ON_CONVERSATION_TURN_STARTED: status_ui.status('listening') elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args: text = event.args['text'] print('You said:', text) if text == 'power off': assistant.stop_conversation() power_off_pi() elif text == 'reboot': assistant.stop_conversation() reboot_pi() elif event.type == EventType.ON_END_OF_UTTERANCE: status_ui.status('thinking') elif event.type == EventType.ON_CONVERSATION_TURN_FINISHED: status_ui.status('ready') elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']: sys.exit(1) def main(): credentials = aiy.assistant.auth_helpers.get_assistant_credentials() with Assistant(credentials) as assistant: for event in assistant.start(): process_event(assistant, event) if __name__ == '__main__': main()
<commit_before><commit_msg>Add a demo to showcase how to handle local commands with Assistant Library.<commit_after>
#!/usr/bin/env python3 # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run a recognizer using the Google Assistant Library. The Google Assistant Library has direct access to the audio API, so this Python code doesn't need to record audio. Hot word detection "OK, Google" is supported. The Google Assistant Library can be installed with: env/bin/pip install google-assistant-library==0.0.2 It is available for Raspberry Pi 2/3 only; Pi Zero is not supported. """ import logging import subprocess import sys import aiy.assistant.auth_helpers import aiy.audio import aiy.voicehat from google.assistant.library import Assistant from google.assistant.library.event import EventType logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s" ) def power_off_pi(): aiy.audio.say('Good bye!') subprocess.call('sudo shutdown now', shell=True) def reboot_pi(): aiy.audio.say('See you in a bit!') subprocess.call('sudo reboot', shell=True) def process_event(assistant, event): status_ui = aiy.voicehat.get_status_ui() if event.type == EventType.ON_START_FINISHED: status_ui.status('ready') if sys.stdout.isatty(): print('Say "OK, Google" then speak, or press Ctrl+C to quit...') elif event.type == EventType.ON_CONVERSATION_TURN_STARTED: status_ui.status('listening') elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args: text = event.args['text'] print('You said:', text) if text == 'power off': assistant.stop_conversation() power_off_pi() elif text == 'reboot': assistant.stop_conversation() reboot_pi() elif event.type == EventType.ON_END_OF_UTTERANCE: status_ui.status('thinking') elif event.type == EventType.ON_CONVERSATION_TURN_FINISHED: status_ui.status('ready') elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']: sys.exit(1) def main(): credentials = aiy.assistant.auth_helpers.get_assistant_credentials() with Assistant(credentials) as assistant: for event in assistant.start(): process_event(assistant, event) if __name__ == '__main__': main()
Add a demo to showcase how to handle local commands with Assistant Library.#!/usr/bin/env python3 # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run a recognizer using the Google Assistant Library. The Google Assistant Library has direct access to the audio API, so this Python code doesn't need to record audio. Hot word detection "OK, Google" is supported. The Google Assistant Library can be installed with: env/bin/pip install google-assistant-library==0.0.2 It is available for Raspberry Pi 2/3 only; Pi Zero is not supported. """ import logging import subprocess import sys import aiy.assistant.auth_helpers import aiy.audio import aiy.voicehat from google.assistant.library import Assistant from google.assistant.library.event import EventType logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s" ) def power_off_pi(): aiy.audio.say('Good bye!') subprocess.call('sudo shutdown now', shell=True) def reboot_pi(): aiy.audio.say('See you in a bit!') subprocess.call('sudo reboot', shell=True) def process_event(assistant, event): status_ui = aiy.voicehat.get_status_ui() if event.type == EventType.ON_START_FINISHED: status_ui.status('ready') if sys.stdout.isatty(): print('Say "OK, Google" then speak, or press Ctrl+C to quit...') elif event.type == EventType.ON_CONVERSATION_TURN_STARTED: status_ui.status('listening') elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args: text = event.args['text'] print('You said:', text) if text == 'power off': assistant.stop_conversation() power_off_pi() elif text == 'reboot': assistant.stop_conversation() reboot_pi() elif event.type == EventType.ON_END_OF_UTTERANCE: status_ui.status('thinking') elif event.type == EventType.ON_CONVERSATION_TURN_FINISHED: status_ui.status('ready') elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']: sys.exit(1) def main(): credentials = aiy.assistant.auth_helpers.get_assistant_credentials() with Assistant(credentials) as assistant: for event in assistant.start(): process_event(assistant, event) if __name__ == '__main__': main()
<commit_before><commit_msg>Add a demo to showcase how to handle local commands with Assistant Library.<commit_after>#!/usr/bin/env python3 # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run a recognizer using the Google Assistant Library. The Google Assistant Library has direct access to the audio API, so this Python code doesn't need to record audio. Hot word detection "OK, Google" is supported. The Google Assistant Library can be installed with: env/bin/pip install google-assistant-library==0.0.2 It is available for Raspberry Pi 2/3 only; Pi Zero is not supported. """ import logging import subprocess import sys import aiy.assistant.auth_helpers import aiy.audio import aiy.voicehat from google.assistant.library import Assistant from google.assistant.library.event import EventType logging.basicConfig( level=logging.INFO, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s" ) def power_off_pi(): aiy.audio.say('Good bye!') subprocess.call('sudo shutdown now', shell=True) def reboot_pi(): aiy.audio.say('See you in a bit!') subprocess.call('sudo reboot', shell=True) def process_event(assistant, event): status_ui = aiy.voicehat.get_status_ui() if event.type == EventType.ON_START_FINISHED: status_ui.status('ready') if sys.stdout.isatty(): print('Say "OK, Google" then speak, or press Ctrl+C to quit...') elif event.type == EventType.ON_CONVERSATION_TURN_STARTED: status_ui.status('listening') elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args: text = event.args['text'] print('You said:', text) if text == 'power off': assistant.stop_conversation() power_off_pi() elif text == 'reboot': assistant.stop_conversation() reboot_pi() elif event.type == EventType.ON_END_OF_UTTERANCE: status_ui.status('thinking') elif event.type == EventType.ON_CONVERSATION_TURN_FINISHED: status_ui.status('ready') elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']: sys.exit(1) def main(): credentials = aiy.assistant.auth_helpers.get_assistant_credentials() with Assistant(credentials) as assistant: for event in assistant.start(): process_event(assistant, event) if __name__ == '__main__': main()
a81d8aef2462bce852f52b551eb067659973978f
rethinkdb/setup-rethinkdb.py
rethinkdb/setup-rethinkdb.py
#!/usr/bin/env python import rethinkdb as r conn = r.connect() r.db_create('muzhack').run(conn) r.db('muzhack').table_create('users').run(conn) r.db('muzhack').table_create('projects').run(conn)
Add script for setting up rethinkdb
Add script for setting up rethinkdb
Python
mit
muzhack/muzhack,muzhack/musitechhub,muzhack/musitechhub,muzhack/musitechhub,muzhack/muzhack,muzhack/muzhack,muzhack/muzhack,muzhack/musitechhub
Add script for setting up rethinkdb
#!/usr/bin/env python import rethinkdb as r conn = r.connect() r.db_create('muzhack').run(conn) r.db('muzhack').table_create('users').run(conn) r.db('muzhack').table_create('projects').run(conn)
<commit_before><commit_msg>Add script for setting up rethinkdb<commit_after>
#!/usr/bin/env python import rethinkdb as r conn = r.connect() r.db_create('muzhack').run(conn) r.db('muzhack').table_create('users').run(conn) r.db('muzhack').table_create('projects').run(conn)
Add script for setting up rethinkdb#!/usr/bin/env python import rethinkdb as r conn = r.connect() r.db_create('muzhack').run(conn) r.db('muzhack').table_create('users').run(conn) r.db('muzhack').table_create('projects').run(conn)
<commit_before><commit_msg>Add script for setting up rethinkdb<commit_after>#!/usr/bin/env python import rethinkdb as r conn = r.connect() r.db_create('muzhack').run(conn) r.db('muzhack').table_create('users').run(conn) r.db('muzhack').table_create('projects').run(conn)
153983a933cd708cffbfde84a69930b33f747ca1
zephyr/management/commands/set_unread_to_pointer.py
zephyr/management/commands/set_unread_to_pointer.py
from optparse import make_option import logging from django.core.management.base import BaseCommand from zephyr.lib.actions import do_deactivate, user_sessions from zephyr.lib import utils from zephyr.models import UserMessage, UserProfile from django.db import transaction, models class Command(BaseCommand): help = "Updates a user's read messages up to her current pointer location" option_list = BaseCommand.option_list + ( make_option('-f', '--for-real', dest='for_real', action='store_true', default=False, help="Actually change message flags. Default is a dry run."), make_option('-a', '--all', dest='all_users', action='store_true', default=False, help="Updates flags for all users at once."), make_option('-r', '--realm', dest='one_realm', action='store_true', default=False, help="Updates flags for all users in one realm at once."), ) def handle(self, *args, **options): if not args and not options["all_users"] and not options["one_realm"]: print "Please specify an e-mail address and/or --realm or --all" exit(1) if options["all_users"]: users = UserProfile.objects.all() elif options["one_realm"]: if not args: print "Please specify which realm to process." exit(1) users = UserProfile.objects.filter(realm__domain=args[0]) else: users = [UserProfile.objects.get(user__email__iexact=args[0])] for user_profile in users: pointer = user_profile.pointer msgs = UserMessage.objects.filter(user_profile=user_profile, flags=~UserMessage.flags.read, message__id__lte=pointer) if not options["for_real"]: for msg in msgs: print "Adding read flag to msg: %s - %s/%s (own msg: %s)" \ % (user_profile.user.email, msg.message.id, msg.id, msg.message.sender.user.email == user_profile.user.email) else: def do_update(batch): with transaction.commit_on_success(): UserMessage.objects.filter(id__in=batch).update(flags=models.F('flags') | UserMessage.flags.read) mids = [m.id for m in msgs] utils.run_in_batches(mids, 250, do_update, 3, logging.info) if not options["for_real"]: print "Dry run completed. Run with --for-real to change message flags." exit(1) print "User messages updated."
Add a manage.py command to add read flags up to the current pointer
Add a manage.py command to add read flags up to the current pointer This is required because our migration is going to go in two phases. When we do the database migration (on pushing to master), we update all messages at that point. But prod doesn't know about the new flags field, so any new messages sent on prod will not have the read bit set. When we push to prod, we want to re-run the bit of the migration script that automatically sets read flags on messages older than the users's pointer. (imported from commit 961d33e972eac9ada80089bf1b1269c7fb42d56b)
Python
apache-2.0
zorojean/zulip,developerfm/zulip,ikasumiwt/zulip,yocome/zulip,codeKonami/zulip,ipernet/zulip,PaulPetring/zulip,brockwhittaker/zulip,christi3k/zulip,bastianh/zulip,johnny9/zulip,ufosky-server/zulip,vabs22/zulip,Drooids/zulip,stamhe/zulip,Juanvulcano/zulip,RobotCaleb/zulip,TigorC/zulip,joshisa/zulip,Frouk/zulip,peiwei/zulip,udxxabp/zulip,schatt/zulip,jphilipsen05/zulip,huangkebo/zulip,wavelets/zulip,hafeez3000/zulip,technicalpickles/zulip,m1ssou/zulip,wavelets/zulip,thomasboyt/zulip,dxq-git/zulip,jrowan/zulip,paxapy/zulip,ericzhou2008/zulip,EasonYi/zulip,dnmfarrell/zulip,wdaher/zulip,lfranchi/zulip,bluesea/zulip,dawran6/zulip,atomic-labs/zulip,lfranchi/zulip,gigawhitlocks/zulip,MayB/zulip,ashwinirudrappa/zulip,SmartPeople/zulip,shaunstanislaus/zulip,vakila/zulip,adnanh/zulip,KJin99/zulip,brockwhittaker/zulip,christi3k/zulip,bluesea/zulip,dnmfarrell/zulip,wdaher/zulip,lfranchi/zulip,bssrdf/zulip,tbutter/zulip,j831/zulip,karamcnair/zulip,hafeez3000/zulip,technicalpickles/zulip,zorojean/zulip,bssrdf/zulip,tbutter/zulip,sharmaeklavya2/zulip,Galexrt/zulip,yuvipanda/zulip,dhcrzf/zulip,bluesea/zulip,dxq-git/zulip,gigawhitlocks/zulip,noroot/zulip,xuanhan863/zulip,Galexrt/zulip,jonesgithub/zulip,noroot/zulip,stamhe/zulip,ApsOps/zulip,TigorC/zulip,praveenaki/zulip,isht3/zulip,JPJPJPOPOP/zulip,Batterfii/zulip,moria/zulip,samatdav/zulip,arpith/zulip,lfranchi/zulip,wangdeshui/zulip,kaiyuanheshang/zulip,SmartPeople/zulip,mdavid/zulip,hj3938/zulip,PhilSk/zulip,swinghu/zulip,gkotian/zulip,shubhamdhama/zulip,Diptanshu8/zulip,brainwane/zulip,ryansnowboarder/zulip,showell/zulip,atomic-labs/zulip,reyha/zulip,pradiptad/zulip,niftynei/zulip,codeKonami/zulip,wdaher/zulip,rishig/zulip,alliejones/zulip,hayderimran7/zulip,krtkmj/zulip,kokoar/zulip,yuvipanda/zulip,dhcrzf/zulip,vikas-parashar/zulip,kokoar/zulip,rht/zulip,jphilipsen05/zulip,vabs22/zulip,shaunstanislaus/zulip,arpitpanwar/zulip,brockwhittaker/zulip,hj3938/zulip,shubhamdhama/zulip,Jianchun1/zulip,atomic-labs/zulip,tbutter/zulip,JanzTam/zulip,eastlhu/zulip,themass/zulip,voidException/zulip,hengqujushi/zulip,LAndreas/zulip,fw1121/zulip,armooo/zulip,ryanbackman/zulip,nicholasbs/zulip,Diptanshu8/zulip,dnmfarrell/zulip,johnny9/zulip,johnnygaddarr/zulip,Frouk/zulip,Cheppers/zulip,yuvipanda/zulip,mansilladev/zulip,jonesgithub/zulip,isht3/zulip,rht/zulip,developerfm/zulip,mdavid/zulip,jessedhillon/zulip,reyha/zulip,amanharitsh123/zulip,punchagan/zulip,j831/zulip,karamcnair/zulip,MariaFaBella85/zulip,wavelets/zulip,littledogboy/zulip,KJin99/zulip,themass/zulip,jessedhillon/zulip,luyifan/zulip,codeKonami/zulip,proliming/zulip,amyliu345/zulip,bluesea/zulip,aps-sids/zulip,ikasumiwt/zulip,hayderimran7/zulip,paxapy/zulip,KingxBanana/zulip,TigorC/zulip,Gabriel0402/zulip,zhaoweigg/zulip,bssrdf/zulip,joshisa/zulip,PaulPetring/zulip,amanharitsh123/zulip,huangkebo/zulip,hustlzp/zulip,dnmfarrell/zulip,hackerkid/zulip,tbutter/zulip,fw1121/zulip,praveenaki/zulip,johnnygaddarr/zulip,punchagan/zulip,kokoar/zulip,samatdav/zulip,shubhamdhama/zulip,voidException/zulip,brainwane/zulip,hj3938/zulip,hengqujushi/zulip,hackerkid/zulip,vabs22/zulip,RobotCaleb/zulip,jimmy54/zulip,yuvipanda/zulip,udxxabp/zulip,lfranchi/zulip,suxinde2009/zulip,alliejones/zulip,dawran6/zulip,arpith/zulip,LeeRisk/zulip,qq1012803704/zulip,ufosky-server/zulip,shrikrishnaholla/zulip,arpith/zulip,Diptanshu8/zulip,xuanhan863/zulip,JanzTam/zulip,jeffcao/zulip,tdr130/zulip,dotcool/zulip,atomic-labs/zulip,christi3k/zulip,vikas-parashar/zulip,armooo/zulip,j831/zulip,zacps/zulip,deer-hope/zulip,zofuthan/zulip,dnmfarrell/zulip,jrowan/zulip,peguin40/zulip,zorojean/zulip,zhaoweigg/zulip,zhaoweigg/zulip,willingc/zulip,sup95/zulip,grave-w-grave/zulip,dattatreya303/zulip,showell/zulip,Jianchun1/zulip,he15his/zulip,zachallaun/zulip,kaiyuanheshang/zulip,proliming/zulip,moria/zulip,jimmy54/zulip,xuxiao/zulip,zulip/zulip,kokoar/zulip,jessedhillon/zulip,RobotCaleb/zulip,dxq-git/zulip,glovebx/zulip,luyifan/zulip,MayB/zulip,zwily/zulip,ryansnowboarder/zulip,umkay/zulip,bastianh/zulip,zofuthan/zulip,punchagan/zulip,JPJPJPOPOP/zulip,wweiradio/zulip,pradiptad/zulip,pradiptad/zulip,luyifan/zulip,ryanbackman/zulip,Vallher/zulip,sharmaeklavya2/zulip,sharmaeklavya2/zulip,bowlofstew/zulip,jerryge/zulip,jessedhillon/zulip,bowlofstew/zulip,avastu/zulip,jphilipsen05/zulip,shrikrishnaholla/zulip,krtkmj/zulip,mansilladev/zulip,ipernet/zulip,timabbott/zulip,sup95/zulip,sharmaeklavya2/zulip,dattatreya303/zulip,zacps/zulip,dotcool/zulip,developerfm/zulip,zorojean/zulip,Juanvulcano/zulip,praveenaki/zulip,tommyip/zulip,hayderimran7/zulip,qq1012803704/zulip,moria/zulip,Jianchun1/zulip,PhilSk/zulip,Drooids/zulip,blaze225/zulip,mahim97/zulip,moria/zulip,arpith/zulip,peguin40/zulip,vaidap/zulip,ericzhou2008/zulip,ApsOps/zulip,jerryge/zulip,glovebx/zulip,wweiradio/zulip,amanharitsh123/zulip,hackerkid/zulip,isht3/zulip,zacps/zulip,natanovia/zulip,gkotian/zulip,Galexrt/zulip,jainayush975/zulip,bastianh/zulip,Frouk/zulip,voidException/zulip,vakila/zulip,alliejones/zulip,schatt/zulip,eeshangarg/zulip,so0k/zulip,ApsOps/zulip,DazWorrall/zulip,vikas-parashar/zulip,moria/zulip,LeeRisk/zulip,zwily/zulip,brainwane/zulip,vaidap/zulip,johnny9/zulip,alliejones/zulip,levixie/zulip,eastlhu/zulip,rht/zulip,Batterfii/zulip,huangkebo/zulip,ikasumiwt/zulip,eeshangarg/zulip,firstblade/zulip,AZtheAsian/zulip,kokoar/zulip,susansls/zulip,Vallher/zulip,vakila/zulip,aliceriot/zulip,j831/zulip,andersk/zulip,gkotian/zulip,itnihao/zulip,kou/zulip,hj3938/zulip,jeffcao/zulip,christi3k/zulip,saitodisse/zulip,aakash-cr7/zulip,jackrzhang/zulip,avastu/zulip,blaze225/zulip,deer-hope/zulip,blaze225/zulip,themass/zulip,Cheppers/zulip,karamcnair/zulip,Jianchun1/zulip,dhcrzf/zulip,easyfmxu/zulip,seapasulli/zulip,xuanhan863/zulip,ryansnowboarder/zulip,schatt/zulip,bluesea/zulip,noroot/zulip,kaiyuanheshang/zulip,guiquanz/zulip,fw1121/zulip,bowlofstew/zulip,easyfmxu/zulip,vikas-parashar/zulip,guiquanz/zulip,Vallher/zulip,shaunstanislaus/zulip,brockwhittaker/zulip,avastu/zulip,jeffcao/zulip,noroot/zulip,grave-w-grave/zulip,zacps/zulip,ahmadassaf/zulip,sonali0901/zulip,tbutter/zulip,DazWorrall/zulip,so0k/zulip,xuxiao/zulip,Qgap/zulip,Drooids/zulip,udxxabp/zulip,Frouk/zulip,levixie/zulip,grave-w-grave/zulip,vabs22/zulip,tiansiyuan/zulip,jphilipsen05/zulip,dwrpayne/zulip,souravbadami/zulip,m1ssou/zulip,natanovia/zulip,paxapy/zulip,schatt/zulip,SmartPeople/zulip,tiansiyuan/zulip,m1ssou/zulip,easyfmxu/zulip,gkotian/zulip,rishig/zulip,kou/zulip,shaunstanislaus/zulip,jeffcao/zulip,cosmicAsymmetry/zulip,jonesgithub/zulip,bitemyapp/zulip,Batterfii/zulip,MariaFaBella85/zulip,so0k/zulip,AZtheAsian/zulip,avastu/zulip,amanharitsh123/zulip,niftynei/zulip,KJin99/zulip,ikasumiwt/zulip,kaiyuanheshang/zulip,rishig/zulip,hj3938/zulip,peguin40/zulip,sup95/zulip,dawran6/zulip,moria/zulip,arpitpanwar/zulip,hackerkid/zulip,PaulPetring/zulip,synicalsyntax/zulip,ryanbackman/zulip,isht3/zulip,lfranchi/zulip,hackerkid/zulip,dwrpayne/zulip,suxinde2009/zulip,cosmicAsymmetry/zulip,mansilladev/zulip,krtkmj/zulip,Cheppers/zulip,amyliu345/zulip,christi3k/zulip,dxq-git/zulip,RobotCaleb/zulip,brainwane/zulip,wangdeshui/zulip,peiwei/zulip,jessedhillon/zulip,itnihao/zulip,m1ssou/zulip,umkay/zulip,ericzhou2008/zulip,aliceriot/zulip,levixie/zulip,mdavid/zulip,bssrdf/zulip,avastu/zulip,jackrzhang/zulip,ufosky-server/zulip,peiwei/zulip,codeKonami/zulip,bitemyapp/zulip,Suninus/zulip,andersk/zulip,esander91/zulip,jeffcao/zulip,deer-hope/zulip,KingxBanana/zulip,kaiyuanheshang/zulip,xuxiao/zulip,xuxiao/zulip,akuseru/zulip,deer-hope/zulip,jrowan/zulip,itnihao/zulip,jeffcao/zulip,calvinleenyc/zulip,esander91/zulip,vikas-parashar/zulip,ryanbackman/zulip,bowlofstew/zulip,zhaoweigg/zulip,tdr130/zulip,pradiptad/zulip,eastlhu/zulip,kaiyuanheshang/zulip,Qgap/zulip,gigawhitlocks/zulip,joshisa/zulip,ipernet/zulip,swinghu/zulip,hafeez3000/zulip,guiquanz/zulip,aliceriot/zulip,lfranchi/zulip,aps-sids/zulip,babbage/zulip,dhcrzf/zulip,susansls/zulip,dwrpayne/zulip,johnnygaddarr/zulip,easyfmxu/zulip,zofuthan/zulip,aakash-cr7/zulip,jackrzhang/zulip,wdaher/zulip,so0k/zulip,jackrzhang/zulip,hafeez3000/zulip,calvinleenyc/zulip,nicholasbs/zulip,jerryge/zulip,babbage/zulip,wavelets/zulip,vakila/zulip,andersk/zulip,tiansiyuan/zulip,seapasulli/zulip,seapasulli/zulip,vakila/zulip,JPJPJPOPOP/zulip,shrikrishnaholla/zulip,thomasboyt/zulip,rht/zulip,gigawhitlocks/zulip,Qgap/zulip,praveenaki/zulip,avastu/zulip,swinghu/zulip,wdaher/zulip,MariaFaBella85/zulip,eeshangarg/zulip,RobotCaleb/zulip,MariaFaBella85/zulip,DazWorrall/zulip,mahim97/zulip,pradiptad/zulip,amyliu345/zulip,wweiradio/zulip,armooo/zulip,firstblade/zulip,sup95/zulip,MariaFaBella85/zulip,umkay/zulip,yuvipanda/zulip,ashwinirudrappa/zulip,hj3938/zulip,babbage/zulip,Batterfii/zulip,Galexrt/zulip,eastlhu/zulip,ikasumiwt/zulip,themass/zulip,Frouk/zulip,mohsenSy/zulip,thomasboyt/zulip,dhcrzf/zulip,rishig/zulip,aliceriot/zulip,ahmadassaf/zulip,jrowan/zulip,hayderimran7/zulip,willingc/zulip,qq1012803704/zulip,udxxabp/zulip,esander91/zulip,hustlzp/zulip,niftynei/zulip,hustlzp/zulip,showell/zulip,proliming/zulip,rht/zulip,Diptanshu8/zulip,KingxBanana/zulip,gigawhitlocks/zulip,shubhamdhama/zulip,praveenaki/zulip,amallia/zulip,aps-sids/zulip,nicholasbs/zulip,yuvipanda/zulip,lfranchi/zulip,yocome/zulip,nicholasbs/zulip,verma-varsha/zulip,tbutter/zulip,ahmadassaf/zulip,verma-varsha/zulip,punchagan/zulip,Juanvulcano/zulip,alliejones/zulip,pradiptad/zulip,Cheppers/zulip,johnnygaddarr/zulip,wangdeshui/zulip,joshisa/zulip,tiansiyuan/zulip,umkay/zulip,ApsOps/zulip,stamhe/zulip,jimmy54/zulip,codeKonami/zulip,peiwei/zulip,krtkmj/zulip,glovebx/zulip,tommyip/zulip,jphilipsen05/zulip,zwily/zulip,dxq-git/zulip,dotcool/zulip,jonesgithub/zulip,Gabriel0402/zulip,saitodisse/zulip,wangdeshui/zulip,littledogboy/zulip,aakash-cr7/zulip,yocome/zulip,mansilladev/zulip,showell/zulip,m1ssou/zulip,ufosky-server/zulip,stamhe/zulip,sonali0901/zulip,amallia/zulip,tdr130/zulip,KJin99/zulip,mohsenSy/zulip,zofuthan/zulip,ashwinirudrappa/zulip,hengqujushi/zulip,aliceriot/zulip,tdr130/zulip,jimmy54/zulip,ipernet/zulip,shaunstanislaus/zulip,fw1121/zulip,mohsenSy/zulip,JPJPJPOPOP/zulip,wavelets/zulip,firstblade/zulip,kou/zulip,arpith/zulip,KingxBanana/zulip,aps-sids/zulip,timabbott/zulip,zulip/zulip,synicalsyntax/zulip,guiquanz/zulip,JanzTam/zulip,Cheppers/zulip,akuseru/zulip,technicalpickles/zulip,zofuthan/zulip,zachallaun/zulip,KingxBanana/zulip,technicalpickles/zulip,punchagan/zulip,jimmy54/zulip,udxxabp/zulip,amallia/zulip,ipernet/zulip,peiwei/zulip,jeffcao/zulip,glovebx/zulip,LAndreas/zulip,stamhe/zulip,PhilSk/zulip,synicalsyntax/zulip,adnanh/zulip,rht/zulip,jerryge/zulip,dwrpayne/zulip,joshisa/zulip,akuseru/zulip,jimmy54/zulip,shrikrishnaholla/zulip,bitemyapp/zulip,PhilSk/zulip,natanovia/zulip,so0k/zulip,mdavid/zulip,eeshangarg/zulip,littledogboy/zulip,ahmadassaf/zulip,tommyip/zulip,zachallaun/zulip,reyha/zulip,suxinde2009/zulip,joyhchen/zulip,swinghu/zulip,jimmy54/zulip,SmartPeople/zulip,LAndreas/zulip,timabbott/zulip,yocome/zulip,KingxBanana/zulip,sharmaeklavya2/zulip,akuseru/zulip,zorojean/zulip,fw1121/zulip,yocome/zulip,andersk/zulip,developerfm/zulip,EasonYi/zulip,aliceriot/zulip,proliming/zulip,karamcnair/zulip,souravbadami/zulip,willingc/zulip,eeshangarg/zulip,suxinde2009/zulip,LeeRisk/zulip,wweiradio/zulip,codeKonami/zulip,glovebx/zulip,ryansnowboarder/zulip,armooo/zulip,he15his/zulip,xuxiao/zulip,levixie/zulip,hustlzp/zulip,amanharitsh123/zulip,Diptanshu8/zulip,ashwinirudrappa/zulip,swinghu/zulip,atomic-labs/zulip,samatdav/zulip,showell/zulip,EasonYi/zulip,thomasboyt/zulip,Qgap/zulip,rishig/zulip,Qgap/zulip,m1ssou/zulip,joshisa/zulip,guiquanz/zulip,aps-sids/zulip,punchagan/zulip,bastianh/zulip,ryansnowboarder/zulip,hayderimran7/zulip,levixie/zulip,joshisa/zulip,peguin40/zulip,ufosky-server/zulip,easyfmxu/zulip,natanovia/zulip,mohsenSy/zulip,ashwinirudrappa/zulip,stamhe/zulip,littledogboy/zulip,paxapy/zulip,mdavid/zulip,umkay/zulip,levixie/zulip,ericzhou2008/zulip,mahim97/zulip,udxxabp/zulip,sonali0901/zulip,bssrdf/zulip,shrikrishnaholla/zulip,ryanbackman/zulip,bowlofstew/zulip,susansls/zulip,vaidap/zulip,souravbadami/zulip,hackerkid/zulip,amanharitsh123/zulip,saitodisse/zulip,andersk/zulip,aps-sids/zulip,zwily/zulip,Cheppers/zulip,johnny9/zulip,ericzhou2008/zulip,shubhamdhama/zulip,suxinde2009/zulip,zhaoweigg/zulip,calvinleenyc/zulip,suxinde2009/zulip,he15his/zulip,jrowan/zulip,TigorC/zulip,LAndreas/zulip,willingc/zulip,noroot/zulip,gkotian/zulip,PaulPetring/zulip,Batterfii/zulip,hackerkid/zulip,hafeez3000/zulip,showell/zulip,krtkmj/zulip,tommyip/zulip,fw1121/zulip,EasonYi/zulip,grave-w-grave/zulip,wweiradio/zulip,dawran6/zulip,cosmicAsymmetry/zulip,JPJPJPOPOP/zulip,ApsOps/zulip,dotcool/zulip,Batterfii/zulip,Suninus/zulip,thomasboyt/zulip,dxq-git/zulip,voidException/zulip,RobotCaleb/zulip,johnnygaddarr/zulip,akuseru/zulip,JanzTam/zulip,LAndreas/zulip,EasonYi/zulip,so0k/zulip,Qgap/zulip,vakila/zulip,Diptanshu8/zulip,MariaFaBella85/zulip,alliejones/zulip,aakash-cr7/zulip,dxq-git/zulip,eeshangarg/zulip,littledogboy/zulip,firstblade/zulip,SmartPeople/zulip,isht3/zulip,dwrpayne/zulip,zulip/zulip,Juanvulcano/zulip,wavelets/zulip,DazWorrall/zulip,johnnygaddarr/zulip,firstblade/zulip,proliming/zulip,vaidap/zulip,seapasulli/zulip,vabs22/zulip,mdavid/zulip,PaulPetring/zulip,wangdeshui/zulip,EasonYi/zulip,peiwei/zulip,dotcool/zulip,bowlofstew/zulip,blaze225/zulip,jerryge/zulip,natanovia/zulip,hengqujushi/zulip,Vallher/zulip,guiquanz/zulip,ryansnowboarder/zulip,tommyip/zulip,saitodisse/zulip,souravbadami/zulip,yocome/zulip,brockwhittaker/zulip,tbutter/zulip,andersk/zulip,nicholasbs/zulip,schatt/zulip,luyifan/zulip,KJin99/zulip,vakila/zulip,bitemyapp/zulip,Drooids/zulip,Gabriel0402/zulip,rishig/zulip,mohsenSy/zulip,wdaher/zulip,MariaFaBella85/zulip,wdaher/zulip,pradiptad/zulip,johnny9/zulip,verma-varsha/zulip,Batterfii/zulip,sup95/zulip,themass/zulip,mohsenSy/zulip,hayderimran7/zulip,wweiradio/zulip,ahmadassaf/zulip,eeshangarg/zulip,AZtheAsian/zulip,glovebx/zulip,samatdav/zulip,ikasumiwt/zulip,dattatreya303/zulip,karamcnair/zulip,easyfmxu/zulip,synicalsyntax/zulip,natanovia/zulip,mahim97/zulip,dattatreya303/zulip,jrowan/zulip,SmartPeople/zulip,ikasumiwt/zulip,johnnygaddarr/zulip,DazWorrall/zulip,vabs22/zulip,qq1012803704/zulip,dawran6/zulip,zulip/zulip,voidException/zulip,rht/zulip,zulip/zulip,ipernet/zulip,bowlofstew/zulip,hustlzp/zulip,amyliu345/zulip,stamhe/zulip,sonali0901/zulip,ufosky-server/zulip,kou/zulip,bitemyapp/zulip,bastianh/zulip,jainayush975/zulip,verma-varsha/zulip,dotcool/zulip,shaunstanislaus/zulip,proliming/zulip,luyifan/zulip,hustlzp/zulip,willingc/zulip,wavelets/zulip,MayB/zulip,DazWorrall/zulip,technicalpickles/zulip,bitemyapp/zulip,alliejones/zulip,DazWorrall/zulip,vikas-parashar/zulip,esander91/zulip,PhilSk/zulip,peguin40/zulip,Suninus/zulip,AZtheAsian/zulip,adnanh/zulip,developerfm/zulip,christi3k/zulip,hustlzp/zulip,zhaoweigg/zulip,so0k/zulip,Gabriel0402/zulip,wangdeshui/zulip,wdaher/zulip,gkotian/zulip,itnihao/zulip,cosmicAsymmetry/zulip,verma-varsha/zulip,aliceriot/zulip,LeeRisk/zulip,suxinde2009/zulip,dotcool/zulip,ahmadassaf/zulip,tiansiyuan/zulip,TigorC/zulip,zhaoweigg/zulip,zofuthan/zulip,synicalsyntax/zulip,Cheppers/zulip,deer-hope/zulip,calvinleenyc/zulip,andersk/zulip,blaze225/zulip,ashwinirudrappa/zulip,dattatreya303/zulip,shaunstanislaus/zulip,thomasboyt/zulip,mansilladev/zulip,shrikrishnaholla/zulip,xuxiao/zulip,zofuthan/zulip,ApsOps/zulip,vaidap/zulip,jainayush975/zulip,luyifan/zulip,wangdeshui/zulip,susansls/zulip,gigawhitlocks/zulip,eastlhu/zulip,mdavid/zulip,j831/zulip,firstblade/zulip,codeKonami/zulip,vaidap/zulip,zachallaun/zulip,ufosky-server/zulip,Suninus/zulip,Gabriel0402/zulip,susansls/zulip,niftynei/zulip,umkay/zulip,tdr130/zulip,bastianh/zulip,LAndreas/zulip,jerryge/zulip,itnihao/zulip,atomic-labs/zulip,JanzTam/zulip,Juanvulcano/zulip,kou/zulip,Qgap/zulip,mansilladev/zulip,mahim97/zulip,ashwinirudrappa/zulip,MayB/zulip,Vallher/zulip,guiquanz/zulip,tiansiyuan/zulip,amallia/zulip,arpitpanwar/zulip,rishig/zulip,PaulPetring/zulip,kokoar/zulip,hafeez3000/zulip,Jianchun1/zulip,dnmfarrell/zulip,ryansnowboarder/zulip,isht3/zulip,Galexrt/zulip,reyha/zulip,sup95/zulip,amallia/zulip,brainwane/zulip,xuanhan863/zulip,swinghu/zulip,huangkebo/zulip,akuseru/zulip,Vallher/zulip,shubhamdhama/zulip,punchagan/zulip,hafeez3000/zulip,zwily/zulip,zacps/zulip,ericzhou2008/zulip,jackrzhang/zulip,jonesgithub/zulip,dhcrzf/zulip,sharmaeklavya2/zulip,deer-hope/zulip,luyifan/zulip,kaiyuanheshang/zulip,themass/zulip,zulip/zulip,niftynei/zulip,susansls/zulip,noroot/zulip,zachallaun/zulip,littledogboy/zulip,itnihao/zulip,grave-w-grave/zulip,JanzTam/zulip,noroot/zulip,babbage/zulip,jerryge/zulip,huangkebo/zulip,MayB/zulip,bssrdf/zulip,dwrpayne/zulip,bluesea/zulip,proliming/zulip,kou/zulip,tommyip/zulip,arpitpanwar/zulip,babbage/zulip,he15his/zulip,he15his/zulip,timabbott/zulip,schatt/zulip,glovebx/zulip,TigorC/zulip,he15his/zulip,developerfm/zulip,jphilipsen05/zulip,AZtheAsian/zulip,he15his/zulip,jainayush975/zulip,dnmfarrell/zulip,Vallher/zulip,sonali0901/zulip,xuanhan863/zulip,arpitpanwar/zulip,themass/zulip,m1ssou/zulip,arpith/zulip,Frouk/zulip,esander91/zulip,joyhchen/zulip,tdr130/zulip,technicalpickles/zulip,tdr130/zulip,paxapy/zulip,karamcnair/zulip,hengqujushi/zulip,joyhchen/zulip,developerfm/zulip,armooo/zulip,qq1012803704/zulip,saitodisse/zulip,jonesgithub/zulip,hengqujushi/zulip
Add a manage.py command to add read flags up to the current pointer This is required because our migration is going to go in two phases. When we do the database migration (on pushing to master), we update all messages at that point. But prod doesn't know about the new flags field, so any new messages sent on prod will not have the read bit set. When we push to prod, we want to re-run the bit of the migration script that automatically sets read flags on messages older than the users's pointer. (imported from commit 961d33e972eac9ada80089bf1b1269c7fb42d56b)
from optparse import make_option import logging from django.core.management.base import BaseCommand from zephyr.lib.actions import do_deactivate, user_sessions from zephyr.lib import utils from zephyr.models import UserMessage, UserProfile from django.db import transaction, models class Command(BaseCommand): help = "Updates a user's read messages up to her current pointer location" option_list = BaseCommand.option_list + ( make_option('-f', '--for-real', dest='for_real', action='store_true', default=False, help="Actually change message flags. Default is a dry run."), make_option('-a', '--all', dest='all_users', action='store_true', default=False, help="Updates flags for all users at once."), make_option('-r', '--realm', dest='one_realm', action='store_true', default=False, help="Updates flags for all users in one realm at once."), ) def handle(self, *args, **options): if not args and not options["all_users"] and not options["one_realm"]: print "Please specify an e-mail address and/or --realm or --all" exit(1) if options["all_users"]: users = UserProfile.objects.all() elif options["one_realm"]: if not args: print "Please specify which realm to process." exit(1) users = UserProfile.objects.filter(realm__domain=args[0]) else: users = [UserProfile.objects.get(user__email__iexact=args[0])] for user_profile in users: pointer = user_profile.pointer msgs = UserMessage.objects.filter(user_profile=user_profile, flags=~UserMessage.flags.read, message__id__lte=pointer) if not options["for_real"]: for msg in msgs: print "Adding read flag to msg: %s - %s/%s (own msg: %s)" \ % (user_profile.user.email, msg.message.id, msg.id, msg.message.sender.user.email == user_profile.user.email) else: def do_update(batch): with transaction.commit_on_success(): UserMessage.objects.filter(id__in=batch).update(flags=models.F('flags') | UserMessage.flags.read) mids = [m.id for m in msgs] utils.run_in_batches(mids, 250, do_update, 3, logging.info) if not options["for_real"]: print "Dry run completed. Run with --for-real to change message flags." exit(1) print "User messages updated."
<commit_before><commit_msg>Add a manage.py command to add read flags up to the current pointer This is required because our migration is going to go in two phases. When we do the database migration (on pushing to master), we update all messages at that point. But prod doesn't know about the new flags field, so any new messages sent on prod will not have the read bit set. When we push to prod, we want to re-run the bit of the migration script that automatically sets read flags on messages older than the users's pointer. (imported from commit 961d33e972eac9ada80089bf1b1269c7fb42d56b)<commit_after>
from optparse import make_option import logging from django.core.management.base import BaseCommand from zephyr.lib.actions import do_deactivate, user_sessions from zephyr.lib import utils from zephyr.models import UserMessage, UserProfile from django.db import transaction, models class Command(BaseCommand): help = "Updates a user's read messages up to her current pointer location" option_list = BaseCommand.option_list + ( make_option('-f', '--for-real', dest='for_real', action='store_true', default=False, help="Actually change message flags. Default is a dry run."), make_option('-a', '--all', dest='all_users', action='store_true', default=False, help="Updates flags for all users at once."), make_option('-r', '--realm', dest='one_realm', action='store_true', default=False, help="Updates flags for all users in one realm at once."), ) def handle(self, *args, **options): if not args and not options["all_users"] and not options["one_realm"]: print "Please specify an e-mail address and/or --realm or --all" exit(1) if options["all_users"]: users = UserProfile.objects.all() elif options["one_realm"]: if not args: print "Please specify which realm to process." exit(1) users = UserProfile.objects.filter(realm__domain=args[0]) else: users = [UserProfile.objects.get(user__email__iexact=args[0])] for user_profile in users: pointer = user_profile.pointer msgs = UserMessage.objects.filter(user_profile=user_profile, flags=~UserMessage.flags.read, message__id__lte=pointer) if not options["for_real"]: for msg in msgs: print "Adding read flag to msg: %s - %s/%s (own msg: %s)" \ % (user_profile.user.email, msg.message.id, msg.id, msg.message.sender.user.email == user_profile.user.email) else: def do_update(batch): with transaction.commit_on_success(): UserMessage.objects.filter(id__in=batch).update(flags=models.F('flags') | UserMessage.flags.read) mids = [m.id for m in msgs] utils.run_in_batches(mids, 250, do_update, 3, logging.info) if not options["for_real"]: print "Dry run completed. Run with --for-real to change message flags." exit(1) print "User messages updated."
Add a manage.py command to add read flags up to the current pointer This is required because our migration is going to go in two phases. When we do the database migration (on pushing to master), we update all messages at that point. But prod doesn't know about the new flags field, so any new messages sent on prod will not have the read bit set. When we push to prod, we want to re-run the bit of the migration script that automatically sets read flags on messages older than the users's pointer. (imported from commit 961d33e972eac9ada80089bf1b1269c7fb42d56b)from optparse import make_option import logging from django.core.management.base import BaseCommand from zephyr.lib.actions import do_deactivate, user_sessions from zephyr.lib import utils from zephyr.models import UserMessage, UserProfile from django.db import transaction, models class Command(BaseCommand): help = "Updates a user's read messages up to her current pointer location" option_list = BaseCommand.option_list + ( make_option('-f', '--for-real', dest='for_real', action='store_true', default=False, help="Actually change message flags. Default is a dry run."), make_option('-a', '--all', dest='all_users', action='store_true', default=False, help="Updates flags for all users at once."), make_option('-r', '--realm', dest='one_realm', action='store_true', default=False, help="Updates flags for all users in one realm at once."), ) def handle(self, *args, **options): if not args and not options["all_users"] and not options["one_realm"]: print "Please specify an e-mail address and/or --realm or --all" exit(1) if options["all_users"]: users = UserProfile.objects.all() elif options["one_realm"]: if not args: print "Please specify which realm to process." exit(1) users = UserProfile.objects.filter(realm__domain=args[0]) else: users = [UserProfile.objects.get(user__email__iexact=args[0])] for user_profile in users: pointer = user_profile.pointer msgs = UserMessage.objects.filter(user_profile=user_profile, flags=~UserMessage.flags.read, message__id__lte=pointer) if not options["for_real"]: for msg in msgs: print "Adding read flag to msg: %s - %s/%s (own msg: %s)" \ % (user_profile.user.email, msg.message.id, msg.id, msg.message.sender.user.email == user_profile.user.email) else: def do_update(batch): with transaction.commit_on_success(): UserMessage.objects.filter(id__in=batch).update(flags=models.F('flags') | UserMessage.flags.read) mids = [m.id for m in msgs] utils.run_in_batches(mids, 250, do_update, 3, logging.info) if not options["for_real"]: print "Dry run completed. Run with --for-real to change message flags." exit(1) print "User messages updated."
<commit_before><commit_msg>Add a manage.py command to add read flags up to the current pointer This is required because our migration is going to go in two phases. When we do the database migration (on pushing to master), we update all messages at that point. But prod doesn't know about the new flags field, so any new messages sent on prod will not have the read bit set. When we push to prod, we want to re-run the bit of the migration script that automatically sets read flags on messages older than the users's pointer. (imported from commit 961d33e972eac9ada80089bf1b1269c7fb42d56b)<commit_after>from optparse import make_option import logging from django.core.management.base import BaseCommand from zephyr.lib.actions import do_deactivate, user_sessions from zephyr.lib import utils from zephyr.models import UserMessage, UserProfile from django.db import transaction, models class Command(BaseCommand): help = "Updates a user's read messages up to her current pointer location" option_list = BaseCommand.option_list + ( make_option('-f', '--for-real', dest='for_real', action='store_true', default=False, help="Actually change message flags. Default is a dry run."), make_option('-a', '--all', dest='all_users', action='store_true', default=False, help="Updates flags for all users at once."), make_option('-r', '--realm', dest='one_realm', action='store_true', default=False, help="Updates flags for all users in one realm at once."), ) def handle(self, *args, **options): if not args and not options["all_users"] and not options["one_realm"]: print "Please specify an e-mail address and/or --realm or --all" exit(1) if options["all_users"]: users = UserProfile.objects.all() elif options["one_realm"]: if not args: print "Please specify which realm to process." exit(1) users = UserProfile.objects.filter(realm__domain=args[0]) else: users = [UserProfile.objects.get(user__email__iexact=args[0])] for user_profile in users: pointer = user_profile.pointer msgs = UserMessage.objects.filter(user_profile=user_profile, flags=~UserMessage.flags.read, message__id__lte=pointer) if not options["for_real"]: for msg in msgs: print "Adding read flag to msg: %s - %s/%s (own msg: %s)" \ % (user_profile.user.email, msg.message.id, msg.id, msg.message.sender.user.email == user_profile.user.email) else: def do_update(batch): with transaction.commit_on_success(): UserMessage.objects.filter(id__in=batch).update(flags=models.F('flags') | UserMessage.flags.read) mids = [m.id for m in msgs] utils.run_in_batches(mids, 250, do_update, 3, logging.info) if not options["for_real"]: print "Dry run completed. Run with --for-real to change message flags." exit(1) print "User messages updated."
fa470267fff5107e1c4e258e66c677291972c558
scripts/build_sbt_image.py
scripts/build_sbt_image.py
#!/usr/bin/env python # -*- encoding: utf-8 -*- """ Build a Docker image for one of our sbt applications. Usage: build_sbt_image.py --project=<PROJECT> [--version=<VERSION>] [--env=<BUILD_ENV>] build_sbt_image.py -h | --help Options: -h --help Show this screen. --project=<PROJECT> Name of the sbt project (e.g. api, transformer) --version=<VERSION> Version to use in the release ID --env=<BUILD_ENV> Build environment (dev, prod, etc.) """ import subprocess import docker import docopt from tooling import write_release_id, CURRENT_COMMIT, ROOT DEFAULT_VERSION = '0.0.1' DEFAULT_BUILD_ENV = 'dev' if __name__ == '__main__': args = docopt.docopt(__doc__) # Read arguments from docopt project = args['--project'] version = args['--version'] or DEFAULT_VERSION build_env = args['--env'] or DEFAULT_BUILD_ENV print(f'*** Building sbt Docker image for {project}') # Construct the release ID and the tag release_id = f'{version}-{CURRENT_COMMIT}_{build_env}' tag = f'{project}:{release_id}' print(f'*** Image will be tagged {tag}') print(f'*** Building the Scala binaries') subprocess.check_call(['sbt', f'project {project}', 'stage']) print('*** Building the new Docker image') client = docker.from_env() client.images.build(path=ROOT, buildargs={'project': project}, tag=tag) print('*** Saving the release ID to .releases') write_release_id(project=project, release_id=release_id)
Add a script for building the sbt images
Add a script for building the sbt images
Python
mit
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
Add a script for building the sbt images
#!/usr/bin/env python # -*- encoding: utf-8 -*- """ Build a Docker image for one of our sbt applications. Usage: build_sbt_image.py --project=<PROJECT> [--version=<VERSION>] [--env=<BUILD_ENV>] build_sbt_image.py -h | --help Options: -h --help Show this screen. --project=<PROJECT> Name of the sbt project (e.g. api, transformer) --version=<VERSION> Version to use in the release ID --env=<BUILD_ENV> Build environment (dev, prod, etc.) """ import subprocess import docker import docopt from tooling import write_release_id, CURRENT_COMMIT, ROOT DEFAULT_VERSION = '0.0.1' DEFAULT_BUILD_ENV = 'dev' if __name__ == '__main__': args = docopt.docopt(__doc__) # Read arguments from docopt project = args['--project'] version = args['--version'] or DEFAULT_VERSION build_env = args['--env'] or DEFAULT_BUILD_ENV print(f'*** Building sbt Docker image for {project}') # Construct the release ID and the tag release_id = f'{version}-{CURRENT_COMMIT}_{build_env}' tag = f'{project}:{release_id}' print(f'*** Image will be tagged {tag}') print(f'*** Building the Scala binaries') subprocess.check_call(['sbt', f'project {project}', 'stage']) print('*** Building the new Docker image') client = docker.from_env() client.images.build(path=ROOT, buildargs={'project': project}, tag=tag) print('*** Saving the release ID to .releases') write_release_id(project=project, release_id=release_id)
<commit_before><commit_msg>Add a script for building the sbt images<commit_after>
#!/usr/bin/env python # -*- encoding: utf-8 -*- """ Build a Docker image for one of our sbt applications. Usage: build_sbt_image.py --project=<PROJECT> [--version=<VERSION>] [--env=<BUILD_ENV>] build_sbt_image.py -h | --help Options: -h --help Show this screen. --project=<PROJECT> Name of the sbt project (e.g. api, transformer) --version=<VERSION> Version to use in the release ID --env=<BUILD_ENV> Build environment (dev, prod, etc.) """ import subprocess import docker import docopt from tooling import write_release_id, CURRENT_COMMIT, ROOT DEFAULT_VERSION = '0.0.1' DEFAULT_BUILD_ENV = 'dev' if __name__ == '__main__': args = docopt.docopt(__doc__) # Read arguments from docopt project = args['--project'] version = args['--version'] or DEFAULT_VERSION build_env = args['--env'] or DEFAULT_BUILD_ENV print(f'*** Building sbt Docker image for {project}') # Construct the release ID and the tag release_id = f'{version}-{CURRENT_COMMIT}_{build_env}' tag = f'{project}:{release_id}' print(f'*** Image will be tagged {tag}') print(f'*** Building the Scala binaries') subprocess.check_call(['sbt', f'project {project}', 'stage']) print('*** Building the new Docker image') client = docker.from_env() client.images.build(path=ROOT, buildargs={'project': project}, tag=tag) print('*** Saving the release ID to .releases') write_release_id(project=project, release_id=release_id)
Add a script for building the sbt images#!/usr/bin/env python # -*- encoding: utf-8 -*- """ Build a Docker image for one of our sbt applications. Usage: build_sbt_image.py --project=<PROJECT> [--version=<VERSION>] [--env=<BUILD_ENV>] build_sbt_image.py -h | --help Options: -h --help Show this screen. --project=<PROJECT> Name of the sbt project (e.g. api, transformer) --version=<VERSION> Version to use in the release ID --env=<BUILD_ENV> Build environment (dev, prod, etc.) """ import subprocess import docker import docopt from tooling import write_release_id, CURRENT_COMMIT, ROOT DEFAULT_VERSION = '0.0.1' DEFAULT_BUILD_ENV = 'dev' if __name__ == '__main__': args = docopt.docopt(__doc__) # Read arguments from docopt project = args['--project'] version = args['--version'] or DEFAULT_VERSION build_env = args['--env'] or DEFAULT_BUILD_ENV print(f'*** Building sbt Docker image for {project}') # Construct the release ID and the tag release_id = f'{version}-{CURRENT_COMMIT}_{build_env}' tag = f'{project}:{release_id}' print(f'*** Image will be tagged {tag}') print(f'*** Building the Scala binaries') subprocess.check_call(['sbt', f'project {project}', 'stage']) print('*** Building the new Docker image') client = docker.from_env() client.images.build(path=ROOT, buildargs={'project': project}, tag=tag) print('*** Saving the release ID to .releases') write_release_id(project=project, release_id=release_id)
<commit_before><commit_msg>Add a script for building the sbt images<commit_after>#!/usr/bin/env python # -*- encoding: utf-8 -*- """ Build a Docker image for one of our sbt applications. Usage: build_sbt_image.py --project=<PROJECT> [--version=<VERSION>] [--env=<BUILD_ENV>] build_sbt_image.py -h | --help Options: -h --help Show this screen. --project=<PROJECT> Name of the sbt project (e.g. api, transformer) --version=<VERSION> Version to use in the release ID --env=<BUILD_ENV> Build environment (dev, prod, etc.) """ import subprocess import docker import docopt from tooling import write_release_id, CURRENT_COMMIT, ROOT DEFAULT_VERSION = '0.0.1' DEFAULT_BUILD_ENV = 'dev' if __name__ == '__main__': args = docopt.docopt(__doc__) # Read arguments from docopt project = args['--project'] version = args['--version'] or DEFAULT_VERSION build_env = args['--env'] or DEFAULT_BUILD_ENV print(f'*** Building sbt Docker image for {project}') # Construct the release ID and the tag release_id = f'{version}-{CURRENT_COMMIT}_{build_env}' tag = f'{project}:{release_id}' print(f'*** Image will be tagged {tag}') print(f'*** Building the Scala binaries') subprocess.check_call(['sbt', f'project {project}', 'stage']) print('*** Building the new Docker image') client = docker.from_env() client.images.build(path=ROOT, buildargs={'project': project}, tag=tag) print('*** Saving the release ID to .releases') write_release_id(project=project, release_id=release_id)
828521e71b2afd93f53b13222fbdfaf9e855d442
scripts/comment_scraper.py
scripts/comment_scraper.py
#!/usr/bin/env python # -*- coding: utf-8 -*- import praw # Connect to Reddit # ---------------------------- user_agent = "Quick comment thread scraper by /u/mediaarts" r = praw.Reddit(user_agent = user_agent) # Get comment thread and populate dict # ---------------------------- submission_id = "1p1j6c" submission = r.get_submission(submission_id = submission_id, comment_sort = 'top') comments = submission.comments flat_comments = praw.helpers.flatten_tree(comments) print("flat_comments length: {}".format(len(flat_comments))) print("flat_comments class: {}".format(type(flat_comments))) print("first comment class: {}".format(type(flat_comments[0]))) print("last comment class: {}".format(type(flat_comments[len(flat_comments) - 1]))) print("first comment attrs: {}".format(dir(flat_comments[0]))) print("first comment score: {}".format(flat_comments[0].score)) print("first comment author: {}".format(flat_comments[0].author)) fname = submission_id + '.txt' with open(fname, 'w') as f: for comment in flat_comments: if isinstance(comment, praw.objects.Comment): f.write("\n\n") if comment.is_root: f.write("---\n\n") else: f.write("Child comment \n") f.write("Author: " + str(comment.author) + "\n") f.write("Score: " + str(comment.score) + "\n") f.write("Comment: \n\n" + comment.body.encode('utf-8'))
Add scratch Reddit comment scraper
Add scratch Reddit comment scraper
Python
mit
PsyBorgs/redditanalyser,PsyBorgs/redditanalyser
Add scratch Reddit comment scraper
#!/usr/bin/env python # -*- coding: utf-8 -*- import praw # Connect to Reddit # ---------------------------- user_agent = "Quick comment thread scraper by /u/mediaarts" r = praw.Reddit(user_agent = user_agent) # Get comment thread and populate dict # ---------------------------- submission_id = "1p1j6c" submission = r.get_submission(submission_id = submission_id, comment_sort = 'top') comments = submission.comments flat_comments = praw.helpers.flatten_tree(comments) print("flat_comments length: {}".format(len(flat_comments))) print("flat_comments class: {}".format(type(flat_comments))) print("first comment class: {}".format(type(flat_comments[0]))) print("last comment class: {}".format(type(flat_comments[len(flat_comments) - 1]))) print("first comment attrs: {}".format(dir(flat_comments[0]))) print("first comment score: {}".format(flat_comments[0].score)) print("first comment author: {}".format(flat_comments[0].author)) fname = submission_id + '.txt' with open(fname, 'w') as f: for comment in flat_comments: if isinstance(comment, praw.objects.Comment): f.write("\n\n") if comment.is_root: f.write("---\n\n") else: f.write("Child comment \n") f.write("Author: " + str(comment.author) + "\n") f.write("Score: " + str(comment.score) + "\n") f.write("Comment: \n\n" + comment.body.encode('utf-8'))
<commit_before><commit_msg>Add scratch Reddit comment scraper<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- import praw # Connect to Reddit # ---------------------------- user_agent = "Quick comment thread scraper by /u/mediaarts" r = praw.Reddit(user_agent = user_agent) # Get comment thread and populate dict # ---------------------------- submission_id = "1p1j6c" submission = r.get_submission(submission_id = submission_id, comment_sort = 'top') comments = submission.comments flat_comments = praw.helpers.flatten_tree(comments) print("flat_comments length: {}".format(len(flat_comments))) print("flat_comments class: {}".format(type(flat_comments))) print("first comment class: {}".format(type(flat_comments[0]))) print("last comment class: {}".format(type(flat_comments[len(flat_comments) - 1]))) print("first comment attrs: {}".format(dir(flat_comments[0]))) print("first comment score: {}".format(flat_comments[0].score)) print("first comment author: {}".format(flat_comments[0].author)) fname = submission_id + '.txt' with open(fname, 'w') as f: for comment in flat_comments: if isinstance(comment, praw.objects.Comment): f.write("\n\n") if comment.is_root: f.write("---\n\n") else: f.write("Child comment \n") f.write("Author: " + str(comment.author) + "\n") f.write("Score: " + str(comment.score) + "\n") f.write("Comment: \n\n" + comment.body.encode('utf-8'))
Add scratch Reddit comment scraper#!/usr/bin/env python # -*- coding: utf-8 -*- import praw # Connect to Reddit # ---------------------------- user_agent = "Quick comment thread scraper by /u/mediaarts" r = praw.Reddit(user_agent = user_agent) # Get comment thread and populate dict # ---------------------------- submission_id = "1p1j6c" submission = r.get_submission(submission_id = submission_id, comment_sort = 'top') comments = submission.comments flat_comments = praw.helpers.flatten_tree(comments) print("flat_comments length: {}".format(len(flat_comments))) print("flat_comments class: {}".format(type(flat_comments))) print("first comment class: {}".format(type(flat_comments[0]))) print("last comment class: {}".format(type(flat_comments[len(flat_comments) - 1]))) print("first comment attrs: {}".format(dir(flat_comments[0]))) print("first comment score: {}".format(flat_comments[0].score)) print("first comment author: {}".format(flat_comments[0].author)) fname = submission_id + '.txt' with open(fname, 'w') as f: for comment in flat_comments: if isinstance(comment, praw.objects.Comment): f.write("\n\n") if comment.is_root: f.write("---\n\n") else: f.write("Child comment \n") f.write("Author: " + str(comment.author) + "\n") f.write("Score: " + str(comment.score) + "\n") f.write("Comment: \n\n" + comment.body.encode('utf-8'))
<commit_before><commit_msg>Add scratch Reddit comment scraper<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- import praw # Connect to Reddit # ---------------------------- user_agent = "Quick comment thread scraper by /u/mediaarts" r = praw.Reddit(user_agent = user_agent) # Get comment thread and populate dict # ---------------------------- submission_id = "1p1j6c" submission = r.get_submission(submission_id = submission_id, comment_sort = 'top') comments = submission.comments flat_comments = praw.helpers.flatten_tree(comments) print("flat_comments length: {}".format(len(flat_comments))) print("flat_comments class: {}".format(type(flat_comments))) print("first comment class: {}".format(type(flat_comments[0]))) print("last comment class: {}".format(type(flat_comments[len(flat_comments) - 1]))) print("first comment attrs: {}".format(dir(flat_comments[0]))) print("first comment score: {}".format(flat_comments[0].score)) print("first comment author: {}".format(flat_comments[0].author)) fname = submission_id + '.txt' with open(fname, 'w') as f: for comment in flat_comments: if isinstance(comment, praw.objects.Comment): f.write("\n\n") if comment.is_root: f.write("---\n\n") else: f.write("Child comment \n") f.write("Author: " + str(comment.author) + "\n") f.write("Score: " + str(comment.score) + "\n") f.write("Comment: \n\n" + comment.body.encode('utf-8'))
34299b315cc02044050181ffe73b09042bb19a0a
evewspace/API/default_settings.py
evewspace/API/default_settings.py
# Eve W-Space # Copyright (C) 2013 Andrew Austin and other contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. An additional term under section # 7 of the GPL is included in the LICENSE file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from core.models import ConfigEntry #defaults = [("TEST_SETTING", "BOB")] defaults = [ ("API_ALLOW_CHARACTER_KEY", "0"), ("API_ALLOW_EXPIRING_KEY", "0"), ] def load_defaults(): for setting in defaults: config = ConfigEntry.objects.get_or_create(name=setting[0], user=None)[0] config.value = setting[1] config.save()
Add default settings module for API
Add default settings module for API
Python
apache-2.0
evewspace/eve-wspace,nyrocron/eve-wspace,Maarten28/eve-wspace,nyrocron/eve-wspace,Zumochi/eve-wspace,Maarten28/eve-wspace,Maarten28/eve-wspace,acdervis/eve-wspace,hybrid1969/eve-wspace,hybrid1969/eve-wspace,marbindrakon/eve-wspace,Unsettled/eve-wspace,mmalyska/eve-wspace,Maarten28/eve-wspace,marbindrakon/eve-wspace,gpapaz/eve-wspace,gpapaz/eve-wspace,marbindrakon/eve-wspace,marbindrakon/eve-wspace,Zumochi/eve-wspace,Unsettled/eve-wspace,proycon/eve-wspace,proycon/eve-wspace,gpapaz/eve-wspace,Zumochi/eve-wspace,proycon/eve-wspace,hybrid1969/eve-wspace,nyrocron/eve-wspace,nyrocron/eve-wspace,proycon/eve-wspace,evewspace/eve-wspace,Zumochi/eve-wspace,evewspace/eve-wspace,evewspace/eve-wspace,acdervis/eve-wspace,Unsettled/eve-wspace,mmalyska/eve-wspace,mmalyska/eve-wspace,mmalyska/eve-wspace,acdervis/eve-wspace,gpapaz/eve-wspace,hybrid1969/eve-wspace,acdervis/eve-wspace,Unsettled/eve-wspace
Add default settings module for API
# Eve W-Space # Copyright (C) 2013 Andrew Austin and other contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. An additional term under section # 7 of the GPL is included in the LICENSE file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from core.models import ConfigEntry #defaults = [("TEST_SETTING", "BOB")] defaults = [ ("API_ALLOW_CHARACTER_KEY", "0"), ("API_ALLOW_EXPIRING_KEY", "0"), ] def load_defaults(): for setting in defaults: config = ConfigEntry.objects.get_or_create(name=setting[0], user=None)[0] config.value = setting[1] config.save()
<commit_before><commit_msg>Add default settings module for API<commit_after>
# Eve W-Space # Copyright (C) 2013 Andrew Austin and other contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. An additional term under section # 7 of the GPL is included in the LICENSE file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from core.models import ConfigEntry #defaults = [("TEST_SETTING", "BOB")] defaults = [ ("API_ALLOW_CHARACTER_KEY", "0"), ("API_ALLOW_EXPIRING_KEY", "0"), ] def load_defaults(): for setting in defaults: config = ConfigEntry.objects.get_or_create(name=setting[0], user=None)[0] config.value = setting[1] config.save()
Add default settings module for API# Eve W-Space # Copyright (C) 2013 Andrew Austin and other contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. An additional term under section # 7 of the GPL is included in the LICENSE file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from core.models import ConfigEntry #defaults = [("TEST_SETTING", "BOB")] defaults = [ ("API_ALLOW_CHARACTER_KEY", "0"), ("API_ALLOW_EXPIRING_KEY", "0"), ] def load_defaults(): for setting in defaults: config = ConfigEntry.objects.get_or_create(name=setting[0], user=None)[0] config.value = setting[1] config.save()
<commit_before><commit_msg>Add default settings module for API<commit_after># Eve W-Space # Copyright (C) 2013 Andrew Austin and other contributors # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. An additional term under section # 7 of the GPL is included in the LICENSE file. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. from core.models import ConfigEntry #defaults = [("TEST_SETTING", "BOB")] defaults = [ ("API_ALLOW_CHARACTER_KEY", "0"), ("API_ALLOW_EXPIRING_KEY", "0"), ] def load_defaults(): for setting in defaults: config = ConfigEntry.objects.get_or_create(name=setting[0], user=None)[0] config.value = setting[1] config.save()
49046745e9000281c4cf225836ad988b04a59517
judge/migrations/0112_language_extensions.py
judge/migrations/0112_language_extensions.py
from django.db import migrations def update_language_extensions(apps, schema_editor): Language = apps.get_model('judge', 'Language') extension_mapping = { 'ADA': 'adb', 'AWK': 'awk', 'BASH': 'sh', 'BF': 'c', 'C': 'c', 'C11': 'c', 'CBL': 'cbl', 'CLANG': 'c', 'CLANGX': 'cpp', 'COFFEE': 'coffee', 'CPP03': 'cpp', 'CPP11': 'cpp', 'CPP14': 'cpp', 'CPP17': 'cpp', 'D': 'd', 'DART': 'dart', 'F95': 'f95', 'FORTH': 'fs', 'GAS32': 'asm', 'GAS64': 'asm', 'GASARM': 'asm', 'GO': 'go', 'GROOVY': 'groovy', 'HASK': 'hs', 'ICK': 'i', 'JAVA10': 'java', 'JAVA11': 'java', 'JAVA8': 'java', 'JAVA9': 'java', 'KOTLIN': 'kt', 'LUA': 'lua', 'MONOCS': 'cs', 'MONOFS': 'fs', 'MONOVB': 'vb', 'NASM': 'asm', 'NASM64': 'asm', 'OBJC': 'm', 'OCAML': 'ml', 'PAS': 'pas', 'PERL': 'pl', 'PHP': 'php', 'PIKE': 'pike', 'PRO': 'pl', 'PY2': 'py', 'PY3': 'py', 'PYPY': 'py', 'PYPY3': 'py', 'RKT': 'rkt', 'RUBY18': 'rb', 'RUBY2': 'rb', 'RUST': 'rs', 'SBCL': 'cl', 'SCALA': 'scala', 'SCM': 'scm', 'SED': 'sed', 'SWIFT': 'swift', 'TCL': 'tcl', 'TEXT': 'txt', 'TUR': 't', 'V8JS': 'js', 'ZIG': 'zig', } languages = Language.objects.all() for language in languages: try: extension = extension_mapping[language.key] except KeyError: print('Warning: no extension found for %s. Setting extension to language key.' % language.key) extension = language.key.lower() language.extension = extension Language.objects.bulk_update(languages, ['extension']) class Migration(migrations.Migration): dependencies = [ ('judge', '0111_blank_assignees_ticket'), ] operations = [ migrations.RunPython(update_language_extensions, reverse_code=migrations.RunPython.noop), ]
Add migration to update Language extensions
Add migration to update Language extensions
Python
agpl-3.0
DMOJ/site,DMOJ/site,DMOJ/site,DMOJ/site
Add migration to update Language extensions
from django.db import migrations def update_language_extensions(apps, schema_editor): Language = apps.get_model('judge', 'Language') extension_mapping = { 'ADA': 'adb', 'AWK': 'awk', 'BASH': 'sh', 'BF': 'c', 'C': 'c', 'C11': 'c', 'CBL': 'cbl', 'CLANG': 'c', 'CLANGX': 'cpp', 'COFFEE': 'coffee', 'CPP03': 'cpp', 'CPP11': 'cpp', 'CPP14': 'cpp', 'CPP17': 'cpp', 'D': 'd', 'DART': 'dart', 'F95': 'f95', 'FORTH': 'fs', 'GAS32': 'asm', 'GAS64': 'asm', 'GASARM': 'asm', 'GO': 'go', 'GROOVY': 'groovy', 'HASK': 'hs', 'ICK': 'i', 'JAVA10': 'java', 'JAVA11': 'java', 'JAVA8': 'java', 'JAVA9': 'java', 'KOTLIN': 'kt', 'LUA': 'lua', 'MONOCS': 'cs', 'MONOFS': 'fs', 'MONOVB': 'vb', 'NASM': 'asm', 'NASM64': 'asm', 'OBJC': 'm', 'OCAML': 'ml', 'PAS': 'pas', 'PERL': 'pl', 'PHP': 'php', 'PIKE': 'pike', 'PRO': 'pl', 'PY2': 'py', 'PY3': 'py', 'PYPY': 'py', 'PYPY3': 'py', 'RKT': 'rkt', 'RUBY18': 'rb', 'RUBY2': 'rb', 'RUST': 'rs', 'SBCL': 'cl', 'SCALA': 'scala', 'SCM': 'scm', 'SED': 'sed', 'SWIFT': 'swift', 'TCL': 'tcl', 'TEXT': 'txt', 'TUR': 't', 'V8JS': 'js', 'ZIG': 'zig', } languages = Language.objects.all() for language in languages: try: extension = extension_mapping[language.key] except KeyError: print('Warning: no extension found for %s. Setting extension to language key.' % language.key) extension = language.key.lower() language.extension = extension Language.objects.bulk_update(languages, ['extension']) class Migration(migrations.Migration): dependencies = [ ('judge', '0111_blank_assignees_ticket'), ] operations = [ migrations.RunPython(update_language_extensions, reverse_code=migrations.RunPython.noop), ]
<commit_before><commit_msg>Add migration to update Language extensions<commit_after>
from django.db import migrations def update_language_extensions(apps, schema_editor): Language = apps.get_model('judge', 'Language') extension_mapping = { 'ADA': 'adb', 'AWK': 'awk', 'BASH': 'sh', 'BF': 'c', 'C': 'c', 'C11': 'c', 'CBL': 'cbl', 'CLANG': 'c', 'CLANGX': 'cpp', 'COFFEE': 'coffee', 'CPP03': 'cpp', 'CPP11': 'cpp', 'CPP14': 'cpp', 'CPP17': 'cpp', 'D': 'd', 'DART': 'dart', 'F95': 'f95', 'FORTH': 'fs', 'GAS32': 'asm', 'GAS64': 'asm', 'GASARM': 'asm', 'GO': 'go', 'GROOVY': 'groovy', 'HASK': 'hs', 'ICK': 'i', 'JAVA10': 'java', 'JAVA11': 'java', 'JAVA8': 'java', 'JAVA9': 'java', 'KOTLIN': 'kt', 'LUA': 'lua', 'MONOCS': 'cs', 'MONOFS': 'fs', 'MONOVB': 'vb', 'NASM': 'asm', 'NASM64': 'asm', 'OBJC': 'm', 'OCAML': 'ml', 'PAS': 'pas', 'PERL': 'pl', 'PHP': 'php', 'PIKE': 'pike', 'PRO': 'pl', 'PY2': 'py', 'PY3': 'py', 'PYPY': 'py', 'PYPY3': 'py', 'RKT': 'rkt', 'RUBY18': 'rb', 'RUBY2': 'rb', 'RUST': 'rs', 'SBCL': 'cl', 'SCALA': 'scala', 'SCM': 'scm', 'SED': 'sed', 'SWIFT': 'swift', 'TCL': 'tcl', 'TEXT': 'txt', 'TUR': 't', 'V8JS': 'js', 'ZIG': 'zig', } languages = Language.objects.all() for language in languages: try: extension = extension_mapping[language.key] except KeyError: print('Warning: no extension found for %s. Setting extension to language key.' % language.key) extension = language.key.lower() language.extension = extension Language.objects.bulk_update(languages, ['extension']) class Migration(migrations.Migration): dependencies = [ ('judge', '0111_blank_assignees_ticket'), ] operations = [ migrations.RunPython(update_language_extensions, reverse_code=migrations.RunPython.noop), ]
Add migration to update Language extensionsfrom django.db import migrations def update_language_extensions(apps, schema_editor): Language = apps.get_model('judge', 'Language') extension_mapping = { 'ADA': 'adb', 'AWK': 'awk', 'BASH': 'sh', 'BF': 'c', 'C': 'c', 'C11': 'c', 'CBL': 'cbl', 'CLANG': 'c', 'CLANGX': 'cpp', 'COFFEE': 'coffee', 'CPP03': 'cpp', 'CPP11': 'cpp', 'CPP14': 'cpp', 'CPP17': 'cpp', 'D': 'd', 'DART': 'dart', 'F95': 'f95', 'FORTH': 'fs', 'GAS32': 'asm', 'GAS64': 'asm', 'GASARM': 'asm', 'GO': 'go', 'GROOVY': 'groovy', 'HASK': 'hs', 'ICK': 'i', 'JAVA10': 'java', 'JAVA11': 'java', 'JAVA8': 'java', 'JAVA9': 'java', 'KOTLIN': 'kt', 'LUA': 'lua', 'MONOCS': 'cs', 'MONOFS': 'fs', 'MONOVB': 'vb', 'NASM': 'asm', 'NASM64': 'asm', 'OBJC': 'm', 'OCAML': 'ml', 'PAS': 'pas', 'PERL': 'pl', 'PHP': 'php', 'PIKE': 'pike', 'PRO': 'pl', 'PY2': 'py', 'PY3': 'py', 'PYPY': 'py', 'PYPY3': 'py', 'RKT': 'rkt', 'RUBY18': 'rb', 'RUBY2': 'rb', 'RUST': 'rs', 'SBCL': 'cl', 'SCALA': 'scala', 'SCM': 'scm', 'SED': 'sed', 'SWIFT': 'swift', 'TCL': 'tcl', 'TEXT': 'txt', 'TUR': 't', 'V8JS': 'js', 'ZIG': 'zig', } languages = Language.objects.all() for language in languages: try: extension = extension_mapping[language.key] except KeyError: print('Warning: no extension found for %s. Setting extension to language key.' % language.key) extension = language.key.lower() language.extension = extension Language.objects.bulk_update(languages, ['extension']) class Migration(migrations.Migration): dependencies = [ ('judge', '0111_blank_assignees_ticket'), ] operations = [ migrations.RunPython(update_language_extensions, reverse_code=migrations.RunPython.noop), ]
<commit_before><commit_msg>Add migration to update Language extensions<commit_after>from django.db import migrations def update_language_extensions(apps, schema_editor): Language = apps.get_model('judge', 'Language') extension_mapping = { 'ADA': 'adb', 'AWK': 'awk', 'BASH': 'sh', 'BF': 'c', 'C': 'c', 'C11': 'c', 'CBL': 'cbl', 'CLANG': 'c', 'CLANGX': 'cpp', 'COFFEE': 'coffee', 'CPP03': 'cpp', 'CPP11': 'cpp', 'CPP14': 'cpp', 'CPP17': 'cpp', 'D': 'd', 'DART': 'dart', 'F95': 'f95', 'FORTH': 'fs', 'GAS32': 'asm', 'GAS64': 'asm', 'GASARM': 'asm', 'GO': 'go', 'GROOVY': 'groovy', 'HASK': 'hs', 'ICK': 'i', 'JAVA10': 'java', 'JAVA11': 'java', 'JAVA8': 'java', 'JAVA9': 'java', 'KOTLIN': 'kt', 'LUA': 'lua', 'MONOCS': 'cs', 'MONOFS': 'fs', 'MONOVB': 'vb', 'NASM': 'asm', 'NASM64': 'asm', 'OBJC': 'm', 'OCAML': 'ml', 'PAS': 'pas', 'PERL': 'pl', 'PHP': 'php', 'PIKE': 'pike', 'PRO': 'pl', 'PY2': 'py', 'PY3': 'py', 'PYPY': 'py', 'PYPY3': 'py', 'RKT': 'rkt', 'RUBY18': 'rb', 'RUBY2': 'rb', 'RUST': 'rs', 'SBCL': 'cl', 'SCALA': 'scala', 'SCM': 'scm', 'SED': 'sed', 'SWIFT': 'swift', 'TCL': 'tcl', 'TEXT': 'txt', 'TUR': 't', 'V8JS': 'js', 'ZIG': 'zig', } languages = Language.objects.all() for language in languages: try: extension = extension_mapping[language.key] except KeyError: print('Warning: no extension found for %s. Setting extension to language key.' % language.key) extension = language.key.lower() language.extension = extension Language.objects.bulk_update(languages, ['extension']) class Migration(migrations.Migration): dependencies = [ ('judge', '0111_blank_assignees_ticket'), ] operations = [ migrations.RunPython(update_language_extensions, reverse_code=migrations.RunPython.noop), ]
d6e6c5caa26469b101333df5a901a94f3fe532f3
tests/test_classbasedview.py
tests/test_classbasedview.py
import asyncio import pytest from aiohttp import web from aiohttp.web_urldispatcher import View from unittest import mock def test_ctor(): request = mock.Mock() view = View(request) assert view.request is request @pytest.mark.run_loop def test_render_ok(): resp = web.Response(text='OK') class MyView(View): @asyncio.coroutine def get(self): return resp request = mock.Mock() request.method = 'GET' resp2 = yield from MyView(request) assert resp is resp2 @pytest.mark.run_loop def test_render_unknown_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'UNKNOWN' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405 @pytest.mark.run_loop def test_render_unsupported_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'POST' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405
Add tests for class based views
Add tests for class based views
Python
apache-2.0
jashandeep-sohi/aiohttp,rutsky/aiohttp,decentfox/aiohttp,alex-eri/aiohttp-1,arthurdarcet/aiohttp,juliatem/aiohttp,esaezgil/aiohttp,pfreixes/aiohttp,mind1master/aiohttp,z2v/aiohttp,KeepSafe/aiohttp,mind1master/aiohttp,decentfox/aiohttp,mind1master/aiohttp,elastic-coders/aiohttp,rutsky/aiohttp,jettify/aiohttp,pfreixes/aiohttp,juliatem/aiohttp,panda73111/aiohttp,z2v/aiohttp,arthurdarcet/aiohttp,alex-eri/aiohttp-1,vaskalas/aiohttp,hellysmile/aiohttp,esaezgil/aiohttp,vaskalas/aiohttp,alex-eri/aiohttp-1,KeepSafe/aiohttp,moden-py/aiohttp,singulared/aiohttp,jettify/aiohttp,AraHaanOrg/aiohttp,panda73111/aiohttp,AraHaanOrg/aiohttp,moden-py/aiohttp,elastic-coders/aiohttp,z2v/aiohttp,Eyepea/aiohttp,singulared/aiohttp,vaskalas/aiohttp,jashandeep-sohi/aiohttp,panda73111/aiohttp,rutsky/aiohttp,decentfox/aiohttp,arthurdarcet/aiohttp,singulared/aiohttp,moden-py/aiohttp,playpauseandstop/aiohttp,jettify/aiohttp,hellysmile/aiohttp,elastic-coders/aiohttp,KeepSafe/aiohttp,jashandeep-sohi/aiohttp,esaezgil/aiohttp
Add tests for class based views
import asyncio import pytest from aiohttp import web from aiohttp.web_urldispatcher import View from unittest import mock def test_ctor(): request = mock.Mock() view = View(request) assert view.request is request @pytest.mark.run_loop def test_render_ok(): resp = web.Response(text='OK') class MyView(View): @asyncio.coroutine def get(self): return resp request = mock.Mock() request.method = 'GET' resp2 = yield from MyView(request) assert resp is resp2 @pytest.mark.run_loop def test_render_unknown_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'UNKNOWN' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405 @pytest.mark.run_loop def test_render_unsupported_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'POST' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405
<commit_before><commit_msg>Add tests for class based views<commit_after>
import asyncio import pytest from aiohttp import web from aiohttp.web_urldispatcher import View from unittest import mock def test_ctor(): request = mock.Mock() view = View(request) assert view.request is request @pytest.mark.run_loop def test_render_ok(): resp = web.Response(text='OK') class MyView(View): @asyncio.coroutine def get(self): return resp request = mock.Mock() request.method = 'GET' resp2 = yield from MyView(request) assert resp is resp2 @pytest.mark.run_loop def test_render_unknown_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'UNKNOWN' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405 @pytest.mark.run_loop def test_render_unsupported_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'POST' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405
Add tests for class based viewsimport asyncio import pytest from aiohttp import web from aiohttp.web_urldispatcher import View from unittest import mock def test_ctor(): request = mock.Mock() view = View(request) assert view.request is request @pytest.mark.run_loop def test_render_ok(): resp = web.Response(text='OK') class MyView(View): @asyncio.coroutine def get(self): return resp request = mock.Mock() request.method = 'GET' resp2 = yield from MyView(request) assert resp is resp2 @pytest.mark.run_loop def test_render_unknown_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'UNKNOWN' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405 @pytest.mark.run_loop def test_render_unsupported_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'POST' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405
<commit_before><commit_msg>Add tests for class based views<commit_after>import asyncio import pytest from aiohttp import web from aiohttp.web_urldispatcher import View from unittest import mock def test_ctor(): request = mock.Mock() view = View(request) assert view.request is request @pytest.mark.run_loop def test_render_ok(): resp = web.Response(text='OK') class MyView(View): @asyncio.coroutine def get(self): return resp request = mock.Mock() request.method = 'GET' resp2 = yield from MyView(request) assert resp is resp2 @pytest.mark.run_loop def test_render_unknown_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'UNKNOWN' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405 @pytest.mark.run_loop def test_render_unsupported_method(): class MyView(View): @asyncio.coroutine def get(self): return web.Response(text='OK') request = mock.Mock() request.method = 'POST' with pytest.raises(web.HTTPMethodNotAllowed) as ctx: yield from MyView(request) assert ctx.value.status == 405
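For illustration only (not part of the dataset record above): one way a class-based view like the ones under test might be wired into an application. The route path and view name are made up, and exact import locations vary across aiohttp versions, so treat this as a sketch rather than the library's canonical usage.
from aiohttp import web
from aiohttp.web_urldispatcher import View

class HelloView(View):
    async def get(self):
        # Return a trivial response, mirroring the MyView used in the tests
        return web.Response(text="OK")

app = web.Application()
# '*' registers the view for all HTTP methods; unsupported ones raise 405
app.router.add_route("*", "/hello", HelloView)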
082bdc7c12074f7df3d0a308b00f0d512ae98a16
lib/tarSupport.py
lib/tarSupport.py
import os
import sys
import tarfile
import cStringIO

class GlideinTar:
    """
    potential exception needs to be caught by calling routine
    """
    def __init__(self):
        self.strings = {}
        self.files = []

    def add_file(self, filename, arc_dirname):
        if os.path.exists(filename):
            self.files.append((filename, arc_dirname))

    def add_string(self, name, string_data):
        self.strings[name] = string_data

    def create_tar(self, tf):
        for file in self.files:
            file, dirname = file
            if dirname:
                tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
            else:
                tf.add(file)

        for filename, string in self.strings.items():
            fd_str = cStringIO.StringIO(string)
            fd_str.seek(0)
            ti = tarfile.TarInfo()
            ti.size = len(string)
            ti.name = filename
            ti.type = tarfile.REGTYPE
            tf.addfile(ti, fd_str)

    def create_tar_file(self, fd):
        tf = tarfile.open(fileobj=fd, mode="w:gz")
        self.create_tar(tf)
        tf.close()

    def create_tar_blob(self):
        from cStringIO import StringIO
        file_out = StringIO()
        tf = tarfile.open(fileobj=file_out, mode="w:gz")
        self.create_tar(tf)
        tf.close()
        return file_out.getvalue()
Add the ability to create Tar files
Add the ability to create Tar files
Python
bsd-3-clause
bbockelm/glideinWMS,bbockelm/glideinWMS,bbockelm/glideinWMS,bbockelm/glideinWMS
Add the ability to create Tar files
import os
import sys
import tarfile
import cStringIO

class GlideinTar:
    """
    potential exception needs to be caught by calling routine
    """
    def __init__(self):
        self.strings = {}
        self.files = []

    def add_file(self, filename, arc_dirname):
        if os.path.exists(filename):
            self.files.append((filename, arc_dirname))

    def add_string(self, name, string_data):
        self.strings[name] = string_data

    def create_tar(self, tf):
        for file in self.files:
            file, dirname = file
            if dirname:
                tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
            else:
                tf.add(file)

        for filename, string in self.strings.items():
            fd_str = cStringIO.StringIO(string)
            fd_str.seek(0)
            ti = tarfile.TarInfo()
            ti.size = len(string)
            ti.name = filename
            ti.type = tarfile.REGTYPE
            tf.addfile(ti, fd_str)

    def create_tar_file(self, fd):
        tf = tarfile.open(fileobj=fd, mode="w:gz")
        self.create_tar(tf)
        tf.close()

    def create_tar_blob(self):
        from cStringIO import StringIO
        file_out = StringIO()
        tf = tarfile.open(fileobj=file_out, mode="w:gz")
        self.create_tar(tf)
        tf.close()
        return file_out.getvalue()
<commit_before><commit_msg>Add the ability to create Tar files<commit_after>
import os
import sys
import tarfile
import cStringIO

class GlideinTar:
    """
    potential exception needs to be caught by calling routine
    """
    def __init__(self):
        self.strings = {}
        self.files = []

    def add_file(self, filename, arc_dirname):
        if os.path.exists(filename):
            self.files.append((filename, arc_dirname))

    def add_string(self, name, string_data):
        self.strings[name] = string_data

    def create_tar(self, tf):
        for file in self.files:
            file, dirname = file
            if dirname:
                tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
            else:
                tf.add(file)

        for filename, string in self.strings.items():
            fd_str = cStringIO.StringIO(string)
            fd_str.seek(0)
            ti = tarfile.TarInfo()
            ti.size = len(string)
            ti.name = filename
            ti.type = tarfile.REGTYPE
            tf.addfile(ti, fd_str)

    def create_tar_file(self, fd):
        tf = tarfile.open(fileobj=fd, mode="w:gz")
        self.create_tar(tf)
        tf.close()

    def create_tar_blob(self):
        from cStringIO import StringIO
        file_out = StringIO()
        tf = tarfile.open(fileobj=file_out, mode="w:gz")
        self.create_tar(tf)
        tf.close()
        return file_out.getvalue()
Add the ability to create Tar filesimport os
import sys
import tarfile
import cStringIO

class GlideinTar:
    """
    potential exception needs to be caught by calling routine
    """
    def __init__(self):
        self.strings = {}
        self.files = []

    def add_file(self, filename, arc_dirname):
        if os.path.exists(filename):
            self.files.append((filename, arc_dirname))

    def add_string(self, name, string_data):
        self.strings[name] = string_data

    def create_tar(self, tf):
        for file in self.files:
            file, dirname = file
            if dirname:
                tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
            else:
                tf.add(file)

        for filename, string in self.strings.items():
            fd_str = cStringIO.StringIO(string)
            fd_str.seek(0)
            ti = tarfile.TarInfo()
            ti.size = len(string)
            ti.name = filename
            ti.type = tarfile.REGTYPE
            tf.addfile(ti, fd_str)

    def create_tar_file(self, fd):
        tf = tarfile.open(fileobj=fd, mode="w:gz")
        self.create_tar(tf)
        tf.close()

    def create_tar_blob(self):
        from cStringIO import StringIO
        file_out = StringIO()
        tf = tarfile.open(fileobj=file_out, mode="w:gz")
        self.create_tar(tf)
        tf.close()
        return file_out.getvalue()
<commit_before><commit_msg>Add the ability to create Tar files<commit_after>import os
import sys
import tarfile
import cStringIO

class GlideinTar:
    """
    potential exception needs to be caught by calling routine
    """
    def __init__(self):
        self.strings = {}
        self.files = []

    def add_file(self, filename, arc_dirname):
        if os.path.exists(filename):
            self.files.append((filename, arc_dirname))

    def add_string(self, name, string_data):
        self.strings[name] = string_data

    def create_tar(self, tf):
        for file in self.files:
            file, dirname = file
            if dirname:
                tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
            else:
                tf.add(file)

        for filename, string in self.strings.items():
            fd_str = cStringIO.StringIO(string)
            fd_str.seek(0)
            ti = tarfile.TarInfo()
            ti.size = len(string)
            ti.name = filename
            ti.type = tarfile.REGTYPE
            tf.addfile(ti, fd_str)

    def create_tar_file(self, fd):
        tf = tarfile.open(fileobj=fd, mode="w:gz")
        self.create_tar(tf)
        tf.close()

    def create_tar_blob(self):
        from cStringIO import StringIO
        file_out = StringIO()
        tf = tarfile.open(fileobj=file_out, mode="w:gz")
        self.create_tar(tf)
        tf.close()
        return file_out.getvalue()
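For illustration only (not part of the dataset record above): a minimal, Python 2-style usage sketch of the GlideinTar helper. The file names, archive entry names, and the assumption that lib/tarSupport.py is importable as tarSupport are all made up for the example.
from tarSupport import GlideinTar  # assumes lib/ is on sys.path

tar = GlideinTar()
# Add an in-memory string as an archive member
tar.add_string("job_wrapper.sh", "#!/bin/sh\necho hello\n")
# Add an existing file under the "etc" directory inside the archive
tar.add_file("/etc/hosts", "etc")
# Get the gzipped tarball as a blob and write it out
blob = tar.create_tar_blob()
open("bundle.tar.gz", "wb").write(blob)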
011c1701813c3107433d261581189c57e3b3302f
megalist_dataflow/utils/group_by_execution_dofn.py
megalist_dataflow/utils/group_by_execution_dofn.py
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from apache_beam import DoFn


class GroupByExecutionDoFn(DoFn):
    """
    Groups received elements into batches.
    Elements must be tuples where [0] is an Execution.
    When the Execution changes between elements, the current batch is emitted
    even if it hasn't reached the batch size.
    """

    def __init__(self,
                 batch_size=5000  # type: int
                 ):
        super().__init__()
        self._batch_size = batch_size
        self._batch = None
        self._last_execution = None

    def start_bundle(self):
        self._batch = []

    def process(self, element, *args, **kwargs):
        execution = element[0]

        if self._last_execution is not None and self._last_execution != execution:
            yield self._batch
            self._batch = []

        self._last_execution = execution

        self._batch.append(element)
        if len(self._batch) >= self._batch_size:
            yield self._batch
            self._batch = []
Create DoFn to group and batch data based on Execution equality
Create DoFn to group and batch data based on Execution equality
Python
apache-2.0
google/megalista,google/megalista
Create DoFn to group and batch data based on Execution equality
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from apache_beam import DoFn


class GroupByExecutionDoFn(DoFn):
    """
    Groups received elements into batches.
    Elements must be tuples where [0] is an Execution.
    When the Execution changes between elements, the current batch is emitted
    even if it hasn't reached the batch size.
    """

    def __init__(self,
                 batch_size=5000  # type: int
                 ):
        super().__init__()
        self._batch_size = batch_size
        self._batch = None
        self._last_execution = None

    def start_bundle(self):
        self._batch = []

    def process(self, element, *args, **kwargs):
        execution = element[0]

        if self._last_execution is not None and self._last_execution != execution:
            yield self._batch
            self._batch = []

        self._last_execution = execution

        self._batch.append(element)
        if len(self._batch) >= self._batch_size:
            yield self._batch
            self._batch = []
<commit_before><commit_msg>Create DoFn to group and batch data based on Execution equality<commit_after>
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from apache_beam import DoFn


class GroupByExecutionDoFn(DoFn):
    """
    Groups received elements into batches.
    Elements must be tuples where [0] is an Execution.
    When the Execution changes between elements, the current batch is emitted
    even if it hasn't reached the batch size.
    """

    def __init__(self,
                 batch_size=5000  # type: int
                 ):
        super().__init__()
        self._batch_size = batch_size
        self._batch = None
        self._last_execution = None

    def start_bundle(self):
        self._batch = []

    def process(self, element, *args, **kwargs):
        execution = element[0]

        if self._last_execution is not None and self._last_execution != execution:
            yield self._batch
            self._batch = []

        self._last_execution = execution

        self._batch.append(element)
        if len(self._batch) >= self._batch_size:
            yield self._batch
            self._batch = []
Create DoFn to group and batch data based on Execution equality# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from apache_beam import DoFn


class GroupByExecutionDoFn(DoFn):
    """
    Groups received elements into batches.
    Elements must be tuples where [0] is an Execution.
    When the Execution changes between elements, the current batch is emitted
    even if it hasn't reached the batch size.
    """

    def __init__(self,
                 batch_size=5000  # type: int
                 ):
        super().__init__()
        self._batch_size = batch_size
        self._batch = None
        self._last_execution = None

    def start_bundle(self):
        self._batch = []

    def process(self, element, *args, **kwargs):
        execution = element[0]

        if self._last_execution is not None and self._last_execution != execution:
            yield self._batch
            self._batch = []

        self._last_execution = execution

        self._batch.append(element)
        if len(self._batch) >= self._batch_size:
            yield self._batch
            self._batch = []
<commit_before><commit_msg>Create DoFn to group and batch data based on Execution equality<commit_after># Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from apache_beam import DoFn


class GroupByExecutionDoFn(DoFn):
    """
    Groups received elements into batches.
    Elements must be tuples where [0] is an Execution.
    When the Execution changes between elements, the current batch is emitted
    even if it hasn't reached the batch size.
    """

    def __init__(self,
                 batch_size=5000  # type: int
                 ):
        super().__init__()
        self._batch_size = batch_size
        self._batch = None
        self._last_execution = None

    def start_bundle(self):
        self._batch = []

    def process(self, element, *args, **kwargs):
        execution = element[0]

        if self._last_execution is not None and self._last_execution != execution:
            yield self._batch
            self._batch = []

        self._last_execution = execution

        self._batch.append(element)
        if len(self._batch) >= self._batch_size:
            yield self._batch
            self._batch = []
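For illustration only (not part of the dataset record above): a minimal sketch of applying a DoFn like GroupByExecutionDoFn in a Beam pipeline. The element values are made up, the import path is inferred from the file path in the record, and the sketch only shows how the transform is wired in, not the full megalista pipeline.
import apache_beam as beam

from megalist_dataflow.utils.group_by_execution_dofn import GroupByExecutionDoFn

with beam.Pipeline() as p:
    (
        p
        # Tuples of (execution identifier, payload); in the real pipeline
        # element[0] would be an Execution object rather than a string
        | beam.Create([("exec_a", 1), ("exec_a", 2), ("exec_b", 3)])
        | beam.ParDo(GroupByExecutionDoFn(batch_size=2))
        | beam.Map(print)
    )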
10c7d2f33d34e5a158795701b2d9bc71bbb1c120
integration_tests/samples/rtm_v2/rtm_v2_proxy_app.py
integration_tests/samples/rtm_v2/rtm_v2_proxy_app.py
# ------------------ # Only for running this script here import sys from os.path import dirname sys.path.insert(1, f"{dirname(__file__)}/../../..") # ------------------ import logging logging.basicConfig( level=logging.DEBUG, format="%(asctime)s.%(msecs)03d %(levelname)s %(filename)s (%(lineno)s): %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) logger = logging.getLogger(__name__) import os from slack_sdk.rtm.v2 import RTMClient from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN # pip3 install proxy.py # proxy --port 9000 --log-level d proxy_url = "http://localhost:9000" if __name__ == "__main__": rtm = RTMClient( token=os.environ.get(SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN), trace_enabled=True, all_message_trace_enabled=True, proxy=proxy_url, ) @rtm.on("message") def handle(client: RTMClient, event: dict): client.web_client.reactions_add( channel=event["channel"], timestamp=event["ts"], name="eyes", ) @rtm.on("*") def handle(client: RTMClient, event: dict): logger.info(event) rtm.start()
Add RTM v2 client example too
Add RTM v2 client example too
Python
mit
slackapi/python-slackclient,slackapi/python-slackclient,slackhq/python-slackclient,slackapi/python-slackclient
Add RTM v2 client example too
# ------------------ # Only for running this script here import sys from os.path import dirname sys.path.insert(1, f"{dirname(__file__)}/../../..") # ------------------ import logging logging.basicConfig( level=logging.DEBUG, format="%(asctime)s.%(msecs)03d %(levelname)s %(filename)s (%(lineno)s): %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) logger = logging.getLogger(__name__) import os from slack_sdk.rtm.v2 import RTMClient from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN # pip3 install proxy.py # proxy --port 9000 --log-level d proxy_url = "http://localhost:9000" if __name__ == "__main__": rtm = RTMClient( token=os.environ.get(SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN), trace_enabled=True, all_message_trace_enabled=True, proxy=proxy_url, ) @rtm.on("message") def handle(client: RTMClient, event: dict): client.web_client.reactions_add( channel=event["channel"], timestamp=event["ts"], name="eyes", ) @rtm.on("*") def handle(client: RTMClient, event: dict): logger.info(event) rtm.start()
<commit_before><commit_msg>Add RTM v2 client example too<commit_after>
# ------------------ # Only for running this script here import sys from os.path import dirname sys.path.insert(1, f"{dirname(__file__)}/../../..") # ------------------ import logging logging.basicConfig( level=logging.DEBUG, format="%(asctime)s.%(msecs)03d %(levelname)s %(filename)s (%(lineno)s): %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) logger = logging.getLogger(__name__) import os from slack_sdk.rtm.v2 import RTMClient from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN # pip3 install proxy.py # proxy --port 9000 --log-level d proxy_url = "http://localhost:9000" if __name__ == "__main__": rtm = RTMClient( token=os.environ.get(SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN), trace_enabled=True, all_message_trace_enabled=True, proxy=proxy_url, ) @rtm.on("message") def handle(client: RTMClient, event: dict): client.web_client.reactions_add( channel=event["channel"], timestamp=event["ts"], name="eyes", ) @rtm.on("*") def handle(client: RTMClient, event: dict): logger.info(event) rtm.start()
Add RTM v2 client example too# ------------------ # Only for running this script here import sys from os.path import dirname sys.path.insert(1, f"{dirname(__file__)}/../../..") # ------------------ import logging logging.basicConfig( level=logging.DEBUG, format="%(asctime)s.%(msecs)03d %(levelname)s %(filename)s (%(lineno)s): %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) logger = logging.getLogger(__name__) import os from slack_sdk.rtm.v2 import RTMClient from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN # pip3 install proxy.py # proxy --port 9000 --log-level d proxy_url = "http://localhost:9000" if __name__ == "__main__": rtm = RTMClient( token=os.environ.get(SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN), trace_enabled=True, all_message_trace_enabled=True, proxy=proxy_url, ) @rtm.on("message") def handle(client: RTMClient, event: dict): client.web_client.reactions_add( channel=event["channel"], timestamp=event["ts"], name="eyes", ) @rtm.on("*") def handle(client: RTMClient, event: dict): logger.info(event) rtm.start()
<commit_before><commit_msg>Add RTM v2 client example too<commit_after># ------------------ # Only for running this script here import sys from os.path import dirname sys.path.insert(1, f"{dirname(__file__)}/../../..") # ------------------ import logging logging.basicConfig( level=logging.DEBUG, format="%(asctime)s.%(msecs)03d %(levelname)s %(filename)s (%(lineno)s): %(message)s", datefmt="%Y-%m-%d %H:%M:%S", ) logger = logging.getLogger(__name__) import os from slack_sdk.rtm.v2 import RTMClient from integration_tests.env_variable_names import SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN # pip3 install proxy.py # proxy --port 9000 --log-level d proxy_url = "http://localhost:9000" if __name__ == "__main__": rtm = RTMClient( token=os.environ.get(SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN), trace_enabled=True, all_message_trace_enabled=True, proxy=proxy_url, ) @rtm.on("message") def handle(client: RTMClient, event: dict): client.web_client.reactions_add( channel=event["channel"], timestamp=event["ts"], name="eyes", ) @rtm.on("*") def handle(client: RTMClient, event: dict): logger.info(event) rtm.start()
5367b40e7dec274cb36df6e5dfdd53fca986345b
scripts/annotate-true-allele-freqs.py
scripts/annotate-true-allele-freqs.py
from cyvcf2 import VCF, Writer
import numpy as np


def subclone_vaf(gt):
    """Calculate subclone allele frequency"""
    if np.all(gt[:2] == [1, 1]):
        return 1.0
    elif np.all(gt[:2] == [0, 1]) or np.all(gt[:2] == [1, 0]):
        return 0.5
    else:
        return 0.0


# Reader
vcf_in = VCF(snakemake.input.vcf)

# Setup subclone information
subclones = ["Som{}".format(i) for i in range(1, 5)]
fractions = [1/3, 1/3, 1/4, 1/12]
subclone_idx = [vcf_in.samples.index(s) for s in subclones]

# Prepare writer
bcf_out = Writer(snakemake.output[0], vcf_in)
bcf_out.add_info_to_header({"ID": "AF", "Number": "1",
                            "Description": "True tumor allele frequency",
                            "Type": "Float"})

for rec in vcf_in:
    if len(rec.ALT) > 1:
        raise ValueError("multiallelic sites are not supported at the moment")
    # calculate AF
    vaf = sum(fraction * subclone_vaf(rec.genotypes[idx]) for idx, fraction in zip(subclone_idx, fractions))
    rec.INFO["AF"] = vaf
    bcf_out.write_record(rec)
bcf_out.close()
Add script to annotate allele frequencies.
Add script to annotate allele frequencies.
Python
mit
PROSIC/prosic-evaluation,PROSIC/prosic-evaluation
Add script to annotate allele frequencies.
from cyvcf2 import VCF, Writer
import numpy as np


def subclone_vaf(gt):
    """Calculate subclone allele frequency"""
    if np.all(gt[:2] == [1, 1]):
        return 1.0
    elif np.all(gt[:2] == [0, 1]) or np.all(gt[:2] == [1, 0]):
        return 0.5
    else:
        return 0.0


# Reader
vcf_in = VCF(snakemake.input.vcf)

# Setup subclone information
subclones = ["Som{}".format(i) for i in range(1, 5)]
fractions = [1/3, 1/3, 1/4, 1/12]
subclone_idx = [vcf_in.samples.index(s) for s in subclones]

# Prepare writer
bcf_out = Writer(snakemake.output[0], vcf_in)
bcf_out.add_info_to_header({"ID": "AF", "Number": "1",
                            "Description": "True tumor allele frequency",
                            "Type": "Float"})

for rec in vcf_in:
    if len(rec.ALT) > 1:
        raise ValueError("multiallelic sites are not supported at the moment")
    # calculate AF
    vaf = sum(fraction * subclone_vaf(rec.genotypes[idx]) for idx, fraction in zip(subclone_idx, fractions))
    rec.INFO["AF"] = vaf
    bcf_out.write_record(rec)
bcf_out.close()
<commit_before><commit_msg>Add script to annotate allele frequencies.<commit_after>
from cyvcf2 import VCF, Writer
import numpy as np


def subclone_vaf(gt):
    """Calculate subclone allele frequency"""
    if np.all(gt[:2] == [1, 1]):
        return 1.0
    elif np.all(gt[:2] == [0, 1]) or np.all(gt[:2] == [1, 0]):
        return 0.5
    else:
        return 0.0


# Reader
vcf_in = VCF(snakemake.input.vcf)

# Setup subclone information
subclones = ["Som{}".format(i) for i in range(1, 5)]
fractions = [1/3, 1/3, 1/4, 1/12]
subclone_idx = [vcf_in.samples.index(s) for s in subclones]

# Prepare writer
bcf_out = Writer(snakemake.output[0], vcf_in)
bcf_out.add_info_to_header({"ID": "AF", "Number": "1",
                            "Description": "True tumor allele frequency",
                            "Type": "Float"})

for rec in vcf_in:
    if len(rec.ALT) > 1:
        raise ValueError("multiallelic sites are not supported at the moment")
    # calculate AF
    vaf = sum(fraction * subclone_vaf(rec.genotypes[idx])
              for idx, fraction in zip(subclone_idx, fractions))
    rec.INFO["AF"] = vaf
    bcf_out.write_record(rec)

bcf_out.close()
Add script to annotate allele frequencies.from cyvcf2 import VCF, Writer
import numpy as np


def subclone_vaf(gt):
    """Calculate subclone allele frequency"""
    if np.all(gt[:2] == [1, 1]):
        return 1.0
    elif np.all(gt[:2] == [0, 1]) or np.all(gt[:2] == [1, 0]):
        return 0.5
    else:
        return 0.0


# Reader
vcf_in = VCF(snakemake.input.vcf)

# Setup subclone information
subclones = ["Som{}".format(i) for i in range(1, 5)]
fractions = [1/3, 1/3, 1/4, 1/12]
subclone_idx = [vcf_in.samples.index(s) for s in subclones]

# Prepare writer
bcf_out = Writer(snakemake.output[0], vcf_in)
bcf_out.add_info_to_header({"ID": "AF", "Number": "1",
                            "Description": "True tumor allele frequency",
                            "Type": "Float"})

for rec in vcf_in:
    if len(rec.ALT) > 1:
        raise ValueError("multiallelic sites are not supported at the moment")
    # calculate AF
    vaf = sum(fraction * subclone_vaf(rec.genotypes[idx])
              for idx, fraction in zip(subclone_idx, fractions))
    rec.INFO["AF"] = vaf
    bcf_out.write_record(rec)

bcf_out.close()
<commit_before><commit_msg>Add script to annotate allele frequencies.<commit_after>from cyvcf2 import VCF, Writer
import numpy as np


def subclone_vaf(gt):
    """Calculate subclone allele frequency"""
    if np.all(gt[:2] == [1, 1]):
        return 1.0
    elif np.all(gt[:2] == [0, 1]) or np.all(gt[:2] == [1, 0]):
        return 0.5
    else:
        return 0.0


# Reader
vcf_in = VCF(snakemake.input.vcf)

# Setup subclone information
subclones = ["Som{}".format(i) for i in range(1, 5)]
fractions = [1/3, 1/3, 1/4, 1/12]
subclone_idx = [vcf_in.samples.index(s) for s in subclones]

# Prepare writer
bcf_out = Writer(snakemake.output[0], vcf_in)
bcf_out.add_info_to_header({"ID": "AF", "Number": "1",
                            "Description": "True tumor allele frequency",
                            "Type": "Float"})

for rec in vcf_in:
    if len(rec.ALT) > 1:
        raise ValueError("multiallelic sites are not supported at the moment")
    # calculate AF
    vaf = sum(fraction * subclone_vaf(rec.genotypes[idx])
              for idx, fraction in zip(subclone_idx, fractions))
    rec.INFO["AF"] = vaf
    bcf_out.write_record(rec)

bcf_out.close()
cf7fd8a8ba0943303dd5baaee831e7070460c0a0
lowfat/management/commands/addactivities.py
lowfat/management/commands/addactivities.py
import pandas as pd from django.core.management.base import BaseCommand from lowfat.models import Fund class Command(BaseCommand): help = "Add activities" def add_arguments(self, parser): parser.add_argument('csv', nargs='?', default='activities.csv') # pylint: disable=too-many-branches,too-many-locals def handle(self, *args, **options): data = pd.read_csv(options['csv']) for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable try: funds = Fund.objects.filter( claimant__forenames=line["fornames"], claimant__surname=line["surname"], title=line["title"] ) for fund in funds: if line["category"] == "Organising": fund.category = "H" elif line["category"] == "Attending": fund.category = "A" if line["focus"] == "Domain": fund.focus = "D" elif line["focus"] == "Cross_cutting": fund.focus = "C" fund.activity = line["activities"] print("Changing {}...".format(fund)) fund.save() print("Changed {}...".format(fund)) except BaseException as exception: print("Error: {}\n\t{}".format(exception, line))
Add script to import activities
Add script to import activities
Python
bsd-3-clause
softwaresaved/fat,softwaresaved/fat,softwaresaved/fat,softwaresaved/fat
Add script to import activities
import pandas as pd from django.core.management.base import BaseCommand from lowfat.models import Fund class Command(BaseCommand): help = "Add activities" def add_arguments(self, parser): parser.add_argument('csv', nargs='?', default='activities.csv') # pylint: disable=too-many-branches,too-many-locals def handle(self, *args, **options): data = pd.read_csv(options['csv']) for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable try: funds = Fund.objects.filter( claimant__forenames=line["fornames"], claimant__surname=line["surname"], title=line["title"] ) for fund in funds: if line["category"] == "Organising": fund.category = "H" elif line["category"] == "Attending": fund.category = "A" if line["focus"] == "Domain": fund.focus = "D" elif line["focus"] == "Cross_cutting": fund.focus = "C" fund.activity = line["activities"] print("Changing {}...".format(fund)) fund.save() print("Changed {}...".format(fund)) except BaseException as exception: print("Error: {}\n\t{}".format(exception, line))
<commit_before><commit_msg>Add script to import activities<commit_after>
import pandas as pd from django.core.management.base import BaseCommand from lowfat.models import Fund class Command(BaseCommand): help = "Add activities" def add_arguments(self, parser): parser.add_argument('csv', nargs='?', default='activities.csv') # pylint: disable=too-many-branches,too-many-locals def handle(self, *args, **options): data = pd.read_csv(options['csv']) for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable try: funds = Fund.objects.filter( claimant__forenames=line["fornames"], claimant__surname=line["surname"], title=line["title"] ) for fund in funds: if line["category"] == "Organising": fund.category = "H" elif line["category"] == "Attending": fund.category = "A" if line["focus"] == "Domain": fund.focus = "D" elif line["focus"] == "Cross_cutting": fund.focus = "C" fund.activity = line["activities"] print("Changing {}...".format(fund)) fund.save() print("Changed {}...".format(fund)) except BaseException as exception: print("Error: {}\n\t{}".format(exception, line))
Add script to import activitiesimport pandas as pd from django.core.management.base import BaseCommand from lowfat.models import Fund class Command(BaseCommand): help = "Add activities" def add_arguments(self, parser): parser.add_argument('csv', nargs='?', default='activities.csv') # pylint: disable=too-many-branches,too-many-locals def handle(self, *args, **options): data = pd.read_csv(options['csv']) for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable try: funds = Fund.objects.filter( claimant__forenames=line["fornames"], claimant__surname=line["surname"], title=line["title"] ) for fund in funds: if line["category"] == "Organising": fund.category = "H" elif line["category"] == "Attending": fund.category = "A" if line["focus"] == "Domain": fund.focus = "D" elif line["focus"] == "Cross_cutting": fund.focus = "C" fund.activity = line["activities"] print("Changing {}...".format(fund)) fund.save() print("Changed {}...".format(fund)) except BaseException as exception: print("Error: {}\n\t{}".format(exception, line))
<commit_before><commit_msg>Add script to import activities<commit_after>import pandas as pd from django.core.management.base import BaseCommand from lowfat.models import Fund class Command(BaseCommand): help = "Add activities" def add_arguments(self, parser): parser.add_argument('csv', nargs='?', default='activities.csv') # pylint: disable=too-many-branches,too-many-locals def handle(self, *args, **options): data = pd.read_csv(options['csv']) for index, line in data.iterrows(): # pylint: disable=no-member,unused-variable try: funds = Fund.objects.filter( claimant__forenames=line["fornames"], claimant__surname=line["surname"], title=line["title"] ) for fund in funds: if line["category"] == "Organising": fund.category = "H" elif line["category"] == "Attending": fund.category = "A" if line["focus"] == "Domain": fund.focus = "D" elif line["focus"] == "Cross_cutting": fund.focus = "C" fund.activity = line["activities"] print("Changing {}...".format(fund)) fund.save() print("Changed {}...".format(fund)) except BaseException as exception: print("Error: {}\n\t{}".format(exception, line))
5a641395fb336313f091d28ffb17625df836d2a3
create_admin.py
create_admin.py
from getpass import getpass from md5 import md5 from redis import Redis def main(): database = Redis() database.set("user:admin:password", md5(getpass()).hexdigest()) database.delete("user:admin:profiles") database.rpush("user:admin:profiles", "admin") if __name__ == "__main__": main()
Create script that helps to create an admin user.
Create script that helps to create an admin user.
Python
mit
Computiva/carbono,Computiva/carbono,Computiva/carbono,Computiva/carbono
Create script that helps to create an admin user.
from getpass import getpass from md5 import md5 from redis import Redis def main(): database = Redis() database.set("user:admin:password", md5(getpass()).hexdigest()) database.delete("user:admin:profiles") database.rpush("user:admin:profiles", "admin") if __name__ == "__main__": main()
<commit_before><commit_msg>Create script that helps to create an admin user.<commit_after>
from getpass import getpass from md5 import md5 from redis import Redis def main(): database = Redis() database.set("user:admin:password", md5(getpass()).hexdigest()) database.delete("user:admin:profiles") database.rpush("user:admin:profiles", "admin") if __name__ == "__main__": main()
Create script that helps to create an admin user.from getpass import getpass from md5 import md5 from redis import Redis def main(): database = Redis() database.set("user:admin:password", md5(getpass()).hexdigest()) database.delete("user:admin:profiles") database.rpush("user:admin:profiles", "admin") if __name__ == "__main__": main()
<commit_before><commit_msg>Create script that helps to create an admin user.<commit_after>from getpass import getpass from md5 import md5 from redis import Redis def main(): database = Redis() database.set("user:admin:password", md5(getpass()).hexdigest()) database.delete("user:admin:profiles") database.rpush("user:admin:profiles", "admin") if __name__ == "__main__": main()
1867707bd33bb4908e63f82f3d975fcf5d9e829a
plc-upo-alarm.py
plc-upo-alarm.py
import shlex import sys import re from subprocess import Popen, PIPE def extractMacAddress(ampStatLine): macAddr = None items = ampStatLine.split( " " ) for index in range(len(items)): if (items[index] == "MAC") and ((index+1) < len(items)): if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", items[index+1].lower()): return items[index+1] return macAddr device = "eth0" if len(sys.argv) > 1: device = sys.argv[1] cmd = "ampstat -m -i {}".format(device) process = Popen(shlex.split(cmd), stdout=PIPE) (output, err) = process.communicate() exit_code = process.wait() allowedMacAddrMap = dict() outputArray = output.splitlines() for entries in outputArray: macAddr = extractMacAddress(entries) if macAddr is not None: # Check if mac address is allowed on the network print macAddr
Read ampstat -m -i <device> and print out the mac addresses on the powerline network
Read ampstat -m -i <device> and print out the mac addresses on the powerline network
Python
mit
alanbertadev/plc_upo_alarm
Read ampstat -m -i <device> and print out the mac addresses on the powerline network
import shlex import sys import re from subprocess import Popen, PIPE def extractMacAddress(ampStatLine): macAddr = None items = ampStatLine.split( " " ) for index in range(len(items)): if (items[index] == "MAC") and ((index+1) < len(items)): if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", items[index+1].lower()): return items[index+1] return macAddr device = "eth0" if len(sys.argv) > 1: device = sys.argv[1] cmd = "ampstat -m -i {}".format(device) process = Popen(shlex.split(cmd), stdout=PIPE) (output, err) = process.communicate() exit_code = process.wait() allowedMacAddrMap = dict() outputArray = output.splitlines() for entries in outputArray: macAddr = extractMacAddress(entries) if macAddr is not None: # Check if mac address is allowed on the network print macAddr
<commit_before><commit_msg>Read ampstat -m -i <device> and print out the mac addresses on the powerline network<commit_after>
import shlex import sys import re from subprocess import Popen, PIPE def extractMacAddress(ampStatLine): macAddr = None items = ampStatLine.split( " " ) for index in range(len(items)): if (items[index] == "MAC") and ((index+1) < len(items)): if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", items[index+1].lower()): return items[index+1] return macAddr device = "eth0" if len(sys.argv) > 1: device = sys.argv[1] cmd = "ampstat -m -i {}".format(device) process = Popen(shlex.split(cmd), stdout=PIPE) (output, err) = process.communicate() exit_code = process.wait() allowedMacAddrMap = dict() outputArray = output.splitlines() for entries in outputArray: macAddr = extractMacAddress(entries) if macAddr is not None: # Check if mac address is allowed on the network print macAddr
Read ampstat -m -i <device> and print out the mac addresses on the powerline networkimport shlex import sys import re from subprocess import Popen, PIPE def extractMacAddress(ampStatLine): macAddr = None items = ampStatLine.split( " " ) for index in range(len(items)): if (items[index] == "MAC") and ((index+1) < len(items)): if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", items[index+1].lower()): return items[index+1] return macAddr device = "eth0" if len(sys.argv) > 1: device = sys.argv[1] cmd = "ampstat -m -i {}".format(device) process = Popen(shlex.split(cmd), stdout=PIPE) (output, err) = process.communicate() exit_code = process.wait() allowedMacAddrMap = dict() outputArray = output.splitlines() for entries in outputArray: macAddr = extractMacAddress(entries) if macAddr is not None: # Check if mac address is allowed on the network print macAddr
<commit_before><commit_msg>Read ampstat -m -i <device> and print out the mac addresses on the powerline network<commit_after>import shlex import sys import re from subprocess import Popen, PIPE def extractMacAddress(ampStatLine): macAddr = None items = ampStatLine.split( " " ) for index in range(len(items)): if (items[index] == "MAC") and ((index+1) < len(items)): if re.match("[0-9a-f]{2}([-:])[0-9a-f]{2}(\\1[0-9a-f]{2}){4}$", items[index+1].lower()): return items[index+1] return macAddr device = "eth0" if len(sys.argv) > 1: device = sys.argv[1] cmd = "ampstat -m -i {}".format(device) process = Popen(shlex.split(cmd), stdout=PIPE) (output, err) = process.communicate() exit_code = process.wait() allowedMacAddrMap = dict() outputArray = output.splitlines() for entries in outputArray: macAddr = extractMacAddress(entries) if macAddr is not None: # Check if mac address is allowed on the network print macAddr
000ceb19945af56a0196654fa2f4e924aaa1f4ae
publications_bootstrap/migrations/0003_db_index.py
publications_bootstrap/migrations/0003_db_index.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-06-30 09:41 from __future__ import unicode_literals from django.db import migrations, models import echoices.fields import publications_bootstrap.fields import publications_bootstrap.models.publication class Migration(migrations.Migration): dependencies = [ ('publications_bootstrap', '0002_initial_data'), ] operations = [ migrations.AlterField( model_name='catalog', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), migrations.AlterField( model_name='publication', name='citekey', field=publications_bootstrap.fields.NullCharField(blank=True, db_index=True, help_text='BibTex citation key. Leave blank if unsure.', max_length=512, null=True, unique=True), ), migrations.AlterField( model_name='publication', name='external', field=models.BooleanField(db_index=True, default=False, help_text='If publication was written in another lab, mark as external.'), ), migrations.AlterField( model_name='publication', name='month', field=echoices.fields.make_echoicefield(blank=True, db_index=True, echoices=publications_bootstrap.models.publication.EMonths, null=True), ), migrations.AlterField( model_name='publication', name='title', field=models.CharField(db_index=True, max_length=512), ), migrations.AlterField( model_name='publication', name='year', field=models.PositiveIntegerField(db_index=True), ), migrations.AlterField( model_name='type', name='hidden', field=models.BooleanField(db_index=True, default=False, help_text='Hide publications from main view.'), ), migrations.AlterField( model_name='type', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), ]
Add new migrations with db indexes
Add new migrations with db indexes
Python
mit
mbourqui/django-publications-bootstrap,mbourqui/django-publications-bootstrap,mbourqui/django-publications-bootstrap
Add new migrations with db indexes
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-06-30 09:41 from __future__ import unicode_literals from django.db import migrations, models import echoices.fields import publications_bootstrap.fields import publications_bootstrap.models.publication class Migration(migrations.Migration): dependencies = [ ('publications_bootstrap', '0002_initial_data'), ] operations = [ migrations.AlterField( model_name='catalog', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), migrations.AlterField( model_name='publication', name='citekey', field=publications_bootstrap.fields.NullCharField(blank=True, db_index=True, help_text='BibTex citation key. Leave blank if unsure.', max_length=512, null=True, unique=True), ), migrations.AlterField( model_name='publication', name='external', field=models.BooleanField(db_index=True, default=False, help_text='If publication was written in another lab, mark as external.'), ), migrations.AlterField( model_name='publication', name='month', field=echoices.fields.make_echoicefield(blank=True, db_index=True, echoices=publications_bootstrap.models.publication.EMonths, null=True), ), migrations.AlterField( model_name='publication', name='title', field=models.CharField(db_index=True, max_length=512), ), migrations.AlterField( model_name='publication', name='year', field=models.PositiveIntegerField(db_index=True), ), migrations.AlterField( model_name='type', name='hidden', field=models.BooleanField(db_index=True, default=False, help_text='Hide publications from main view.'), ), migrations.AlterField( model_name='type', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), ]
<commit_before><commit_msg>Add new migrations with db indexes<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-06-30 09:41 from __future__ import unicode_literals from django.db import migrations, models import echoices.fields import publications_bootstrap.fields import publications_bootstrap.models.publication class Migration(migrations.Migration): dependencies = [ ('publications_bootstrap', '0002_initial_data'), ] operations = [ migrations.AlterField( model_name='catalog', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), migrations.AlterField( model_name='publication', name='citekey', field=publications_bootstrap.fields.NullCharField(blank=True, db_index=True, help_text='BibTex citation key. Leave blank if unsure.', max_length=512, null=True, unique=True), ), migrations.AlterField( model_name='publication', name='external', field=models.BooleanField(db_index=True, default=False, help_text='If publication was written in another lab, mark as external.'), ), migrations.AlterField( model_name='publication', name='month', field=echoices.fields.make_echoicefield(blank=True, db_index=True, echoices=publications_bootstrap.models.publication.EMonths, null=True), ), migrations.AlterField( model_name='publication', name='title', field=models.CharField(db_index=True, max_length=512), ), migrations.AlterField( model_name='publication', name='year', field=models.PositiveIntegerField(db_index=True), ), migrations.AlterField( model_name='type', name='hidden', field=models.BooleanField(db_index=True, default=False, help_text='Hide publications from main view.'), ), migrations.AlterField( model_name='type', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), ]
Add new migrations with db indexes# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-06-30 09:41 from __future__ import unicode_literals from django.db import migrations, models import echoices.fields import publications_bootstrap.fields import publications_bootstrap.models.publication class Migration(migrations.Migration): dependencies = [ ('publications_bootstrap', '0002_initial_data'), ] operations = [ migrations.AlterField( model_name='catalog', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), migrations.AlterField( model_name='publication', name='citekey', field=publications_bootstrap.fields.NullCharField(blank=True, db_index=True, help_text='BibTex citation key. Leave blank if unsure.', max_length=512, null=True, unique=True), ), migrations.AlterField( model_name='publication', name='external', field=models.BooleanField(db_index=True, default=False, help_text='If publication was written in another lab, mark as external.'), ), migrations.AlterField( model_name='publication', name='month', field=echoices.fields.make_echoicefield(blank=True, db_index=True, echoices=publications_bootstrap.models.publication.EMonths, null=True), ), migrations.AlterField( model_name='publication', name='title', field=models.CharField(db_index=True, max_length=512), ), migrations.AlterField( model_name='publication', name='year', field=models.PositiveIntegerField(db_index=True), ), migrations.AlterField( model_name='type', name='hidden', field=models.BooleanField(db_index=True, default=False, help_text='Hide publications from main view.'), ), migrations.AlterField( model_name='type', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), ]
<commit_before><commit_msg>Add new migrations with db indexes<commit_after># -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-06-30 09:41 from __future__ import unicode_literals from django.db import migrations, models import echoices.fields import publications_bootstrap.fields import publications_bootstrap.models.publication class Migration(migrations.Migration): dependencies = [ ('publications_bootstrap', '0002_initial_data'), ] operations = [ migrations.AlterField( model_name='catalog', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), migrations.AlterField( model_name='publication', name='citekey', field=publications_bootstrap.fields.NullCharField(blank=True, db_index=True, help_text='BibTex citation key. Leave blank if unsure.', max_length=512, null=True, unique=True), ), migrations.AlterField( model_name='publication', name='external', field=models.BooleanField(db_index=True, default=False, help_text='If publication was written in another lab, mark as external.'), ), migrations.AlterField( model_name='publication', name='month', field=echoices.fields.make_echoicefield(blank=True, db_index=True, echoices=publications_bootstrap.models.publication.EMonths, null=True), ), migrations.AlterField( model_name='publication', name='title', field=models.CharField(db_index=True, max_length=512), ), migrations.AlterField( model_name='publication', name='year', field=models.PositiveIntegerField(db_index=True), ), migrations.AlterField( model_name='type', name='hidden', field=models.BooleanField(db_index=True, default=False, help_text='Hide publications from main view.'), ), migrations.AlterField( model_name='type', name='title', field=models.CharField(db_index=True, max_length=128, unique=True), ), ]
10df3cd5e4c8517652efdb8381155253aa6a8157
osfclient/tests/test_cli.py
osfclient/tests/test_cli.py
from unittest.mock import call from unittest.mock import Mock from unittest.mock import patch from osfclient import cli @patch('osfclient.cli.os.path.exists', return_value=True) @patch('osfclient.cli.configparser.ConfigParser') def test_config_file(MockConfigParser, os_path_exists): MockConfigParser().__getitem__ = Mock(return_value={'project': '1234'}) config = cli.config_from_file() assert config == {'project': '1234'} assert call.read('.osfcli.config') in MockConfigParser().mock_calls assert call('osf') in MockConfigParser().__getitem__.mock_calls def test_config_from_env_replace_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return 'theusername' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'notusername'}) assert config == {'username': 'theusername'} def test_config_from_env_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'theusername'}) assert config == {'username': 'theusername'} def test_config_from_env_replace_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return 'theproject' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'notproject'}) assert config == {'project': 'theproject'} def test_config_from_env_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'theproject'}) assert config == {'project': 'theproject'}
Add test for file based configuration
Add test for file based configuration
Python
bsd-3-clause
betatim/osf-cli,betatim/osf-cli
Add test for file based configuration
from unittest.mock import call from unittest.mock import Mock from unittest.mock import patch from osfclient import cli @patch('osfclient.cli.os.path.exists', return_value=True) @patch('osfclient.cli.configparser.ConfigParser') def test_config_file(MockConfigParser, os_path_exists): MockConfigParser().__getitem__ = Mock(return_value={'project': '1234'}) config = cli.config_from_file() assert config == {'project': '1234'} assert call.read('.osfcli.config') in MockConfigParser().mock_calls assert call('osf') in MockConfigParser().__getitem__.mock_calls def test_config_from_env_replace_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return 'theusername' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'notusername'}) assert config == {'username': 'theusername'} def test_config_from_env_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'theusername'}) assert config == {'username': 'theusername'} def test_config_from_env_replace_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return 'theproject' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'notproject'}) assert config == {'project': 'theproject'} def test_config_from_env_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'theproject'}) assert config == {'project': 'theproject'}
<commit_before><commit_msg>Add test for file based configuration<commit_after>
from unittest.mock import call from unittest.mock import Mock from unittest.mock import patch from osfclient import cli @patch('osfclient.cli.os.path.exists', return_value=True) @patch('osfclient.cli.configparser.ConfigParser') def test_config_file(MockConfigParser, os_path_exists): MockConfigParser().__getitem__ = Mock(return_value={'project': '1234'}) config = cli.config_from_file() assert config == {'project': '1234'} assert call.read('.osfcli.config') in MockConfigParser().mock_calls assert call('osf') in MockConfigParser().__getitem__.mock_calls def test_config_from_env_replace_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return 'theusername' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'notusername'}) assert config == {'username': 'theusername'} def test_config_from_env_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'theusername'}) assert config == {'username': 'theusername'} def test_config_from_env_replace_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return 'theproject' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'notproject'}) assert config == {'project': 'theproject'} def test_config_from_env_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'theproject'}) assert config == {'project': 'theproject'}
Add test for file based configurationfrom unittest.mock import call from unittest.mock import Mock from unittest.mock import patch from osfclient import cli @patch('osfclient.cli.os.path.exists', return_value=True) @patch('osfclient.cli.configparser.ConfigParser') def test_config_file(MockConfigParser, os_path_exists): MockConfigParser().__getitem__ = Mock(return_value={'project': '1234'}) config = cli.config_from_file() assert config == {'project': '1234'} assert call.read('.osfcli.config') in MockConfigParser().mock_calls assert call('osf') in MockConfigParser().__getitem__.mock_calls def test_config_from_env_replace_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return 'theusername' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'notusername'}) assert config == {'username': 'theusername'} def test_config_from_env_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'theusername'}) assert config == {'username': 'theusername'} def test_config_from_env_replace_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return 'theproject' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'notproject'}) assert config == {'project': 'theproject'} def test_config_from_env_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'theproject'}) assert config == {'project': 'theproject'}
<commit_before><commit_msg>Add test for file based configuration<commit_after>from unittest.mock import call from unittest.mock import Mock from unittest.mock import patch from osfclient import cli @patch('osfclient.cli.os.path.exists', return_value=True) @patch('osfclient.cli.configparser.ConfigParser') def test_config_file(MockConfigParser, os_path_exists): MockConfigParser().__getitem__ = Mock(return_value={'project': '1234'}) config = cli.config_from_file() assert config == {'project': '1234'} assert call.read('.osfcli.config') in MockConfigParser().mock_calls assert call('osf') in MockConfigParser().__getitem__.mock_calls def test_config_from_env_replace_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return 'theusername' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'notusername'}) assert config == {'username': 'theusername'} def test_config_from_env_username(): def simple_getenv(key): if key == 'OSF_USERNAME': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'username': 'theusername'}) assert config == {'username': 'theusername'} def test_config_from_env_replace_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return 'theproject' with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'notproject'}) assert config == {'project': 'theproject'} def test_config_from_env_project(): def simple_getenv(key): if key == 'OSF_PROJECT': return None with patch('osfclient.cli.os.getenv', side_effect=simple_getenv): config = cli.config_from_env({'project': 'theproject'}) assert config == {'project': 'theproject'}
0a8ca89881c08c78816c47e5b54aa7f78fdb8cc0
src/servers/urls.py
src/servers/urls.py
''' Servers URL Configuration ''' from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.index, name="servers"), url(r'^(?P<slug>.+)/', views.server, name="server"), ]
Create URL routes to the two server app views
Create URL routes to the two server app views
Python
mit
Jonpro03/Minecrunch_Web,Jonpro03/Minecrunch_Web,Jonpro03/Minecrunch_Web
Create URL routes to the two server app views
''' Servers URL Configuration ''' from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.index, name="servers"), url(r'^(?P<slug>.+)/', views.server, name="server"), ]
<commit_before><commit_msg>Create URL routes to the two server app views<commit_after>
''' Servers URL Configuration ''' from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.index, name="servers"), url(r'^(?P<slug>.+)/', views.server, name="server"), ]
Create URL routes to the two server app views''' Servers URL Configuration ''' from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.index, name="servers"), url(r'^(?P<slug>.+)/', views.server, name="server"), ]
<commit_before><commit_msg>Create URL routes to the two server app views<commit_after>''' Servers URL Configuration ''' from django.conf.urls import url from . import views urlpatterns = [ url(r'^$', views.index, name="servers"), url(r'^(?P<slug>.+)/', views.server, name="server"), ]
b96d62fb4797360e17bd7ffa2a22f9a89977605a
excerises/src/II/q6.py
excerises/src/II/q6.py
D = {'a': 0, 'b': 1, 'c': 2} # Attempting to access a key that does not exist # will result in a KeyError Exception try: D['d'] except KeyError as err: print('KeyError for', err) # Assign to a non-existing key (will create entry) D['d'] = 'spam' print(D) # For Lists cannot assign to a non-existing index L = ['a', 'b', 'c'] try: L[3] = 'd' except IndexError as err: print('IndexError ', err)
Add Q6 for Section II
Add Q6 for Section II
Python
bsd-3-clause
skellykiernan/pylearn,skellykiernan/pylearn
Add Q6 for Section II
D = {'a': 0, 'b': 1, 'c': 2} # Attempting to access a key that does not exist # will result in a KeyError Exception try: D['d'] except KeyError as err: print('KeyError for', err) # Assign to a non-existing key (will create entry) D['d'] = 'spam' print(D) # For Lists cannot assign to a non-existing index L = ['a', 'b', 'c'] try: L[3] = 'd' except IndexError as err: print('IndexError ', err)
<commit_before><commit_msg>Add Q6 for Section II<commit_after>
D = {'a': 0, 'b': 1, 'c': 2} # Attempting to access a key that does not exist # will result in a KeyError Exception try: D['d'] except KeyError as err: print('KeyError for', err) # Assign to a non-existing key (will create entry) D['d'] = 'spam' print(D) # For Lists cannot assign to a non-existing index L = ['a', 'b', 'c'] try: L[3] = 'd' except IndexError as err: print('IndexError ', err)
Add Q6 for Section IID = {'a': 0, 'b': 1, 'c': 2} # Attempting to access a key that does not exist # will result in a KeyError Exception try: D['d'] except KeyError as err: print('KeyError for', err) # Assign to a non-existing key (will create entry) D['d'] = 'spam' print(D) # For Lists cannot assign to a non-existing index L = ['a', 'b', 'c'] try: L[3] = 'd' except IndexError as err: print('IndexError ', err)
<commit_before><commit_msg>Add Q6 for Section II<commit_after>D = {'a': 0, 'b': 1, 'c': 2} # Attempting to access a key that does not exist # will result in a KeyError Exception try: D['d'] except KeyError as err: print('KeyError for', err) # Assign to a non-existing key (will create entry) D['d'] = 'spam' print(D) # For Lists cannot assign to a non-existing index L = ['a', 'b', 'c'] try: L[3] = 'd' except IndexError as err: print('IndexError ', err)
ac9d87bf486f8062d1c2d8122e2dc5660546a22f
menpofit/clm/expert/base.py
menpofit/clm/expert/base.py
import numpy as np from menpofit.math.correlationfilter import mccf, imccf # TODO: document me! class IncrementalCorrelationFilterThinWrapper(object): r""" """ def __init__(self, cf_callable=mccf, icf_callable=imccf): self.cf_callable = cf_callable self.icf_callable = icf_callable def increment(self, A, B, n_x, Z, t): r""" """ # Turn list of X into ndarray if isinstance(Z, list): Z = np.asarray(Z) return self.icf_callable(A, B, n_x, Z, t) def train(self, X, t): r""" """ # Turn list of X into ndarray if isinstance(X, list): X = np.asarray(X) # Return linear svm filter and bias return self.cf_callable(X, t)
Add dummy wrapper for correlation filters
Add dummy wrapper for correlation filters
Python
bsd-3-clause
grigorisg9gr/menpofit,yuxiang-zhou/menpofit,yuxiang-zhou/menpofit,grigorisg9gr/menpofit
Add dummy wrapper for correlation filters
import numpy as np from menpofit.math.correlationfilter import mccf, imccf # TODO: document me! class IncrementalCorrelationFilterThinWrapper(object): r""" """ def __init__(self, cf_callable=mccf, icf_callable=imccf): self.cf_callable = cf_callable self.icf_callable = icf_callable def increment(self, A, B, n_x, Z, t): r""" """ # Turn list of X into ndarray if isinstance(Z, list): Z = np.asarray(Z) return self.icf_callable(A, B, n_x, Z, t) def train(self, X, t): r""" """ # Turn list of X into ndarray if isinstance(X, list): X = np.asarray(X) # Return linear svm filter and bias return self.cf_callable(X, t)
<commit_before><commit_msg>Add dummy wrapper for correlation filters<commit_after>
import numpy as np from menpofit.math.correlationfilter import mccf, imccf # TODO: document me! class IncrementalCorrelationFilterThinWrapper(object): r""" """ def __init__(self, cf_callable=mccf, icf_callable=imccf): self.cf_callable = cf_callable self.icf_callable = icf_callable def increment(self, A, B, n_x, Z, t): r""" """ # Turn list of X into ndarray if isinstance(Z, list): Z = np.asarray(Z) return self.icf_callable(A, B, n_x, Z, t) def train(self, X, t): r""" """ # Turn list of X into ndarray if isinstance(X, list): X = np.asarray(X) # Return linear svm filter and bias return self.cf_callable(X, t)
Add dummy wrapper for correlation filtersimport numpy as np from menpofit.math.correlationfilter import mccf, imccf # TODO: document me! class IncrementalCorrelationFilterThinWrapper(object): r""" """ def __init__(self, cf_callable=mccf, icf_callable=imccf): self.cf_callable = cf_callable self.icf_callable = icf_callable def increment(self, A, B, n_x, Z, t): r""" """ # Turn list of X into ndarray if isinstance(Z, list): Z = np.asarray(Z) return self.icf_callable(A, B, n_x, Z, t) def train(self, X, t): r""" """ # Turn list of X into ndarray if isinstance(X, list): X = np.asarray(X) # Return linear svm filter and bias return self.cf_callable(X, t)
<commit_before><commit_msg>Add dummy wrapper for correlation filters<commit_after>import numpy as np from menpofit.math.correlationfilter import mccf, imccf # TODO: document me! class IncrementalCorrelationFilterThinWrapper(object): r""" """ def __init__(self, cf_callable=mccf, icf_callable=imccf): self.cf_callable = cf_callable self.icf_callable = icf_callable def increment(self, A, B, n_x, Z, t): r""" """ # Turn list of X into ndarray if isinstance(Z, list): Z = np.asarray(Z) return self.icf_callable(A, B, n_x, Z, t) def train(self, X, t): r""" """ # Turn list of X into ndarray if isinstance(X, list): X = np.asarray(X) # Return linear svm filter and bias return self.cf_callable(X, t)
eef6382ffbe5a9d69747fe98819af446c7aea661
temba/msgs/migrations/0065_external_id_partial_index.py
temba/msgs/migrations/0065_external_id_partial_index.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('msgs', '0064_auto_20160908_1340'), ] operations = [ migrations.RunSQL( "CREATE INDEX msgs_msg_external_id_where_nonnull ON msgs_msg(external_id) WHERE external_id IS NOT NULL", "DROP INDEX msgs_msg_external_id_where_nonnull" ) ]
Add partial index for non-null external ids
Add partial index for non-null external ids
Python
agpl-3.0
tsotetsi/textily-web,pulilab/rapidpro,tsotetsi/textily-web,tsotetsi/textily-web,pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web,pulilab/rapidpro,pulilab/rapidpro,tsotetsi/textily-web
Add partial index for non-null external ids
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('msgs', '0064_auto_20160908_1340'), ] operations = [ migrations.RunSQL( "CREATE INDEX msgs_msg_external_id_where_nonnull ON msgs_msg(external_id) WHERE external_id IS NOT NULL", "DROP INDEX msgs_msg_external_id_where_nonnull" ) ]
<commit_before><commit_msg>Add partial index for non-null external ids<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('msgs', '0064_auto_20160908_1340'), ] operations = [ migrations.RunSQL( "CREATE INDEX msgs_msg_external_id_where_nonnull ON msgs_msg(external_id) WHERE external_id IS NOT NULL", "DROP INDEX msgs_msg_external_id_where_nonnull" ) ]
Add partial index for non-null external ids# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('msgs', '0064_auto_20160908_1340'), ] operations = [ migrations.RunSQL( "CREATE INDEX msgs_msg_external_id_where_nonnull ON msgs_msg(external_id) WHERE external_id IS NOT NULL", "DROP INDEX msgs_msg_external_id_where_nonnull" ) ]
<commit_before><commit_msg>Add partial index for non-null external ids<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('msgs', '0064_auto_20160908_1340'), ] operations = [ migrations.RunSQL( "CREATE INDEX msgs_msg_external_id_where_nonnull ON msgs_msg(external_id) WHERE external_id IS NOT NULL", "DROP INDEX msgs_msg_external_id_where_nonnull" ) ]
b08e7fd64da5342508807420e5c9aa6c3686a68e
scripts/analytics/institutions.py
scripts/analytics/institutions.py
from modularodm import Q from website.app import init_app from website.models import User, Node, Institution def get_institutions(): institutions = Institution.find(Q('_id', 'ne', None)) return institutions def get_user_count_by_institutions(): institutions = get_institutions() user_counts = [] for institution in institutions: query = Q('_affiliated_institutions', 'eq', institution.node) user_counts.append({institution.name: User.find(query).count()}) return user_counts def get_node_count_by_institutions(): institutions = get_institutions() node_counts = [] for institution in institutions: query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('parent_node', 'eq', None) ) node_counts.append({institution.name: Node.find_by_institutions(institution, query).count()}) return node_counts def main(): users_by_institutions = get_user_count_by_institutions() nodes_by_institutions = get_node_count_by_institutions() print(users_by_institutions) print(nodes_by_institutions) if __name__ == '__main__': init_app() main()
Add basic script without the ability to send the data anywhere
Add basic script without the ability to send the data anywhere
Python
apache-2.0
Johnetordoff/osf.io,hmoco/osf.io,laurenrevere/osf.io,mattclark/osf.io,baylee-d/osf.io,icereval/osf.io,laurenrevere/osf.io,sloria/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,binoculars/osf.io,caseyrollins/osf.io,alexschiller/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,crcresearch/osf.io,felliott/osf.io,caneruguz/osf.io,chrisseto/osf.io,emetsger/osf.io,hmoco/osf.io,monikagrabowska/osf.io,emetsger/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,chrisseto/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,TomBaxter/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,mluo613/osf.io,adlius/osf.io,Johnetordoff/osf.io,hmoco/osf.io,erinspace/osf.io,saradbowman/osf.io,rdhyee/osf.io,acshi/osf.io,cwisecarver/osf.io,sloria/osf.io,Nesiehr/osf.io,felliott/osf.io,Nesiehr/osf.io,cwisecarver/osf.io,rdhyee/osf.io,chennan47/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,acshi/osf.io,chrisseto/osf.io,Nesiehr/osf.io,caneruguz/osf.io,crcresearch/osf.io,monikagrabowska/osf.io,adlius/osf.io,leb2dg/osf.io,chrisseto/osf.io,erinspace/osf.io,adlius/osf.io,mfraezz/osf.io,erinspace/osf.io,aaxelb/osf.io,hmoco/osf.io,emetsger/osf.io,chennan47/osf.io,Nesiehr/osf.io,mluo613/osf.io,pattisdr/osf.io,icereval/osf.io,alexschiller/osf.io,mfraezz/osf.io,chennan47/osf.io,TomBaxter/osf.io,mattclark/osf.io,binoculars/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,saradbowman/osf.io,acshi/osf.io,caneruguz/osf.io,emetsger/osf.io,rdhyee/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,crcresearch/osf.io,mluo613/osf.io,mluo613/osf.io,monikagrabowska/osf.io,cslzchen/osf.io,adlius/osf.io,acshi/osf.io,TomBaxter/osf.io,aaxelb/osf.io,cwisecarver/osf.io,mfraezz/osf.io,alexschiller/osf.io,baylee-d/osf.io,icereval/osf.io,leb2dg/osf.io,alexschiller/osf.io,alexschiller/osf.io,laurenrevere/osf.io,pattisdr/osf.io,mattclark/osf.io,leb2dg/osf.io,caneruguz/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,cwisecarver/osf.io,mluo613/osf.io,HalcyonChimera/osf.io,felliott/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,binoculars/osf.io,felliott/osf.io,sloria/osf.io,cslzchen/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,monikagrabowska/osf.io
Add basic script without the ability to send the data anywhere
from modularodm import Q from website.app import init_app from website.models import User, Node, Institution def get_institutions(): institutions = Institution.find(Q('_id', 'ne', None)) return institutions def get_user_count_by_institutions(): institutions = get_institutions() user_counts = [] for institution in institutions: query = Q('_affiliated_institutions', 'eq', institution.node) user_counts.append({institution.name: User.find(query).count()}) return user_counts def get_node_count_by_institutions(): institutions = get_institutions() node_counts = [] for institution in institutions: query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('parent_node', 'eq', None) ) node_counts.append({institution.name: Node.find_by_institutions(institution, query).count()}) return node_counts def main(): users_by_institutions = get_user_count_by_institutions() nodes_by_institutions = get_node_count_by_institutions() print(users_by_institutions) print(nodes_by_institutions) if __name__ == '__main__': init_app() main()
<commit_before><commit_msg>Add basic script without the ability to send the data anywhere<commit_after>
from modularodm import Q from website.app import init_app from website.models import User, Node, Institution def get_institutions(): institutions = Institution.find(Q('_id', 'ne', None)) return institutions def get_user_count_by_institutions(): institutions = get_institutions() user_counts = [] for institution in institutions: query = Q('_affiliated_institutions', 'eq', institution.node) user_counts.append({institution.name: User.find(query).count()}) return user_counts def get_node_count_by_institutions(): institutions = get_institutions() node_counts = [] for institution in institutions: query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('parent_node', 'eq', None) ) node_counts.append({institution.name: Node.find_by_institutions(institution, query).count()}) return node_counts def main(): users_by_institutions = get_user_count_by_institutions() nodes_by_institutions = get_node_count_by_institutions() print(users_by_institutions) print(nodes_by_institutions) if __name__ == '__main__': init_app() main()
Add basic script without the ability to send the data anywherefrom modularodm import Q from website.app import init_app from website.models import User, Node, Institution def get_institutions(): institutions = Institution.find(Q('_id', 'ne', None)) return institutions def get_user_count_by_institutions(): institutions = get_institutions() user_counts = [] for institution in institutions: query = Q('_affiliated_institutions', 'eq', institution.node) user_counts.append({institution.name: User.find(query).count()}) return user_counts def get_node_count_by_institutions(): institutions = get_institutions() node_counts = [] for institution in institutions: query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('parent_node', 'eq', None) ) node_counts.append({institution.name: Node.find_by_institutions(institution, query).count()}) return node_counts def main(): users_by_institutions = get_user_count_by_institutions() nodes_by_institutions = get_node_count_by_institutions() print(users_by_institutions) print(nodes_by_institutions) if __name__ == '__main__': init_app() main()
<commit_before><commit_msg>Add basic script without the ability to send the data anywhere<commit_after>from modularodm import Q from website.app import init_app from website.models import User, Node, Institution def get_institutions(): institutions = Institution.find(Q('_id', 'ne', None)) return institutions def get_user_count_by_institutions(): institutions = get_institutions() user_counts = [] for institution in institutions: query = Q('_affiliated_institutions', 'eq', institution.node) user_counts.append({institution.name: User.find(query).count()}) return user_counts def get_node_count_by_institutions(): institutions = get_institutions() node_counts = [] for institution in institutions: query = ( Q('is_deleted', 'ne', True) & Q('is_folder', 'ne', True) & Q('parent_node', 'eq', None) ) node_counts.append({institution.name: Node.find_by_institutions(institution, query).count()}) return node_counts def main(): users_by_institutions = get_user_count_by_institutions() nodes_by_institutions = get_node_count_by_institutions() print(users_by_institutions) print(nodes_by_institutions) if __name__ == '__main__': init_app() main()
f79465a56f45aaa74593bf0176015dabd5845b25
src/lovelace/management/commands/send_mass_email.py
src/lovelace/management/commands/send_mass_email.py
import sys import json import requests from bs4 import BeautifulSoup from django.core.management.base import BaseCommand, CommandError from users.models import UserProfile from lovelace.settings import MAILGUN_API_URL, MAILGUN_API_KEY, LOVELACE_FROM_EMAIL class Command(BaseCommand): help = "Send a mass email to all Project Lovelace users." def add_arguments(self, parser): parser.add_argument('-S', '--subject', type=str, help="Subject of the email.") parser.add_argument('-M', '--message', type=str, help="File containing a message to email out.") def handle(self, *args, **kwargs): subject = kwargs['subject'] message_filepath = kwargs['message'] with open(message_filepath, 'r') as f: message_html = f.read() soup = BeautifulSoup(message_html, "html.parser") message_text = soup.get_text() users_to_email = UserProfile.objects.filter(subscribe_to_emails=True) to_emails = [] recipient_vars = {} for user in users_to_email: to_emails.append(user.user.email) recipient_vars[user.user.email] = {'username': user.user.username} self.stdout.write(self.style.WARNING("MAILGUN_API_KEY={:s}".format(MAILGUN_API_KEY))) self.stdout.write(self.style.WARNING("FROM: {:s}\n".format(LOVELACE_FROM_EMAIL))) self.stdout.write(self.style.WARNING("Subject: {:s}".format(subject))) self.stdout.write(self.style.WARNING("Email HTML content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_html)) self.stdout.write(self.style.WARNING("\nEmail text content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_text)) self.stdout.write(self.style.WARNING("\nThis email will be sent to {:d} recipients.".format(len(to_emails)))) send_for_sure = input(self.style.WARNING("Are you sure you want to send this mass email? [Y/n] ")) if send_for_sure != "Y": self.stdout.write(self.style.NOTICE("No email was sent. Aborting script.")) sys.exit(0) self.stdout.write(self.style.SUCCESS("\nSending Mailgun request...")) response = requests.post(MAILGUN_API_URL, auth=('api', MAILGUN_API_KEY), data={'from': LOVELACE_FROM_EMAIL, 'to': to_emails, 'subject': subject, 'html': message_html, 'text': message_text, 'recipient-variables': json.dumps(recipient_vars)}) self.stdout.write(self.style.WARNING("Mailgun response:")) self.stdout.write("{:}".format(response)) self.stdout.write("{:}".format(response.content)) self.stdout.write(self.style.SUCCESS("\nEmails sent!"))
Send mass email Django command
Send mass email Django command
Python
mit
project-lovelace/lovelace-website,project-lovelace/lovelace-website,project-lovelace/lovelace-website
Send mass email Django command
import sys import json import requests from bs4 import BeautifulSoup from django.core.management.base import BaseCommand, CommandError from users.models import UserProfile from lovelace.settings import MAILGUN_API_URL, MAILGUN_API_KEY, LOVELACE_FROM_EMAIL class Command(BaseCommand): help = "Send a mass email to all Project Lovelace users." def add_arguments(self, parser): parser.add_argument('-S', '--subject', type=str, help="Subject of the email.") parser.add_argument('-M', '--message', type=str, help="File containing a message to email out.") def handle(self, *args, **kwargs): subject = kwargs['subject'] message_filepath = kwargs['message'] with open(message_filepath, 'r') as f: message_html = f.read() soup = BeautifulSoup(message_html, "html.parser") message_text = soup.get_text() users_to_email = UserProfile.objects.filter(subscribe_to_emails=True) to_emails = [] recipient_vars = {} for user in users_to_email: to_emails.append(user.user.email) recipient_vars[user.user.email] = {'username': user.user.username} self.stdout.write(self.style.WARNING("MAILGUN_API_KEY={:s}".format(MAILGUN_API_KEY))) self.stdout.write(self.style.WARNING("FROM: {:s}\n".format(LOVELACE_FROM_EMAIL))) self.stdout.write(self.style.WARNING("Subject: {:s}".format(subject))) self.stdout.write(self.style.WARNING("Email HTML content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_html)) self.stdout.write(self.style.WARNING("\nEmail text content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_text)) self.stdout.write(self.style.WARNING("\nThis email will be sent to {:d} recipients.".format(len(to_emails)))) send_for_sure = input(self.style.WARNING("Are you sure you want to send this mass email? [Y/n] ")) if send_for_sure != "Y": self.stdout.write(self.style.NOTICE("No email was sent. Aborting script.")) sys.exit(0) self.stdout.write(self.style.SUCCESS("\nSending Mailgun request...")) response = requests.post(MAILGUN_API_URL, auth=('api', MAILGUN_API_KEY), data={'from': LOVELACE_FROM_EMAIL, 'to': to_emails, 'subject': subject, 'html': message_html, 'text': message_text, 'recipient-variables': json.dumps(recipient_vars)}) self.stdout.write(self.style.WARNING("Mailgun response:")) self.stdout.write("{:}".format(response)) self.stdout.write("{:}".format(response.content)) self.stdout.write(self.style.SUCCESS("\nEmails sent!"))
<commit_before><commit_msg>Send mass email Django command<commit_after>
import sys import json import requests from bs4 import BeautifulSoup from django.core.management.base import BaseCommand, CommandError from users.models import UserProfile from lovelace.settings import MAILGUN_API_URL, MAILGUN_API_KEY, LOVELACE_FROM_EMAIL class Command(BaseCommand): help = "Send a mass email to all Project Lovelace users." def add_arguments(self, parser): parser.add_argument('-S', '--subject', type=str, help="Subject of the email.") parser.add_argument('-M', '--message', type=str, help="File containing a message to email out.") def handle(self, *args, **kwargs): subject = kwargs['subject'] message_filepath = kwargs['message'] with open(message_filepath, 'r') as f: message_html = f.read() soup = BeautifulSoup(message_html, "html.parser") message_text = soup.get_text() users_to_email = UserProfile.objects.filter(subscribe_to_emails=True) to_emails = [] recipient_vars = {} for user in users_to_email: to_emails.append(user.user.email) recipient_vars[user.user.email] = {'username': user.user.username} self.stdout.write(self.style.WARNING("MAILGUN_API_KEY={:s}".format(MAILGUN_API_KEY))) self.stdout.write(self.style.WARNING("FROM: {:s}\n".format(LOVELACE_FROM_EMAIL))) self.stdout.write(self.style.WARNING("Subject: {:s}".format(subject))) self.stdout.write(self.style.WARNING("Email HTML content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_html)) self.stdout.write(self.style.WARNING("\nEmail text content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_text)) self.stdout.write(self.style.WARNING("\nThis email will be sent to {:d} recipients.".format(len(to_emails)))) send_for_sure = input(self.style.WARNING("Are you sure you want to send this mass email? [Y/n] ")) if send_for_sure != "Y": self.stdout.write(self.style.NOTICE("No email was sent. Aborting script.")) sys.exit(0) self.stdout.write(self.style.SUCCESS("\nSending Mailgun request...")) response = requests.post(MAILGUN_API_URL, auth=('api', MAILGUN_API_KEY), data={'from': LOVELACE_FROM_EMAIL, 'to': to_emails, 'subject': subject, 'html': message_html, 'text': message_text, 'recipient-variables': json.dumps(recipient_vars)}) self.stdout.write(self.style.WARNING("Mailgun response:")) self.stdout.write("{:}".format(response)) self.stdout.write("{:}".format(response.content)) self.stdout.write(self.style.SUCCESS("\nEmails sent!"))
Send mass email Django commandimport sys import json import requests from bs4 import BeautifulSoup from django.core.management.base import BaseCommand, CommandError from users.models import UserProfile from lovelace.settings import MAILGUN_API_URL, MAILGUN_API_KEY, LOVELACE_FROM_EMAIL class Command(BaseCommand): help = "Send a mass email to all Project Lovelace users." def add_arguments(self, parser): parser.add_argument('-S', '--subject', type=str, help="Subject of the email.") parser.add_argument('-M', '--message', type=str, help="File containing a message to email out.") def handle(self, *args, **kwargs): subject = kwargs['subject'] message_filepath = kwargs['message'] with open(message_filepath, 'r') as f: message_html = f.read() soup = BeautifulSoup(message_html, "html.parser") message_text = soup.get_text() users_to_email = UserProfile.objects.filter(subscribe_to_emails=True) to_emails = [] recipient_vars = {} for user in users_to_email: to_emails.append(user.user.email) recipient_vars[user.user.email] = {'username': user.user.username} self.stdout.write(self.style.WARNING("MAILGUN_API_KEY={:s}".format(MAILGUN_API_KEY))) self.stdout.write(self.style.WARNING("FROM: {:s}\n".format(LOVELACE_FROM_EMAIL))) self.stdout.write(self.style.WARNING("Subject: {:s}".format(subject))) self.stdout.write(self.style.WARNING("Email HTML content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_html)) self.stdout.write(self.style.WARNING("\nEmail text content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_text)) self.stdout.write(self.style.WARNING("\nThis email will be sent to {:d} recipients.".format(len(to_emails)))) send_for_sure = input(self.style.WARNING("Are you sure you want to send this mass email? [Y/n] ")) if send_for_sure != "Y": self.stdout.write(self.style.NOTICE("No email was sent. Aborting script.")) sys.exit(0) self.stdout.write(self.style.SUCCESS("\nSending Mailgun request...")) response = requests.post(MAILGUN_API_URL, auth=('api', MAILGUN_API_KEY), data={'from': LOVELACE_FROM_EMAIL, 'to': to_emails, 'subject': subject, 'html': message_html, 'text': message_text, 'recipient-variables': json.dumps(recipient_vars)}) self.stdout.write(self.style.WARNING("Mailgun response:")) self.stdout.write("{:}".format(response)) self.stdout.write("{:}".format(response.content)) self.stdout.write(self.style.SUCCESS("\nEmails sent!"))
<commit_before><commit_msg>Send mass email Django command<commit_after>import sys import json import requests from bs4 import BeautifulSoup from django.core.management.base import BaseCommand, CommandError from users.models import UserProfile from lovelace.settings import MAILGUN_API_URL, MAILGUN_API_KEY, LOVELACE_FROM_EMAIL class Command(BaseCommand): help = "Send a mass email to all Project Lovelace users." def add_arguments(self, parser): parser.add_argument('-S', '--subject', type=str, help="Subject of the email.") parser.add_argument('-M', '--message', type=str, help="File containing a message to email out.") def handle(self, *args, **kwargs): subject = kwargs['subject'] message_filepath = kwargs['message'] with open(message_filepath, 'r') as f: message_html = f.read() soup = BeautifulSoup(message_html, "html.parser") message_text = soup.get_text() users_to_email = UserProfile.objects.filter(subscribe_to_emails=True) to_emails = [] recipient_vars = {} for user in users_to_email: to_emails.append(user.user.email) recipient_vars[user.user.email] = {'username': user.user.username} self.stdout.write(self.style.WARNING("MAILGUN_API_KEY={:s}".format(MAILGUN_API_KEY))) self.stdout.write(self.style.WARNING("FROM: {:s}\n".format(LOVELACE_FROM_EMAIL))) self.stdout.write(self.style.WARNING("Subject: {:s}".format(subject))) self.stdout.write(self.style.WARNING("Email HTML content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_html)) self.stdout.write(self.style.WARNING("\nEmail text content ({:s}):".format(message_filepath))) self.stdout.write("{:s}".format(message_text)) self.stdout.write(self.style.WARNING("\nThis email will be sent to {:d} recipients.".format(len(to_emails)))) send_for_sure = input(self.style.WARNING("Are you sure you want to send this mass email? [Y/n] ")) if send_for_sure != "Y": self.stdout.write(self.style.NOTICE("No email was sent. Aborting script.")) sys.exit(0) self.stdout.write(self.style.SUCCESS("\nSending Mailgun request...")) response = requests.post(MAILGUN_API_URL, auth=('api', MAILGUN_API_KEY), data={'from': LOVELACE_FROM_EMAIL, 'to': to_emails, 'subject': subject, 'html': message_html, 'text': message_text, 'recipient-variables': json.dumps(recipient_vars)}) self.stdout.write(self.style.WARNING("Mailgun response:")) self.stdout.write("{:}".format(response)) self.stdout.write("{:}".format(response.content)) self.stdout.write(self.style.SUCCESS("\nEmails sent!"))
cb991585c786d1c899da85a2abdf58cc9725d44a
prototypes/converters/autovasp_converter.py
prototypes/converters/autovasp_converter.py
import os import json import sys from tqdm import tqdm from parsers.utils import find_files from parsers.ase_parser import parse_ase from validator import Validator # This is the converter for # Arguments: # input_path (string): The file or directory where the data resides. This should not be hard-coded in the function, for portability. # metadata (string or dict): The path to the JSON dataset metadata file or a dict containing the dataset metadata. # verbose (bool): Should the script print status messages to standard output? Default False. def convert(input_path, metadata, verbose=False): if verbose: print("Begin converting") # Collect the metadata if type(metadata) is str: try: with open(metadata, 'r') as metadata_file: dataset_metadata = json.load(metadata_file) except Exception as e: sys.exit("Error: Unable to read metadata: " + repr(e)) elif type(metadata) is dict: dataset_metadata = metadata else: sys.exit("Error: Invalid metadata parameter") # Make a Validator to help write the feedstock # You must pass the metadata to the constructor # Each Validator instance can only be used for a single dataset dataset_validator = Validator(dataset_metadata) # Get the data # Each record also needs its own metadata for data_file in tqdm(find_files(input_path, "^OUTCAR$"), desc="Processing files", disable= not verbose): data = parse_ase(os.path.join(data_file["path"], data_file["filename"]), "vasp") uri = "https://data.materialsdatafacility.org/collections/" + data_file["no_root_path"] + "/" + data_file["filename"] record_metadata = { "globus_subject": uri, "acl": ["public"], # "mdf-publish.publication.collection": , # "mdf_data_class": , "mdf-base.material_composition": data["frames"][0]["chemical_formula"], # "cite_as": , # "license": , "dc.title": dataset_metadata["dc.title"] + " - " + data["frames"][0]["chemical_formula"], # "dc.creator": , "dc.identifier": uri, # "dc.contributor.author": , # "dc.subject": , # "dc.description": , # "dc.relatedidentifier": , # "dc.year": , "data": { # "raw": , "files": {"outcar": uri} } } # Pass each individual record to the Validator result = dataset_validator.write_record(record_metadata) # Check if the Validator accepted the record, and print a message if it didn't # If the Validator returns "success" == True, the record was written successfully if result["success"] is not True: print("Error:", result["message"], ":", result.get("invalid_metadata", "")) if verbose: print("Finished converting")
Complete converter for VASP prototype
Complete converter for VASP prototype Prototyping autoconverter for VASP files. Does not yet handle URIs outside of data.materialsdatafacility.org.
Python
apache-2.0
materials-data-facility/forge
Complete converter for VASP prototype Prototyping autoconverter for VASP files. Does not yet handle URIs outside of data.materialsdatafacility.org.
import os import json import sys from tqdm import tqdm from parsers.utils import find_files from parsers.ase_parser import parse_ase from validator import Validator # This is the converter for # Arguments: # input_path (string): The file or directory where the data resides. This should not be hard-coded in the function, for portability. # metadata (string or dict): The path to the JSON dataset metadata file or a dict containing the dataset metadata. # verbose (bool): Should the script print status messages to standard output? Default False. def convert(input_path, metadata, verbose=False): if verbose: print("Begin converting") # Collect the metadata if type(metadata) is str: try: with open(metadata, 'r') as metadata_file: dataset_metadata = json.load(metadata_file) except Exception as e: sys.exit("Error: Unable to read metadata: " + repr(e)) elif type(metadata) is dict: dataset_metadata = metadata else: sys.exit("Error: Invalid metadata parameter") # Make a Validator to help write the feedstock # You must pass the metadata to the constructor # Each Validator instance can only be used for a single dataset dataset_validator = Validator(dataset_metadata) # Get the data # Each record also needs its own metadata for data_file in tqdm(find_files(input_path, "^OUTCAR$"), desc="Processing files", disable= not verbose): data = parse_ase(os.path.join(data_file["path"], data_file["filename"]), "vasp") uri = "https://data.materialsdatafacility.org/collections/" + data_file["no_root_path"] + "/" + data_file["filename"] record_metadata = { "globus_subject": uri, "acl": ["public"], # "mdf-publish.publication.collection": , # "mdf_data_class": , "mdf-base.material_composition": data["frames"][0]["chemical_formula"], # "cite_as": , # "license": , "dc.title": dataset_metadata["dc.title"] + " - " + data["frames"][0]["chemical_formula"], # "dc.creator": , "dc.identifier": uri, # "dc.contributor.author": , # "dc.subject": , # "dc.description": , # "dc.relatedidentifier": , # "dc.year": , "data": { # "raw": , "files": {"outcar": uri} } } # Pass each individual record to the Validator result = dataset_validator.write_record(record_metadata) # Check if the Validator accepted the record, and print a message if it didn't # If the Validator returns "success" == True, the record was written successfully if result["success"] is not True: print("Error:", result["message"], ":", result.get("invalid_metadata", "")) if verbose: print("Finished converting")
<commit_before><commit_msg>Complete converter for VASP prototype Prototyping autoconverter for VASP files. Does not yet handle URIs outside of data.materialsdatafacility.org.<commit_after>
import os import json import sys from tqdm import tqdm from parsers.utils import find_files from parsers.ase_parser import parse_ase from validator import Validator # This is the converter for # Arguments: # input_path (string): The file or directory where the data resides. This should not be hard-coded in the function, for portability. # metadata (string or dict): The path to the JSON dataset metadata file or a dict containing the dataset metadata. # verbose (bool): Should the script print status messages to standard output? Default False. def convert(input_path, metadata, verbose=False): if verbose: print("Begin converting") # Collect the metadata if type(metadata) is str: try: with open(metadata, 'r') as metadata_file: dataset_metadata = json.load(metadata_file) except Exception as e: sys.exit("Error: Unable to read metadata: " + repr(e)) elif type(metadata) is dict: dataset_metadata = metadata else: sys.exit("Error: Invalid metadata parameter") # Make a Validator to help write the feedstock # You must pass the metadata to the constructor # Each Validator instance can only be used for a single dataset dataset_validator = Validator(dataset_metadata) # Get the data # Each record also needs its own metadata for data_file in tqdm(find_files(input_path, "^OUTCAR$"), desc="Processing files", disable= not verbose): data = parse_ase(os.path.join(data_file["path"], data_file["filename"]), "vasp") uri = "https://data.materialsdatafacility.org/collections/" + data_file["no_root_path"] + "/" + data_file["filename"] record_metadata = { "globus_subject": uri, "acl": ["public"], # "mdf-publish.publication.collection": , # "mdf_data_class": , "mdf-base.material_composition": data["frames"][0]["chemical_formula"], # "cite_as": , # "license": , "dc.title": dataset_metadata["dc.title"] + " - " + data["frames"][0]["chemical_formula"], # "dc.creator": , "dc.identifier": uri, # "dc.contributor.author": , # "dc.subject": , # "dc.description": , # "dc.relatedidentifier": , # "dc.year": , "data": { # "raw": , "files": {"outcar": uri} } } # Pass each individual record to the Validator result = dataset_validator.write_record(record_metadata) # Check if the Validator accepted the record, and print a message if it didn't # If the Validator returns "success" == True, the record was written successfully if result["success"] is not True: print("Error:", result["message"], ":", result.get("invalid_metadata", "")) if verbose: print("Finished converting")
Complete converter for VASP prototype Prototyping autoconverter for VASP files. Does not yet handle URIs outside of data.materialsdatafacility.org.import os import json import sys from tqdm import tqdm from parsers.utils import find_files from parsers.ase_parser import parse_ase from validator import Validator # This is the converter for # Arguments: # input_path (string): The file or directory where the data resides. This should not be hard-coded in the function, for portability. # metadata (string or dict): The path to the JSON dataset metadata file or a dict containing the dataset metadata. # verbose (bool): Should the script print status messages to standard output? Default False. def convert(input_path, metadata, verbose=False): if verbose: print("Begin converting") # Collect the metadata if type(metadata) is str: try: with open(metadata, 'r') as metadata_file: dataset_metadata = json.load(metadata_file) except Exception as e: sys.exit("Error: Unable to read metadata: " + repr(e)) elif type(metadata) is dict: dataset_metadata = metadata else: sys.exit("Error: Invalid metadata parameter") # Make a Validator to help write the feedstock # You must pass the metadata to the constructor # Each Validator instance can only be used for a single dataset dataset_validator = Validator(dataset_metadata) # Get the data # Each record also needs its own metadata for data_file in tqdm(find_files(input_path, "^OUTCAR$"), desc="Processing files", disable= not verbose): data = parse_ase(os.path.join(data_file["path"], data_file["filename"]), "vasp") uri = "https://data.materialsdatafacility.org/collections/" + data_file["no_root_path"] + "/" + data_file["filename"] record_metadata = { "globus_subject": uri, "acl": ["public"], # "mdf-publish.publication.collection": , # "mdf_data_class": , "mdf-base.material_composition": data["frames"][0]["chemical_formula"], # "cite_as": , # "license": , "dc.title": dataset_metadata["dc.title"] + " - " + data["frames"][0]["chemical_formula"], # "dc.creator": , "dc.identifier": uri, # "dc.contributor.author": , # "dc.subject": , # "dc.description": , # "dc.relatedidentifier": , # "dc.year": , "data": { # "raw": , "files": {"outcar": uri} } } # Pass each individual record to the Validator result = dataset_validator.write_record(record_metadata) # Check if the Validator accepted the record, and print a message if it didn't # If the Validator returns "success" == True, the record was written successfully if result["success"] is not True: print("Error:", result["message"], ":", result.get("invalid_metadata", "")) if verbose: print("Finished converting")
<commit_before><commit_msg>Complete converter for VASP prototype Prototyping autoconverter for VASP files. Does not yet handle URIs outside of data.materialsdatafacility.org.<commit_after>import os import json import sys from tqdm import tqdm from parsers.utils import find_files from parsers.ase_parser import parse_ase from validator import Validator # This is the converter for # Arguments: # input_path (string): The file or directory where the data resides. This should not be hard-coded in the function, for portability. # metadata (string or dict): The path to the JSON dataset metadata file or a dict containing the dataset metadata. # verbose (bool): Should the script print status messages to standard output? Default False. def convert(input_path, metadata, verbose=False): if verbose: print("Begin converting") # Collect the metadata if type(metadata) is str: try: with open(metadata, 'r') as metadata_file: dataset_metadata = json.load(metadata_file) except Exception as e: sys.exit("Error: Unable to read metadata: " + repr(e)) elif type(metadata) is dict: dataset_metadata = metadata else: sys.exit("Error: Invalid metadata parameter") # Make a Validator to help write the feedstock # You must pass the metadata to the constructor # Each Validator instance can only be used for a single dataset dataset_validator = Validator(dataset_metadata) # Get the data # Each record also needs its own metadata for data_file in tqdm(find_files(input_path, "^OUTCAR$"), desc="Processing files", disable= not verbose): data = parse_ase(os.path.join(data_file["path"], data_file["filename"]), "vasp") uri = "https://data.materialsdatafacility.org/collections/" + data_file["no_root_path"] + "/" + data_file["filename"] record_metadata = { "globus_subject": uri, "acl": ["public"], # "mdf-publish.publication.collection": , # "mdf_data_class": , "mdf-base.material_composition": data["frames"][0]["chemical_formula"], # "cite_as": , # "license": , "dc.title": dataset_metadata["dc.title"] + " - " + data["frames"][0]["chemical_formula"], # "dc.creator": , "dc.identifier": uri, # "dc.contributor.author": , # "dc.subject": , # "dc.description": , # "dc.relatedidentifier": , # "dc.year": , "data": { # "raw": , "files": {"outcar": uri} } } # Pass each individual record to the Validator result = dataset_validator.write_record(record_metadata) # Check if the Validator accepted the record, and print a message if it didn't # If the Validator returns "success" == True, the record was written successfully if result["success"] is not True: print("Error:", result["message"], ":", result.get("invalid_metadata", "")) if verbose: print("Finished converting")
ccf986954bfc4a5e56ed608aefbb416bf7acab3c
tests/test_sso/test_bitbucket_id_creation.py
tests/test_sso/test_bitbucket_id_creation.py
from urllib.parse import urlparse from django.urls import reverse from tests.utils import BaseViewTest class BitbucketIdentityTest(BaseViewTest): def test_wrong_provider_raises_404(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'undefined'}) resp = self.client.get(auth_path) assert resp.status_code == 404 def test_get_redirects_to_login(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.get(auth_path) redirect = urlparse(resp['Location']) assert resp.status_code == 302 assert redirect.path == reverse('users:login') def test_flow(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.post(auth_path) assert resp.status_code == 302 redirect = urlparse(resp['Location']) assert redirect.scheme == 'https' assert redirect.netloc == 'bitbucket.org' assert redirect.path == '/site/oauth2/authorize'
Add bitbucket identity creation tests
Add bitbucket identity creation tests
Python
apache-2.0
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
Add bitbucket identity creation tests
from urllib.parse import urlparse from django.urls import reverse from tests.utils import BaseViewTest class BitbucketIdentityTest(BaseViewTest): def test_wrong_provider_raises_404(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'undefined'}) resp = self.client.get(auth_path) assert resp.status_code == 404 def test_get_redirects_to_login(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.get(auth_path) redirect = urlparse(resp['Location']) assert resp.status_code == 302 assert redirect.path == reverse('users:login') def test_flow(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.post(auth_path) assert resp.status_code == 302 redirect = urlparse(resp['Location']) assert redirect.scheme == 'https' assert redirect.netloc == 'bitbucket.org' assert redirect.path == '/site/oauth2/authorize'
<commit_before><commit_msg>Add bitbucket identity creation tests<commit_after>
from urllib.parse import urlparse from django.urls import reverse from tests.utils import BaseViewTest class BitbucketIdentityTest(BaseViewTest): def test_wrong_provider_raises_404(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'undefined'}) resp = self.client.get(auth_path) assert resp.status_code == 404 def test_get_redirects_to_login(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.get(auth_path) redirect = urlparse(resp['Location']) assert resp.status_code == 302 assert redirect.path == reverse('users:login') def test_flow(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.post(auth_path) assert resp.status_code == 302 redirect = urlparse(resp['Location']) assert redirect.scheme == 'https' assert redirect.netloc == 'bitbucket.org' assert redirect.path == '/site/oauth2/authorize'
Add bitbucket identity creation testsfrom urllib.parse import urlparse from django.urls import reverse from tests.utils import BaseViewTest class BitbucketIdentityTest(BaseViewTest): def test_wrong_provider_raises_404(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'undefined'}) resp = self.client.get(auth_path) assert resp.status_code == 404 def test_get_redirects_to_login(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.get(auth_path) redirect = urlparse(resp['Location']) assert resp.status_code == 302 assert redirect.path == reverse('users:login') def test_flow(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.post(auth_path) assert resp.status_code == 302 redirect = urlparse(resp['Location']) assert redirect.scheme == 'https' assert redirect.netloc == 'bitbucket.org' assert redirect.path == '/site/oauth2/authorize'
<commit_before><commit_msg>Add bitbucket identity creation tests<commit_after>from urllib.parse import urlparse from django.urls import reverse from tests.utils import BaseViewTest class BitbucketIdentityTest(BaseViewTest): def test_wrong_provider_raises_404(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'undefined'}) resp = self.client.get(auth_path) assert resp.status_code == 404 def test_get_redirects_to_login(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.get(auth_path) redirect = urlparse(resp['Location']) assert resp.status_code == 302 assert redirect.path == reverse('users:login') def test_flow(self): auth_path = reverse('sso:create_identity', kwargs={'provider': 'bitbucket'}) resp = self.client.post(auth_path) assert resp.status_code == 302 redirect = urlparse(resp['Location']) assert redirect.scheme == 'https' assert redirect.netloc == 'bitbucket.org' assert redirect.path == '/site/oauth2/authorize'
5519ccdeceffa72bd1822e2d6d42f9215ea7229e
mzalendo/core/management/commands/core_import_keynan_boundaries_2013.py
mzalendo/core/management/commands/core_import_keynan_boundaries_2013.py
# This script imports the boundaries for the 2013 Kenyan election into # MapIt - it uses the generic mapit_import script. import json import os import sys import urllib from tempfile import NamedTemporaryFile from optparse import make_option from django.core.management import call_command from django.core.management.base import NoArgsCommand from mapit.models import Generation class Command(NoArgsCommand): help = 'Import boundaries for the 2013 election' option_list = NoArgsCommand.option_list + ( make_option('--commit', action='store_true', dest='commit', help='Actually update the database'), ) def handle_noargs(self, **options): new = Generation.objects.new() if not new: raise Exception, "There's no new inactive generation to import into" geojson_urls = (('dis', 'COUNTY_NAM', 'http://vote.iebc.or.ke/js/counties.geojson'), ('con', 'CONSTITUEN', 'http://vote.iebc.or.ke/js/constituencies.geojson')) for area_type_code, name_field, url in geojson_urls: f = urllib.urlopen(url) data = json.load(f) f.close() data['features'] = [f for f in data['features'] if f['properties']['COUNTY_NAM']] with NamedTemporaryFile(delete=False) as ntf: json.dump(data, ntf) print >> sys.stderr, ntf.name keyword_arguments = {'generation_id': new.id, 'area_type_code': area_type_code, 'name_type_code': 'name', 'country_code': 'k', 'name_field': name_field, 'code_field': None, 'code_type': None, 'encoding': None, 'preserve': True, 'verbose': True, 'use_code_as_id': None} keyword_arguments.update(options) call_command('mapit_import', ntf.name, **keyword_arguments) os.remove(ntf.name)
Add a command that imports the 2013 election boundaries
Add a command that imports the 2013 election boundaries This uses the generic mapit_import script via django.core.management.call_command.
Python
agpl-3.0
patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,hzj123/56th,hzj123/56th,patricmutwiri/pombola,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,hzj123/56th,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,patricmutwiri/pombola,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,geoffkilpin/pombola,geoffkilpin/pombola,mysociety/pombola,ken-muturi/pombola,mysociety/pombola
Add a command that imports the 2013 election boundaries This uses the generic mapit_import script via django.core.management.call_command.
# This script imports the boundaries for the 2013 Kenyan election into # MapIt - it uses the generic mapit_import script. import json import os import sys import urllib from tempfile import NamedTemporaryFile from optparse import make_option from django.core.management import call_command from django.core.management.base import NoArgsCommand from mapit.models import Generation class Command(NoArgsCommand): help = 'Import boundaries for the 2013 election' option_list = NoArgsCommand.option_list + ( make_option('--commit', action='store_true', dest='commit', help='Actually update the database'), ) def handle_noargs(self, **options): new = Generation.objects.new() if not new: raise Exception, "There's no new inactive generation to import into" geojson_urls = (('dis', 'COUNTY_NAM', 'http://vote.iebc.or.ke/js/counties.geojson'), ('con', 'CONSTITUEN', 'http://vote.iebc.or.ke/js/constituencies.geojson')) for area_type_code, name_field, url in geojson_urls: f = urllib.urlopen(url) data = json.load(f) f.close() data['features'] = [f for f in data['features'] if f['properties']['COUNTY_NAM']] with NamedTemporaryFile(delete=False) as ntf: json.dump(data, ntf) print >> sys.stderr, ntf.name keyword_arguments = {'generation_id': new.id, 'area_type_code': area_type_code, 'name_type_code': 'name', 'country_code': 'k', 'name_field': name_field, 'code_field': None, 'code_type': None, 'encoding': None, 'preserve': True, 'verbose': True, 'use_code_as_id': None} keyword_arguments.update(options) call_command('mapit_import', ntf.name, **keyword_arguments) os.remove(ntf.name)
<commit_before><commit_msg>Add a command that imports the 2013 election boundaries This uses the generic mapit_import script via django.core.management.call_command.<commit_after>
# This script imports the boundaries for the 2013 Kenyan election into # MapIt - it uses the generic mapit_import script. import json import os import sys import urllib from tempfile import NamedTemporaryFile from optparse import make_option from django.core.management import call_command from django.core.management.base import NoArgsCommand from mapit.models import Generation class Command(NoArgsCommand): help = 'Import boundaries for the 2013 election' option_list = NoArgsCommand.option_list + ( make_option('--commit', action='store_true', dest='commit', help='Actually update the database'), ) def handle_noargs(self, **options): new = Generation.objects.new() if not new: raise Exception, "There's no new inactive generation to import into" geojson_urls = (('dis', 'COUNTY_NAM', 'http://vote.iebc.or.ke/js/counties.geojson'), ('con', 'CONSTITUEN', 'http://vote.iebc.or.ke/js/constituencies.geojson')) for area_type_code, name_field, url in geojson_urls: f = urllib.urlopen(url) data = json.load(f) f.close() data['features'] = [f for f in data['features'] if f['properties']['COUNTY_NAM']] with NamedTemporaryFile(delete=False) as ntf: json.dump(data, ntf) print >> sys.stderr, ntf.name keyword_arguments = {'generation_id': new.id, 'area_type_code': area_type_code, 'name_type_code': 'name', 'country_code': 'k', 'name_field': name_field, 'code_field': None, 'code_type': None, 'encoding': None, 'preserve': True, 'verbose': True, 'use_code_as_id': None} keyword_arguments.update(options) call_command('mapit_import', ntf.name, **keyword_arguments) os.remove(ntf.name)
Add a command that imports the 2013 election boundaries This uses the generic mapit_import script via django.core.management.call_command.# This script imports the boundaries for the 2013 Kenyan election into # MapIt - it uses the generic mapit_import script. import json import os import sys import urllib from tempfile import NamedTemporaryFile from optparse import make_option from django.core.management import call_command from django.core.management.base import NoArgsCommand from mapit.models import Generation class Command(NoArgsCommand): help = 'Import boundaries for the 2013 election' option_list = NoArgsCommand.option_list + ( make_option('--commit', action='store_true', dest='commit', help='Actually update the database'), ) def handle_noargs(self, **options): new = Generation.objects.new() if not new: raise Exception, "There's no new inactive generation to import into" geojson_urls = (('dis', 'COUNTY_NAM', 'http://vote.iebc.or.ke/js/counties.geojson'), ('con', 'CONSTITUEN', 'http://vote.iebc.or.ke/js/constituencies.geojson')) for area_type_code, name_field, url in geojson_urls: f = urllib.urlopen(url) data = json.load(f) f.close() data['features'] = [f for f in data['features'] if f['properties']['COUNTY_NAM']] with NamedTemporaryFile(delete=False) as ntf: json.dump(data, ntf) print >> sys.stderr, ntf.name keyword_arguments = {'generation_id': new.id, 'area_type_code': area_type_code, 'name_type_code': 'name', 'country_code': 'k', 'name_field': name_field, 'code_field': None, 'code_type': None, 'encoding': None, 'preserve': True, 'verbose': True, 'use_code_as_id': None} keyword_arguments.update(options) call_command('mapit_import', ntf.name, **keyword_arguments) os.remove(ntf.name)
<commit_before><commit_msg>Add a command that imports the 2013 election boundaries This uses the generic mapit_import script via django.core.management.call_command.<commit_after># This script imports the boundaries for the 2013 Kenyan election into # MapIt - it uses the generic mapit_import script. import json import os import sys import urllib from tempfile import NamedTemporaryFile from optparse import make_option from django.core.management import call_command from django.core.management.base import NoArgsCommand from mapit.models import Generation class Command(NoArgsCommand): help = 'Import boundaries for the 2013 election' option_list = NoArgsCommand.option_list + ( make_option('--commit', action='store_true', dest='commit', help='Actually update the database'), ) def handle_noargs(self, **options): new = Generation.objects.new() if not new: raise Exception, "There's no new inactive generation to import into" geojson_urls = (('dis', 'COUNTY_NAM', 'http://vote.iebc.or.ke/js/counties.geojson'), ('con', 'CONSTITUEN', 'http://vote.iebc.or.ke/js/constituencies.geojson')) for area_type_code, name_field, url in geojson_urls: f = urllib.urlopen(url) data = json.load(f) f.close() data['features'] = [f for f in data['features'] if f['properties']['COUNTY_NAM']] with NamedTemporaryFile(delete=False) as ntf: json.dump(data, ntf) print >> sys.stderr, ntf.name keyword_arguments = {'generation_id': new.id, 'area_type_code': area_type_code, 'name_type_code': 'name', 'country_code': 'k', 'name_field': name_field, 'code_field': None, 'code_type': None, 'encoding': None, 'preserve': True, 'verbose': True, 'use_code_as_id': None} keyword_arguments.update(options) call_command('mapit_import', ntf.name, **keyword_arguments) os.remove(ntf.name)
d6020572b9882a26939c71cb5d6ea795f0ef2d44
tests/test_spans.py
tests/test_spans.py
import unittest import datetime import hiro from sifr.span import Minute, Year, Month, Day, Hour class SpanTests(unittest.TestCase): def test_minute(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Minute(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00:00") def test_hour(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Hour(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00") def test_day(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Day(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12") def test_month(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Month(now, ["single"]) self.assertEqual(span.key, "single:2012-12") def test_year(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Year(now, ["single"]) self.assertEqual(span.key, "single:2012")
Add basic tests for spans
Add basic tests for spans
Python
mit
alisaifee/sifr,alisaifee/sifr
Add basic tests for spans
import unittest import datetime import hiro from sifr.span import Minute, Year, Month, Day, Hour class SpanTests(unittest.TestCase): def test_minute(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Minute(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00:00") def test_hour(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Hour(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00") def test_day(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Day(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12") def test_month(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Month(now, ["single"]) self.assertEqual(span.key, "single:2012-12") def test_year(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Year(now, ["single"]) self.assertEqual(span.key, "single:2012")
<commit_before><commit_msg>Add basic tests for spans<commit_after>
import unittest import datetime import hiro from sifr.span import Minute, Year, Month, Day, Hour class SpanTests(unittest.TestCase): def test_minute(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Minute(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00:00") def test_hour(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Hour(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00") def test_day(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Day(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12") def test_month(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Month(now, ["single"]) self.assertEqual(span.key, "single:2012-12") def test_year(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Year(now, ["single"]) self.assertEqual(span.key, "single:2012")
Add basic tests for spansimport unittest import datetime import hiro from sifr.span import Minute, Year, Month, Day, Hour class SpanTests(unittest.TestCase): def test_minute(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Minute(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00:00") def test_hour(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Hour(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00") def test_day(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Day(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12") def test_month(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Month(now, ["single"]) self.assertEqual(span.key, "single:2012-12") def test_year(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Year(now, ["single"]) self.assertEqual(span.key, "single:2012")
<commit_before><commit_msg>Add basic tests for spans<commit_after>import unittest import datetime import hiro from sifr.span import Minute, Year, Month, Day, Hour class SpanTests(unittest.TestCase): def test_minute(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Minute(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00:00") def test_hour(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Hour(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12_00") def test_day(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Day(now, ["single"]) self.assertEqual(span.key, "single:2012-12-12") def test_month(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Month(now, ["single"]) self.assertEqual(span.key, "single:2012-12") def test_year(self): with hiro.Timeline().freeze(datetime.datetime(2012, 12, 12)): now = datetime.datetime.now() span = Year(now, ["single"]) self.assertEqual(span.key, "single:2012")
c7621caf782e44c76d2477813726ea35de00d49c
tests/test_store.py
tests/test_store.py
# pylint: disable=C0103,C0111,W0703 import unittest from bumblebee.store import Store class TestStore(unittest.TestCase): def setUp(self): self.store = Store() self.anyKey = "some-key" self.anyValue = "some-value" self.unsetKey = "invalid-key" def test_set_value(self): self.store.set(self.anyKey, self.anyValue) self.assertEquals(self.store.get(self.anyKey), self.anyValue) def test_get_invalid_value(self): result = self.store.get(self.unsetKey) self.assertEquals(result, None) def test_get_invalid_with_default_value(self): result = self.store.get(self.unsetKey, self.anyValue) self.assertEquals(result, self.anyValue) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
Add unit tests for the store class
[tests] Add unit tests for the store class
Python
mit
tobi-wan-kenobi/bumblebee-status,tobi-wan-kenobi/bumblebee-status
[tests] Add unit tests for the store class
# pylint: disable=C0103,C0111,W0703 import unittest from bumblebee.store import Store class TestStore(unittest.TestCase): def setUp(self): self.store = Store() self.anyKey = "some-key" self.anyValue = "some-value" self.unsetKey = "invalid-key" def test_set_value(self): self.store.set(self.anyKey, self.anyValue) self.assertEquals(self.store.get(self.anyKey), self.anyValue) def test_get_invalid_value(self): result = self.store.get(self.unsetKey) self.assertEquals(result, None) def test_get_invalid_with_default_value(self): result = self.store.get(self.unsetKey, self.anyValue) self.assertEquals(result, self.anyValue) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
<commit_before><commit_msg>[tests] Add unit tests for the store class<commit_after>
# pylint: disable=C0103,C0111,W0703 import unittest from bumblebee.store import Store class TestStore(unittest.TestCase): def setUp(self): self.store = Store() self.anyKey = "some-key" self.anyValue = "some-value" self.unsetKey = "invalid-key" def test_set_value(self): self.store.set(self.anyKey, self.anyValue) self.assertEquals(self.store.get(self.anyKey), self.anyValue) def test_get_invalid_value(self): result = self.store.get(self.unsetKey) self.assertEquals(result, None) def test_get_invalid_with_default_value(self): result = self.store.get(self.unsetKey, self.anyValue) self.assertEquals(result, self.anyValue) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
[tests] Add unit tests for the store class# pylint: disable=C0103,C0111,W0703 import unittest from bumblebee.store import Store class TestStore(unittest.TestCase): def setUp(self): self.store = Store() self.anyKey = "some-key" self.anyValue = "some-value" self.unsetKey = "invalid-key" def test_set_value(self): self.store.set(self.anyKey, self.anyValue) self.assertEquals(self.store.get(self.anyKey), self.anyValue) def test_get_invalid_value(self): result = self.store.get(self.unsetKey) self.assertEquals(result, None) def test_get_invalid_with_default_value(self): result = self.store.get(self.unsetKey, self.anyValue) self.assertEquals(result, self.anyValue) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
<commit_before><commit_msg>[tests] Add unit tests for the store class<commit_after># pylint: disable=C0103,C0111,W0703 import unittest from bumblebee.store import Store class TestStore(unittest.TestCase): def setUp(self): self.store = Store() self.anyKey = "some-key" self.anyValue = "some-value" self.unsetKey = "invalid-key" def test_set_value(self): self.store.set(self.anyKey, self.anyValue) self.assertEquals(self.store.get(self.anyKey), self.anyValue) def test_get_invalid_value(self): result = self.store.get(self.unsetKey) self.assertEquals(result, None) def test_get_invalid_with_default_value(self): result = self.store.get(self.unsetKey, self.anyValue) self.assertEquals(result, self.anyValue) # vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4