commit
stringlengths 40
40
| old_file
stringlengths 4
118
| new_file
stringlengths 4
118
| old_contents
stringlengths 0
2.94k
| new_contents
stringlengths 1
4.43k
| subject
stringlengths 15
444
| message
stringlengths 16
3.45k
| lang
stringclasses 1
value | license
stringclasses 13
values | repos
stringlengths 5
43.2k
| prompt
stringlengths 17
4.58k
| response
stringlengths 1
4.43k
| prompt_tagged
stringlengths 58
4.62k
| response_tagged
stringlengths 1
4.43k
| text
stringlengths 132
7.29k
| text_tagged
stringlengths 173
7.33k
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
51a7d69b6c8cd1cceaccbb0288e1d9e222619b61
|
tests/test_controllers/test_hellocontroller.py
|
tests/test_controllers/test_hellocontroller.py
|
import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from pkg_resources import require
require("mock")
from mock import Mock
from malcolm.controllers.hellocontroller import HelloController
class TestHelloController(unittest.TestCase):
def test_init(self):
block = Mock()
c = HelloController(block)
self.assertIs(block, c.block)
block.add_method.assert_called_once()
self.assertEquals(c.say_hello, block.add_method.call_args[0][0].func)
def test_say_hello(self):
c = HelloController(Mock())
args = {"name":"test_name"}
expected = {"greeting":"Hello test_name"}
self.assertEquals(expected, c.say_hello(args))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit test for HelloController
|
Add unit test for HelloController
|
Python
|
apache-2.0
|
dls-controls/pymalcolm,dls-controls/pymalcolm,dls-controls/pymalcolm
|
Add unit test for HelloController
|
import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from pkg_resources import require
require("mock")
from mock import Mock
from malcolm.controllers.hellocontroller import HelloController
class TestHelloController(unittest.TestCase):
def test_init(self):
block = Mock()
c = HelloController(block)
self.assertIs(block, c.block)
block.add_method.assert_called_once()
self.assertEquals(c.say_hello, block.add_method.call_args[0][0].func)
def test_say_hello(self):
c = HelloController(Mock())
args = {"name":"test_name"}
expected = {"greeting":"Hello test_name"}
self.assertEquals(expected, c.say_hello(args))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit test for HelloController<commit_after>
|
import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from pkg_resources import require
require("mock")
from mock import Mock
from malcolm.controllers.hellocontroller import HelloController
class TestHelloController(unittest.TestCase):
def test_init(self):
block = Mock()
c = HelloController(block)
self.assertIs(block, c.block)
block.add_method.assert_called_once()
self.assertEquals(c.say_hello, block.add_method.call_args[0][0].func)
def test_say_hello(self):
c = HelloController(Mock())
args = {"name":"test_name"}
expected = {"greeting":"Hello test_name"}
self.assertEquals(expected, c.say_hello(args))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit test for HelloControllerimport unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from pkg_resources import require
require("mock")
from mock import Mock
from malcolm.controllers.hellocontroller import HelloController
class TestHelloController(unittest.TestCase):
def test_init(self):
block = Mock()
c = HelloController(block)
self.assertIs(block, c.block)
block.add_method.assert_called_once()
self.assertEquals(c.say_hello, block.add_method.call_args[0][0].func)
def test_say_hello(self):
c = HelloController(Mock())
args = {"name":"test_name"}
expected = {"greeting":"Hello test_name"}
self.assertEquals(expected, c.say_hello(args))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit test for HelloController<commit_after>import unittest
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))
from pkg_resources import require
require("mock")
from mock import Mock
from malcolm.controllers.hellocontroller import HelloController
class TestHelloController(unittest.TestCase):
def test_init(self):
block = Mock()
c = HelloController(block)
self.assertIs(block, c.block)
block.add_method.assert_called_once()
self.assertEquals(c.say_hello, block.add_method.call_args[0][0].func)
def test_say_hello(self):
c = HelloController(Mock())
args = {"name":"test_name"}
expected = {"greeting":"Hello test_name"}
self.assertEquals(expected, c.say_hello(args))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
f1b09372f3a4286925ba5892c3f5fded399932bb
|
generate-data-using-jmeter/05-fabric-create-salesman-order/fabfile.py
|
generate-data-using-jmeter/05-fabric-create-salesman-order/fabfile.py
|
from datetime import datetime, timedelta
from fabric.api import local, task, lcd
import random
@task
def create_salesman_order():
"""
Create Sales Order GT
# 1. date between START and END.
# 2. work from Monday to Saturday.
# 3. create number_order_on_day.
# 4. time between 08:00 and 17:00.
"""
PATH_JMETER = '/home/coc/Applications/apache-jmeter/bin/jmeter'
PATH_JMETER_SCRIPT_DIR = '/home/coc/lab/jmeter/create-salesman-order/'
JMETER_SCRIPT = 'create-order-by-salesman.jmx'
MIN_NUMBER_ORDER_PER_DAY = 0
MAX_NUMBER_ORDER_PER_DAY = 3
START_DATE_STR = '2015-05-13 00:00:00'
END_DATE_STR = '2015-12-31 00:00:00'
START_SECOND = 8*60*60
END_SECOND = 17*60*60
JMETER_COMMAND = '{} -n -t {}'.format(PATH_JMETER, JMETER_SCRIPT)
START_DATE = datetime.strptime(START_DATE_STR, '%Y-%m-%d %H:%M:%S')
END_DATE = datetime.strptime(END_DATE_STR, '%Y-%m-%d %H:%M:%S')
day = START_DATE
while day <= END_DATE:
# don't work on Sunday
if day.weekday() == 6:
day = day + timedelta(days=1)
continue
number_order_on_day = random.randint(MIN_NUMBER_ORDER_PER_DAY, MAX_NUMBER_ORDER_PER_DAY)
print('Number order on day:', number_order_on_day)
time_shifts = sorted([random.randint(START_SECOND, END_SECOND)
for _ in range(number_order_on_day)])
for time_shift in time_shifts:
t = day + timedelta(seconds=time_shift)
local('sudo date -s "%s"' %(datetime.strftime(t, '%Y-%m-%d %H:%M:%S')))
local('date --rfc-3339=seconds >> order_date.log')
with lcd(PATH_JMETER_SCRIPT):
local(JMETER_COMMAND)
day = day + timedelta(days=1)
|
Add generate data using jmeter: add fabric script.
|
Add generate data using jmeter: add fabric script.
|
Python
|
mit
|
tranhuucuong91/test-tools
|
Add generate data using jmeter: add fabric script.
|
from datetime import datetime, timedelta
from fabric.api import local, task, lcd
import random
@task
def create_salesman_order():
"""
Create Sales Order GT
# 1. date between START and END.
# 2. work from Monday to Saturday.
# 3. create number_order_on_day.
# 4. time between 08:00 and 17:00.
"""
PATH_JMETER = '/home/coc/Applications/apache-jmeter/bin/jmeter'
PATH_JMETER_SCRIPT_DIR = '/home/coc/lab/jmeter/create-salesman-order/'
JMETER_SCRIPT = 'create-order-by-salesman.jmx'
MIN_NUMBER_ORDER_PER_DAY = 0
MAX_NUMBER_ORDER_PER_DAY = 3
START_DATE_STR = '2015-05-13 00:00:00'
END_DATE_STR = '2015-12-31 00:00:00'
START_SECOND = 8*60*60
END_SECOND = 17*60*60
JMETER_COMMAND = '{} -n -t {}'.format(PATH_JMETER, JMETER_SCRIPT)
START_DATE = datetime.strptime(START_DATE_STR, '%Y-%m-%d %H:%M:%S')
END_DATE = datetime.strptime(END_DATE_STR, '%Y-%m-%d %H:%M:%S')
day = START_DATE
while day <= END_DATE:
# don't work on Sunday
if day.weekday() == 6:
day = day + timedelta(days=1)
continue
number_order_on_day = random.randint(MIN_NUMBER_ORDER_PER_DAY, MAX_NUMBER_ORDER_PER_DAY)
print('Number order on day:', number_order_on_day)
time_shifts = sorted([random.randint(START_SECOND, END_SECOND)
for _ in range(number_order_on_day)])
for time_shift in time_shifts:
t = day + timedelta(seconds=time_shift)
local('sudo date -s "%s"' %(datetime.strftime(t, '%Y-%m-%d %H:%M:%S')))
local('date --rfc-3339=seconds >> order_date.log')
with lcd(PATH_JMETER_SCRIPT):
local(JMETER_COMMAND)
day = day + timedelta(days=1)
|
<commit_before><commit_msg>Add generate data using jmeter: add fabric script.<commit_after>
|
from datetime import datetime, timedelta
from fabric.api import local, task, lcd
import random
@task
def create_salesman_order():
"""
Create Sales Order GT
# 1. date between START and END.
# 2. work from Monday to Saturday.
# 3. create number_order_on_day.
# 4. time between 08:00 and 17:00.
"""
PATH_JMETER = '/home/coc/Applications/apache-jmeter/bin/jmeter'
PATH_JMETER_SCRIPT_DIR = '/home/coc/lab/jmeter/create-salesman-order/'
JMETER_SCRIPT = 'create-order-by-salesman.jmx'
MIN_NUMBER_ORDER_PER_DAY = 0
MAX_NUMBER_ORDER_PER_DAY = 3
START_DATE_STR = '2015-05-13 00:00:00'
END_DATE_STR = '2015-12-31 00:00:00'
START_SECOND = 8*60*60
END_SECOND = 17*60*60
JMETER_COMMAND = '{} -n -t {}'.format(PATH_JMETER, JMETER_SCRIPT)
START_DATE = datetime.strptime(START_DATE_STR, '%Y-%m-%d %H:%M:%S')
END_DATE = datetime.strptime(END_DATE_STR, '%Y-%m-%d %H:%M:%S')
day = START_DATE
while day <= END_DATE:
# don't work on Sunday
if day.weekday() == 6:
day = day + timedelta(days=1)
continue
number_order_on_day = random.randint(MIN_NUMBER_ORDER_PER_DAY, MAX_NUMBER_ORDER_PER_DAY)
print('Number order on day:', number_order_on_day)
time_shifts = sorted([random.randint(START_SECOND, END_SECOND)
for _ in range(number_order_on_day)])
for time_shift in time_shifts:
t = day + timedelta(seconds=time_shift)
local('sudo date -s "%s"' %(datetime.strftime(t, '%Y-%m-%d %H:%M:%S')))
local('date --rfc-3339=seconds >> order_date.log')
with lcd(PATH_JMETER_SCRIPT):
local(JMETER_COMMAND)
day = day + timedelta(days=1)
|
Add generate data using jmeter: add fabric script.from datetime import datetime, timedelta
from fabric.api import local, task, lcd
import random
@task
def create_salesman_order():
"""
Create Sales Order GT
# 1. date between START and END.
# 2. work from Monday to Saturday.
# 3. create number_order_on_day.
# 4. time between 08:00 and 17:00.
"""
PATH_JMETER = '/home/coc/Applications/apache-jmeter/bin/jmeter'
PATH_JMETER_SCRIPT_DIR = '/home/coc/lab/jmeter/create-salesman-order/'
JMETER_SCRIPT = 'create-order-by-salesman.jmx'
MIN_NUMBER_ORDER_PER_DAY = 0
MAX_NUMBER_ORDER_PER_DAY = 3
START_DATE_STR = '2015-05-13 00:00:00'
END_DATE_STR = '2015-12-31 00:00:00'
START_SECOND = 8*60*60
END_SECOND = 17*60*60
JMETER_COMMAND = '{} -n -t {}'.format(PATH_JMETER, JMETER_SCRIPT)
START_DATE = datetime.strptime(START_DATE_STR, '%Y-%m-%d %H:%M:%S')
END_DATE = datetime.strptime(END_DATE_STR, '%Y-%m-%d %H:%M:%S')
day = START_DATE
while day <= END_DATE:
# don't work on Sunday
if day.weekday() == 6:
day = day + timedelta(days=1)
continue
number_order_on_day = random.randint(MIN_NUMBER_ORDER_PER_DAY, MAX_NUMBER_ORDER_PER_DAY)
print('Number order on day:', number_order_on_day)
time_shifts = sorted([random.randint(START_SECOND, END_SECOND)
for _ in range(number_order_on_day)])
for time_shift in time_shifts:
t = day + timedelta(seconds=time_shift)
local('sudo date -s "%s"' %(datetime.strftime(t, '%Y-%m-%d %H:%M:%S')))
local('date --rfc-3339=seconds >> order_date.log')
with lcd(PATH_JMETER_SCRIPT):
local(JMETER_COMMAND)
day = day + timedelta(days=1)
|
<commit_before><commit_msg>Add generate data using jmeter: add fabric script.<commit_after>from datetime import datetime, timedelta
from fabric.api import local, task, lcd
import random
@task
def create_salesman_order():
"""
Create Sales Order GT
# 1. date between START and END.
# 2. work from Monday to Saturday.
# 3. create number_order_on_day.
# 4. time between 08:00 and 17:00.
"""
PATH_JMETER = '/home/coc/Applications/apache-jmeter/bin/jmeter'
PATH_JMETER_SCRIPT_DIR = '/home/coc/lab/jmeter/create-salesman-order/'
JMETER_SCRIPT = 'create-order-by-salesman.jmx'
MIN_NUMBER_ORDER_PER_DAY = 0
MAX_NUMBER_ORDER_PER_DAY = 3
START_DATE_STR = '2015-05-13 00:00:00'
END_DATE_STR = '2015-12-31 00:00:00'
START_SECOND = 8*60*60
END_SECOND = 17*60*60
JMETER_COMMAND = '{} -n -t {}'.format(PATH_JMETER, JMETER_SCRIPT)
START_DATE = datetime.strptime(START_DATE_STR, '%Y-%m-%d %H:%M:%S')
END_DATE = datetime.strptime(END_DATE_STR, '%Y-%m-%d %H:%M:%S')
day = START_DATE
while day <= END_DATE:
# don't work on Sunday
if day.weekday() == 6:
day = day + timedelta(days=1)
continue
number_order_on_day = random.randint(MIN_NUMBER_ORDER_PER_DAY, MAX_NUMBER_ORDER_PER_DAY)
print('Number order on day:', number_order_on_day)
time_shifts = sorted([random.randint(START_SECOND, END_SECOND)
for _ in range(number_order_on_day)])
for time_shift in time_shifts:
t = day + timedelta(seconds=time_shift)
local('sudo date -s "%s"' %(datetime.strftime(t, '%Y-%m-%d %H:%M:%S')))
local('date --rfc-3339=seconds >> order_date.log')
with lcd(PATH_JMETER_SCRIPT):
local(JMETER_COMMAND)
day = day + timedelta(days=1)
|
|
29bd89c6ca182afa8b5f163ddf1bd985bec4ed3f
|
release-scripts/update-canonical-urls.py
|
release-scripts/update-canonical-urls.py
|
#!/usr/bin/env python
import os
import re
VERSION_MASK = "__version__"
def split_version(f):
m = re.match("(master|v[0-9]+\\.[0-9]+)/", f)
if m:
return m.group(1), f[:m.start(1)] + VERSION_MASK + f[m.end(1):-3]
return None, f[:-3]
def version_later_than(v1, v2):
# Basic implementation for now. Improve if we ever go past v9.Y
# or vX.9!
return v1 > v2
if __name__ == "__main__":
# Find all the .md files.
md_files = []
for root, _, files in os.walk("."):
md_files = md_files + [os.path.join(root, f)[2:]
for f in filter(lambda fn: fn.endswith(".md"),
files)]
# For each file, find the latest available version with a parallel
# file name.
masked_to_latest_version = {}
for f in md_files:
version, masked = split_version(f)
if version:
latest_version = masked_to_latest_version.get(masked)
if latest_version and version_later_than(latest_version, version):
pass
else:
masked_to_latest_version[masked] = version
for f in md_files:
version, masked = split_version(f)
latest_version = masked_to_latest_version.get(masked)
if latest_version:
print masked, "->", latest_version
c = ("sed -i \"" +
"s,^canonical_url:.*,canonical_url: " +
"'https://docs.projectcalico.org/%s',\" %s" % (
masked.replace(VERSION_MASK, latest_version),
f))
#print c
os.system(c)
|
Automate calculation of canonical URLs
|
Automate calculation of canonical URLs
|
Python
|
apache-2.0
|
bcreane/calico,bcreane/calico,bcreane/calico,bcreane/calico,bcreane/calico,bcreane/calico
|
Automate calculation of canonical URLs
|
#!/usr/bin/env python
import os
import re
VERSION_MASK = "__version__"
def split_version(f):
m = re.match("(master|v[0-9]+\\.[0-9]+)/", f)
if m:
return m.group(1), f[:m.start(1)] + VERSION_MASK + f[m.end(1):-3]
return None, f[:-3]
def version_later_than(v1, v2):
# Basic implementation for now. Improve if we ever go past v9.Y
# or vX.9!
return v1 > v2
if __name__ == "__main__":
# Find all the .md files.
md_files = []
for root, _, files in os.walk("."):
md_files = md_files + [os.path.join(root, f)[2:]
for f in filter(lambda fn: fn.endswith(".md"),
files)]
# For each file, find the latest available version with a parallel
# file name.
masked_to_latest_version = {}
for f in md_files:
version, masked = split_version(f)
if version:
latest_version = masked_to_latest_version.get(masked)
if latest_version and version_later_than(latest_version, version):
pass
else:
masked_to_latest_version[masked] = version
for f in md_files:
version, masked = split_version(f)
latest_version = masked_to_latest_version.get(masked)
if latest_version:
print masked, "->", latest_version
c = ("sed -i \"" +
"s,^canonical_url:.*,canonical_url: " +
"'https://docs.projectcalico.org/%s',\" %s" % (
masked.replace(VERSION_MASK, latest_version),
f))
#print c
os.system(c)
|
<commit_before><commit_msg>Automate calculation of canonical URLs<commit_after>
|
#!/usr/bin/env python
import os
import re
VERSION_MASK = "__version__"
def split_version(f):
m = re.match("(master|v[0-9]+\\.[0-9]+)/", f)
if m:
return m.group(1), f[:m.start(1)] + VERSION_MASK + f[m.end(1):-3]
return None, f[:-3]
def version_later_than(v1, v2):
# Basic implementation for now. Improve if we ever go past v9.Y
# or vX.9!
return v1 > v2
if __name__ == "__main__":
# Find all the .md files.
md_files = []
for root, _, files in os.walk("."):
md_files = md_files + [os.path.join(root, f)[2:]
for f in filter(lambda fn: fn.endswith(".md"),
files)]
# For each file, find the latest available version with a parallel
# file name.
masked_to_latest_version = {}
for f in md_files:
version, masked = split_version(f)
if version:
latest_version = masked_to_latest_version.get(masked)
if latest_version and version_later_than(latest_version, version):
pass
else:
masked_to_latest_version[masked] = version
for f in md_files:
version, masked = split_version(f)
latest_version = masked_to_latest_version.get(masked)
if latest_version:
print masked, "->", latest_version
c = ("sed -i \"" +
"s,^canonical_url:.*,canonical_url: " +
"'https://docs.projectcalico.org/%s',\" %s" % (
masked.replace(VERSION_MASK, latest_version),
f))
#print c
os.system(c)
|
Automate calculation of canonical URLs#!/usr/bin/env python
import os
import re
VERSION_MASK = "__version__"
def split_version(f):
m = re.match("(master|v[0-9]+\\.[0-9]+)/", f)
if m:
return m.group(1), f[:m.start(1)] + VERSION_MASK + f[m.end(1):-3]
return None, f[:-3]
def version_later_than(v1, v2):
# Basic implementation for now. Improve if we ever go past v9.Y
# or vX.9!
return v1 > v2
if __name__ == "__main__":
# Find all the .md files.
md_files = []
for root, _, files in os.walk("."):
md_files = md_files + [os.path.join(root, f)[2:]
for f in filter(lambda fn: fn.endswith(".md"),
files)]
# For each file, find the latest available version with a parallel
# file name.
masked_to_latest_version = {}
for f in md_files:
version, masked = split_version(f)
if version:
latest_version = masked_to_latest_version.get(masked)
if latest_version and version_later_than(latest_version, version):
pass
else:
masked_to_latest_version[masked] = version
for f in md_files:
version, masked = split_version(f)
latest_version = masked_to_latest_version.get(masked)
if latest_version:
print masked, "->", latest_version
c = ("sed -i \"" +
"s,^canonical_url:.*,canonical_url: " +
"'https://docs.projectcalico.org/%s',\" %s" % (
masked.replace(VERSION_MASK, latest_version),
f))
#print c
os.system(c)
|
<commit_before><commit_msg>Automate calculation of canonical URLs<commit_after>#!/usr/bin/env python
import os
import re
VERSION_MASK = "__version__"
def split_version(f):
m = re.match("(master|v[0-9]+\\.[0-9]+)/", f)
if m:
return m.group(1), f[:m.start(1)] + VERSION_MASK + f[m.end(1):-3]
return None, f[:-3]
def version_later_than(v1, v2):
# Basic implementation for now. Improve if we ever go past v9.Y
# or vX.9!
return v1 > v2
if __name__ == "__main__":
# Find all the .md files.
md_files = []
for root, _, files in os.walk("."):
md_files = md_files + [os.path.join(root, f)[2:]
for f in filter(lambda fn: fn.endswith(".md"),
files)]
# For each file, find the latest available version with a parallel
# file name.
masked_to_latest_version = {}
for f in md_files:
version, masked = split_version(f)
if version:
latest_version = masked_to_latest_version.get(masked)
if latest_version and version_later_than(latest_version, version):
pass
else:
masked_to_latest_version[masked] = version
for f in md_files:
version, masked = split_version(f)
latest_version = masked_to_latest_version.get(masked)
if latest_version:
print masked, "->", latest_version
c = ("sed -i \"" +
"s,^canonical_url:.*,canonical_url: " +
"'https://docs.projectcalico.org/%s',\" %s" % (
masked.replace(VERSION_MASK, latest_version),
f))
#print c
os.system(c)
|
|
62fa48e28ab46c4b48e9fcd3cc909b80f83c1fb6
|
examples/Numbers/numbers.py
|
examples/Numbers/numbers.py
|
from bessie import BaseClient, Endpoint
class NumbersApi(BaseClient):
endpoints = [
Endpoint('GET', 'random', None),
Endpoint('GET', '<n>/trivia', None),
Endpoint('GET', '<n>/date', None),
Endpoint('GET', '<n1>/<n2>/date')
]
base_url='http://numbersapi.com'
if __name__ == '__main__':
api = NumbersApi()
print(api.n(4).trivia.get().text)
print(api.n(4).date.get().text)
print(api.n1(4).n2(3).date.get().text)
print(api.random.get(min=10, max=20).text)
|
Add very basic NumbersApi example
|
Add very basic NumbersApi example
|
Python
|
mit
|
andymitchhank/bessie
|
Add very basic NumbersApi example
|
from bessie import BaseClient, Endpoint
class NumbersApi(BaseClient):
endpoints = [
Endpoint('GET', 'random', None),
Endpoint('GET', '<n>/trivia', None),
Endpoint('GET', '<n>/date', None),
Endpoint('GET', '<n1>/<n2>/date')
]
base_url='http://numbersapi.com'
if __name__ == '__main__':
api = NumbersApi()
print(api.n(4).trivia.get().text)
print(api.n(4).date.get().text)
print(api.n1(4).n2(3).date.get().text)
print(api.random.get(min=10, max=20).text)
|
<commit_before><commit_msg>Add very basic NumbersApi example<commit_after>
|
from bessie import BaseClient, Endpoint
class NumbersApi(BaseClient):
endpoints = [
Endpoint('GET', 'random', None),
Endpoint('GET', '<n>/trivia', None),
Endpoint('GET', '<n>/date', None),
Endpoint('GET', '<n1>/<n2>/date')
]
base_url='http://numbersapi.com'
if __name__ == '__main__':
api = NumbersApi()
print(api.n(4).trivia.get().text)
print(api.n(4).date.get().text)
print(api.n1(4).n2(3).date.get().text)
print(api.random.get(min=10, max=20).text)
|
Add very basic NumbersApi examplefrom bessie import BaseClient, Endpoint
class NumbersApi(BaseClient):
endpoints = [
Endpoint('GET', 'random', None),
Endpoint('GET', '<n>/trivia', None),
Endpoint('GET', '<n>/date', None),
Endpoint('GET', '<n1>/<n2>/date')
]
base_url='http://numbersapi.com'
if __name__ == '__main__':
api = NumbersApi()
print(api.n(4).trivia.get().text)
print(api.n(4).date.get().text)
print(api.n1(4).n2(3).date.get().text)
print(api.random.get(min=10, max=20).text)
|
<commit_before><commit_msg>Add very basic NumbersApi example<commit_after>from bessie import BaseClient, Endpoint
class NumbersApi(BaseClient):
endpoints = [
Endpoint('GET', 'random', None),
Endpoint('GET', '<n>/trivia', None),
Endpoint('GET', '<n>/date', None),
Endpoint('GET', '<n1>/<n2>/date')
]
base_url='http://numbersapi.com'
if __name__ == '__main__':
api = NumbersApi()
print(api.n(4).trivia.get().text)
print(api.n(4).date.get().text)
print(api.n1(4).n2(3).date.get().text)
print(api.random.get(min=10, max=20).text)
|
|
eaed3dff1e478ee0ba9e1564ab22fc9ec059f166
|
skimage/transform/tests/test_pyramids.py
|
skimage/transform/tests/test_pyramids.py
|
from numpy.testing import assert_array_equal, run_module_suite
from skimage import data
from skimage.transform import (pyramid_reduce, pyramid_expand,
build_gaussian_pyramid, build_laplacian_pyramid)
image = data.lena()
def test_pyramid_reduce():
rows, cols, dim = image.shape
out = pyramid_reduce(image, factor=2)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_expand():
rows, cols, dim = image.shape
out = pyramid_expand(image, factor=2)
assert_array_equal(out.shape, (rows * 2, cols * 2, dim))
def test_build_gaussian_pyramid():
rows, cols, dim = image.shape
pyramid = build_gaussian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid():
rows, cols, dim = image.shape
pyramid = build_laplacian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
if __name__ == "__main__":
run_module_suite()
|
Add tests for pyramid functions
|
Add tests for pyramid functions
|
Python
|
bsd-3-clause
|
michaelpacer/scikit-image,almarklein/scikit-image,bennlich/scikit-image,paalge/scikit-image,emon10005/scikit-image,youprofit/scikit-image,GaZ3ll3/scikit-image,SamHames/scikit-image,warmspringwinds/scikit-image,bsipocz/scikit-image,pratapvardhan/scikit-image,SamHames/scikit-image,rjeli/scikit-image,vighneshbirodkar/scikit-image,robintw/scikit-image,vighneshbirodkar/scikit-image,chintak/scikit-image,GaZ3ll3/scikit-image,ajaybhat/scikit-image,keflavich/scikit-image,ajaybhat/scikit-image,SamHames/scikit-image,jwiggins/scikit-image,Hiyorimi/scikit-image,juliusbierk/scikit-image,Midafi/scikit-image,jwiggins/scikit-image,SamHames/scikit-image,chriscrosscutler/scikit-image,ofgulban/scikit-image,ofgulban/scikit-image,keflavich/scikit-image,rjeli/scikit-image,dpshelio/scikit-image,bsipocz/scikit-image,chintak/scikit-image,emon10005/scikit-image,michaelaye/scikit-image,almarklein/scikit-image,Midafi/scikit-image,almarklein/scikit-image,juliusbierk/scikit-image,vighneshbirodkar/scikit-image,paalge/scikit-image,robintw/scikit-image,blink1073/scikit-image,newville/scikit-image,michaelpacer/scikit-image,WarrenWeckesser/scikits-image,bennlich/scikit-image,WarrenWeckesser/scikits-image,chintak/scikit-image,chintak/scikit-image,ClinicalGraphics/scikit-image,chriscrosscutler/scikit-image,Britefury/scikit-image,paalge/scikit-image,pratapvardhan/scikit-image,youprofit/scikit-image,michaelaye/scikit-image,newville/scikit-image,oew1v07/scikit-image,ClinicalGraphics/scikit-image,rjeli/scikit-image,oew1v07/scikit-image,Hiyorimi/scikit-image,Britefury/scikit-image,blink1073/scikit-image,dpshelio/scikit-image,ofgulban/scikit-image,almarklein/scikit-image,warmspringwinds/scikit-image
|
Add tests for pyramid functions
|
from numpy.testing import assert_array_equal, run_module_suite
from skimage import data
from skimage.transform import (pyramid_reduce, pyramid_expand,
build_gaussian_pyramid, build_laplacian_pyramid)
image = data.lena()
def test_pyramid_reduce():
rows, cols, dim = image.shape
out = pyramid_reduce(image, factor=2)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_expand():
rows, cols, dim = image.shape
out = pyramid_expand(image, factor=2)
assert_array_equal(out.shape, (rows * 2, cols * 2, dim))
def test_build_gaussian_pyramid():
rows, cols, dim = image.shape
pyramid = build_gaussian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid():
rows, cols, dim = image.shape
pyramid = build_laplacian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add tests for pyramid functions<commit_after>
|
from numpy.testing import assert_array_equal, run_module_suite
from skimage import data
from skimage.transform import (pyramid_reduce, pyramid_expand,
build_gaussian_pyramid, build_laplacian_pyramid)
image = data.lena()
def test_pyramid_reduce():
rows, cols, dim = image.shape
out = pyramid_reduce(image, factor=2)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_expand():
rows, cols, dim = image.shape
out = pyramid_expand(image, factor=2)
assert_array_equal(out.shape, (rows * 2, cols * 2, dim))
def test_build_gaussian_pyramid():
rows, cols, dim = image.shape
pyramid = build_gaussian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid():
rows, cols, dim = image.shape
pyramid = build_laplacian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
if __name__ == "__main__":
run_module_suite()
|
Add tests for pyramid functionsfrom numpy.testing import assert_array_equal, run_module_suite
from skimage import data
from skimage.transform import (pyramid_reduce, pyramid_expand,
build_gaussian_pyramid, build_laplacian_pyramid)
image = data.lena()
def test_pyramid_reduce():
rows, cols, dim = image.shape
out = pyramid_reduce(image, factor=2)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_expand():
rows, cols, dim = image.shape
out = pyramid_expand(image, factor=2)
assert_array_equal(out.shape, (rows * 2, cols * 2, dim))
def test_build_gaussian_pyramid():
rows, cols, dim = image.shape
pyramid = build_gaussian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid():
rows, cols, dim = image.shape
pyramid = build_laplacian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
if __name__ == "__main__":
run_module_suite()
|
<commit_before><commit_msg>Add tests for pyramid functions<commit_after>from numpy.testing import assert_array_equal, run_module_suite
from skimage import data
from skimage.transform import (pyramid_reduce, pyramid_expand,
build_gaussian_pyramid, build_laplacian_pyramid)
image = data.lena()
def test_pyramid_reduce():
rows, cols, dim = image.shape
out = pyramid_reduce(image, factor=2)
assert_array_equal(out.shape, (rows / 2, cols / 2, dim))
def test_pyramid_expand():
rows, cols, dim = image.shape
out = pyramid_expand(image, factor=2)
assert_array_equal(out.shape, (rows * 2, cols * 2, dim))
def test_build_gaussian_pyramid():
rows, cols, dim = image.shape
pyramid = build_gaussian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
def test_build_laplacian_pyramid():
rows, cols, dim = image.shape
pyramid = build_laplacian_pyramid(image, factor=2)
for layer, out in enumerate(pyramid):
layer_shape = (rows / 2 ** layer, cols / 2 ** layer, dim)
assert_array_equal(out.shape, layer_shape)
if __name__ == "__main__":
run_module_suite()
|
|
5fcf221ab22e182b009bb95156e9789219bf50f4
|
ideascube/conf/kb_usa_wmapache.py
|
ideascube/conf/kb_usa_wmapache.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'en'
IDEASCUBE_NAME = 'WHITE MOUNTAIN APACHE'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'wiktionary',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'khanacademy',
},
]
|
Add conf file for KoomBook USA Apache
|
Add conf file for KoomBook USA Apache
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for KoomBook USA Apache
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'en'
IDEASCUBE_NAME = 'WHITE MOUNTAIN APACHE'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'wiktionary',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'khanacademy',
},
]
|
<commit_before><commit_msg>Add conf file for KoomBook USA Apache<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'en'
IDEASCUBE_NAME = 'WHITE MOUNTAIN APACHE'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'wiktionary',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'khanacademy',
},
]
|
Add conf file for KoomBook USA Apache# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'en'
IDEASCUBE_NAME = 'WHITE MOUNTAIN APACHE'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'wiktionary',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'khanacademy',
},
]
|
<commit_before><commit_msg>Add conf file for KoomBook USA Apache<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'en'
IDEASCUBE_NAME = 'WHITE MOUNTAIN APACHE'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'wiktionary',
},
{
'id': 'ted',
'sessions': [
('tedbusiness.en', 'Business'),
('teddesign.en', 'Design'),
('tedentertainment.en', 'Entertainment'),
('tedglobalissues.en', 'Global Issues'),
('tedscience.en', 'Science'),
('tedtechnology.en', 'Technology'),
]
},
{
'id': 'khanacademy',
},
]
|
|
d3d45373acb04e4a6128874437092aabd55ceac9
|
scripts/emerge/automate_rfmake.py
|
scripts/emerge/automate_rfmake.py
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Usage:
python automate_rfmake.py -i /path/to/emerge_file -d /path/to/output
"""
import argparse
import os
import sys
def parse_input_file(filename):
"""
Read input data and standardise sequences and names.
Example input is provided in `example.tsv`.
"""
with open(filename, 'r') as f:
lines = f.readlines()
return [x.strip() for x in lines]
def run(args):
"""
* run rfmake
* check for overlaps
"""
for rna in parse_input_file(args.inputfile):
folder_name = os.path.join(args.destination, '%s' % rna)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
os.chdir(folder_name)
cmd = ('bsub -o {0}/lsf_rfmake_output.txt -e {0}/lsf_rfmake_error.txt -g /emerge '
'"cd {0} && '
'rfmake.pl -t 50 -a && cd .. && '
'rqc-overlap.pl {0}"').format(folder_name)
print cmd
os.system(cmd)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--destination', default=os.getcwd(), help='Specify folder where the output will be created')
parser.add_argument('-i', '--inputfile', default='example.tsv', help='Specify input file with names of folders where the analysis needs to be done')
args = parser.parse_args()
if not args.inputfile:
print 'Please specify input file'
sys.exit()
run(args)
|
Add a script for running rfmake on emerge data
|
Add a script for running rfmake on emerge data
|
Python
|
apache-2.0
|
Rfam/rfam-production,Rfam/rfam-production,Rfam/rfam-production
|
Add a script for running rfmake on emerge data
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Usage:
python automate_rfmake.py -i /path/to/emerge_file -d /path/to/output
"""
import argparse
import os
import sys
def parse_input_file(filename):
"""
Read input data and standardise sequences and names.
Example input is provided in `example.tsv`.
"""
with open(filename, 'r') as f:
lines = f.readlines()
return [x.strip() for x in lines]
def run(args):
"""
* run rfmake
* check for overlaps
"""
for rna in parse_input_file(args.inputfile):
folder_name = os.path.join(args.destination, '%s' % rna)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
os.chdir(folder_name)
cmd = ('bsub -o {0}/lsf_rfmake_output.txt -e {0}/lsf_rfmake_error.txt -g /emerge '
'"cd {0} && '
'rfmake.pl -t 50 -a && cd .. && '
'rqc-overlap.pl {0}"').format(folder_name)
print cmd
os.system(cmd)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--destination', default=os.getcwd(), help='Specify folder where the output will be created')
parser.add_argument('-i', '--inputfile', default='example.tsv', help='Specify input file with names of folders where the analysis needs to be done')
args = parser.parse_args()
if not args.inputfile:
print 'Please specify input file'
sys.exit()
run(args)
|
<commit_before><commit_msg>Add a script for running rfmake on emerge data<commit_after>
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Usage:
python automate_rfmake.py -i /path/to/emerge_file -d /path/to/output
"""
import argparse
import os
import sys
def parse_input_file(filename):
"""
Read input data and standardise sequences and names.
Example input is provided in `example.tsv`.
"""
with open(filename, 'r') as f:
lines = f.readlines()
return [x.strip() for x in lines]
def run(args):
"""
* run rfmake
* check for overlaps
"""
for rna in parse_input_file(args.inputfile):
folder_name = os.path.join(args.destination, '%s' % rna)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
os.chdir(folder_name)
cmd = ('bsub -o {0}/lsf_rfmake_output.txt -e {0}/lsf_rfmake_error.txt -g /emerge '
'"cd {0} && '
'rfmake.pl -t 50 -a && cd .. && '
'rqc-overlap.pl {0}"').format(folder_name)
print cmd
os.system(cmd)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--destination', default=os.getcwd(), help='Specify folder where the output will be created')
parser.add_argument('-i', '--inputfile', default='example.tsv', help='Specify input file with names of folders where the analysis needs to be done')
args = parser.parse_args()
if not args.inputfile:
print 'Please specify input file'
sys.exit()
run(args)
|
Add a script for running rfmake on emerge data"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Usage:
python automate_rfmake.py -i /path/to/emerge_file -d /path/to/output
"""
import argparse
import os
import sys
def parse_input_file(filename):
"""
Read input data and standardise sequences and names.
Example input is provided in `example.tsv`.
"""
with open(filename, 'r') as f:
lines = f.readlines()
return [x.strip() for x in lines]
def run(args):
"""
* run rfmake
* check for overlaps
"""
for rna in parse_input_file(args.inputfile):
folder_name = os.path.join(args.destination, '%s' % rna)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
os.chdir(folder_name)
cmd = ('bsub -o {0}/lsf_rfmake_output.txt -e {0}/lsf_rfmake_error.txt -g /emerge '
'"cd {0} && '
'rfmake.pl -t 50 -a && cd .. && '
'rqc-overlap.pl {0}"').format(folder_name)
print cmd
os.system(cmd)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--destination', default=os.getcwd(), help='Specify folder where the output will be created')
parser.add_argument('-i', '--inputfile', default='example.tsv', help='Specify input file with names of folders where the analysis needs to be done')
args = parser.parse_args()
if not args.inputfile:
print 'Please specify input file'
sys.exit()
run(args)
|
<commit_before><commit_msg>Add a script for running rfmake on emerge data<commit_after>"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Usage:
python automate_rfmake.py -i /path/to/emerge_file -d /path/to/output
"""
import argparse
import os
import sys
def parse_input_file(filename):
"""
Read input data and standardise sequences and names.
Example input is provided in `example.tsv`.
"""
with open(filename, 'r') as f:
lines = f.readlines()
return [x.strip() for x in lines]
def run(args):
"""
* run rfmake
* check for overlaps
"""
for rna in parse_input_file(args.inputfile):
folder_name = os.path.join(args.destination, '%s' % rna)
if not os.path.exists(folder_name):
os.mkdir(folder_name)
os.chdir(folder_name)
cmd = ('bsub -o {0}/lsf_rfmake_output.txt -e {0}/lsf_rfmake_error.txt -g /emerge '
'"cd {0} && '
'rfmake.pl -t 50 -a && cd .. && '
'rqc-overlap.pl {0}"').format(folder_name)
print cmd
os.system(cmd)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--destination', default=os.getcwd(), help='Specify folder where the output will be created')
parser.add_argument('-i', '--inputfile', default='example.tsv', help='Specify input file with names of folders where the analysis needs to be done')
args = parser.parse_args()
if not args.inputfile:
print 'Please specify input file'
sys.exit()
run(args)
|
|
462e4ca5c6d15e50c781d5c29a478df276543440
|
scripts/verify-locale-keys-usage.py
|
scripts/verify-locale-keys-usage.py
|
import json
import os
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [".".join(key)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
def read_file(path):
with open(path) as fd:
return fd.read()
def check_keyword(keyword, files_text):
for text in files_text:
if text.find(keyword) != -1:
return True
return False
def verify_keywords_usage():
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_files = []
for root, dirs, files in os.walk(os.path.join(ROOT_PATH, 'app')):
json_and_jade_files = list(filter(lambda x: x.endswith('.coffee') or x.endswith('.jade'), files))
json_and_jade_files = map(lambda x: os.path.join(root, x), json_and_jade_files)
all_files += json_and_jade_files
all_files_text = list(map(read_file, all_files))
for keyword in keywords(None, locales):
if not check_keyword(keyword, all_files_text):
print("Keyword unused: {}".format(keyword))
if __name__ == "__main__":
verify_keywords_usage()
|
Add verify locale keys usage script
|
Add verify locale keys usage script
|
Python
|
agpl-3.0
|
taigaio/taiga-front,taigaio/taiga-front,taigaio/taiga-front
|
Add verify locale keys usage script
|
import json
import os
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [".".join(key)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
def read_file(path):
with open(path) as fd:
return fd.read()
def check_keyword(keyword, files_text):
for text in files_text:
if text.find(keyword) != -1:
return True
return False
def verify_keywords_usage():
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_files = []
for root, dirs, files in os.walk(os.path.join(ROOT_PATH, 'app')):
json_and_jade_files = list(filter(lambda x: x.endswith('.coffee') or x.endswith('.jade'), files))
json_and_jade_files = map(lambda x: os.path.join(root, x), json_and_jade_files)
all_files += json_and_jade_files
all_files_text = list(map(read_file, all_files))
for keyword in keywords(None, locales):
if not check_keyword(keyword, all_files_text):
print("Keyword unused: {}".format(keyword))
if __name__ == "__main__":
verify_keywords_usage()
|
<commit_before><commit_msg>Add verify locale keys usage script<commit_after>
|
import json
import os
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [".".join(key)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
def read_file(path):
with open(path) as fd:
return fd.read()
def check_keyword(keyword, files_text):
for text in files_text:
if text.find(keyword) != -1:
return True
return False
def verify_keywords_usage():
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_files = []
for root, dirs, files in os.walk(os.path.join(ROOT_PATH, 'app')):
json_and_jade_files = list(filter(lambda x: x.endswith('.coffee') or x.endswith('.jade'), files))
json_and_jade_files = map(lambda x: os.path.join(root, x), json_and_jade_files)
all_files += json_and_jade_files
all_files_text = list(map(read_file, all_files))
for keyword in keywords(None, locales):
if not check_keyword(keyword, all_files_text):
print("Keyword unused: {}".format(keyword))
if __name__ == "__main__":
verify_keywords_usage()
|
Add verify locale keys usage scriptimport json
import os
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [".".join(key)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
def read_file(path):
with open(path) as fd:
return fd.read()
def check_keyword(keyword, files_text):
for text in files_text:
if text.find(keyword) != -1:
return True
return False
def verify_keywords_usage():
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_files = []
for root, dirs, files in os.walk(os.path.join(ROOT_PATH, 'app')):
json_and_jade_files = list(filter(lambda x: x.endswith('.coffee') or x.endswith('.jade'), files))
json_and_jade_files = map(lambda x: os.path.join(root, x), json_and_jade_files)
all_files += json_and_jade_files
all_files_text = list(map(read_file, all_files))
for keyword in keywords(None, locales):
if not check_keyword(keyword, all_files_text):
print("Keyword unused: {}".format(keyword))
if __name__ == "__main__":
verify_keywords_usage()
|
<commit_before><commit_msg>Add verify locale keys usage script<commit_after>import json
import os
ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
DEFAULT_LOCALE_PATH = os.path.join(ROOT_PATH, "app/locales/taiga/locale-en.json")
def keywords(key, value):
if key is not None and not isinstance(value, dict):
return [".".join(key)]
if key is not None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords(key+[item_key], value[item_key])
return kws
if key is None and isinstance(value, dict):
kws = []
for item_key in value.keys():
kws += keywords([item_key], value[item_key])
return kws
def read_file(path):
with open(path) as fd:
return fd.read()
def check_keyword(keyword, files_text):
for text in files_text:
if text.find(keyword) != -1:
return True
return False
def verify_keywords_usage():
locales = json.load(open(DEFAULT_LOCALE_PATH))
all_files = []
for root, dirs, files in os.walk(os.path.join(ROOT_PATH, 'app')):
json_and_jade_files = list(filter(lambda x: x.endswith('.coffee') or x.endswith('.jade'), files))
json_and_jade_files = map(lambda x: os.path.join(root, x), json_and_jade_files)
all_files += json_and_jade_files
all_files_text = list(map(read_file, all_files))
for keyword in keywords(None, locales):
if not check_keyword(keyword, all_files_text):
print("Keyword unused: {}".format(keyword))
if __name__ == "__main__":
verify_keywords_usage()
|
|
557350d511b04f85f46b0693fa5769ebd3468793
|
tests/test_js_bootstrapped.py
|
tests/test_js_bootstrapped.py
|
from validator.errorbundler import ErrorBundle
from validator.testcases import scripting
scripting.traverser.DEBUG = True
def _test(script):
err = ErrorBundle()
err.save_resource("em:bootstrap", "true")
scripting.test_js_file(err, "foo", script)
return err
def test_bootstrapped():
"Performs a test on a JS file"
methods = (
("nsICategoryManager", "addCategoryEntry()"),
("nsIObserverService", "addObserver()"),
("nsIResProtocolHandler", "setSubstitution('foo', 'bar')"),
("nsIStyleSheetService", "loadAndRegisterSheet()"),
("nsIStringBundleService", "createStringBundle()"),
("nsIWindowMediator", "registerNotification()"),
("nsIWindowWatcher", "addListener()"),
)
for method in methods:
assert _test("""
Components.classes[""]
.getService(Components.interfaces.%s)
.%s;
""" % method).failed()
assert not _test("""
Components.classes[""]
.getService(Components.interfaces.nsIResProtocolHandler)
.setSubstitution("foo", null);
""").failed()
def test_bootstrapped_componentmanager():
for method in ('autoRegister', 'registerFactory'):
assert _test("""
Components.manager.QueryInterface(Components.interfaces.nsIComponentRegistrar)
.%s();
""" % method).failed()
|
Add tests for (and fix a minor bug in) the last two commits.
|
Add tests for (and fix a minor bug in) the last two commits.
|
Python
|
bsd-3-clause
|
eviljeff/app-validator,eviljeff/app-validator,diox/amo-validator,muffinresearch/amo-validator,eviljeff/app-validator,kmaglione/amo-validator,kumar303/amo-validator,mstriemer/amo-validator,muffinresearch/amo-validator,mozilla/app-validator,eviljeff/app-validator,diox/amo-validator,stasm/app-validator,mattbasta/perfalator,mattbasta/amo-validator,magopian/amo-validator,wagnerand/amo-validator,kmaglione/amo-validator,diox/app-validator,stasm/app-validator,mozilla/app-validator,mstriemer/app-validator,diox/app-validator,stasm/app-validator,kumar303/amo-validator,mozilla/amo-validator,mstriemer/amo-validator,mstriemer/app-validator,muffinresearch/amo-validator,mattbasta/perfalator,mattbasta/perfalator,diox/amo-validator,mstriemer/amo-validator,wagnerand/amo-validator,mozilla/app-validator,mozilla/amo-validator,mstriemer/app-validator,mozilla/amo-validator,mattbasta/amo-validator,mattbasta/amo-validator,kumar303/amo-validator,magopian/amo-validator,magopian/amo-validator,diox/app-validator,stasm/app-validator,mozilla/amo-validator,magopian/amo-validator,kumar303/amo-validator,muffinresearch/amo-validator,kmaglione/amo-validator,mstriemer/amo-validator,wagnerand/amo-validator,wagnerand/amo-validator,diox/amo-validator,mozilla/app-validator,kmaglione/amo-validator,diox/app-validator
|
Add tests for (and fix a minor bug in) the last two commits.
|
from validator.errorbundler import ErrorBundle
from validator.testcases import scripting
scripting.traverser.DEBUG = True
def _test(script):
err = ErrorBundle()
err.save_resource("em:bootstrap", "true")
scripting.test_js_file(err, "foo", script)
return err
def test_bootstrapped():
"Performs a test on a JS file"
methods = (
("nsICategoryManager", "addCategoryEntry()"),
("nsIObserverService", "addObserver()"),
("nsIResProtocolHandler", "setSubstitution('foo', 'bar')"),
("nsIStyleSheetService", "loadAndRegisterSheet()"),
("nsIStringBundleService", "createStringBundle()"),
("nsIWindowMediator", "registerNotification()"),
("nsIWindowWatcher", "addListener()"),
)
for method in methods:
assert _test("""
Components.classes[""]
.getService(Components.interfaces.%s)
.%s;
""" % method).failed()
assert not _test("""
Components.classes[""]
.getService(Components.interfaces.nsIResProtocolHandler)
.setSubstitution("foo", null);
""").failed()
def test_bootstrapped_componentmanager():
for method in ('autoRegister', 'registerFactory'):
assert _test("""
Components.manager.QueryInterface(Components.interfaces.nsIComponentRegistrar)
.%s();
""" % method).failed()
|
<commit_before><commit_msg>Add tests for (and fix a minor bug in) the last two commits.<commit_after>
|
from validator.errorbundler import ErrorBundle
from validator.testcases import scripting
scripting.traverser.DEBUG = True
def _test(script):
err = ErrorBundle()
err.save_resource("em:bootstrap", "true")
scripting.test_js_file(err, "foo", script)
return err
def test_bootstrapped():
"Performs a test on a JS file"
methods = (
("nsICategoryManager", "addCategoryEntry()"),
("nsIObserverService", "addObserver()"),
("nsIResProtocolHandler", "setSubstitution('foo', 'bar')"),
("nsIStyleSheetService", "loadAndRegisterSheet()"),
("nsIStringBundleService", "createStringBundle()"),
("nsIWindowMediator", "registerNotification()"),
("nsIWindowWatcher", "addListener()"),
)
for method in methods:
assert _test("""
Components.classes[""]
.getService(Components.interfaces.%s)
.%s;
""" % method).failed()
assert not _test("""
Components.classes[""]
.getService(Components.interfaces.nsIResProtocolHandler)
.setSubstitution("foo", null);
""").failed()
def test_bootstrapped_componentmanager():
for method in ('autoRegister', 'registerFactory'):
assert _test("""
Components.manager.QueryInterface(Components.interfaces.nsIComponentRegistrar)
.%s();
""" % method).failed()
|
Add tests for (and fix a minor bug in) the last two commits.from validator.errorbundler import ErrorBundle
from validator.testcases import scripting
scripting.traverser.DEBUG = True
def _test(script):
err = ErrorBundle()
err.save_resource("em:bootstrap", "true")
scripting.test_js_file(err, "foo", script)
return err
def test_bootstrapped():
"Performs a test on a JS file"
methods = (
("nsICategoryManager", "addCategoryEntry()"),
("nsIObserverService", "addObserver()"),
("nsIResProtocolHandler", "setSubstitution('foo', 'bar')"),
("nsIStyleSheetService", "loadAndRegisterSheet()"),
("nsIStringBundleService", "createStringBundle()"),
("nsIWindowMediator", "registerNotification()"),
("nsIWindowWatcher", "addListener()"),
)
for method in methods:
assert _test("""
Components.classes[""]
.getService(Components.interfaces.%s)
.%s;
""" % method).failed()
assert not _test("""
Components.classes[""]
.getService(Components.interfaces.nsIResProtocolHandler)
.setSubstitution("foo", null);
""").failed()
def test_bootstrapped_componentmanager():
for method in ('autoRegister', 'registerFactory'):
assert _test("""
Components.manager.QueryInterface(Components.interfaces.nsIComponentRegistrar)
.%s();
""" % method).failed()
|
<commit_before><commit_msg>Add tests for (and fix a minor bug in) the last two commits.<commit_after>from validator.errorbundler import ErrorBundle
from validator.testcases import scripting
scripting.traverser.DEBUG = True
def _test(script):
err = ErrorBundle()
err.save_resource("em:bootstrap", "true")
scripting.test_js_file(err, "foo", script)
return err
def test_bootstrapped():
"Performs a test on a JS file"
methods = (
("nsICategoryManager", "addCategoryEntry()"),
("nsIObserverService", "addObserver()"),
("nsIResProtocolHandler", "setSubstitution('foo', 'bar')"),
("nsIStyleSheetService", "loadAndRegisterSheet()"),
("nsIStringBundleService", "createStringBundle()"),
("nsIWindowMediator", "registerNotification()"),
("nsIWindowWatcher", "addListener()"),
)
for method in methods:
assert _test("""
Components.classes[""]
.getService(Components.interfaces.%s)
.%s;
""" % method).failed()
assert not _test("""
Components.classes[""]
.getService(Components.interfaces.nsIResProtocolHandler)
.setSubstitution("foo", null);
""").failed()
def test_bootstrapped_componentmanager():
for method in ('autoRegister', 'registerFactory'):
assert _test("""
Components.manager.QueryInterface(Components.interfaces.nsIComponentRegistrar)
.%s();
""" % method).failed()
|
|
dcdf0247ff1612ef257a5ab49fe54c7f56c19ea3
|
tests/data_checks/test_gold_standards.py
|
tests/data_checks/test_gold_standards.py
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
# TODO: Expected (target, disease) pairs from experts
EXPECTED_ASSOCIATIONS = [
('ENSG00001', 'EFO_00001'),
('ENSG00002', 'EFO_00001')
]
class TestPostgapGoldStandards(TestPostgapBase):
def test_each_expected_association_is_present(self):
self.skipTest('CHECK ALL GOLD STANDARD EXPECTED ASSOCIATIONS')
if __name__ == '__main__':
unittest.main()
|
Add placeholder for gold standard
|
Add placeholder for gold standard
|
Python
|
apache-2.0
|
Ensembl/cttv024,Ensembl/cttv024
|
Add placeholder for gold standard
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
# TODO: Expected (target, disease) pairs from experts
EXPECTED_ASSOCIATIONS = [
('ENSG00001', 'EFO_00001'),
('ENSG00002', 'EFO_00001')
]
class TestPostgapGoldStandards(TestPostgapBase):
def test_each_expected_association_is_present(self):
self.skipTest('CHECK ALL GOLD STANDARD EXPECTED ASSOCIATIONS')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add placeholder for gold standard<commit_after>
|
# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
# TODO: Expected (target, disease) pairs from experts
EXPECTED_ASSOCIATIONS = [
('ENSG00001', 'EFO_00001'),
('ENSG00002', 'EFO_00001')
]
class TestPostgapGoldStandards(TestPostgapBase):
def test_each_expected_association_is_present(self):
self.skipTest('CHECK ALL GOLD STANDARD EXPECTED ASSOCIATIONS')
if __name__ == '__main__':
unittest.main()
|
Add placeholder for gold standard# ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
# TODO: Expected (target, disease) pairs from experts
EXPECTED_ASSOCIATIONS = [
('ENSG00001', 'EFO_00001'),
('ENSG00002', 'EFO_00001')
]
class TestPostgapGoldStandards(TestPostgapBase):
def test_each_expected_association_is_present(self):
self.skipTest('CHECK ALL GOLD STANDARD EXPECTED ASSOCIATIONS')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add placeholder for gold standard<commit_after># ------------------------------------------------
# built-ins
import unittest
# local
from utils.base import TestPostgapBase
# ------------------------------------------------
# TODO: Expected (target, disease) pairs from experts
EXPECTED_ASSOCIATIONS = [
('ENSG00001', 'EFO_00001'),
('ENSG00002', 'EFO_00001')
]
class TestPostgapGoldStandards(TestPostgapBase):
def test_each_expected_association_is_present(self):
self.skipTest('CHECK ALL GOLD STANDARD EXPECTED ASSOCIATIONS')
if __name__ == '__main__':
unittest.main()
|
|
31da92f943c8db6b81c41bbca49f461250b9ae83
|
tests/server/blueprints/phenotypes/test_server_phenotypes.py
|
tests/server/blueprints/phenotypes/test_server_phenotypes.py
|
# -*- coding: utf-8 -*-
from flask import url_for
def test_phenotypes(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the phenotypes page
resp = client.get(url_for('phenotypes.hpo_terms'))
# THEN it should return a page
assert resp.status_code == 200
#def test_search_phenotypes(app, real_variant_database):
# GIVEN an initialized app
# GIVEN a valid user and institute
# adapter = real_variant_database
# with app.test_client() as client:
# GIVEN that the user could be logged in
# resp = client.get(url_for('auto_login'))
# assert resp.status_code == 200
# GIVEN an HPO-term in the database
# hpo_term = adapter.hpo_term_collection.find_one()
# assert hpo_term
# hpo_query = hpo_term['hpo_id']
# assert hpo_query
# WHEN searching the phenotypes page
# resp = client.post(url_for('phenotypes.hpo_terms', hpo_term=hpo_query))
# THEN it should return a page
# assert resp.status_code == 200
# that contains the search term
# assert hpo_term.encode() in resp.data
|
Add test for phenotypes page.
|
Add test for phenotypes page.
|
Python
|
bsd-3-clause
|
Clinical-Genomics/scout,Clinical-Genomics/scout,Clinical-Genomics/scout
|
Add test for phenotypes page.
|
# -*- coding: utf-8 -*-
from flask import url_for
def test_phenotypes(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the phenotypes page
resp = client.get(url_for('phenotypes.hpo_terms'))
# THEN it should return a page
assert resp.status_code == 200
#def test_search_phenotypes(app, real_variant_database):
# GIVEN an initialized app
# GIVEN a valid user and institute
# adapter = real_variant_database
# with app.test_client() as client:
# GIVEN that the user could be logged in
# resp = client.get(url_for('auto_login'))
# assert resp.status_code == 200
# GIVEN an HPO-term in the database
# hpo_term = adapter.hpo_term_collection.find_one()
# assert hpo_term
# hpo_query = hpo_term['hpo_id']
# assert hpo_query
# WHEN searching the phenotypes page
# resp = client.post(url_for('phenotypes.hpo_terms', hpo_term=hpo_query))
# THEN it should return a page
# assert resp.status_code == 200
# that contains the search term
# assert hpo_term.encode() in resp.data
|
<commit_before><commit_msg>Add test for phenotypes page.<commit_after>
|
# -*- coding: utf-8 -*-
from flask import url_for
def test_phenotypes(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the phenotypes page
resp = client.get(url_for('phenotypes.hpo_terms'))
# THEN it should return a page
assert resp.status_code == 200
#def test_search_phenotypes(app, real_variant_database):
# GIVEN an initialized app
# GIVEN a valid user and institute
# adapter = real_variant_database
# with app.test_client() as client:
# GIVEN that the user could be logged in
# resp = client.get(url_for('auto_login'))
# assert resp.status_code == 200
# GIVEN an HPO-term in the database
# hpo_term = adapter.hpo_term_collection.find_one()
# assert hpo_term
# hpo_query = hpo_term['hpo_id']
# assert hpo_query
# WHEN searching the phenotypes page
# resp = client.post(url_for('phenotypes.hpo_terms', hpo_term=hpo_query))
# THEN it should return a page
# assert resp.status_code == 200
# that contains the search term
# assert hpo_term.encode() in resp.data
|
Add test for phenotypes page.# -*- coding: utf-8 -*-
from flask import url_for
def test_phenotypes(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the phenotypes page
resp = client.get(url_for('phenotypes.hpo_terms'))
# THEN it should return a page
assert resp.status_code == 200
#def test_search_phenotypes(app, real_variant_database):
# GIVEN an initialized app
# GIVEN a valid user and institute
# adapter = real_variant_database
# with app.test_client() as client:
# GIVEN that the user could be logged in
# resp = client.get(url_for('auto_login'))
# assert resp.status_code == 200
# GIVEN an HPO-term in the database
# hpo_term = adapter.hpo_term_collection.find_one()
# assert hpo_term
# hpo_query = hpo_term['hpo_id']
# assert hpo_query
# WHEN searching the phenotypes page
# resp = client.post(url_for('phenotypes.hpo_terms', hpo_term=hpo_query))
# THEN it should return a page
# assert resp.status_code == 200
# that contains the search term
# assert hpo_term.encode() in resp.data
|
<commit_before><commit_msg>Add test for phenotypes page.<commit_after># -*- coding: utf-8 -*-
from flask import url_for
def test_phenotypes(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the phenotypes page
resp = client.get(url_for('phenotypes.hpo_terms'))
# THEN it should return a page
assert resp.status_code == 200
#def test_search_phenotypes(app, real_variant_database):
# GIVEN an initialized app
# GIVEN a valid user and institute
# adapter = real_variant_database
# with app.test_client() as client:
# GIVEN that the user could be logged in
# resp = client.get(url_for('auto_login'))
# assert resp.status_code == 200
# GIVEN an HPO-term in the database
# hpo_term = adapter.hpo_term_collection.find_one()
# assert hpo_term
# hpo_query = hpo_term['hpo_id']
# assert hpo_query
# WHEN searching the phenotypes page
# resp = client.post(url_for('phenotypes.hpo_terms', hpo_term=hpo_query))
# THEN it should return a page
# assert resp.status_code == 200
# that contains the search term
# assert hpo_term.encode() in resp.data
|
|
90c3f568b1179e4c5da9f8349f909d25e5221f10
|
tests/integration/modules/test_win_ip.py
|
tests/integration/modules/test_win_ip.py
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import re
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows test only')
class WinIPTest(ModuleCase):
'''
Tests for salt.modules.win_ip
'''
def test_get_default_gateway(self):
'''
Test getting default gateway
'''
ip = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
ret = self.run_function('ip.get_default_gateway')
assert ip.match(ret)
def test_ip_is_enabled(self):
'''
Test ip.is_enabled
'''
assert self.run_function('ip.is_enabled', ['Ethernet'])
assert 'not found' in self.run_function('ip.is_enabled', ['doesnotexist'])
|
Add windows ip module integration tests
|
Add windows ip module integration tests
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Add windows ip module integration tests
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import re
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows test only')
class WinIPTest(ModuleCase):
'''
Tests for salt.modules.win_ip
'''
def test_get_default_gateway(self):
'''
Test getting default gateway
'''
ip = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
ret = self.run_function('ip.get_default_gateway')
assert ip.match(ret)
def test_ip_is_enabled(self):
'''
Test ip.is_enabled
'''
assert self.run_function('ip.is_enabled', ['Ethernet'])
assert 'not found' in self.run_function('ip.is_enabled', ['doesnotexist'])
|
<commit_before><commit_msg>Add windows ip module integration tests<commit_after>
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import re
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows test only')
class WinIPTest(ModuleCase):
'''
Tests for salt.modules.win_ip
'''
def test_get_default_gateway(self):
'''
Test getting default gateway
'''
ip = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
ret = self.run_function('ip.get_default_gateway')
assert ip.match(ret)
def test_ip_is_enabled(self):
'''
Test ip.is_enabled
'''
assert self.run_function('ip.is_enabled', ['Ethernet'])
assert 'not found' in self.run_function('ip.is_enabled', ['doesnotexist'])
|
Add windows ip module integration tests# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import re
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows test only')
class WinIPTest(ModuleCase):
'''
Tests for salt.modules.win_ip
'''
def test_get_default_gateway(self):
'''
Test getting default gateway
'''
ip = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
ret = self.run_function('ip.get_default_gateway')
assert ip.match(ret)
def test_ip_is_enabled(self):
'''
Test ip.is_enabled
'''
assert self.run_function('ip.is_enabled', ['Ethernet'])
assert 'not found' in self.run_function('ip.is_enabled', ['doesnotexist'])
|
<commit_before><commit_msg>Add windows ip module integration tests<commit_after># -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
import re
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.unit import skipIf
# Import Salt libs
import salt.utils
@skipIf(not salt.utils.is_windows(), 'windows test only')
class WinIPTest(ModuleCase):
'''
Tests for salt.modules.win_ip
'''
def test_get_default_gateway(self):
'''
Test getting default gateway
'''
ip = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$')
ret = self.run_function('ip.get_default_gateway')
assert ip.match(ret)
def test_ip_is_enabled(self):
'''
Test ip.is_enabled
'''
assert self.run_function('ip.is_enabled', ['Ethernet'])
assert 'not found' in self.run_function('ip.is_enabled', ['doesnotexist'])
|
|
faa4f2f26b051dc9946d9ee1d3ca209aebdd8880
|
p3/templatetags/accepted_talks.py
|
p3/templatetags/accepted_talks.py
|
from __future__ import unicode_literals
from django import template
from conference import models
from ..utils import profile_url, talk_title
register = template.Library()
# These must match the talk .type or .admin_type
TYPE_NAMES = (
('k', 'Keynotes', ''),
('t', 'Talks', ''),
('r', 'Training sessions', ''),
('p', 'Poster sessions', ''),
('i', 'Interactive sessions', ''),
('n', 'Panels', ''),
('h', 'Help desks', (
'Help desks provide slots for attendees to discuss '
'their problems one-on-one with experts from the projects.'
)),
('m', 'EuroPython sessions', (
'The EuroPython sessions are intended for anyone interested '
'in helping with the EuroPython organization in the coming years.'
)),
)
def _check_talk_types(type_names):
d = set(x[0] for x in type_names)
for code, entry in models.TALK_TYPE:
assert code[0] in d, 'Talk type code %r is missing' % code[0]
def speaker_listing(talk):
return [{
'url': profile_url(speaker.user),
'fullname': '{} {}'.format(speaker.user.first_name, speaker.user.last_name),
} for speaker in talk.get_all_speakers()]
@register.assignment_tag
def get_accepted_talks(conference):
_check_talk_types(TYPE_NAMES)
talks = models.Talk.objects.filter(
conference=conference, status='accepted')
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if (admin_type == 'm' or 'EPS' in talk.title or
'EuroPython 20' in talk.title):
type = 'm'
elif (admin_type == 'k' or talk.title.lower().startswith('keynote')):
type = 'k'
elif admin_type in ('x', 'o', 'c', 'l', 'r', 's', 'e'):
# Don't list these placeholders or plenary sessions
# used in the schedule
continue
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
output = {}
for type, type_name, description in TYPE_NAMES:
bag = talk_types.get(type, [])
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
output[type] = {
'type': type_name,
'talks': [{
'title': talk_title(talk),
'url': talk.get_absolute_url(),
'speakers': speaker_listing(talk),
'talk': talk,
} for talk in bag]
}
return output
|
Add basic accepted talks template tag
|
Add basic accepted talks template tag
|
Python
|
bsd-2-clause
|
artcz/epcon,EuroPython/epcon,artcz/epcon,EuroPython/epcon,malemburg/epcon,artcz/epcon,matrixise/epcon,EuroPython/epcon,malemburg/epcon,matrixise/epcon,malemburg/epcon,artcz/epcon,malemburg/epcon,artcz/epcon,matrixise/epcon,EuroPython/epcon,matrixise/epcon,matrixise/epcon,artcz/epcon,matrixise/epcon,malemburg/epcon,malemburg/epcon
|
Add basic accepted talks template tag
|
from __future__ import unicode_literals
from django import template
from conference import models
from ..utils import profile_url, talk_title
register = template.Library()
# These must match the talk .type or .admin_type
TYPE_NAMES = (
('k', 'Keynotes', ''),
('t', 'Talks', ''),
('r', 'Training sessions', ''),
('p', 'Poster sessions', ''),
('i', 'Interactive sessions', ''),
('n', 'Panels', ''),
('h', 'Help desks', (
'Help desks provide slots for attendees to discuss '
'their problems one-on-one with experts from the projects.'
)),
('m', 'EuroPython sessions', (
'The EuroPython sessions are intended for anyone interested '
'in helping with the EuroPython organization in the coming years.'
)),
)
def _check_talk_types(type_names):
d = set(x[0] for x in type_names)
for code, entry in models.TALK_TYPE:
assert code[0] in d, 'Talk type code %r is missing' % code[0]
def speaker_listing(talk):
return [{
'url': profile_url(speaker.user),
'fullname': '{} {}'.format(speaker.user.first_name, speaker.user.last_name),
} for speaker in talk.get_all_speakers()]
@register.assignment_tag
def get_accepted_talks(conference):
_check_talk_types(TYPE_NAMES)
talks = models.Talk.objects.filter(
conference=conference, status='accepted')
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if (admin_type == 'm' or 'EPS' in talk.title or
'EuroPython 20' in talk.title):
type = 'm'
elif (admin_type == 'k' or talk.title.lower().startswith('keynote')):
type = 'k'
elif admin_type in ('x', 'o', 'c', 'l', 'r', 's', 'e'):
# Don't list these placeholders or plenary sessions
# used in the schedule
continue
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
output = {}
for type, type_name, description in TYPE_NAMES:
bag = talk_types.get(type, [])
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
output[type] = {
'type': type_name,
'talks': [{
'title': talk_title(talk),
'url': talk.get_absolute_url(),
'speakers': speaker_listing(talk),
'talk': talk,
} for talk in bag]
}
return output
|
<commit_before><commit_msg>Add basic accepted talks template tag<commit_after>
|
from __future__ import unicode_literals
from django import template
from conference import models
from ..utils import profile_url, talk_title
register = template.Library()
# These must match the talk .type or .admin_type
TYPE_NAMES = (
('k', 'Keynotes', ''),
('t', 'Talks', ''),
('r', 'Training sessions', ''),
('p', 'Poster sessions', ''),
('i', 'Interactive sessions', ''),
('n', 'Panels', ''),
('h', 'Help desks', (
'Help desks provide slots for attendees to discuss '
'their problems one-on-one with experts from the projects.'
)),
('m', 'EuroPython sessions', (
'The EuroPython sessions are intended for anyone interested '
'in helping with the EuroPython organization in the coming years.'
)),
)
def _check_talk_types(type_names):
d = set(x[0] for x in type_names)
for code, entry in models.TALK_TYPE:
assert code[0] in d, 'Talk type code %r is missing' % code[0]
def speaker_listing(talk):
return [{
'url': profile_url(speaker.user),
'fullname': '{} {}'.format(speaker.user.first_name, speaker.user.last_name),
} for speaker in talk.get_all_speakers()]
@register.assignment_tag
def get_accepted_talks(conference):
_check_talk_types(TYPE_NAMES)
talks = models.Talk.objects.filter(
conference=conference, status='accepted')
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if (admin_type == 'm' or 'EPS' in talk.title or
'EuroPython 20' in talk.title):
type = 'm'
elif (admin_type == 'k' or talk.title.lower().startswith('keynote')):
type = 'k'
elif admin_type in ('x', 'o', 'c', 'l', 'r', 's', 'e'):
# Don't list these placeholders or plenary sessions
# used in the schedule
continue
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
output = {}
for type, type_name, description in TYPE_NAMES:
bag = talk_types.get(type, [])
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
output[type] = {
'type': type_name,
'talks': [{
'title': talk_title(talk),
'url': talk.get_absolute_url(),
'speakers': speaker_listing(talk),
'talk': talk,
} for talk in bag]
}
return output
|
Add basic accepted talks template tagfrom __future__ import unicode_literals
from django import template
from conference import models
from ..utils import profile_url, talk_title
register = template.Library()
# These must match the talk .type or .admin_type
TYPE_NAMES = (
('k', 'Keynotes', ''),
('t', 'Talks', ''),
('r', 'Training sessions', ''),
('p', 'Poster sessions', ''),
('i', 'Interactive sessions', ''),
('n', 'Panels', ''),
('h', 'Help desks', (
'Help desks provide slots for attendees to discuss '
'their problems one-on-one with experts from the projects.'
)),
('m', 'EuroPython sessions', (
'The EuroPython sessions are intended for anyone interested '
'in helping with the EuroPython organization in the coming years.'
)),
)
def _check_talk_types(type_names):
d = set(x[0] for x in type_names)
for code, entry in models.TALK_TYPE:
assert code[0] in d, 'Talk type code %r is missing' % code[0]
def speaker_listing(talk):
return [{
'url': profile_url(speaker.user),
'fullname': '{} {}'.format(speaker.user.first_name, speaker.user.last_name),
} for speaker in talk.get_all_speakers()]
@register.assignment_tag
def get_accepted_talks(conference):
_check_talk_types(TYPE_NAMES)
talks = models.Talk.objects.filter(
conference=conference, status='accepted')
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if (admin_type == 'm' or 'EPS' in talk.title or
'EuroPython 20' in talk.title):
type = 'm'
elif (admin_type == 'k' or talk.title.lower().startswith('keynote')):
type = 'k'
elif admin_type in ('x', 'o', 'c', 'l', 'r', 's', 'e'):
# Don't list these placeholders or plenary sessions
# used in the schedule
continue
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
output = {}
for type, type_name, description in TYPE_NAMES:
bag = talk_types.get(type, [])
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
output[type] = {
'type': type_name,
'talks': [{
'title': talk_title(talk),
'url': talk.get_absolute_url(),
'speakers': speaker_listing(talk),
'talk': talk,
} for talk in bag]
}
return output
|
<commit_before><commit_msg>Add basic accepted talks template tag<commit_after>from __future__ import unicode_literals
from django import template
from conference import models
from ..utils import profile_url, talk_title
register = template.Library()
# These must match the talk .type or .admin_type
TYPE_NAMES = (
('k', 'Keynotes', ''),
('t', 'Talks', ''),
('r', 'Training sessions', ''),
('p', 'Poster sessions', ''),
('i', 'Interactive sessions', ''),
('n', 'Panels', ''),
('h', 'Help desks', (
'Help desks provide slots for attendees to discuss '
'their problems one-on-one with experts from the projects.'
)),
('m', 'EuroPython sessions', (
'The EuroPython sessions are intended for anyone interested '
'in helping with the EuroPython organization in the coming years.'
)),
)
def _check_talk_types(type_names):
d = set(x[0] for x in type_names)
for code, entry in models.TALK_TYPE:
assert code[0] in d, 'Talk type code %r is missing' % code[0]
def speaker_listing(talk):
return [{
'url': profile_url(speaker.user),
'fullname': '{} {}'.format(speaker.user.first_name, speaker.user.last_name),
} for speaker in talk.get_all_speakers()]
@register.assignment_tag
def get_accepted_talks(conference):
_check_talk_types(TYPE_NAMES)
talks = models.Talk.objects.filter(
conference=conference, status='accepted')
# Group by types
talk_types = {}
for talk in talks:
talk_type = talk.type[:1]
admin_type = talk.admin_type[:1]
if (admin_type == 'm' or 'EPS' in talk.title or
'EuroPython 20' in talk.title):
type = 'm'
elif (admin_type == 'k' or talk.title.lower().startswith('keynote')):
type = 'k'
elif admin_type in ('x', 'o', 'c', 'l', 'r', 's', 'e'):
# Don't list these placeholders or plenary sessions
# used in the schedule
continue
else:
type = talk_type
if type in talk_types:
talk_types[type].append(talk)
else:
talk_types[type] = [talk]
output = {}
for type, type_name, description in TYPE_NAMES:
bag = talk_types.get(type, [])
# Sort by talk title using title case
bag.sort(key=lambda talk: talk_title(talk).title())
output[type] = {
'type': type_name,
'talks': [{
'title': talk_title(talk),
'url': talk.get_absolute_url(),
'speakers': speaker_listing(talk),
'talk': talk,
} for talk in bag]
}
return output
|
|
1740e3550ecd73c296a5adf90383ebb7c6c1fcf2
|
katagawa/engine/__init__.py
|
katagawa/engine/__init__.py
|
"""
Katagawa engines are the actual SQL connections behind the scenes. They actually emit the raw SQL to the database
server, and return the results produced.
"""
import importlib
import dsnparse
import logging
from katagawa.engine.base import BaseEngine
BASE_PATH = "katagawa.engine.backends"
logger = logging.getLogger("Katagawa.engine")
def create_engine(dsn: str, **kwargs) -> BaseEngine:
"""
Creates an engine from the specified DSN.
:param dsn: The DSN to use.
:return: A new :class:`katagawa.engine.base.BaseEngine` that was created from the specified DSN.
"""
parsed = dsnparse.parse(dsn)
# Get the DB type and the name of the driver, if applicable.
db = parsed.schemes[0]
try:
driver = parsed.schemes[1]
except IndexError:
driver = None
if driver is None:
# Import the backend to get the default driver.
mod = importlib.import_module(BASE_PATH + ".{}".format(db))
driver = getattr(mod, "__DEFAULT__")
# Import the driver class.
path = BASE_PATH + ".{}.{}".format(db, driver)
logger.debug("Attempting to load database engine {}".format(path))
# This will raise an ImportError/ModuleNotFoundError (3.6+) if it failed to import, so we don't care about error
# handling here.
imported = importlib.import_module(path)
# Find a class that is a subclass of BaseEngine, and has the same `__module__` as the imported name.
for i in dir(imported):
item = getattr(imported, i)
if issubclass(item, BaseEngine):
if item.__module__ == path:
break
else:
raise ImportError("Failed to load specified driver")
# Initialize a new instance of the engine.
engine = item(dsn, **kwargs)
return engine
|
Add a `create_engine` function which creates an engine from a DSN.
|
Add a `create_engine` function which creates an engine from a DSN.
|
Python
|
mit
|
SunDwarf/asyncqlio
|
Add a `create_engine` function which creates an engine from a DSN.
|
"""
Katagawa engines are the actual SQL connections behind the scenes. They actually emit the raw SQL to the database
server, and return the results produced.
"""
import importlib
import dsnparse
import logging
from katagawa.engine.base import BaseEngine
BASE_PATH = "katagawa.engine.backends"
logger = logging.getLogger("Katagawa.engine")
def create_engine(dsn: str, **kwargs) -> BaseEngine:
"""
Creates an engine from the specified DSN.
:param dsn: The DSN to use.
:return: A new :class:`katagawa.engine.base.BaseEngine` that was created from the specified DSN.
"""
parsed = dsnparse.parse(dsn)
# Get the DB type and the name of the driver, if applicable.
db = parsed.schemes[0]
try:
driver = parsed.schemes[1]
except IndexError:
driver = None
if driver is None:
# Import the backend to get the default driver.
mod = importlib.import_module(BASE_PATH + ".{}".format(db))
driver = getattr(mod, "__DEFAULT__")
# Import the driver class.
path = BASE_PATH + ".{}.{}".format(db, driver)
logger.debug("Attempting to load database engine {}".format(path))
# This will raise an ImportError/ModuleNotFoundError (3.6+) if it failed to import, so we don't care about error
# handling here.
imported = importlib.import_module(path)
# Find a class that is a subclass of BaseEngine, and has the same `__module__` as the imported name.
for i in dir(imported):
item = getattr(imported, i)
if issubclass(item, BaseEngine):
if item.__module__ == path:
break
else:
raise ImportError("Failed to load specified driver")
# Initialize a new instance of the engine.
engine = item(dsn, **kwargs)
return engine
|
<commit_before><commit_msg>Add a `create_engine` function which creates an engine from a DSN.<commit_after>
|
"""
Katagawa engines are the actual SQL connections behind the scenes. They actually emit the raw SQL to the database
server, and return the results produced.
"""
import importlib
import dsnparse
import logging
from katagawa.engine.base import BaseEngine
BASE_PATH = "katagawa.engine.backends"
logger = logging.getLogger("Katagawa.engine")
def create_engine(dsn: str, **kwargs) -> BaseEngine:
"""
Creates an engine from the specified DSN.
:param dsn: The DSN to use.
:return: A new :class:`katagawa.engine.base.BaseEngine` that was created from the specified DSN.
"""
parsed = dsnparse.parse(dsn)
# Get the DB type and the name of the driver, if applicable.
db = parsed.schemes[0]
try:
driver = parsed.schemes[1]
except IndexError:
driver = None
if driver is None:
# Import the backend to get the default driver.
mod = importlib.import_module(BASE_PATH + ".{}".format(db))
driver = getattr(mod, "__DEFAULT__")
# Import the driver class.
path = BASE_PATH + ".{}.{}".format(db, driver)
logger.debug("Attempting to load database engine {}".format(path))
# This will raise an ImportError/ModuleNotFoundError (3.6+) if it failed to import, so we don't care about error
# handling here.
imported = importlib.import_module(path)
# Find a class that is a subclass of BaseEngine, and has the same `__module__` as the imported name.
for i in dir(imported):
item = getattr(imported, i)
if issubclass(item, BaseEngine):
if item.__module__ == path:
break
else:
raise ImportError("Failed to load specified driver")
# Initialize a new instance of the engine.
engine = item(dsn, **kwargs)
return engine
|
Add a `create_engine` function which creates an engine from a DSN."""
Katagawa engines are the actual SQL connections behind the scenes. They actually emit the raw SQL to the database
server, and return the results produced.
"""
import importlib
import dsnparse
import logging
from katagawa.engine.base import BaseEngine
BASE_PATH = "katagawa.engine.backends"
logger = logging.getLogger("Katagawa.engine")
def create_engine(dsn: str, **kwargs) -> BaseEngine:
"""
Creates an engine from the specified DSN.
:param dsn: The DSN to use.
:return: A new :class:`katagawa.engine.base.BaseEngine` that was created from the specified DSN.
"""
parsed = dsnparse.parse(dsn)
# Get the DB type and the name of the driver, if applicable.
db = parsed.schemes[0]
try:
driver = parsed.schemes[1]
except IndexError:
driver = None
if driver is None:
# Import the backend to get the default driver.
mod = importlib.import_module(BASE_PATH + ".{}".format(db))
driver = getattr(mod, "__DEFAULT__")
# Import the driver class.
path = BASE_PATH + ".{}.{}".format(db, driver)
logger.debug("Attempting to load database engine {}".format(path))
# This will raise an ImportError/ModuleNotFoundError (3.6+) if it failed to import, so we don't care about error
# handling here.
imported = importlib.import_module(path)
# Find a class that is a subclass of BaseEngine, and has the same `__module__` as the imported name.
for i in dir(imported):
item = getattr(imported, i)
if issubclass(item, BaseEngine):
if item.__module__ == path:
break
else:
raise ImportError("Failed to load specified driver")
# Initialize a new instance of the engine.
engine = item(dsn, **kwargs)
return engine
|
<commit_before><commit_msg>Add a `create_engine` function which creates an engine from a DSN.<commit_after>"""
Katagawa engines are the actual SQL connections behind the scenes. They actually emit the raw SQL to the database
server, and return the results produced.
"""
import importlib
import dsnparse
import logging
from katagawa.engine.base import BaseEngine
BASE_PATH = "katagawa.engine.backends"
logger = logging.getLogger("Katagawa.engine")
def create_engine(dsn: str, **kwargs) -> BaseEngine:
"""
Creates an engine from the specified DSN.
:param dsn: The DSN to use.
:return: A new :class:`katagawa.engine.base.BaseEngine` that was created from the specified DSN.
"""
parsed = dsnparse.parse(dsn)
# Get the DB type and the name of the driver, if applicable.
db = parsed.schemes[0]
try:
driver = parsed.schemes[1]
except IndexError:
driver = None
if driver is None:
# Import the backend to get the default driver.
mod = importlib.import_module(BASE_PATH + ".{}".format(db))
driver = getattr(mod, "__DEFAULT__")
# Import the driver class.
path = BASE_PATH + ".{}.{}".format(db, driver)
logger.debug("Attempting to load database engine {}".format(path))
# This will raise an ImportError/ModuleNotFoundError (3.6+) if it failed to import, so we don't care about error
# handling here.
imported = importlib.import_module(path)
# Find a class that is a subclass of BaseEngine, and has the same `__module__` as the imported name.
for i in dir(imported):
item = getattr(imported, i)
if issubclass(item, BaseEngine):
if item.__module__ == path:
break
else:
raise ImportError("Failed to load specified driver")
# Initialize a new instance of the engine.
engine = item(dsn, **kwargs)
return engine
|
|
3ea5e8956987b2c7ff0830186ca601118693f3d1
|
benchmarks/legendre1_sage.py
|
benchmarks/legendre1_sage.py
|
print "import..."
from timeit import default_timer as clock
from sage.all import var, Integer
print " done."
def fact(n):
if n in [0, 1]:
return 1
else:
return n*fact(n-1)
def diff(e, x, n):
for i in range(n):
e = e.diff(x)
return e
def legendre(n, x):
e = Integer(1)/(Integer(2)**n * fact(Integer(n))) * diff((x**2-1)**n, x, n)
return e.expand()
var("x")
for n in range(10):
print n, legendre(n, x)
t1 = clock()
e = legendre(500, x)
t2 = clock()
print "Total time for legendre(500, x):", t2-t1, "s"
|
Add the Legendre benchmark in Sage
|
Add the Legendre benchmark in Sage
|
Python
|
mit
|
symengine/symengine.py,symengine/symengine.py,bjodah/symengine.py,symengine/symengine.py,bjodah/symengine.py,bjodah/symengine.py
|
Add the Legendre benchmark in Sage
|
print "import..."
from timeit import default_timer as clock
from sage.all import var, Integer
print " done."
def fact(n):
if n in [0, 1]:
return 1
else:
return n*fact(n-1)
def diff(e, x, n):
for i in range(n):
e = e.diff(x)
return e
def legendre(n, x):
e = Integer(1)/(Integer(2)**n * fact(Integer(n))) * diff((x**2-1)**n, x, n)
return e.expand()
var("x")
for n in range(10):
print n, legendre(n, x)
t1 = clock()
e = legendre(500, x)
t2 = clock()
print "Total time for legendre(500, x):", t2-t1, "s"
|
<commit_before><commit_msg>Add the Legendre benchmark in Sage<commit_after>
|
print "import..."
from timeit import default_timer as clock
from sage.all import var, Integer
print " done."
def fact(n):
if n in [0, 1]:
return 1
else:
return n*fact(n-1)
def diff(e, x, n):
for i in range(n):
e = e.diff(x)
return e
def legendre(n, x):
e = Integer(1)/(Integer(2)**n * fact(Integer(n))) * diff((x**2-1)**n, x, n)
return e.expand()
var("x")
for n in range(10):
print n, legendre(n, x)
t1 = clock()
e = legendre(500, x)
t2 = clock()
print "Total time for legendre(500, x):", t2-t1, "s"
|
Add the Legendre benchmark in Sageprint "import..."
from timeit import default_timer as clock
from sage.all import var, Integer
print " done."
def fact(n):
if n in [0, 1]:
return 1
else:
return n*fact(n-1)
def diff(e, x, n):
for i in range(n):
e = e.diff(x)
return e
def legendre(n, x):
e = Integer(1)/(Integer(2)**n * fact(Integer(n))) * diff((x**2-1)**n, x, n)
return e.expand()
var("x")
for n in range(10):
print n, legendre(n, x)
t1 = clock()
e = legendre(500, x)
t2 = clock()
print "Total time for legendre(500, x):", t2-t1, "s"
|
<commit_before><commit_msg>Add the Legendre benchmark in Sage<commit_after>print "import..."
from timeit import default_timer as clock
from sage.all import var, Integer
print " done."
def fact(n):
if n in [0, 1]:
return 1
else:
return n*fact(n-1)
def diff(e, x, n):
for i in range(n):
e = e.diff(x)
return e
def legendre(n, x):
e = Integer(1)/(Integer(2)**n * fact(Integer(n))) * diff((x**2-1)**n, x, n)
return e.expand()
var("x")
for n in range(10):
print n, legendre(n, x)
t1 = clock()
e = legendre(500, x)
t2 = clock()
print "Total time for legendre(500, x):", t2-t1, "s"
|
|
e28ebe8bf2209d624e4b9470a4f86dcffcc5be56
|
src/80_Remove_Duplicates_from_Sorted_Array_II.py
|
src/80_Remove_Duplicates_from_Sorted_Array_II.py
|
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
k = 2
if len(nums) == 0:
return 0
m = 1
count = 1
for i in range(1, len(nums)):
if nums[i] == nums[i - 1]:
if count < k:
nums[m] = nums[i]
m += 1
count += 1
else:
count = 1
nums[m] = nums[i]
m += 1
return m
|
Remove Duplicates from Sorted Array II
|
Completed problem 80: Remove Duplicates from Sorted Array II
|
Python
|
mit
|
ChuanleiGuo/AlgorithmsPlayground,ChuanleiGuo/AlgorithmsPlayground,ChuanleiGuo/AlgorithmsPlayground,ChuanleiGuo/AlgorithmsPlayground
|
Completed problem 80: Remove Duplicates from Sorted Array II
|
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
k = 2
if len(nums) == 0:
return 0
m = 1
count = 1
for i in range(1, len(nums)):
if nums[i] == nums[i - 1]:
if count < k:
nums[m] = nums[i]
m += 1
count += 1
else:
count = 1
nums[m] = nums[i]
m += 1
return m
|
<commit_before><commit_msg>Completed problem 80: Remove Duplicates from Sorted Array II<commit_after>
|
class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
k = 2
if len(nums) == 0:
return 0
m = 1
count = 1
for i in range(1, len(nums)):
if nums[i] == nums[i - 1]:
if count < k:
nums[m] = nums[i]
m += 1
count += 1
else:
count = 1
nums[m] = nums[i]
m += 1
return m
|
Completed problem 80: Remove Duplicates from Sorted Array IIclass Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
k = 2
if len(nums) == 0:
return 0
m = 1
count = 1
for i in range(1, len(nums)):
if nums[i] == nums[i - 1]:
if count < k:
nums[m] = nums[i]
m += 1
count += 1
else:
count = 1
nums[m] = nums[i]
m += 1
return m
|
<commit_before><commit_msg>Completed problem 80: Remove Duplicates from Sorted Array II<commit_after>class Solution(object):
def removeDuplicates(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
k = 2
if len(nums) == 0:
return 0
m = 1
count = 1
for i in range(1, len(nums)):
if nums[i] == nums[i - 1]:
if count < k:
nums[m] = nums[i]
m += 1
count += 1
else:
count = 1
nums[m] = nums[i]
m += 1
return m
|
|
2b793812b0e237e90048c33439ac940466ad068a
|
mapping/make_num_from_fq.py
|
mapping/make_num_from_fq.py
|
import gzip
from sys import argv
if __name__ == "__main__":
fastq = gzip.open(argv[1], 'rt')
base, _ = argv[1].rsplit('.remap', 1)
num = gzip.open(base+'.to.remap.num.gz', 'wt')
line = next(fastq)
last = ''
while line:
curr, counts = line.strip().rsplit(':', 1)
if curr != last:
counts = int(counts)
num.write('{}\n'.format(2*counts))
num.flush()
last = curr
seq = next(fastq)
header_again = next(fastq)
quals = next(fastq)
line = next(fastq)
|
Add script to make the .num.txt.gz file from the fastq
|
Add script to make the .num.txt.gz file from the fastq
|
Python
|
apache-2.0
|
TheFraserLab/Hornet
|
Add script to make the .num.txt.gz file from the fastq
|
import gzip
from sys import argv
if __name__ == "__main__":
fastq = gzip.open(argv[1], 'rt')
base, _ = argv[1].rsplit('.remap', 1)
num = gzip.open(base+'.to.remap.num.gz', 'wt')
line = next(fastq)
last = ''
while line:
curr, counts = line.strip().rsplit(':', 1)
if curr != last:
counts = int(counts)
num.write('{}\n'.format(2*counts))
num.flush()
last = curr
seq = next(fastq)
header_again = next(fastq)
quals = next(fastq)
line = next(fastq)
|
<commit_before><commit_msg>Add script to make the .num.txt.gz file from the fastq<commit_after>
|
import gzip
from sys import argv
if __name__ == "__main__":
fastq = gzip.open(argv[1], 'rt')
base, _ = argv[1].rsplit('.remap', 1)
num = gzip.open(base+'.to.remap.num.gz', 'wt')
line = next(fastq)
last = ''
while line:
curr, counts = line.strip().rsplit(':', 1)
if curr != last:
counts = int(counts)
num.write('{}\n'.format(2*counts))
num.flush()
last = curr
seq = next(fastq)
header_again = next(fastq)
quals = next(fastq)
line = next(fastq)
|
Add script to make the .num.txt.gz file from the fastqimport gzip
from sys import argv
if __name__ == "__main__":
fastq = gzip.open(argv[1], 'rt')
base, _ = argv[1].rsplit('.remap', 1)
num = gzip.open(base+'.to.remap.num.gz', 'wt')
line = next(fastq)
last = ''
while line:
curr, counts = line.strip().rsplit(':', 1)
if curr != last:
counts = int(counts)
num.write('{}\n'.format(2*counts))
num.flush()
last = curr
seq = next(fastq)
header_again = next(fastq)
quals = next(fastq)
line = next(fastq)
|
<commit_before><commit_msg>Add script to make the .num.txt.gz file from the fastq<commit_after>import gzip
from sys import argv
if __name__ == "__main__":
fastq = gzip.open(argv[1], 'rt')
base, _ = argv[1].rsplit('.remap', 1)
num = gzip.open(base+'.to.remap.num.gz', 'wt')
line = next(fastq)
last = ''
while line:
curr, counts = line.strip().rsplit(':', 1)
if curr != last:
counts = int(counts)
num.write('{}\n'.format(2*counts))
num.flush()
last = curr
seq = next(fastq)
header_again = next(fastq)
quals = next(fastq)
line = next(fastq)
|
|
a3d5736fdd8ae747bb7c375ddf1e7934f2227d25
|
go/tests/test_urls.py
|
go/tests/test_urls.py
|
from django.core.urlresolvers import reverse
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
class TestLoginAs(GoDjangoTestCase):
def setUp(self):
self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
self.superuser_helper = self.vumi_helper.make_django_user(
superuser=True, email='superuser@example.com')
self.user_helper_1 = self.vumi_helper.make_django_user(
email='user1@example.com')
self.user_helper_2 = self.vumi_helper.make_django_user(
email='user2@example.com')
self.superuser_client = self.vumi_helper.get_client(
username='superuser@example.com')
self.user_client_1 = self.vumi_helper.get_client(
username='user1@example.com')
def test_successful_login_as(self):
""" Superusers should be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.superuser_client.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-2-user')
def test_failed_login_as(self):
""" Ordinary users should not be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.user_client_1.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-1-user')
|
Add tests for login-as URLs.
|
Add tests for login-as URLs.
|
Python
|
bsd-3-clause
|
praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go,praekelt/vumi-go
|
Add tests for login-as URLs.
|
from django.core.urlresolvers import reverse
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
class TestLoginAs(GoDjangoTestCase):
def setUp(self):
self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
self.superuser_helper = self.vumi_helper.make_django_user(
superuser=True, email='superuser@example.com')
self.user_helper_1 = self.vumi_helper.make_django_user(
email='user1@example.com')
self.user_helper_2 = self.vumi_helper.make_django_user(
email='user2@example.com')
self.superuser_client = self.vumi_helper.get_client(
username='superuser@example.com')
self.user_client_1 = self.vumi_helper.get_client(
username='user1@example.com')
def test_successful_login_as(self):
""" Superusers should be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.superuser_client.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-2-user')
def test_failed_login_as(self):
""" Ordinary users should not be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.user_client_1.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-1-user')
|
<commit_before><commit_msg>Add tests for login-as URLs.<commit_after>
|
from django.core.urlresolvers import reverse
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
class TestLoginAs(GoDjangoTestCase):
def setUp(self):
self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
self.superuser_helper = self.vumi_helper.make_django_user(
superuser=True, email='superuser@example.com')
self.user_helper_1 = self.vumi_helper.make_django_user(
email='user1@example.com')
self.user_helper_2 = self.vumi_helper.make_django_user(
email='user2@example.com')
self.superuser_client = self.vumi_helper.get_client(
username='superuser@example.com')
self.user_client_1 = self.vumi_helper.get_client(
username='user1@example.com')
def test_successful_login_as(self):
""" Superusers should be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.superuser_client.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-2-user')
def test_failed_login_as(self):
""" Ordinary users should not be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.user_client_1.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-1-user')
|
Add tests for login-as URLs.from django.core.urlresolvers import reverse
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
class TestLoginAs(GoDjangoTestCase):
def setUp(self):
self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
self.superuser_helper = self.vumi_helper.make_django_user(
superuser=True, email='superuser@example.com')
self.user_helper_1 = self.vumi_helper.make_django_user(
email='user1@example.com')
self.user_helper_2 = self.vumi_helper.make_django_user(
email='user2@example.com')
self.superuser_client = self.vumi_helper.get_client(
username='superuser@example.com')
self.user_client_1 = self.vumi_helper.get_client(
username='user1@example.com')
def test_successful_login_as(self):
""" Superusers should be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.superuser_client.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-2-user')
def test_failed_login_as(self):
""" Ordinary users should not be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.user_client_1.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-1-user')
|
<commit_before><commit_msg>Add tests for login-as URLs.<commit_after>from django.core.urlresolvers import reverse
from go.base.tests.helpers import GoDjangoTestCase, DjangoVumiApiHelper
class TestLoginAs(GoDjangoTestCase):
def setUp(self):
self.vumi_helper = self.add_helper(DjangoVumiApiHelper())
self.superuser_helper = self.vumi_helper.make_django_user(
superuser=True, email='superuser@example.com')
self.user_helper_1 = self.vumi_helper.make_django_user(
email='user1@example.com')
self.user_helper_2 = self.vumi_helper.make_django_user(
email='user2@example.com')
self.superuser_client = self.vumi_helper.get_client(
username='superuser@example.com')
self.user_client_1 = self.vumi_helper.get_client(
username='user1@example.com')
def test_successful_login_as(self):
""" Superusers should be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.superuser_client.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-2-user')
def test_failed_login_as(self):
""" Ordinary users should not be able to use login-as. """
user_2_pk = self.user_helper_2.get_django_user().pk
response = self.user_client_1.get(
reverse('loginas-user-login', kwargs={'user_id': str(user_2_pk)}))
self.assertRedirects(response, reverse('home'), target_status_code=302)
self.assertEqual(response.client.session.get('_go_user_account_key'),
'test-1-user')
|
|
aab3ddec9fc0c88bf6fb96e1efd762a277cb48ad
|
ideascube/conf/kb_eth_kytabu.py
|
ideascube/conf/kb_eth_kytabu.py
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'KYTABU'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'khanacademy',
},
]
|
Add conf file for KoomBook Kytabu
|
Add conf file for KoomBook Kytabu
|
Python
|
agpl-3.0
|
ideascube/ideascube,ideascube/ideascube,ideascube/ideascube,ideascube/ideascube
|
Add conf file for KoomBook Kytabu
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'KYTABU'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'khanacademy',
},
]
|
<commit_before><commit_msg>Add conf file for KoomBook Kytabu<commit_after>
|
# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'KYTABU'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'khanacademy',
},
]
|
Add conf file for KoomBook Kytabu# -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'KYTABU'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'khanacademy',
},
]
|
<commit_before><commit_msg>Add conf file for KoomBook Kytabu<commit_after># -*- coding: utf-8 -*-
"""KoomBook conf"""
from .kb import * # noqa
LANGUAGE_CODE = 'fr'
IDEASCUBE_NAME = 'KYTABU'
HOME_CARDS = STAFF_HOME_CARDS + [
{
'id': 'blog',
},
{
'id': 'mediacenter',
},
{
'id': 'gutenberg',
},
{
'id': 'khanacademy',
},
]
|
|
677f721e38dcbefaf9378eba6e54a5f4b63c1a3f
|
migrations/versions/febbe2d7e47b_use_largebinary_for_fernet_key_columns.py
|
migrations/versions/febbe2d7e47b_use_largebinary_for_fernet_key_columns.py
|
"""Use LargeBinary for fernet key columns.
Revision ID: febbe2d7e47b
Revises: 8c431c5e70a8
Create Date: 2022-03-19 11:40:07.203662
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "febbe2d7e47b"
down_revision = "8c431c5e70a8"
def upgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key", sa.LargeBinary(), nullable=True
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key", sa.LargeBinary(), nullable=True
)
)
def downgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
|
Create migration febbe2d7e47b for fernet columns
|
Create migration febbe2d7e47b for fernet columns
Updates fastly_encrypted_api_key and aws_encrypted_secret_key to use
LargeBinary types for their columns.
|
Python
|
mit
|
lsst-sqre/ltd-keeper,lsst-sqre/ltd-keeper
|
Create migration febbe2d7e47b for fernet columns
Updates fastly_encrypted_api_key and aws_encrypted_secret_key to use
LargeBinary types for their columns.
|
"""Use LargeBinary for fernet key columns.
Revision ID: febbe2d7e47b
Revises: 8c431c5e70a8
Create Date: 2022-03-19 11:40:07.203662
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "febbe2d7e47b"
down_revision = "8c431c5e70a8"
def upgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key", sa.LargeBinary(), nullable=True
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key", sa.LargeBinary(), nullable=True
)
)
def downgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
|
<commit_before><commit_msg>Create migration febbe2d7e47b for fernet columns
Updates fastly_encrypted_api_key and aws_encrypted_secret_key to use
LargeBinary types for their columns.<commit_after>
|
"""Use LargeBinary for fernet key columns.
Revision ID: febbe2d7e47b
Revises: 8c431c5e70a8
Create Date: 2022-03-19 11:40:07.203662
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "febbe2d7e47b"
down_revision = "8c431c5e70a8"
def upgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key", sa.LargeBinary(), nullable=True
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key", sa.LargeBinary(), nullable=True
)
)
def downgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
|
Create migration febbe2d7e47b for fernet columns
Updates fastly_encrypted_api_key and aws_encrypted_secret_key to use
LargeBinary types for their columns."""Use LargeBinary for fernet key columns.
Revision ID: febbe2d7e47b
Revises: 8c431c5e70a8
Create Date: 2022-03-19 11:40:07.203662
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "febbe2d7e47b"
down_revision = "8c431c5e70a8"
def upgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key", sa.LargeBinary(), nullable=True
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key", sa.LargeBinary(), nullable=True
)
)
def downgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
|
<commit_before><commit_msg>Create migration febbe2d7e47b for fernet columns
Updates fastly_encrypted_api_key and aws_encrypted_secret_key to use
LargeBinary types for their columns.<commit_after>"""Use LargeBinary for fernet key columns.
Revision ID: febbe2d7e47b
Revises: 8c431c5e70a8
Create Date: 2022-03-19 11:40:07.203662
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "febbe2d7e47b"
down_revision = "8c431c5e70a8"
def upgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key", sa.LargeBinary(), nullable=True
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key", sa.LargeBinary(), nullable=True
)
)
def downgrade():
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.drop_column("fastly_encrypted_api_key")
batch_op.drop_column("aws_encrypted_secret_key")
with op.batch_alter_table("organizations", schema=None) as batch_op:
batch_op.add_column(
sa.Column(
"fastly_encrypted_api_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
batch_op.add_column(
sa.Column(
"aws_encrypted_secret_key",
sa.VARCHAR(length=255),
nullable=True,
)
)
|
|
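Editor's note on the migration record above (illustrative sketch, not part of the dataset row): Fernet tokens produced by the cryptography package are byte strings whose length grows with the plaintext, so a LargeBinary column stores them as-is, while a fixed VARCHAR(255) risks truncation and an implicit text encoding. The model below is hypothetical and only mirrors the migrated table; it assumes SQLAlchemy 1.4+ for the declarative_base import.

from cryptography.fernet import Fernet
from sqlalchemy import Column, Integer, LargeBinary
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Organization(Base):
    # Hypothetical model mirroring the "organizations" table after the upgrade.
    __tablename__ = "organizations"
    id = Column(Integer, primary_key=True)
    fastly_encrypted_api_key = Column(LargeBinary, nullable=True)

fernet = Fernet(Fernet.generate_key())
org = Organization(fastly_encrypted_api_key=fernet.encrypt(b"example-api-key"))
# Later: fernet.decrypt(org.fastly_encrypted_api_key) recovers b"example-api-key".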
46c140bd441904f9cbfa655382223363c6e6e2fc
|
server/tests/test_views.py
|
server/tests/test_views.py
|
from django.contrib.auth.models import User as AuthUser
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.test import TestCase
from django.utils.html import escape
from server.models import Player, Song, Request
class SongRequestTest(TestCase):
def test_saving_a_new_song_request(self):
self.user = AuthUser.objects.create_user(username='barry', email='barry@white.com',
password='myeverything')
self.song = Song.songs.create(pk=43, title='Title 1',
artist='Artist 1', path='Path 1', last_time_play=0)
self.client.login(username='barry', password='myeverything')
self.client.post(
'/request/',
data={'id_song': 43}
)
self.assertEqual(Request.requests.count(), 1)
new_request = Request.requests.first()
self.assertEqual(new_request.song.pk, 43)
|
Add first test for views
|
Add first test for views
|
Python
|
bsd-3-clause
|
raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite,raspberrywhite/raspberrywhite
|
Add first test for views
|
from django.contrib.auth.models import User as AuthUser
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.test import TestCase
from django.utils.html import escape
from server.models import Player, Song, Request
class SongRequestTest(TestCase):
def test_saving_a_new_song_request(self):
self.user = AuthUser.objects.create_user(username='barry', email='barry@white.com',
password='myeverything')
self.song = Song.songs.create(pk=43, title='Title 1',
artist='Artist 1', path='Path 1', last_time_play=0)
self.client.login(username='barry', password='myeverything')
self.client.post(
'/request/',
data={'id_song': 43}
)
self.assertEqual(Request.requests.count(), 1)
new_request = Request.requests.first()
self.assertEqual(new_request.song.pk, 43)
|
<commit_before><commit_msg>Add first test for views<commit_after>
|
from django.contrib.auth.models import User as AuthUser
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.test import TestCase
from django.utils.html import escape
from server.models import Player, Song, Request
class SongRequestTest(TestCase):
def test_saving_a_new_song_request(self):
self.user = AuthUser.objects.create_user(username='barry', email='barry@white.com',
password='myeverything')
self.song = Song.songs.create(pk=43, title='Title 1',
artist='Artist 1', path='Path 1', last_time_play=0)
self.client.login(username='barry', password='myeverything')
self.client.post(
'/request/',
data={'id_song': 43}
)
self.assertEqual(Request.requests.count(), 1)
new_request = Request.requests.first()
self.assertEqual(new_request.song.pk, 43)
|
Add first test for viewsfrom django.contrib.auth.models import User as AuthUser
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.test import TestCase
from django.utils.html import escape
from server.models import Player, Song, Request
class SongRequestTest(TestCase):
def test_saving_a_new_song_request(self):
self.user = AuthUser.objects.create_user(username='barry', email='barry@white.com',
password='myeverything')
self.song = Song.songs.create(pk=43, title='Title 1',
artist='Artist 1', path='Path 1', last_time_play=0)
self.client.login(username='barry', password='myeverything')
self.client.post(
'/request/',
data={'id_song': 43}
)
self.assertEqual(Request.requests.count(), 1)
new_request = Request.requests.first()
self.assertEqual(new_request.song.pk, 43)
|
<commit_before><commit_msg>Add first test for views<commit_after>from django.contrib.auth.models import User as AuthUser
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.template.loader import render_to_string
from django.test import TestCase
from django.utils.html import escape
from server.models import Player, Song, Request
class SongRequestTest(TestCase):
def test_saving_a_new_song_request(self):
self.user = AuthUser.objects.create_user(username='barry', email='barry@white.com',
password='myeverything')
self.song = Song.songs.create(pk=43, title='Title 1',
artist='Artist 1', path='Path 1', last_time_play=0)
self.client.login(username='barry', password='myeverything')
self.client.post(
'/request/',
data={'id_song': 43}
)
self.assertEqual(Request.requests.count(), 1)
new_request = Request.requests.first()
self.assertEqual(new_request.song.pk, 43)
|
|
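Editorial aside on the test record above: the view it drives is not part of this row. A minimal sketch of the kind of handler that would satisfy it, assuming the same custom managers (Song.songs, Request.requests) and a POST field named id_song; the view name and response status are invented.

from django.http import HttpResponse
from server.models import Song, Request

def request_song(request):
    # Hypothetical POST handler mapped to /request/ in urls.py.
    song = Song.songs.get(pk=request.POST['id_song'])
    Request.requests.create(song=song)
    return HttpResponse(status=201)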
e30333f6c7d4dfc14934fea6d379ba0ae035ce0f
|
numba/cuda/tests/cudadrv/test_ir_patch.py
|
numba/cuda/tests/cudadrv/test_ir_patch.py
|
from __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.cudadrv.nvvm import llvm39_to_34_ir
@skip_on_cudasim('Linking unsupported in the simulator')
class TestIRPatch(unittest.TestCase):
def test_load_rewrite(self):
text = "%myload = not really"
out = llvm39_to_34_ir(text)
# No rewrite
self.assertEqual(text, out)
text = "%myload = load i32, i32* val"
out = llvm39_to_34_ir(text)
# Rewritten
self.assertEqual("%myload = load i32* val", out)
if __name__ == '__main__':
unittest.main()
|
Add tests for ir downgrade patch
|
Add tests for ir downgrade patch
|
Python
|
bsd-2-clause
|
jriehl/numba,sklam/numba,cpcloud/numba,IntelLabs/numba,jriehl/numba,stuartarchibald/numba,cpcloud/numba,jriehl/numba,numba/numba,stonebig/numba,stuartarchibald/numba,stonebig/numba,sklam/numba,IntelLabs/numba,gmarkall/numba,numba/numba,cpcloud/numba,seibert/numba,seibert/numba,sklam/numba,gmarkall/numba,numba/numba,sklam/numba,cpcloud/numba,IntelLabs/numba,numba/numba,gmarkall/numba,stonebig/numba,cpcloud/numba,gmarkall/numba,stonebig/numba,stuartarchibald/numba,seibert/numba,seibert/numba,IntelLabs/numba,stonebig/numba,jriehl/numba,sklam/numba,stuartarchibald/numba,seibert/numba,numba/numba,IntelLabs/numba,jriehl/numba,gmarkall/numba,stuartarchibald/numba
|
Add tests for ir downgrade patch
|
from __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.cudadrv.nvvm import llvm39_to_34_ir
@skip_on_cudasim('Linking unsupported in the simulator')
class TestIRPatch(unittest.TestCase):
def test_load_rewrite(self):
text = "%myload = not really"
out = llvm39_to_34_ir(text)
# No rewrite
self.assertEqual(text, out)
text = "%myload = load i32, i32* val"
out = llvm39_to_34_ir(text)
# Rewritten
self.assertEqual("%myload = load i32* val", out)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for ir downgrade patch<commit_after>
|
from __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.cudadrv.nvvm import llvm39_to_34_ir
@skip_on_cudasim('Linking unsupported in the simulator')
class TestIRPatch(unittest.TestCase):
def test_load_rewrite(self):
text = "%myload = not really"
out = llvm39_to_34_ir(text)
# No rewrite
self.assertEqual(text, out)
text = "%myload = load i32, i32* val"
out = llvm39_to_34_ir(text)
# Rewritten
self.assertEqual("%myload = load i32* val", out)
if __name__ == '__main__':
unittest.main()
|
Add tests for ir downgrade patchfrom __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.cudadrv.nvvm import llvm39_to_34_ir
@skip_on_cudasim('Linking unsupported in the simulator')
class TestIRPatch(unittest.TestCase):
def test_load_rewrite(self):
text = "%myload = not really"
out = llvm39_to_34_ir(text)
# No rewrite
self.assertEqual(text, out)
text = "%myload = load i32, i32* val"
out = llvm39_to_34_ir(text)
# Rewritten
self.assertEqual("%myload = load i32* val", out)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for ir downgrade patch<commit_after>from __future__ import print_function, absolute_import, division
from numba.cuda.testing import unittest
from numba.cuda.testing import skip_on_cudasim
from numba.cuda.cudadrv.nvvm import llvm39_to_34_ir
@skip_on_cudasim('Linking unsupported in the simulator')
class TestIRPatch(unittest.TestCase):
def test_load_rewrite(self):
text = "%myload = not really"
out = llvm39_to_34_ir(text)
# No rewrite
self.assertEqual(text, out)
text = "%myload = load i32, i32* val"
out = llvm39_to_34_ir(text)
# Rewritten
self.assertEqual("%myload = load i32* val", out)
if __name__ == '__main__':
unittest.main()
|
|
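Editorial aside: the behaviour tested above is the LLVM 3.9 to 3.4 downgrade of typed load instructions, i.e. rewriting "load <ty>, <ty>* <ptr>" into "load <ty>* <ptr>". A standalone regex sketch of just that rule (the real llvm39_to_34_ir in numba handles more constructs):

import re

_TYPED_LOAD = re.compile(r"(\bload\s+)[^,]+,\s*")

def downgrade_load(line):
    # "%x = load i32, i32* p" -> "%x = load i32* p"; lines without a
    # typed load are returned unchanged.
    return _TYPED_LOAD.sub(r"\1", line)

assert downgrade_load("%myload = load i32, i32* val") == "%myload = load i32* val"
assert downgrade_load("%myload = not really") == "%myload = not really"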
02740a850817d2d3294965e97333218b5f44f532
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.15',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.16',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
Update the PyPI version to 0.2.16.
|
Update the PyPI version to 0.2.16.
|
Python
|
mit
|
electronick1/todoist-python,Doist/todoist-python
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.15',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
Update the PyPI version to 0.2.16.
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.16',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
<commit_before># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.15',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
<commit_msg>Update the PyPI version to 0.2.16.<commit_after>
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.16',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.15',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
Update the PyPI version to 0.2.16.# -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.16',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
<commit_before># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.15',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
<commit_msg>Update the PyPI version to 0.2.16.<commit_after># -*- coding: utf-8 -*-
import os
from setuptools import setup
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname)).read()
except:
return ''
setup(
name='todoist-python',
version='0.2.16',
packages=['todoist', 'todoist.managers'],
author='Doist Team',
author_email='info@todoist.com',
license='BSD',
description='todoist-python - The official Todoist Python API library',
long_description = read('README.md'),
install_requires=[
'requests',
],
# see here for complete list of classifiers
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=(
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
),
)
|
145cf9e539d44b3996a4ab916edb7873fa5090f0
|
other/wrapping-cpp/swig/c++/test_mylib.py
|
other/wrapping-cpp/swig/c++/test_mylib.py
|
import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
|
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
|
Allow file to be called from other directories
|
Allow file to be called from other directories
|
Python
|
bsd-2-clause
|
fangohr/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,ryanpepper/oommf-python,fangohr/oommf-python,fangohr/oommf-python,ryanpepper/oommf-python
|
import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
Allow file to be called from other directories
|
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
|
<commit_before>import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
<commit_msg>Allow file to be called from other directories<commit_after>
|
import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
|
import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
Allow file to be called from other directoriesimport os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
|
<commit_before>import os
import pytest
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
os.system('make clean')
print("Completed finaliser")
request.addfinalizer(teardown)
os.system('make clean')
os.system('make all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
<commit_msg>Allow file to be called from other directories<commit_after>import os
import pytest
# Need to call Makefile in directory where this test file is
def call_make(target):
# where is this file
this_file = os.path.realpath(__file__)
this_dir = os.path.split(this_file)[0]
cd_command = "cd {}".format(this_dir)
make_command = "make {}".format(target)
command = '{}; {}'.format(cd_command, make_command)
print("About to execute: '{}'".format(command))
os.system(command)
@pytest.fixture
def setup(request):
def teardown():
print("Running make clean")
call_make('clean')
print("Completed finaliser")
request.addfinalizer(teardown)
call_make('clean')
call_make('all')
def test_squared(setup):
import mylib
assert 16. == mylib.squared(4)
def test_myfunction(setup):
import mylib
assert 16. == mylib.myfunction(mylib.squared, 4)
|
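Editorial note on the change above: joining "cd <dir>; make <target>" into one shell string works, but the same intent can be expressed with subprocess and an explicit working directory, which avoids quoting problems and surfaces a non-zero exit status as an exception. A sketch, not taken from the repository:

import os
import subprocess

def call_make(target):
    # Run make in the directory containing this test file, regardless of
    # the caller's current working directory.
    this_dir = os.path.dirname(os.path.realpath(__file__))
    subprocess.check_call(['make', target], cwd=this_dir)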
4325c2fc8308f604eef379551524a2d8f8bfc8bb
|
corehq/apps/sms/management/commands/find_commconnect_enabled_projects.py
|
corehq/apps/sms/management/commands/find_commconnect_enabled_projects.py
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
args = ""
help = ""
def handle(self, *args, **options):
for domain in Domain.get_all():
if domain.commconnect_enabled:
print "%s has commconnect_enabled=True" % domain.name
|
Add management command for finding commconnect enabled projects
|
Add management command for finding commconnect enabled projects
|
Python
|
bsd-3-clause
|
puttarajubr/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq
|
Add management command for finding commconnect enabled projects
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
args = ""
help = ""
def handle(self, *args, **options):
for domain in Domain.get_all():
if domain.commconnect_enabled:
print "%s has commconnect_enabled=True" % domain.name
|
<commit_before><commit_msg>Add management command for finding commconnect enabled projects<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
args = ""
help = ""
def handle(self, *args, **options):
for domain in Domain.get_all():
if domain.commconnect_enabled:
print "%s has commconnect_enabled=True" % domain.name
|
Add management command for finding commconnect enabled projectsfrom django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
args = ""
help = ""
def handle(self, *args, **options):
for domain in Domain.get_all():
if domain.commconnect_enabled:
print "%s has commconnect_enabled=True" % domain.name
|
<commit_before><commit_msg>Add management command for finding commconnect enabled projects<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain
class Command(BaseCommand):
args = ""
help = ""
def handle(self, *args, **options):
for domain in Domain.get_all():
if domain.commconnect_enabled:
print "%s has commconnect_enabled=True" % domain.name
|
|
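Editorial variant of the command above (Python 3 style, not from the repository): writing through self.stdout instead of print keeps the output capturable when the command is run via call_command in tests.

from django.core.management.base import BaseCommand
from corehq.apps.domain.models import Domain

class Command(BaseCommand):
    help = "List domains with commconnect enabled"

    def handle(self, *args, **options):
        for domain in Domain.get_all():
            if domain.commconnect_enabled:
                self.stdout.write("%s has commconnect_enabled=True" % domain.name)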
ada5c5039d9a516fa9e6bc7741fbbbcd7f35d30f
|
migrations/0003_auto_20190327_1951.py
|
migrations/0003_auto_20190327_1951.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('announcements', '0002_auto_20141004_1330'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='level',
field=models.SmallIntegerField(choices=[(10, 'debug'), (20, 'info'), (25, 'success'), (30, 'warning'), (40, 'error')], verbose_name='Level'),
),
]
|
Update migrations for Django 1.11 and Python 3
|
Update migrations for Django 1.11 and Python 3
|
Python
|
mit
|
mback2k/django-app-announcements
|
Update migrations for Django 1.11 and Python 3
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('announcements', '0002_auto_20141004_1330'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='level',
field=models.SmallIntegerField(choices=[(10, 'debug'), (20, 'info'), (25, 'success'), (30, 'warning'), (40, 'error')], verbose_name='Level'),
),
]
|
<commit_before><commit_msg>Update migrations for Django 1.11 and Python 3<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('announcements', '0002_auto_20141004_1330'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='level',
field=models.SmallIntegerField(choices=[(10, 'debug'), (20, 'info'), (25, 'success'), (30, 'warning'), (40, 'error')], verbose_name='Level'),
),
]
|
Update migrations for Django 1.11 and Python 3# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('announcements', '0002_auto_20141004_1330'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='level',
field=models.SmallIntegerField(choices=[(10, 'debug'), (20, 'info'), (25, 'success'), (30, 'warning'), (40, 'error')], verbose_name='Level'),
),
]
|
<commit_before><commit_msg>Update migrations for Django 1.11 and Python 3<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-03-27 19:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('announcements', '0002_auto_20141004_1330'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='level',
field=models.SmallIntegerField(choices=[(10, 'debug'), (20, 'info'), (25, 'success'), (30, 'warning'), (40, 'error')], verbose_name='Level'),
),
]
|
|
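Editorial aside on the migration above: the integer/label pairs in the choices list match the levels defined by django.contrib.messages. A hypothetical model definition that would produce this migration state (the Announcement model itself is not part of this row):

from django.contrib import messages
from django.db import models

LEVEL_CHOICES = [
    (messages.DEBUG, 'debug'),      # 10
    (messages.INFO, 'info'),        # 20
    (messages.SUCCESS, 'success'),  # 25
    (messages.WARNING, 'warning'),  # 30
    (messages.ERROR, 'error'),      # 40
]

class Announcement(models.Model):
    level = models.SmallIntegerField('Level', choices=LEVEL_CHOICES)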
2d58336d0605b313a90f2d1c3c13e0c33f4c592c
|
scripts/cache/warn_cache.py
|
scripts/cache/warn_cache.py
|
# Need something to cache the warnings GIS files, since they are so
# huge.
import os
FINAL = "/mesonet/share/pickup/wwa/"
for year in range(1986,2010):
cmd = "wget -q -O %s/%s_all.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0" % (FINAL, year, year, year+1)
os.system(cmd)
cmd = "wget -q -O %s/%s_tsmf.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limite0=yes" % (FINAL, year, year, year+1)
os.system(cmd)
if year > 2001:
cmd = "wget -q -O %s/%s_tsmf_sbw.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limit0=yes&limit1=yes" % (FINAL, year, year, year+1)
os.system(cmd)
|
Add cache script for the warnings
|
Add cache script for the warnings
|
Python
|
mit
|
akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem
|
Add cache script for the warnings
|
# Need something to cache the warnings GIS files, since they are so
# huge.
import os
FINAL = "/mesonet/share/pickup/wwa/"
for year in range(1986,2010):
cmd = "wget -q -O %s/%s_all.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0" % (FINAL, year, year, year+1)
os.system(cmd)
cmd = "wget -q -O %s/%s_tsmf.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limite0=yes" % (FINAL, year, year, year+1)
os.system(cmd)
if year > 2001:
cmd = "wget -q -O %s/%s_tsmf_sbw.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limit0=yes&limit1=yes" % (FINAL, year, year, year+1)
os.system(cmd)
|
<commit_before><commit_msg>Add cache script for the warnings<commit_after>
|
# Need something to cache the warnings GIS files, since they are so
# huge.
import os
FINAL = "/mesonet/share/pickup/wwa/"
for year in range(1986,2010):
cmd = "wget -q -O %s/%s_all.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0" % (FINAL, year, year, year+1)
os.system(cmd)
cmd = "wget -q -O %s/%s_tsmf.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limite0=yes" % (FINAL, year, year, year+1)
os.system(cmd)
if year > 2001:
cmd = "wget -q -O %s/%s_tsmf_sbw.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limit0=yes&limit1=yes" % (FINAL, year, year, year+1)
os.system(cmd)
|
Add cache script for the warnings# Need something to cache the warnings GIS files, since they are so
# huge.
import os
FINAL = "/mesonet/share/pickup/wwa/"
for year in range(1986,2010):
cmd = "wget -q -O %s/%s_all.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0" % (FINAL, year, year, year+1)
os.system(cmd)
cmd = "wget -q -O %s/%s_tsmf.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limite0=yes" % (FINAL, year, year, year+1)
os.system(cmd)
if year > 2001:
cmd = "wget -q -O %s/%s_tsmf_sbw.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limit0=yes&limit1=yes" % (FINAL, year, year, year+1)
os.system(cmd)
|
<commit_before><commit_msg>Add cache script for the warnings<commit_after># Need something to cache the warnings GIS files, since they are so
# huge.
import os
FINAL = "/mesonet/share/pickup/wwa/"
for year in range(1986,2010):
cmd = "wget -q -O %s/%s_all.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0" % (FINAL, year, year, year+1)
os.system(cmd)
cmd = "wget -q -O %s/%s_tsmf.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limite0=yes" % (FINAL, year, year, year+1)
os.system(cmd)
if year > 2001:
cmd = "wget -q -O %s/%s_tsmf_sbw.zip http://localhost/cgi-bin/request/gis/watchwarn.py?year1=%s&month1=1&day1=1&hour1=0&minute1=0&year2=%s&month2=1&day2=1&hour2=0&minute2=0&limit0=yes&limit1=yes" % (FINAL, year, year, year+1)
os.system(cmd)
|
|
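Editorial note on the script above: the URLs contain "&", and os.system hands the whole string to a shell, where an unquoted ampersand is a control operator; quoting the URL or fetching it directly from Python sidesteps that. A standard-library sketch (endpoint and paths taken from the script, parameter spelling normalised to limit0):

import urllib.request

BASE = "http://localhost/cgi-bin/request/gis/watchwarn.py"
FINAL = "/mesonet/share/pickup/wwa/"

def fetch(year, filename, extra=""):
    # Build the same year-long request window the wget commands use.
    url = (f"{BASE}?year1={year}&month1=1&day1=1&hour1=0&minute1=0"
           f"&year2={year + 1}&month2=1&day2=1&hour2=0&minute2=0{extra}")
    urllib.request.urlretrieve(url, FINAL + filename)

for year in range(1986, 2010):
    fetch(year, "%s_all.zip" % year)
    fetch(year, "%s_tsmf.zip" % year, "&limit0=yes")
    if year > 2001:
        fetch(year, "%s_tsmf_sbw.zip" % year, "&limit0=yes&limit1=yes")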
068a273d859a082c153acee4192a34df0b1ce4ea
|
scripts/update-readme-with-pack-list.py
|
scripts/update-readme-with-pack-list.py
|
#!/usr/bin/env python
"""
Script which updates README.md with a list of all the available packs.
"""
import os
import copy
import argparse
import yaml
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PACKS_DIR = os.path.join(CURRENT_DIR, '../packs')
README_PATH = os.path.join(CURRENT_DIR, '../README.md')
BASE_URL = 'https://github.com/StackStorm/st2contrib/tree/master/packs'
def get_pack_list():
packs = os.listdir(PACKS_DIR)
packs = sorted(packs)
return packs
def get_pack_metadata(pack):
metadata_path = os.path.join(PACKS_DIR, pack, 'pack.yaml')
with open(metadata_path, 'r') as fp:
content = fp.read()
metadata = yaml.safe_load(content)
return metadata
def generate_pack_list_table(packs):
lines = []
lines.append('Name | Description | Author | Latest Version')
lines.append('---- | ----------- | ------ | -------------- ')
for pack_name, metadata in packs:
values = copy.deepcopy(metadata)
values['base_url'] = BASE_URL
line = '| [%(name)s](%(base_url)s/%(name)s) | %(description)s | %(author)s | %(version)s' % (values)
lines.append(line)
result = '\n'.join(lines)
return result
def get_updated_readme(table):
with open(README_PATH, 'r') as fp:
current_readme = fp.read()
head = current_readme.split('## Available Packs\n\n')[0]
tail = current_readme.split('## License, and Contributors Agreement')[1]
replacement = '## Available Packs\n\n'
replacement += table + '\n\n'
replacement += '## License, and Contributors Agreement'
updated_readme = head + replacement + tail
return updated_readme
def main(dry_run):
packs = get_pack_list()
packs_with_metadata = []
for pack in packs:
try:
metadata = get_pack_metadata(pack=pack)
except IOError:
continue
packs_with_metadata.append((pack, metadata))
table = generate_pack_list_table(packs=packs_with_metadata)
updated_readme = get_updated_readme(table=table)
if dry_run:
print(updated_readme)
else:
with open(README_PATH, 'w') as fp:
fp.write(updated_readme)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dry-run', help='Print the new readme to stdout',
action='store_true', default=False)
args = parser.parse_args()
main(dry_run=args.dry_run)
|
Add script for updating readme with auto-generated pack list.
|
Add script for updating readme with auto-generated pack list.
|
Python
|
apache-2.0
|
pidah/st2contrib,tonybaloney/st2contrib,lmEshoo/st2contrib,pearsontechnology/st2contrib,meirwah/st2contrib,jtopjian/st2contrib,lmEshoo/st2contrib,pearsontechnology/st2contrib,Aamir-raza-1/st2contrib,jtopjian/st2contrib,pearsontechnology/st2contrib,armab/st2contrib,digideskio/st2contrib,pinterb/st2contrib,tonybaloney/st2contrib,StackStorm/st2contrib,tonybaloney/st2contrib,armab/st2contrib,Aamir-raza-1/st2contrib,lakshmi-kannan/st2contrib,pearsontechnology/st2contrib,psychopenguin/st2contrib,pidah/st2contrib,digideskio/st2contrib,meirwah/st2contrib,psychopenguin/st2contrib,pidah/st2contrib,lakshmi-kannan/st2contrib,dennybaa/st2contrib,dennybaa/st2contrib,StackStorm/st2contrib,pinterb/st2contrib,armab/st2contrib,StackStorm/st2contrib
|
Add script for updating readme with auto-generated pack list.
|
#!/usr/bin/env python
"""
Script which updates README.md with a list of all the available packs.
"""
import os
import copy
import argparse
import yaml
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PACKS_DIR = os.path.join(CURRENT_DIR, '../packs')
README_PATH = os.path.join(CURRENT_DIR, '../README.md')
BASE_URL = 'https://github.com/StackStorm/st2contrib/tree/master/packs'
def get_pack_list():
packs = os.listdir(PACKS_DIR)
packs = sorted(packs)
return packs
def get_pack_metadata(pack):
metadata_path = os.path.join(PACKS_DIR, pack, 'pack.yaml')
with open(metadata_path, 'r') as fp:
content = fp.read()
metadata = yaml.safe_load(content)
return metadata
def generate_pack_list_table(packs):
lines = []
lines.append('Name | Description | Author | Latest Version')
lines.append('---- | ----------- | ------ | -------------- ')
for pack_name, metadata in packs:
values = copy.deepcopy(metadata)
values['base_url'] = BASE_URL
line = '| [%(name)s](%(base_url)s/%(name)s) | %(description)s | %(author)s | %(version)s' % (values)
lines.append(line)
result = '\n'.join(lines)
return result
def get_updated_readme(table):
with open(README_PATH, 'r') as fp:
current_readme = fp.read()
head = current_readme.split('## Available Packs\n\n')[0]
tail = current_readme.split('## License, and Contributors Agreement')[1]
replacement = '## Available Packs\n\n'
replacement += table + '\n\n'
replacement += '## License, and Contributors Agreement'
updated_readme = head + replacement + tail
return updated_readme
def main(dry_run):
packs = get_pack_list()
packs_with_metadata = []
for pack in packs:
try:
metadata = get_pack_metadata(pack=pack)
except IOError:
continue
packs_with_metadata.append((pack, metadata))
table = generate_pack_list_table(packs=packs_with_metadata)
updated_readme = get_updated_readme(table=table)
if dry_run:
print(updated_readme)
else:
with open(README_PATH, 'w') as fp:
fp.write(updated_readme)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dry-run', help='Print the new readme to stdout',
action='store_true', default=False)
args = parser.parse_args()
main(dry_run=args.dry_run)
|
<commit_before><commit_msg>Add script for updating readme with auto-generated pack list.<commit_after>
|
#!/usr/bin/env python
"""
Script which updates README.md with a list of all the available packs.
"""
import os
import copy
import argparse
import yaml
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PACKS_DIR = os.path.join(CURRENT_DIR, '../packs')
README_PATH = os.path.join(CURRENT_DIR, '../README.md')
BASE_URL = 'https://github.com/StackStorm/st2contrib/tree/master/packs'
def get_pack_list():
packs = os.listdir(PACKS_DIR)
packs = sorted(packs)
return packs
def get_pack_metadata(pack):
metadata_path = os.path.join(PACKS_DIR, pack, 'pack.yaml')
with open(metadata_path, 'r') as fp:
content = fp.read()
metadata = yaml.safe_load(content)
return metadata
def generate_pack_list_table(packs):
lines = []
lines.append('Name | Description | Author | Latest Version')
lines.append('---- | ----------- | ------ | -------------- ')
for pack_name, metadata in packs:
values = copy.deepcopy(metadata)
values['base_url'] = BASE_URL
line = '| [%(name)s](%(base_url)s/%(name)s) | %(description)s | %(author)s | %(version)s' % (values)
lines.append(line)
result = '\n'.join(lines)
return result
def get_updated_readme(table):
with open(README_PATH, 'r') as fp:
current_readme = fp.read()
head = current_readme.split('## Available Packs\n\n')[0]
tail = current_readme.split('## License, and Contributors Agreement')[1]
replacement = '## Available Packs\n\n'
replacement += table + '\n\n'
replacement += '## License, and Contributors Agreement'
updated_readme = head + replacement + tail
return updated_readme
def main(dry_run):
packs = get_pack_list()
packs_with_metadata = []
for pack in packs:
try:
metadata = get_pack_metadata(pack=pack)
except IOError:
continue
packs_with_metadata.append((pack, metadata))
table = generate_pack_list_table(packs=packs_with_metadata)
updated_readme = get_updated_readme(table=table)
if dry_run:
print(updated_readme)
else:
with open(README_PATH, 'w') as fp:
fp.write(updated_readme)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dry-run', help='Print the new readme to stdout',
action='store_true', default=False)
args = parser.parse_args()
main(dry_run=args.dry_run)
|
Add script for updating readme with auto-generated pack list.#!/usr/bin/env python
"""
Script which updates README.md with a list of all the available packs.
"""
import os
import copy
import argparse
import yaml
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PACKS_DIR = os.path.join(CURRENT_DIR, '../packs')
README_PATH = os.path.join(CURRENT_DIR, '../README.md')
BASE_URL = 'https://github.com/StackStorm/st2contrib/tree/master/packs'
def get_pack_list():
packs = os.listdir(PACKS_DIR)
packs = sorted(packs)
return packs
def get_pack_metadata(pack):
metadata_path = os.path.join(PACKS_DIR, pack, 'pack.yaml')
with open(metadata_path, 'r') as fp:
content = fp.read()
metadata = yaml.safe_load(content)
return metadata
def generate_pack_list_table(packs):
lines = []
lines.append('Name | Description | Author | Latest Version')
lines.append('---- | ----------- | ------ | -------------- ')
for pack_name, metadata in packs:
values = copy.deepcopy(metadata)
values['base_url'] = BASE_URL
line = '| [%(name)s](%(base_url)s/%(name)s) | %(description)s | %(author)s | %(version)s' % (values)
lines.append(line)
result = '\n'.join(lines)
return result
def get_updated_readme(table):
with open(README_PATH, 'r') as fp:
current_readme = fp.read()
head = current_readme.split('## Available Packs\n\n')[0]
tail = current_readme.split('## License, and Contributors Agreement')[1]
replacement = '## Available Packs\n\n'
replacement += table + '\n\n'
replacement += '## License, and Contributors Agreement'
updated_readme = head + replacement + tail
return updated_readme
def main(dry_run):
packs = get_pack_list()
packs_with_metadata = []
for pack in packs:
try:
metadata = get_pack_metadata(pack=pack)
except IOError:
continue
packs_with_metadata.append((pack, metadata))
table = generate_pack_list_table(packs=packs_with_metadata)
updated_readme = get_updated_readme(table=table)
if dry_run:
print(updated_readme)
else:
with open(README_PATH, 'w') as fp:
fp.write(updated_readme)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dry-run', help='Print the new readme to stdout',
action='store_true', default=False)
args = parser.parse_args()
main(dry_run=args.dry_run)
|
<commit_before><commit_msg>Add script for updating readme with auto-generated pack list.<commit_after>#!/usr/bin/env python
"""
Script which updates README.md with a list of all the available packs.
"""
import os
import copy
import argparse
import yaml
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
PACKS_DIR = os.path.join(CURRENT_DIR, '../packs')
README_PATH = os.path.join(CURRENT_DIR, '../README.md')
BASE_URL = 'https://github.com/StackStorm/st2contrib/tree/master/packs'
def get_pack_list():
packs = os.listdir(PACKS_DIR)
packs = sorted(packs)
return packs
def get_pack_metadata(pack):
metadata_path = os.path.join(PACKS_DIR, pack, 'pack.yaml')
with open(metadata_path, 'r') as fp:
content = fp.read()
metadata = yaml.safe_load(content)
return metadata
def generate_pack_list_table(packs):
lines = []
lines.append('Name | Description | Author | Latest Version')
lines.append('---- | ----------- | ------ | -------------- ')
for pack_name, metadata in packs:
values = copy.deepcopy(metadata)
values['base_url'] = BASE_URL
line = '| [%(name)s](%(base_url)s/%(name)s) | %(description)s | %(author)s | %(version)s' % (values)
lines.append(line)
result = '\n'.join(lines)
return result
def get_updated_readme(table):
with open(README_PATH, 'r') as fp:
current_readme = fp.read()
head = current_readme.split('## Available Packs\n\n')[0]
tail = current_readme.split('## License, and Contributors Agreement')[1]
replacement = '## Available Packs\n\n'
replacement += table + '\n\n'
replacement += '## License, and Contributors Agreement'
updated_readme = head + replacement + tail
return updated_readme
def main(dry_run):
packs = get_pack_list()
packs_with_metadata = []
for pack in packs:
try:
metadata = get_pack_metadata(pack=pack)
except IOError:
continue
packs_with_metadata.append((pack, metadata))
table = generate_pack_list_table(packs=packs_with_metadata)
updated_readme = get_updated_readme(table=table)
if dry_run:
print(updated_readme)
else:
with open(README_PATH, 'w') as fp:
fp.write(updated_readme)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='')
parser.add_argument('--dry-run', help='Print the new readme to stdout',
action='store_true', default=False)
args = parser.parse_args()
main(dry_run=args.dry_run)
|
|
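Editorial illustration of the table-building step in the script above, with made-up pack metadata; each (name, metadata) pair becomes one markdown row that links back to the pack directory under BASE_URL.

packs = [
    ('irc', {'name': 'irc', 'description': 'IRC bot integration',
             'author': 'StackStorm', 'version': '0.1.0'}),
]
# generate_pack_list_table(packs) then returns (roughly):
# Name | Description | Author | Latest Version
# ---- | ----------- | ------ | --------------
# | [irc](https://github.com/StackStorm/st2contrib/tree/master/packs/irc) | IRC bot integration | StackStorm | 0.1.0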
b84b04178d1ad16ae8c515c002427244183de0ca
|
photutils/detection/base.py
|
photutils/detection/base.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the base classes for detecting stars in an
astronomical image. Each star-finding class should define a method
called ``find_stars`` that finds stars in an image.
"""
import abc
__all__ = ['StarFinderBase']
class StarFinderBase(metaclass=abc.ABCMeta):
"""
Abstract base class for star finders.
"""
def __call__(self, data, mask=None):
return self.find_stars(data, mask=mask)
@abc.abstractmethod
def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars. If no stars are found then `None` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
|
Put StarFinderBase in a separate module
|
Put StarFinderBase in a separate module
|
Python
|
bsd-3-clause
|
larrybradley/photutils,astropy/photutils
|
Put StarFinderBase in a separate module
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the base classes for detecting stars in an
astronomical image. Each star-finding class should define a method
called ``find_stars`` that finds stars in an image.
"""
import abc
__all__ = ['StarFinderBase']
class StarFinderBase(metaclass=abc.ABCMeta):
"""
Abstract base class for star finders.
"""
def __call__(self, data, mask=None):
return self.find_stars(data, mask=mask)
@abc.abstractmethod
def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars. If no stars are found then `None` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
|
<commit_before><commit_msg>Put StarFinderBase in a separate module<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the base classes for detecting stars in an
astronomical image. Each star-finding class should define a method
called ``find_stars`` that finds stars in an image.
"""
import abc
__all__ = ['StarFinderBase']
class StarFinderBase(metaclass=abc.ABCMeta):
"""
Abstract base class for star finders.
"""
def __call__(self, data, mask=None):
return self.find_stars(data, mask=mask)
@abc.abstractmethod
def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars. If no stars are found then `None` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
|
Put StarFinderBase in a separate module# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the base classes for detecting stars in an
astronomical image. Each star-finding class should define a method
called ``find_stars`` that finds stars in an image.
"""
import abc
__all__ = ['StarFinderBase']
class StarFinderBase(metaclass=abc.ABCMeta):
"""
Abstract base class for star finders.
"""
def __call__(self, data, mask=None):
return self.find_stars(data, mask=mask)
@abc.abstractmethod
def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars. If no stars are found then `None` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
|
<commit_before><commit_msg>Put StarFinderBase in a separate module<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module implements the base classes for detecting stars in an
astronomical image. Each star-finding class should define a method
called ``find_stars`` that finds stars in an image.
"""
import abc
__all__ = ['StarFinderBase']
class StarFinderBase(metaclass=abc.ABCMeta):
"""
Abstract base class for star finders.
"""
def __call__(self, data, mask=None):
return self.find_stars(data, mask=mask)
@abc.abstractmethod
def find_stars(self, data, mask=None):
"""
Find stars in an astronomical image.
Parameters
----------
data : 2D array_like
The 2D image array.
mask : 2D bool array, optional
A boolean mask with the same shape as ``data``, where a
`True` value indicates the corresponding element of ``data``
is masked. Masked pixels are ignored when searching for
stars.
Returns
-------
table : `~astropy.table.Table` or `None`
A table of found stars. If no stars are found then `None` is
returned.
"""
raise NotImplementedError('Needs to be implemented in a subclass.')
|
|
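Editorial sketch of how a concrete finder would plug into the base class above: only find_stars needs to be implemented, and __call__ then dispatches to it. The subclass and its single-peak logic are invented, and the sketch assumes StarFinderBase is importable from photutils.detection.

import numpy as np
from astropy.table import Table
from photutils.detection import StarFinderBase

class BrightestPixelFinder(StarFinderBase):
    # Toy finder: report the single brightest unmasked pixel.
    def find_stars(self, data, mask=None):
        data = np.asanyarray(data, dtype=float)
        if mask is not None:
            data = np.where(mask, -np.inf, data)
        y, x = np.unravel_index(np.argmax(data), data.shape)
        return Table({'xcentroid': [x], 'ycentroid': [y]})

finder = BrightestPixelFinder()
# sources = finder(image)   # __call__ forwards to find_stars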
6500a93f4b13f6c51be88288542aabdc0dea954a
|
table_loader.py
|
table_loader.py
|
import csv
def load_table(filepath, headers=False):
''' Return a dict representing a roll table loaded from filepath.
Loads a roll table from the CSV file at filepath into a dict whose keys
are tuples specifying the range of rolls (min, max) associated with the
event specified in that key's value (a string describing the event).
If headers is True, then it is assumed that the first row of the file
contains some descriptive headers and the row is ignored. It defaults
to False.
The first column of the CSV should be the numbers or ranges of numbers
to roll in order to 'bring about' the associated event in the same row
of the second column. Ranges should be specified with dashes e.g.
a roll of 1 to 10 inclusive would be written as '1-10'. None of the
intervals should overlap.'''
table = {}
with open(filepath, newline='') as table_file:
table_reader = csv.reader(table_file)
for row in table_reader:
if headers and table_reader.line_num == 1:
# Ignore the first line if headers is True
continue
roll = row[0]
event = row[1]
if row[0].find("-") != -1:
# A range of rolls has been specified for this table item.
min_roll = int(roll[:roll.find("-")])
max_roll = int(roll[roll.find("-")+1:])
table[(min_roll, max_roll)] = event
else:
# A single roll has been specified for this table item.
roll_num = int(roll)
table[(roll_num, roll_num)] = event
return table
|
Add function to load CSV roll tables into dicts
|
Add function to load CSV roll tables into dicts
Specify the format for roll table CSV files. Dashes are used to
specify ranges for consistency with tables printed in the core
rulebooks. Headers are optional simply to give flexibility.
Range tuples are used in the dict even for single roll entries
in order to make the autoroller easier to write.
The reader function is left in its own file for now to
compartmentalise the codebase.
|
Python
|
mit
|
whonut/Random-Table-Roller,whonut/Random-Table-Roller,whonut/Random-Table-Roller
|
Add function to load CSV roll tables into dicts
Specify the format for roll table CSV files. Dashes are used to
specify ranges for consistency with tables printed in the core
rulebooks. Headers are optional simply to give flexibility.
Range tuples are used in the dict even for single roll entries
in order to make the autoroller easier to write.
The reader function is left in its own file for now to
compartmentalise the codebase.
|
import csv
def load_table(filepath, headers=False):
''' Return a dict representing a roll table loaded from filepath.
Loads a roll table from the CSV file at filepath into a dict whose keys
are tuples specifying the range of rolls (min, max) associated with the
event specified in that key's value (a string describing the event).
If headers is True, then it is assumed that the first row of the file
contains some descriptive headers and the row is ignored. It defaults
to False.
The first column of the CSV should be the numbers or ranges of numbers
to roll in order to 'bring about' the associated event in the same row
of the second column. Ranges should be specified with dashes e.g.
a roll of 1 to 10 inclusive would be written as '1-10'. None of the
intervals should overlap.'''
table = {}
with open(filepath, newline='') as table_file:
table_reader = csv.reader(table_file)
for row in table_reader:
if headers and table_reader.line_num == 1:
# Ignore the first line if headers is True
continue
roll = row[0]
event = row[1]
if row[0].find("-") != -1:
# A range of rolls has been specified for this table item.
min_roll = int(roll[:roll.find("-")])
max_roll = int(roll[roll.find("-")+1:])
table[(min_roll, max_roll)] = event
else:
# A single roll has been specified for this table item.
roll_num = int(roll)
table[(roll_num, roll_num)] = event
return table
|
<commit_before><commit_msg>Add function to load CSV roll tables into dicts
Specify the format for roll table CSV files. Dashes are used to
specify ranges for consistency with tables printed in the core
rulebooks. Headers are optional simply to give flexibility.
Range tuples are used in the dict even for single roll entries
in order to make the autoroller easier to write.
The reader function is left in its own file for now to
compartmentalise the codebase.<commit_after>
|
import csv
def load_table(filepath, headers=False):
''' Return a dict representing a roll table loaded from filepath.
Loads a roll table from the CSV file at filepath into a dict whose keys
are tuples specifying the range of rolls (min, max) associated with the
event specified in that key's value (a string describing the event).
If headers is True, then it is assumed that the first row of the file
contains some descriptive headers and the row is ignored. It defaults
to False.
The first column of the CSV should be the numbers or ranges of numbers
to roll in order to 'bring about' the associated event in the same row
of the second column. Ranges should be specified with dashes e.g.
a roll of 1 to 10 inclusive would be written as '1-10'. None of the
intervals should overlap.'''
table = {}
with open(filepath, newline='') as table_file:
table_reader = csv.reader(table_file)
for row in table_reader:
if headers and table_reader.line_num == 1:
# Ignore the first line if headers is True
continue
roll = row[0]
event = row[1]
if row[0].find("-") != -1:
# A range of rolls has been specified for this table item.
min_roll = int(roll[:roll.find("-")])
max_roll = int(roll[roll.find("-")+1:])
table[(min_roll, max_roll)] = event
else:
# A single roll has been specified for this table item.
roll_num = int(roll)
table[(roll_num, roll_num)] = event
return table
|
Add function to load CSV roll tables into dicts
Specify the format for roll table CSV files. Dashes are used to
specify ranges for consistency with tables printed in the core
rulebooks. Headers are optional simply to give flexibility.
Range tuples are used in the dict even for single roll entries
in order to make the autoroller easier to write.
The reader function is left in its own file for now to
compartmentalise the codebase.import csv
def load_table(filepath, headers=False):
''' Return a dict representing a roll table loaded from filepath.
Loads a roll table from the CSV file at filepath into a dict whose keys
are tuples specifying the range of rolls (min, max) associated with the
event specified in that key's value (a string describing the event).
If headers is True, then it is assumed that the first row of the file
contains some descriptive headers and the row is ignored. It defaults
to False.
The first column of the CSV should be the numbers or ranges of numbers
to roll in order to 'bring about' the associated event in the same row
of the second column. Ranges should be specified with dashes e.g.
a roll of 1 to 10 inclusive would be written as '1-10'. None of the
intervals should overlap.'''
table = {}
with open(filepath, newline='') as table_file:
table_reader = csv.reader(table_file)
for row in table_reader:
if headers and table_reader.line_num == 1:
# Ignore the first line if headers is True
continue
roll = row[0]
event = row[1]
if row[0].find("-") != -1:
# A range of rolls has been specified for this table item.
min_roll = int(roll[:roll.find("-")])
max_roll = int(roll[roll.find("-")+1:])
table[(min_roll, max_roll)] = event
else:
# A single roll has been specified for this table item.
roll_num = int(roll)
table[(roll_num, roll_num)] = event
return table
|
<commit_before><commit_msg>Add function to load CSV roll tables into dicts
Specify the format for roll table CSV files. Dashes are used to
specify ranges for consistency with tables printed in the core
rulebooks. Headers are optional simply to give flexibility.
Range tuples are used in the dict even for single roll entries
in order to make the autoroller easier to write.
The reader function is left in its own file for now to
compartmentalise the codebase.<commit_after>import csv
def load_table(filepath, headers=False):
''' Return a dict representing a roll table loaded from filepath.
Loads a roll table from the CSV file at filepath into a dict whose keys
are tuples specifying the range of rolls (min, max) associated with the
event specified in that key's value (a string describing the event).
If headers is True, then it is assumed that the first row of the file
contains some descriptive headers and the row is ignored. It defaults
to False.
The first column of the CSV should be the numbers or ranges of numbers
to roll in order to 'bring about' the associated event in the same row
of the second column. Ranges should be specified with dashes e.g.
a roll of 1 to 10 inclusive would be written as '1-10'. None of the
intervals should overlap.'''
table = {}
with open(filepath, newline='') as table_file:
table_reader = csv.reader(table_file)
for row in table_reader:
if headers and table_reader.line_num == 1:
# Ignore the first line if headers is True
continue
roll = row[0]
event = row[1]
if row[0].find("-") != -1:
# A range of rolls has been specified for this table item.
min_roll = int(roll[:roll.find("-")])
max_roll = int(roll[roll.find("-")+1:])
table[(min_roll, max_roll)] = event
else:
# A single roll has been specified for this table item.
roll_num = int(roll)
table[(roll_num, roll_num)] = event
return table
|
|
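A minimal usage sketch for the roll-table loader above (the CSV contents, die size, and roll_on helper are illustrative assumptions, not part of the commit). Because every key is a (min, max) tuple, even for single-roll entries, an autoroller needs only one containment check per table entry:
import random
# encounters.csv (made-up contents):
#   1-10,Nothing happens
#   11-19,Wandering monster
#   20,Treasure!
table = load_table("encounters.csv")
def roll_on(table, sides=20):
    # Roll the die and return the event whose (min, max) range contains the result.
    result = random.randint(1, sides)
    for (low, high), event in table.items():
        if low <= result <= high:
            return event
print(roll_on(table))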
ecce72199a8c9f0f333715419d572444d5b9fc90
|
shade/tests/functional/test_devstack.py
|
shade/tests/functional/test_devstack.py
|
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_devstack
-------------
Throw errors if we do not actually detect the services we're supposed to.
"""
import os
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from shade.tests.functional import base
class TestDevstack(base.BaseFunctionalTestCase):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(env='MAGNUM', service='container')),
('neutron', dict(env='NEUTRON', service='network')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get('SHADE_HAS_{env}'.format(env=self.env), '0') == '1':
self.assertTrue(self.demo_cloud.has_service(self.service))
|
Add test to trap for missing services
|
Add test to trap for missing services
Recently when there was an issue with the magnum devstack plugin causing
the shade gate to not have swift, we didn't notice except through the
ansible tests. That's because we have a bunch of has_service checks in
the tests themselves to deal with different configs. Unfortunately, that
makes it easy to fail open.
Put in a test, along with changes to devstack-gate jobs, to throw errors
if services do not show up that should.
Depends-On: I2433c7bced6c8ca785634056de45ddf624031509
Change-Id: I16f477c405583b315fff24929d6c7b2ca4f2eae3
|
Python
|
apache-2.0
|
openstack/python-openstacksdk,dtroyer/python-openstacksdk,openstack/python-openstacksdk,dtroyer/python-openstacksdk,stackforge/python-openstacksdk,stackforge/python-openstacksdk,openstack-infra/shade,openstack-infra/shade
|
Add test to trap for missing services
Recently when there was an issue with the magnum devstack plugin causing
the shade gate to not have swift, we didn't notice except through the
ansible tests. That's because we have a bunch of has_service checks in
the tests themselves to deal with different configs. Unfortunately, that
makes it easy to fail open.
Put in a test, along with changes to devstack-gate jobs, to throw errors
if services do not show up that should.
Depends-On: I2433c7bced6c8ca785634056de45ddf624031509
Change-Id: I16f477c405583b315fff24929d6c7b2ca4f2eae3
|
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_devstack
-------------
Throw errors if we do not actually detect the services we're supposed to.
"""
import os
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from shade.tests.functional import base
class TestDevstack(base.BaseFunctionalTestCase):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(env='MAGNUM', service='container')),
('neutron', dict(env='NEUTRON', service='network')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get('SHADE_HAS_{env}'.format(env=self.env), '0') == '1':
self.assertTrue(self.demo_cloud.has_service(self.service))
|
<commit_before><commit_msg>Add test to trap for missing services
Recently when there was an issue with the magnum devstack plugin causing
the shade gate to not have swift, we didn't notice except through the
ansible tests. That's because we have a bunch of has_service checks in
the tests themselves to deal with different configs. Unfortunately, that
makes it easy to fail open.
Put in a test, along with changes to devstack-gate jobs, to throw errors
if services do not show up that should.
Depends-On: I2433c7bced6c8ca785634056de45ddf624031509
Change-Id: I16f477c405583b315fff24929d6c7b2ca4f2eae3<commit_after>
|
# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_devstack
-------------
Throw errors if we do not actually detect the services we're supposed to.
"""
import os
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from shade.tests.functional import base
class TestDevstack(base.BaseFunctionalTestCase):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(env='MAGNUM', service='container')),
('neutron', dict(env='NEUTRON', service='network')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get('SHADE_HAS_{env}'.format(env=self.env), '0') == '1':
self.assertTrue(self.demo_cloud.has_service(self.service))
|
Add test to trap for missing services
Recently when there was an issue with the magnum devstack plugin causing
the shade gate to not have swift, we didn't notice except through the
ansible tests. That's because we have a bunch of has_service checks in
the tests themselves to deal with different configs. Unfortunately, that
makes it easy to fail open.
Put in a test, along with changes to devstack-gate jobs, to throw errors
if services do not show up that should.
Depends-On: I2433c7bced6c8ca785634056de45ddf624031509
Change-Id: I16f477c405583b315fff24929d6c7b2ca4f2eae3# Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_devstack
-------------
Throw errors if we do not actually detect the services we're supposed to.
"""
import os
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from shade.tests.functional import base
class TestDevstack(base.BaseFunctionalTestCase):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(env='MAGNUM', service='container')),
('neutron', dict(env='NEUTRON', service='network')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get('SHADE_HAS_{env}'.format(env=self.env), '0') == '1':
self.assertTrue(self.demo_cloud.has_service(self.service))
|
<commit_before><commit_msg>Add test to trap for missing services
Recently when there was an issue with the magnum devstack plugin causing
the shade gate to not have swift, we didn't notice except through the
ansible tests. That's because we have a bunch of has_service checks in
the tests themselves to deal with different configs. Unfortunately, that
makes it easy to fail open.
Put in a test, along with changes to devstack-gate jobs, to throw errors
if services do not show up that should.
Depends-On: I2433c7bced6c8ca785634056de45ddf624031509
Change-Id: I16f477c405583b315fff24929d6c7b2ca4f2eae3<commit_after># Copyright (c) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_devstack
-------------
Throw errors if we do not actually detect the services we're supposed to.
"""
import os
from testscenarios import load_tests_apply_scenarios as load_tests # noqa
from shade.tests.functional import base
class TestDevstack(base.BaseFunctionalTestCase):
scenarios = [
('designate', dict(env='DESIGNATE', service='dns')),
('heat', dict(env='HEAT', service='orchestration')),
('magnum', dict(env='MAGNUM', service='container')),
('neutron', dict(env='NEUTRON', service='network')),
('swift', dict(env='SWIFT', service='object-store')),
]
def test_has_service(self):
if os.environ.get('SHADE_HAS_{env}'.format(env=self.env), '0') == '1':
self.assertTrue(self.demo_cloud.has_service(self.service))
|
|
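The test above only asserts when the matching flag is exported, so the gate job is expected to set one SHADE_HAS_<ENV> variable per enabled service (for example SHADE_HAS_SWIFT=1). A minimal sketch of that convention, using a stubbed cloud object rather than shade's real API:
import os
class FakeCloud(object):
    def __init__(self, services):
        self._services = set(services)
    def has_service(self, name):
        return name in self._services
cloud = FakeCloud(['network', 'object-store'])
env, service = 'SWIFT', 'object-store'
# The gate job would export SHADE_HAS_SWIFT=1 when swift is enabled.
if os.environ.get('SHADE_HAS_{env}'.format(env=env), '0') == '1':
    assert cloud.has_service(service), 'swift was promised but not detected'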
f109f24e8f10d1fd3f8940c0eb54b157aa9ed909
|
content/test/gpu/gpu_tests/pixel_expectations.py
|
content/test/gpu/gpu_tests/pixel_expectations.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.CSS3DBlueBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.WebGLGreenTriangle',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
pass
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=511580)
self.Fail('Pixel.CSS3DBlueBox', bug=511580)
self.Fail('Pixel.WebGLGreenTriangle', bug=511580)
pass
|
Mark pixel tests as failing on all platforms
|
Mark pixel tests as failing on all platforms
BUG=511580
R=kbr@chromium.org
Review URL: https://codereview.chromium.org/1245243003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#340191}
|
Python
|
bsd-3-clause
|
Just-D/chromium-1,Chilledheart/chromium,axinging/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,axinging/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,Pluto-tv/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,Chilledheart/chromium,axinging/chromium-crosswalk,axinging/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,axinging/chromium-crosswalk
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.CSS3DBlueBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.WebGLGreenTriangle',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
pass
Mark pixel tests as failing on all platforms
BUG=511580
R=kbr@chromium.org
Review URL: https://codereview.chromium.org/1245243003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#340191}
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=511580)
self.Fail('Pixel.CSS3DBlueBox', bug=511580)
self.Fail('Pixel.WebGLGreenTriangle', bug=511580)
pass
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.CSS3DBlueBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.WebGLGreenTriangle',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
pass
<commit_msg>Mark pixel tests as failing on all platforms
BUG=511580
R=kbr@chromium.org
Review URL: https://codereview.chromium.org/1245243003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#340191}<commit_after>
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=511580)
self.Fail('Pixel.CSS3DBlueBox', bug=511580)
self.Fail('Pixel.WebGLGreenTriangle', bug=511580)
pass
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.CSS3DBlueBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.WebGLGreenTriangle',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
pass
Mark pixel tests as failing on all platforms
BUG=511580
R=kbr@chromium.org
Review URL: https://codereview.chromium.org/1245243003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#340191}# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=511580)
self.Fail('Pixel.CSS3DBlueBox', bug=511580)
self.Fail('Pixel.WebGLGreenTriangle', bug=511580)
pass
|
<commit_before># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.CSS3DBlueBox',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
self.Fail('Pixel.WebGLGreenTriangle',
[ 'linux', ('nvidia', 0x104a)], bug=511580)
pass
<commit_msg>Mark pixel tests as failing on all platforms
BUG=511580
R=kbr@chromium.org
Review URL: https://codereview.chromium.org/1245243003
Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#340191}<commit_after># Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from gpu_test_expectations import GpuTestExpectations
# See the GpuTestExpectations class for documentation.
class PixelExpectations(GpuTestExpectations):
def SetExpectations(self):
# Sample Usage:
# self.Fail('Pixel.Canvas2DRedBox',
# ['mac', 'amd', ('nvidia', 0x1234)], bug=123)
self.Fail('Pixel.Canvas2DRedBox', bug=511580)
self.Fail('Pixel.CSS3DBlueBox', bug=511580)
self.Fail('Pixel.WebGLGreenTriangle', bug=511580)
pass
|
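The edit above drops the ['linux', ('nvidia', 0x104a)] qualifiers, so each Fail entry now applies on every configuration rather than a single Linux/NVIDIA bot. A generic sketch of that distinction (this is not the actual GpuTestExpectations matching code, which is not shown in this record):
def expectation_applies(conditions, config_tags):
    # An empty condition list, as in the new expectations above, matches every bot.
    if not conditions:
        return True
    # Otherwise the expectation is restricted to configurations carrying the listed tags
    # (the real matching rules live in GpuTestExpectations and are only approximated here).
    return all(tag in config_tags for tag in conditions)
mac_amd_bot = {'mac', ('amd', 0x6779)}
print(expectation_applies([], mac_amd_bot))                             # True
print(expectation_applies(['linux', ('nvidia', 0x104a)], mac_amd_bot))  # False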
2fe3b8040730e769ce4814b5e7be35ff0293d8a8
|
corehq/messaging/scheduling/tests/test_content.py
|
corehq/messaging/scheduling/tests/test_content.py
|
from corehq.apps.users.models import CommCareUser
from corehq.messaging.scheduling.models import CustomContent
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from django.test import TestCase, override_settings
from mock import patch, call
AVAILABLE_CUSTOM_SCHEDULING_CONTENT = {
'TEST': 'corehq.messaging.scheduling.tests.test_content.custom_content_handler',
}
def custom_content_handler(recipient, schedule_instance):
return ['Message 1', 'Message 2']
class TestContent(TestCase):
@classmethod
def setUpClass(cls):
super(TestContent, cls).setUpClass()
cls.user = CommCareUser(phone_numbers=['9990000000000'])
@override_settings(AVAILABLE_CUSTOM_SCHEDULING_CONTENT=AVAILABLE_CUSTOM_SCHEDULING_CONTENT)
def test_custom_content(self):
for cls in (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
):
with patch('corehq.messaging.scheduling.models.content.send_sms_for_schedule_instance') as patched:
schedule_instance = cls()
CustomContent(custom_content_id='TEST').send(self.user, schedule_instance)
patched.assert_has_calls([
call(schedule_instance, self.user, '9990000000000', 'Message 1'),
call(schedule_instance, self.user, '9990000000000', 'Message 2'),
])
|
Add generic test for custom content resolution
|
Add generic test for custom content resolution
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add generic test for custom content resolution
|
from corehq.apps.users.models import CommCareUser
from corehq.messaging.scheduling.models import CustomContent
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from django.test import TestCase, override_settings
from mock import patch, call
AVAILABLE_CUSTOM_SCHEDULING_CONTENT = {
'TEST': 'corehq.messaging.scheduling.tests.test_content.custom_content_handler',
}
def custom_content_handler(recipient, schedule_instance):
return ['Message 1', 'Message 2']
class TestContent(TestCase):
@classmethod
def setUpClass(cls):
super(TestContent, cls).setUpClass()
cls.user = CommCareUser(phone_numbers=['9990000000000'])
@override_settings(AVAILABLE_CUSTOM_SCHEDULING_CONTENT=AVAILABLE_CUSTOM_SCHEDULING_CONTENT)
def test_custom_content(self):
for cls in (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
):
with patch('corehq.messaging.scheduling.models.content.send_sms_for_schedule_instance') as patched:
schedule_instance = cls()
CustomContent(custom_content_id='TEST').send(self.user, schedule_instance)
patched.assert_has_calls([
call(schedule_instance, self.user, '9990000000000', 'Message 1'),
call(schedule_instance, self.user, '9990000000000', 'Message 2'),
])
|
<commit_before><commit_msg>Add generic test for custom content resolution<commit_after>
|
from corehq.apps.users.models import CommCareUser
from corehq.messaging.scheduling.models import CustomContent
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from django.test import TestCase, override_settings
from mock import patch, call
AVAILABLE_CUSTOM_SCHEDULING_CONTENT = {
'TEST': 'corehq.messaging.scheduling.tests.test_content.custom_content_handler',
}
def custom_content_handler(recipient, schedule_instance):
return ['Message 1', 'Message 2']
class TestContent(TestCase):
@classmethod
def setUpClass(cls):
super(TestContent, cls).setUpClass()
cls.user = CommCareUser(phone_numbers=['9990000000000'])
@override_settings(AVAILABLE_CUSTOM_SCHEDULING_CONTENT=AVAILABLE_CUSTOM_SCHEDULING_CONTENT)
def test_custom_content(self):
for cls in (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
):
with patch('corehq.messaging.scheduling.models.content.send_sms_for_schedule_instance') as patched:
schedule_instance = cls()
CustomContent(custom_content_id='TEST').send(self.user, schedule_instance)
patched.assert_has_calls([
call(schedule_instance, self.user, '9990000000000', 'Message 1'),
call(schedule_instance, self.user, '9990000000000', 'Message 2'),
])
|
Add generic test for custom content resolutionfrom corehq.apps.users.models import CommCareUser
from corehq.messaging.scheduling.models import CustomContent
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from django.test import TestCase, override_settings
from mock import patch, call
AVAILABLE_CUSTOM_SCHEDULING_CONTENT = {
'TEST': 'corehq.messaging.scheduling.tests.test_content.custom_content_handler',
}
def custom_content_handler(recipient, schedule_instance):
return ['Message 1', 'Message 2']
class TestContent(TestCase):
@classmethod
def setUpClass(cls):
super(TestContent, cls).setUpClass()
cls.user = CommCareUser(phone_numbers=['9990000000000'])
@override_settings(AVAILABLE_CUSTOM_SCHEDULING_CONTENT=AVAILABLE_CUSTOM_SCHEDULING_CONTENT)
def test_custom_content(self):
for cls in (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
):
with patch('corehq.messaging.scheduling.models.content.send_sms_for_schedule_instance') as patched:
schedule_instance = cls()
CustomContent(custom_content_id='TEST').send(self.user, schedule_instance)
patched.assert_has_calls([
call(schedule_instance, self.user, '9990000000000', 'Message 1'),
call(schedule_instance, self.user, '9990000000000', 'Message 2'),
])
|
<commit_before><commit_msg>Add generic test for custom content resolution<commit_after>from corehq.apps.users.models import CommCareUser
from corehq.messaging.scheduling.models import CustomContent
from corehq.messaging.scheduling.scheduling_partitioned.models import (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
)
from django.test import TestCase, override_settings
from mock import patch, call
AVAILABLE_CUSTOM_SCHEDULING_CONTENT = {
'TEST': 'corehq.messaging.scheduling.tests.test_content.custom_content_handler',
}
def custom_content_handler(recipient, schedule_instance):
return ['Message 1', 'Message 2']
class TestContent(TestCase):
@classmethod
def setUpClass(cls):
super(TestContent, cls).setUpClass()
cls.user = CommCareUser(phone_numbers=['9990000000000'])
@override_settings(AVAILABLE_CUSTOM_SCHEDULING_CONTENT=AVAILABLE_CUSTOM_SCHEDULING_CONTENT)
def test_custom_content(self):
for cls in (
AlertScheduleInstance,
TimedScheduleInstance,
CaseAlertScheduleInstance,
CaseTimedScheduleInstance,
):
with patch('corehq.messaging.scheduling.models.content.send_sms_for_schedule_instance') as patched:
schedule_instance = cls()
CustomContent(custom_content_id='TEST').send(self.user, schedule_instance)
patched.assert_has_calls([
call(schedule_instance, self.user, '9990000000000', 'Message 1'),
call(schedule_instance, self.user, '9990000000000', 'Message 2'),
])
|
|
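CustomContent(custom_content_id='TEST') is expected to resolve the identifier through the overridden AVAILABLE_CUSTOM_SCHEDULING_CONTENT setting and call the handler with (recipient, schedule_instance). The resolution code itself is not part of this record; the sketch below only shows the usual way such a dotted-path registry is resolved, not commcare-hq's actual implementation:
from importlib import import_module
AVAILABLE_CUSTOM_SCHEDULING_CONTENT = {
    'TEST': 'corehq.messaging.scheduling.tests.test_content.custom_content_handler',
}
def resolve_custom_content(custom_content_id):
    # Split 'package.module.attribute' into a module path and an attribute name, then import.
    dotted_path = AVAILABLE_CUSTOM_SCHEDULING_CONTENT[custom_content_id]
    module_path, func_name = dotted_path.rsplit('.', 1)
    return getattr(import_module(module_path), func_name)
# resolve_custom_content('TEST') would return custom_content_handler, whose
# ['Message 1', 'Message 2'] result is what the mocked send function receives.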
4d82d539b25a1a7f3e5e5b5c5bfa25a171757fdc
|
code/python/knub/thesis/word2vec_gaussian_lda_preprocessing.py
|
code/python/knub/thesis/word2vec_gaussian_lda_preprocessing.py
|
import argparse
import logging
import os
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Prepare model for Gaussian LDA")
parser.add_argument("--topic_model", type=str)
parser.add_argument("--embedding_model", type=str)
args = parser.parse_args()
word2vec = Word2Vec.load_word2vec_format(args.embedding_model, binary=True)
embedding_name = os.path.basename(args.embedding_model)
with open(args.topic_model + "." + embedding_name + ".gaussian-lda", "w") as output:
with open(args.topic_model + ".restricted.alphabet", "r") as f:
for line in f:
word = line.split("#")[0]
output.write(word + " ")
output.write(" ".join(map(str, word2vec[word])))
output.write("\n")
|
Add Gaussian LDA preparation script.
|
Add Gaussian LDA preparation script.
|
Python
|
apache-2.0
|
knub/master-thesis,knub/master-thesis,knub/master-thesis,knub/master-thesis
|
Add Gaussian LDA preparation script.
|
import argparse
import logging
import os
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Prepare model for Gaussian LDA")
parser.add_argument("--topic_model", type=str)
parser.add_argument("--embedding_model", type=str)
args = parser.parse_args()
word2vec = Word2Vec.load_word2vec_format(args.embedding_model, binary=True)
embedding_name = os.path.basename(args.embedding_model)
with open(args.topic_model + "." + embedding_name + ".gaussian-lda", "w") as output:
with open(args.topic_model + ".restricted.alphabet", "r") as f:
for line in f:
word = line.split("#")[0]
output.write(word + " ")
output.write(" ".join(map(str, word2vec[word])))
output.write("\n")
|
<commit_before><commit_msg>Add Gaussian LDA preparation script.<commit_after>
|
import argparse
import logging
import os
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Prepare model for Gaussian LDA")
parser.add_argument("--topic_model", type=str)
parser.add_argument("--embedding_model", type=str)
args = parser.parse_args()
word2vec = Word2Vec.load_word2vec_format(args.embedding_model, binary=True)
embedding_name = os.path.basename(args.embedding_model)
with open(args.topic_model + "." + embedding_name + ".gaussian-lda", "w") as output:
with open(args.topic_model + ".restricted.alphabet", "r") as f:
for line in f:
word = line.split("#")[0]
output.write(word + " ")
output.write(" ".join(map(str, word2vec[word])))
output.write("\n")
|
Add Gaussian LDA preparation script.import argparse
import logging
import os
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Prepare model for Gaussian LDA")
parser.add_argument("--topic_model", type=str)
parser.add_argument("--embedding_model", type=str)
args = parser.parse_args()
word2vec = Word2Vec.load_word2vec_format(args.embedding_model, binary=True)
embedding_name = os.path.basename(args.embedding_model)
with open(args.topic_model + "." + embedding_name + ".gaussian-lda", "w") as output:
with open(args.topic_model + ".restricted.alphabet", "r") as f:
for line in f:
word = line.split("#")[0]
output.write(word + " ")
output.write(" ".join(map(str, word2vec[word])))
output.write("\n")
|
<commit_before><commit_msg>Add Gaussian LDA preparation script.<commit_after>import argparse
import logging
import os
from gensim.models import Word2Vec
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
if __name__ == "__main__":
parser = argparse.ArgumentParser("Prepare model for Gaussian LDA")
parser.add_argument("--topic_model", type=str)
parser.add_argument("--embedding_model", type=str)
args = parser.parse_args()
word2vec = Word2Vec.load_word2vec_format(args.embedding_model, binary=True)
embedding_name = os.path.basename(args.embedding_model)
with open(args.topic_model + "." + embedding_name + ".gaussian-lda", "w") as output:
with open(args.topic_model + ".restricted.alphabet", "r") as f:
for line in f:
word = line.split("#")[0]
output.write(word + " ")
output.write(" ".join(map(str, word2vec[word])))
output.write("\n")
|
|
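A small sketch of the line format the script above writes: one word per line followed by its embedding components, space separated (the word and values below are made up). Since word2vec[word] is a numpy float array, each component has to be converted to a string before joining:
import numpy as np
def format_embedding_line(word, vector):
    # vector is a numpy array of floats; stringify every component before joining.
    return word + " " + " ".join(map(str, vector))
print(format_embedding_line("alphabet", np.array([0.013, -0.271, 0.094])))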
2f976fda50e0946486383b61a996a36b0f3ff9ae
|
django/setup.py
|
django/setup.py
|
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
|
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
# because pooling doesn't work with meinheld, it's necessary to create a ton of gunicorn threads (think apache pre-fork)
# to allow the OS to switch processes when waiting for socket I/O.
args.max_threads *= 8
# and go from there until the database server runs out of memory for new threads (connections)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
|
Use more gunicorn threads when pooling database connector isn't available.
|
Use more gunicorn threads when pooling database connector isn't available.
When using postgres with meinheld, the best you can do so far (as far as I know) is up the number of threads.
|
Python
|
bsd-3-clause
|
raziel057/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,denkab/FrameworkBenchmarks,doom369/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,sgml/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Verber/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,grob/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,valyala/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,grob/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,zloster/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,herloct/FrameworkBenchmarks,herloct/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,dmacd/FB-try1,F3Community/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,khellang/FrameworkBenchmarks,actframework/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,actframework/FrameworkBenchmarks,actframework/FrameworkBenchmarks,sxend/FrameworkBenchmarks,valyala/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Verber/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,torhve/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,grob/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,denkab/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,jamming/FrameworkBenchmarks,denkab/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,khellang/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,doom369/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,leafo/FrameworkBenchmarks,dmacd/FB-try1,yunspace/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,torhve/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,Verber/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,nathana1/Frame
workBenchmarks,herloct/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,zapov/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,denkab/FrameworkBenchmarks,dmacd/FB-try1,herloct/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,methane/FrameworkBenchmarks,torhve/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jamming/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,khellang/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,doom369/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,sgml/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Verber/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,denkab/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,joshk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,testn/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,zapov/FrameworkBenchmarks,methane/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Verber/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,zloster/FrameworkBenchmarks,valyala/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,grob/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,doom369/FrameworkBenchmarks,grob/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,jamming/FrameworkBenchmarks,jamming/FrameworkBenchmarks,leafo/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,actframework/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,testn/FrameworkBenchmarks,methane/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,denkab/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,zap
ov/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,dmacd/FB-try1,Eyepea/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,grob/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,dmacd/FB-try1,diablonhn/FrameworkBenchmarks,zloster/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,zloster/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,torhve/FrameworkBenchmarks,sgml/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zapov/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,zloster/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,khellang/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,methane/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,khellang/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,testn/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,dmacd/FB-try1,greg-hellings/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,khellang/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,valyala/FrameworkBenchmarks,joshk/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,leafo/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,doom369/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,actframework/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,s-ludwig
/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,herloct/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,Verber/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,methane/FrameworkBenchmarks,actframework/FrameworkBenchmarks,actframework/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,Verber/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,sxend/FrameworkBenchmarks,testn/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,herloct/FrameworkBenchmarks,doom369/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,valyala/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,khellang/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sgml/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,grob/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,grob/FrameworkBenchmarks,joshk/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,doom369/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,sxend/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,zapov/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,jamming/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,valyala/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,testn/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,zloster/
FrameworkBenchmarks,leafo/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,methane/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,herloct/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,actframework/FrameworkBenchmarks,testn/FrameworkBenchmarks,zloster/FrameworkBenchmarks,testn/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,denkab/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,sgml/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,torhve/FrameworkBenchmarks,khellang/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,sxend/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,joshk/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,joshk/FrameworkBenchmarks,zloster/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,doom369/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,sgml/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,joshk/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,actframework/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,methane/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,zapov/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,khellang/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,torhve/FrameworkBenchmarks,herloct/FrameworkBenchmarks,sxend/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,methane/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,zapov/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,actframework/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,zloster/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,Eyepea/Frame
workBenchmarks,lcp0578/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,khellang/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,testn/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,khellang/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,herloct/FrameworkBenchmarks,zloster/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,testn/FrameworkBenchmarks,grob/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,leafo/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,dmacd/FB-try1,sxend/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,testn/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,doom369/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,dmacd/FB-try1,Eyepea/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,sgml/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sxend/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,grob/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,testn/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,Verber/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,torhve/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,zloster/FrameworkBenchmarks,sgml/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,testn/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,jamming/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,leafo/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,zapov/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,methane/FrameworkBenchmarks,marko-asplund/
FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Verber/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,zapov/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,joshk/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,torhve/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,herloct/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,sxend/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,valyala/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,sgml/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,dmacd/FB-try1,youprofit/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,zapov/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,herloct/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,jamming/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zapov/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Verber/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,yunspace/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Verber/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,jamming/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,actframework/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,doom369/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,khellang/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zapov/FrameworkBenchmarks,denkab/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,doom369/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,sxend/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,grob/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,torhve/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,denkab/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Verber/FrameworkBenchmarks,testn/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,joshk/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,torhve/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,sxend/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,herloct/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,torhve/Frame
workBenchmarks,k-r-g/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Synchro/FrameworkBenchmarks,methane/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,herloct/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,sgml/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,methane/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zloster/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,sxend/FrameworkBenchmarks,zloster/FrameworkBenchmarks,jamming/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,grob/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,dmacd/FB-try1,grob/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,denkab/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,zapov/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,jetty-project/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,herloct/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,jeevatkm/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,leafo/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,khellang/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,sxend/FrameworkBenchmarks,actframework/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,joshk/FrameworkBenchmarks,Dith3r/Frame
workBenchmarks,RockinRoel/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,kostya-sh/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,sgml/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,zloster/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,jamming/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,waiteb3/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,circlespainter/FrameworkBenchmarks,dmacd/FB-try1,ashawnbandy-te-tfb/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,steveklabnik/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,raziel057/FrameworkBenchmarks,actframework/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,jamming/FrameworkBenchmarks,khellang/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,zhuochenKIDD/FrameworkBenchmarks,Eyepea/FrameworkBenchmarks,sgml/FrameworkBenchmarks,RockinRoel/FrameworkBenchmarks,methane/FrameworkBenchmarks,joshk/FrameworkBenchmarks,lcp0578/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,valyala/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,grob/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,actframework/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,valyala/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,diablonhn/FrameworkBenchmarks,Verber/FrameworkBenchmarks,zapov/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,denkab/FrameworkBenchmarks,julienschmidt/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,donovanmuller/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,joshk/FrameworkBenchmarks,herloct/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,methane/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,Rayne/FrameworkBenchmarks,sgml/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,leafo/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,saturday06/FrameworkBenchmarks,zapov/FrameworkBenchmarks,valyala/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,sagenschneider/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,actframework/FrameworkBenchmarks,mfirry/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,leafo/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,zapov/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,knewmanTE/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,actframework/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,testn/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,doom369/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,psfblair/FrameworkBenchmarks,nathana1/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,s-ludwig/FrameworkBenchmarks,sxend/FrameworkBenchmarks,martin-g/FrameworkBenchmarks,denkab/FrameworkBenchmarks,to
rhve/FrameworkBenchmarks,jamming/FrameworkBenchmarks,jebbstewart/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,joshk/FrameworkBenchmarks,dmacd/FB-try1,leafo/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,markkolich/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,leafo/FrameworkBenchmarks,doom369/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,PermeAgility/FrameworkBenchmarks,victorbriz/FrameworkBenchmarks,greg-hellings/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,youprofit/FrameworkBenchmarks,Dith3r/FrameworkBenchmarks,k-r-g/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,stefanocasazza/FrameworkBenchmarks,sanjoydesk/FrameworkBenchmarks,thousandsofthem/FrameworkBenchmarks,Verber/FrameworkBenchmarks,kbrock/FrameworkBenchmarks,denkab/FrameworkBenchmarks,fabianmurariu/FrameworkBenchmarks,greenlaw110/FrameworkBenchmarks,ashawnbandy-te-tfb/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,hperadin/FrameworkBenchmarks,seem-sky/FrameworkBenchmarks,denkab/FrameworkBenchmarks,jaguililla/FrameworkBenchmarks,ratpack/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,Ocramius/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,methane/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,hamiltont/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,alubbe/FrameworkBenchmarks,nbrady-techempower/FrameworkBenchmarks,kellabyte/FrameworkBenchmarks,zdanek/FrameworkBenchmarks,leafo/FrameworkBenchmarks,nkasvosve/FrameworkBenchmarks,sxend/FrameworkBenchmarks,sxend/FrameworkBenchmarks,MTDdk/FrameworkBenchmarks,marko-asplund/FrameworkBenchmarks,xitrum-framework/FrameworkBenchmarks,Jesterovskiy/FrameworkBenchmarks,F3Community/FrameworkBenchmarks,zane-techempower/FrameworkBenchmarks,Rydgel/FrameworkBenchmarks,sgml/FrameworkBenchmarks,doom369/FrameworkBenchmarks,alubbe/FrameworkBenchmarks
|
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
Use more gunicorn threads when pooling database connector isn't available.
When using postgres with meinheld, the best you can do so far (as far as I know) is up the number of threads.
|
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
# because pooling doesn't work with meinheld, it's necessary to create a ton of gunicorn threads (think apache pre-fork)
# to allow the OS to switch processes when waiting for socket I/O.
args.max_threads *= 8
# and go from there until the database server runs out of memory for new threads (connections)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
|
<commit_before>import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
<commit_msg>Use more gunicorn threads when pooling database connector isn't available.
When using postgres with meinheld, the best you can do so far (as far as I know) is up the number of threads.<commit_after>
|
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
# because pooling doesn't work with meinheld, it's necessary to create a ton of gunicorn threads (think apache pre-fork)
# to allow the OS to switch processes when waiting for socket I/O.
args.max_threads *= 8
# and go from there until the database server runs out of memory for new threads (connections)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
|
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
Use more gunicorn threads when pooling database connector isn't available.
When using postgres with meinheld, the best you can do so far (as far as I know) is up the number of threads.
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
# because pooling doesn't work with meinheld, it's necessary to create a ton of gunicorn threads (think apache pre-fork)
# to allow the OS to switch processes when waiting for socket I/O.
args.max_threads *= 8
# and go from there until the database server runs out of memory for new threads (connections)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
|
<commit_before>import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
<commit_msg>Use more gunicorn threads when pooling database connector isn't available.
When using postgres with meinheld, the best you can do so far (as far as I know) is up the number of threads.<commit_after>import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args):
setup_util.replace_text("django/hello/hello/settings.py", "HOST': '.*'", "HOST': '" + args.database_host + "'")
setup_util.replace_text("django/hello/hello/settings.py", "\/home\/ubuntu", home)
# because pooling doesn't work with meinheld, it's necessary to create a ton of gunicorn threads (think apache pre-fork)
# to allow the OS to switch processes when waiting for socket I/O.
args.max_threads *= 8
# and go from there until the database server runs out of memory for new threads (connections)
subprocess.Popen("gunicorn hello.wsgi:application --worker-class=\"egg:meinheld#gunicorn_worker\" -b 0.0.0.0:8080 -w " + str((args.max_threads * 2)) + " --log-level=critical", shell=True, cwd="django/hello")
return 0
def stop():
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'gunicorn' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 9)
except OSError:
pass
return 0
|
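The gunicorn record above amounts to one calculation: inflate args.max_threads because meinheld workers block on database I/O when no pooling connector is available, then double that figure for the -w flag. The sketch below is a rough, self-contained restatement of that sizing logic; the max_threads value of 8 and the bind address are illustrative assumptions, not values taken from the benchmark harness.

def build_gunicorn_command(max_threads, bind="0.0.0.0:8080"):
    # Mirrors the setup script: scale the thread budget by 8, then pass
    # twice that number of workers to gunicorn.
    scaled = max_threads * 8
    workers = scaled * 2
    return [
        "gunicorn", "hello.wsgi:application",
        "--worker-class=egg:meinheld#gunicorn_worker",
        "-b", bind,
        "-w", str(workers),
        "--log-level=critical",
    ]

# A hypothetical max_threads of 8 yields 128 workers.
print(" ".join(build_gunicorn_command(8)))

Passing the command as a list to subprocess.Popen would also avoid the shell=True call used in the original script.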
77b248c18bf1edfe6d36fb8c1ca2fc96258dacb2
|
tests/test_oauth2/test_refresh.py
|
tests/test_oauth2/test_refresh.py
|
# coding: utf-8
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Token
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='client', client_secret='secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
def test_get_token(self):
user = User.query.first()
token = Token(
user_id=user.id,
client_id=self.oauth_client.client_id,
access_token='foo',
refresh_token='bar',
expires_in=1000,
)
db.session.add(token)
db.session.commit()
rv = self.client.post('/oauth/token', data={
'grant_type': 'refresh_token',
'refresh_token': token.refresh_token,
'client_id': self.oauth_client.client_id,
'client_secret': self.oauth_client.client_secret,
})
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
|
Add test for refresh token
|
Add test for refresh token
|
Python
|
bsd-3-clause
|
stianpr/flask-oauthlib,stianpr/flask-oauthlib,CoreyHyllested/flask-oauthlib,cogniteev/flask-oauthlib,lepture/flask-oauthlib,landler/flask-oauthlib,tonyseek/flask-oauthlib,landler/flask-oauthlib,brightforme/flask-oauthlib,cogniteev/flask-oauthlib,PyBossa/flask-oauthlib,Ryan-K/flask-oauthlib,icook/flask-oauthlib,lepture/flask-oauthlib,adambard/flask-oauthlib,auerj/flask-oauthlib,PyBossa/flask-oauthlib,brightforme/flask-oauthlib,adambard/flask-oauthlib,tonyseek/flask-oauthlib,Ryan-K/flask-oauthlib,auerj/flask-oauthlib,CoreyHyllested/flask-oauthlib,huxuan/flask-oauthlib,icook/flask-oauthlib,CommonsCloud/CommonsCloud-FlaskOAuthlib,CommonsCloud/CommonsCloud-FlaskOAuthlib,huxuan/flask-oauthlib
|
Add test for refresh token
|
# coding: utf-8
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Token
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='client', client_secret='secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
def test_get_token(self):
user = User.query.first()
token = Token(
user_id=user.id,
client_id=self.oauth_client.client_id,
access_token='foo',
refresh_token='bar',
expires_in=1000,
)
db.session.add(token)
db.session.commit()
rv = self.client.post('/oauth/token', data={
'grant_type': 'refresh_token',
'refresh_token': token.refresh_token,
'client_id': self.oauth_client.client_id,
'client_secret': self.oauth_client.client_secret,
})
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
|
<commit_before><commit_msg>Add test for refresh token<commit_after>
|
# coding: utf-8
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Token
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='client', client_secret='secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
def test_get_token(self):
user = User.query.first()
token = Token(
user_id=user.id,
client_id=self.oauth_client.client_id,
access_token='foo',
refresh_token='bar',
expires_in=1000,
)
db.session.add(token)
db.session.commit()
rv = self.client.post('/oauth/token', data={
'grant_type': 'refresh_token',
'refresh_token': token.refresh_token,
'client_id': self.oauth_client.client_id,
'client_secret': self.oauth_client.client_secret,
})
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
|
Add test for refresh token
# coding: utf-8
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Token
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='client', client_secret='secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
def test_get_token(self):
user = User.query.first()
token = Token(
user_id=user.id,
client_id=self.oauth_client.client_id,
access_token='foo',
refresh_token='bar',
expires_in=1000,
)
db.session.add(token)
db.session.commit()
rv = self.client.post('/oauth/token', data={
'grant_type': 'refresh_token',
'refresh_token': token.refresh_token,
'client_id': self.oauth_client.client_id,
'client_secret': self.oauth_client.client_secret,
})
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
|
<commit_before><commit_msg>Add test for refresh token<commit_after># coding: utf-8
from .base import TestCase
from .base import create_server, sqlalchemy_provider, cache_provider
from .base import db, Client, User, Token
class TestDefaultProvider(TestCase):
def create_server(self):
create_server(self.app)
def prepare_data(self):
self.create_server()
oauth_client = Client(
name='ios', client_id='client', client_secret='secret',
_redirect_uris='http://localhost/authorized',
)
db.session.add(User(username='foo'))
db.session.add(oauth_client)
db.session.commit()
self.oauth_client = oauth_client
def test_get_token(self):
user = User.query.first()
token = Token(
user_id=user.id,
client_id=self.oauth_client.client_id,
access_token='foo',
refresh_token='bar',
expires_in=1000,
)
db.session.add(token)
db.session.commit()
rv = self.client.post('/oauth/token', data={
'grant_type': 'refresh_token',
'refresh_token': token.refresh_token,
'client_id': self.oauth_client.client_id,
'client_secret': self.oauth_client.client_secret,
})
assert b'access_token' in rv.data
class TestSQLAlchemyProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, sqlalchemy_provider(self.app))
class TestCacheProvider(TestDefaultProvider):
def create_server(self):
create_server(self.app, cache_provider(self.app))
|
|
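The refresh-token test above posts the stored refresh token back to /oauth/token and expects a fresh access token in the response. As a rough illustration of the same grant from a standalone client, assuming a hypothetical server at http://localhost:5000 and the requests library:

import requests

def refresh_access_token(token_url, client_id, client_secret, refresh_token):
    # Standard OAuth2 refresh_token grant: exchange a refresh token
    # for a new access token.
    response = requests.post(token_url, data={
        "grant_type": "refresh_token",
        "refresh_token": refresh_token,
        "client_id": client_id,
        "client_secret": client_secret,
    })
    response.raise_for_status()
    return response.json()

# Hypothetical usage mirroring the fixture values in the test above.
token = refresh_access_token("http://localhost:5000/oauth/token", "client", "secret", "bar")
print(token.get("access_token"))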
0a0a34bc1e6e09b85a3e1739679a7b76dc2672c4
|
examples/chart_maker/my_chart.py
|
examples/chart_maker/my_chart.py
|
from seleniumbase import BaseCase
class MyChartMakerClass(BaseCase):
def test_chart_maker(self):
self.create_pie_chart(title="Automated Tests")
self.add_data_point("Passed", 7, color="#95d96f")
self.add_data_point("Untested", 2, color="#eaeaea")
self.add_data_point("Failed", 1, color="#f1888f")
self.create_presentation()
self.add_slide(self.extract_chart())
self.begin_presentation()
|
Add an example of SeleniumBase Chart Maker
|
Add an example of SeleniumBase Chart Maker
|
Python
|
mit
|
mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase
|
Add an example of SeleniumBase Chart Maker
|
from seleniumbase import BaseCase
class MyChartMakerClass(BaseCase):
def test_chart_maker(self):
self.create_pie_chart(title="Automated Tests")
self.add_data_point("Passed", 7, color="#95d96f")
self.add_data_point("Untested", 2, color="#eaeaea")
self.add_data_point("Failed", 1, color="#f1888f")
self.create_presentation()
self.add_slide(self.extract_chart())
self.begin_presentation()
|
<commit_before><commit_msg>Add an example of SeleniumBase Chart Maker<commit_after>
|
from seleniumbase import BaseCase
class MyChartMakerClass(BaseCase):
def test_chart_maker(self):
self.create_pie_chart(title="Automated Tests")
self.add_data_point("Passed", 7, color="#95d96f")
self.add_data_point("Untested", 2, color="#eaeaea")
self.add_data_point("Failed", 1, color="#f1888f")
self.create_presentation()
self.add_slide(self.extract_chart())
self.begin_presentation()
|
Add an example of SeleniumBase Chart Maker
from seleniumbase import BaseCase
class MyChartMakerClass(BaseCase):
def test_chart_maker(self):
self.create_pie_chart(title="Automated Tests")
self.add_data_point("Passed", 7, color="#95d96f")
self.add_data_point("Untested", 2, color="#eaeaea")
self.add_data_point("Failed", 1, color="#f1888f")
self.create_presentation()
self.add_slide(self.extract_chart())
self.begin_presentation()
|
<commit_before><commit_msg>Add an example of SeleniumBase Chart Maker<commit_after>from seleniumbase import BaseCase
class MyChartMakerClass(BaseCase):
def test_chart_maker(self):
self.create_pie_chart(title="Automated Tests")
self.add_data_point("Passed", 7, color="#95d96f")
self.add_data_point("Untested", 2, color="#eaeaea")
self.add_data_point("Failed", 1, color="#f1888f")
self.create_presentation()
self.add_slide(self.extract_chart())
self.begin_presentation()
|
|
3b1e38785505e4f472e7cb0bce487b7d31efbf1d
|
cyhdfs3/tests/test_file.py
|
cyhdfs3/tests/test_file.py
|
import inspect
import posixpath
from utils import *
def test_block_locations(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
data = b'a' * 2 * 2 ** 20
data += b'b' * 2 * 2 ** 19
with hdfs.open(fname, 'w', block_size=1 * 2 ** 20) as f:
f.write(data)
blocks = hdfs.get_block_locations(fname)
assert len(blocks) == 3
assert blocks[0].length == 1 * 2 ** 20
assert blocks[1].length == 1 * 2 ** 20
assert blocks[2].length == 2 * 2 ** 19
def test_path_info_file(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
replication = 1
block_size = 1 * 2 ** 20
data = b'a' * 2 * 2 ** 20
with hdfs.open(fname, 'w', block_size=block_size, replication=replication) as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'f'
assert fileinfo.name == fname
assert fileinfo.size == len(data)
assert fileinfo.block_size == block_size
assert fileinfo.replication == replication
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
def test_path_info_dir(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
hdfs.create_dir(fname)
n = 5
data = b'a' * 2 * 2 ** 20
for i in range(n):
tfname = posixpath.join(fname, str(i))
with hdfs.open(tfname, 'w') as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'd'
assert fileinfo.name == fname
assert fileinfo.size == 0
assert fileinfo.replication == 0
assert fileinfo.replication == 0
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
|
Add tests for file blocks and file info
|
Add tests for file blocks and file info
|
Python
|
apache-2.0
|
danielfrg/cyhdfs3,danielfrg/libhdfs3.py,danielfrg/libhdfs3.py,danielfrg/cyhdfs3
|
Add tests for file blocks and file info
|
import inspect
import posixpath
from utils import *
def test_block_locations(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
data = b'a' * 2 * 2 ** 20
data += b'b' * 2 * 2 ** 19
with hdfs.open(fname, 'w', block_size=1 * 2 ** 20) as f:
f.write(data)
blocks = hdfs.get_block_locations(fname)
assert len(blocks) == 3
assert blocks[0].length == 1 * 2 ** 20
assert blocks[1].length == 1 * 2 ** 20
assert blocks[2].length == 2 * 2 ** 19
def test_path_info_file(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
replication = 1
block_size = 1 * 2 ** 20
data = b'a' * 2 * 2 ** 20
with hdfs.open(fname, 'w', block_size=block_size, replication=replication) as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'f'
assert fileinfo.name == fname
assert fileinfo.size == len(data)
assert fileinfo.block_size == block_size
assert fileinfo.replication == replication
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
def test_path_info_dir(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
hdfs.create_dir(fname)
n = 5
data = b'a' * 2 * 2 ** 20
for i in range(n):
tfname = posixpath.join(fname, str(i))
with hdfs.open(tfname, 'w') as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'd'
assert fileinfo.name == fname
assert fileinfo.size == 0
assert fileinfo.replication == 0
assert fileinfo.replication == 0
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
|
<commit_before><commit_msg>Add tests for file blocks and file info<commit_after>
|
import inspect
import posixpath
from utils import *
def test_block_locations(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
data = b'a' * 2 * 2 ** 20
data += b'b' * 2 * 2 ** 19
with hdfs.open(fname, 'w', block_size=1 * 2 ** 20) as f:
f.write(data)
blocks = hdfs.get_block_locations(fname)
assert len(blocks) == 3
assert blocks[0].length == 1 * 2 ** 20
assert blocks[1].length == 1 * 2 ** 20
assert blocks[2].length == 2 * 2 ** 19
def test_path_info_file(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
replication = 1
block_size = 1 * 2 ** 20
data = b'a' * 2 * 2 ** 20
with hdfs.open(fname, 'w', block_size=block_size, replication=replication) as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'f'
assert fileinfo.name == fname
assert fileinfo.size == len(data)
assert fileinfo.block_size == block_size
assert fileinfo.replication == replication
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
def test_path_info_dir(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
hdfs.create_dir(fname)
n = 5
data = b'a' * 2 * 2 ** 20
for i in range(n):
tfname = posixpath.join(fname, str(i))
with hdfs.open(tfname, 'w') as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'd'
assert fileinfo.name == fname
assert fileinfo.size == 0
assert fileinfo.replication == 0
assert fileinfo.replication == 0
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
|
Add tests for file blocks and file info
import inspect
import posixpath
from utils import *
def test_block_locations(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
data = b'a' * 2 * 2 ** 20
data += b'b' * 2 * 2 ** 19
with hdfs.open(fname, 'w', block_size=1 * 2 ** 20) as f:
f.write(data)
blocks = hdfs.get_block_locations(fname)
assert len(blocks) == 3
assert blocks[0].length == 1 * 2 ** 20
assert blocks[1].length == 1 * 2 ** 20
assert blocks[2].length == 2 * 2 ** 19
def test_path_info_file(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
replication = 1
block_size = 1 * 2 ** 20
data = b'a' * 2 * 2 ** 20
with hdfs.open(fname, 'w', block_size=block_size, replication=replication) as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'f'
assert fileinfo.name == fname
assert fileinfo.size == len(data)
assert fileinfo.block_size == block_size
assert fileinfo.replication == replication
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
def test_path_info_dir(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
hdfs.create_dir(fname)
n = 5
data = b'a' * 2 * 2 ** 20
for i in range(n):
tfname = posixpath.join(fname, str(i))
with hdfs.open(tfname, 'w') as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'd'
assert fileinfo.name == fname
assert fileinfo.size == 0
assert fileinfo.replication == 0
assert fileinfo.replication == 0
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
|
<commit_before><commit_msg>Add tests for file blocks and file info<commit_after>import inspect
import posixpath
from utils import *
def test_block_locations(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
data = b'a' * 2 * 2 ** 20
data += b'b' * 2 * 2 ** 19
with hdfs.open(fname, 'w', block_size=1 * 2 ** 20) as f:
f.write(data)
blocks = hdfs.get_block_locations(fname)
assert len(blocks) == 3
assert blocks[0].length == 1 * 2 ** 20
assert blocks[1].length == 1 * 2 ** 20
assert blocks[2].length == 2 * 2 ** 19
def test_path_info_file(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
replication = 1
block_size = 1 * 2 ** 20
data = b'a' * 2 * 2 ** 20
with hdfs.open(fname, 'w', block_size=block_size, replication=replication) as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'f'
assert fileinfo.name == fname
assert fileinfo.size == len(data)
assert fileinfo.block_size == block_size
assert fileinfo.replication == replication
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
def test_path_info_dir(hdfs):
testname = inspect.stack()[0][3]
fname = posixpath.join(TEST_DIR, testname)
hdfs.create_dir(fname)
n = 5
data = b'a' * 2 * 2 ** 20
for i in range(n):
tfname = posixpath.join(fname, str(i))
with hdfs.open(tfname, 'w') as f:
f.write(data)
fileinfo = hdfs.path_info(fname)
assert fileinfo.kind == 'd'
assert fileinfo.name == fname
assert fileinfo.size == 0
assert fileinfo.replication == 0
assert fileinfo.replication == 0
assert fileinfo.owner == 'root'
assert fileinfo.group == 'supergroup'
|
|
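The block-location assertions in the record above are plain arithmetic: 2 MiB of 'a' plus 1 MiB of 'b' written against a 1 MiB block size splits into exactly three 1 MiB blocks. A small self-contained check of that reasoning, independent of any HDFS cluster:

import math

def expected_block_lengths(total_bytes, block_size):
    # Number of HDFS blocks and the length of each for a fixed block size.
    count = math.ceil(total_bytes / block_size)
    return [min(block_size, total_bytes - i * block_size) for i in range(count)]

data_len = 2 * 2 ** 20 + 2 * 2 ** 19          # 2 MiB of 'a' + 1 MiB of 'b'
lengths = expected_block_lengths(data_len, 1 * 2 ** 20)

assert len(lengths) == 3
assert lengths == [2 ** 20, 2 ** 20, 2 * 2 ** 19]
print(lengths)                                 # [1048576, 1048576, 1048576]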
b948968dc616b671b2b426ead5768e60e86a38c1
|
cumulusci/tasks/metadata/modify.py
|
cumulusci/tasks/metadata/modify.py
|
import glob
import os
import lxml.etree as ET
from xml.sax.saxutils import escape
from cumulusci.core.tasks import BaseTask
class RemoveElementsXPath(BaseTask):
task_options = {
'elements': {
'description': 'A list of dictionaries containing path and xpath keys. The path key is a file path that supports wildcards and xpath is the xpath for the elements to remove. Multiple dictionaries can be passed in the list to run multiple removal queries in the same task. Metadata elements in the xpath need to be prefixed with ns:, for example: ./ns:Layout/ns:relatedLists',
'required': True,
},
'chdir': {
'description': 'Change the current directory before running the replace',
}
}
def _run_task(self):
cwd = os.getcwd()
chdir = self.options.get('chdir')
if chdir:
self.logger.info('Changing directory to {}'.format(chdir))
os.chdir(chdir)
for element in self.options['elements']:
self._process_element(element)
if chdir:
os.chdir(cwd)
def _process_element(self, element):
self.logger.info(
'Removing elements matching {xpath} from {path}'.format(
**element
)
)
for f in glob.glob(element['path']):
with open(f, 'rw') as fp:
orig = fp.read()
fp.seek(0)
root = ET.parse(open(f))
res = root.findall(element['xpath'].replace('ns:','{http://soap.sforce.com/2006/04/metadata}'))
for element in res:
element.getparent().remove(element)
processed = '{}\n{}\n'.format(
'<?xml version="1.0" encoding="UTF-8"?>',
ET.tostring(root),
)
if orig != processed:
self.logger.info('Modified {}'.format(f))
fp = open(f, 'w')
fp.write(processed)
|
Add new task class RemoveElementsXPath for removing XML elements using XPath selectors
|
Add new task class RemoveElementsXPath for removing XML elements using
XPath selectors
|
Python
|
bsd-3-clause
|
e02d96ec16/CumulusCI,e02d96ec16/CumulusCI,SalesforceFoundation/CumulusCI,SalesforceFoundation/CumulusCI
|
Add new task class RemoveElementsXPath for removing XML elements using
XPath selectors
|
import glob
import os
import lxml.etree as ET
from xml.sax.saxutils import escape
from cumulusci.core.tasks import BaseTask
class RemoveElementsXPath(BaseTask):
task_options = {
'elements': {
'description': 'A list of dictionaries containing path and xpath keys. The path key is a file path that supports wildcards and xpath is the xpath for the elements to remove. Multiple dictionaries can be passed in the list to run multiple removal queries in the same task. Metadata elements in the xpath need to be prefixed with ns:, for example: ./ns:Layout/ns:relatedLists',
'required': True,
},
'chdir': {
'description': 'Change the current directory before running the replace',
}
}
def _run_task(self):
cwd = os.getcwd()
chdir = self.options.get('chdir')
if chdir:
self.logger.info('Changing directory to {}'.format(chdir))
os.chdir(chdir)
for element in self.options['elements']:
self._process_element(element)
if chdir:
os.chdir(cwd)
def _process_element(self, element):
self.logger.info(
'Removing elements matching {xpath} from {path}'.format(
**element
)
)
for f in glob.glob(element['path']):
with open(f, 'rw') as fp:
orig = fp.read()
fp.seek(0)
root = ET.parse(open(f))
res = root.findall(element['xpath'].replace('ns:','{http://soap.sforce.com/2006/04/metadata}'))
for element in res:
element.getparent().remove(element)
processed = '{}\n{}\n'.format(
'<?xml version="1.0" encoding="UTF-8"?>',
ET.tostring(root),
)
if orig != processed:
self.logger.info('Modified {}'.format(f))
fp = open(f, 'w')
fp.write(processed)
|
<commit_before><commit_msg>Add new task class RemoveElementsXPath for removing XML elements using
XPath selectors<commit_after>
|
import glob
import os
import lxml.etree as ET
from xml.sax.saxutils import escape
from cumulusci.core.tasks import BaseTask
class RemoveElementsXPath(BaseTask):
task_options = {
'elements': {
'description': 'A list of dictionaries containing path and xpath keys. The path key is a file path that supports wildcards and xpath is the xpath for the elements to remove. Multiple dictionaries can be passed in the list to run multiple removal queries in the same task. Metadata elements in the xpath need to be prefixed with ns:, for example: ./ns:Layout/ns:relatedLists',
'required': True,
},
'chdir': {
'description': 'Change the current directory before running the replace',
}
}
def _run_task(self):
cwd = os.getcwd()
chdir = self.options.get('chdir')
if chdir:
self.logger.info('Changing directory to {}'.format(chdir))
os.chdir(chdir)
for element in self.options['elements']:
self._process_element(element)
if chdir:
os.chdir(cwd)
def _process_element(self, element):
self.logger.info(
'Removing elements matching {xpath} from {path}'.format(
**element
)
)
for f in glob.glob(element['path']):
with open(f, 'rw') as fp:
orig = fp.read()
fp.seek(0)
root = ET.parse(open(f))
res = root.findall(element['xpath'].replace('ns:','{http://soap.sforce.com/2006/04/metadata}'))
for element in res:
element.getparent().remove(element)
processed = '{}\n{}\n'.format(
'<?xml version="1.0" encoding="UTF-8"?>',
ET.tostring(root),
)
if orig != processed:
self.logger.info('Modified {}'.format(f))
fp = open(f, 'w')
fp.write(processed)
|
Add new task class RemoveElementsXPath for removing XML elements using
XPath selectors
import glob
import os
import lxml.etree as ET
from xml.sax.saxutils import escape
from cumulusci.core.tasks import BaseTask
class RemoveElementsXPath(BaseTask):
task_options = {
'elements': {
'description': 'A list of dictionaries containing path and xpath keys. The path key is a file path that supports wildcards and xpath is the xpath for the elements to remove. Multiple dictionaries can be passed in the list to run multiple removal queries in the same task. Metadata elements in the xpath need to be prefixed with ns:, for example: ./ns:Layout/ns:relatedLists',
'required': True,
},
'chdir': {
'description': 'Change the current directory before running the replace',
}
}
def _run_task(self):
cwd = os.getcwd()
chdir = self.options.get('chdir')
if chdir:
self.logger.info('Changing directory to {}'.format(chdir))
os.chdir(chdir)
for element in self.options['elements']:
self._process_element(element)
if chdir:
os.chdir(cwd)
def _process_element(self, element):
self.logger.info(
'Removing elements matching {xpath} from {path}'.format(
**element
)
)
for f in glob.glob(element['path']):
with open(f, 'rw') as fp:
orig = fp.read()
fp.seek(0)
root = ET.parse(open(f))
res = root.findall(element['xpath'].replace('ns:','{http://soap.sforce.com/2006/04/metadata}'))
for element in res:
element.getparent().remove(element)
processed = '{}\n{}\n'.format(
'<?xml version="1.0" encoding="UTF-8"?>',
ET.tostring(root),
)
if orig != processed:
self.logger.info('Modified {}'.format(f))
fp = open(f, 'w')
fp.write(processed)
|
<commit_before><commit_msg>Add new task class RemoveElementsXPath for removing XML elements using
XPath selectors<commit_after>import glob
import os
import lxml.etree as ET
from xml.sax.saxutils import escape
from cumulusci.core.tasks import BaseTask
class RemoveElementsXPath(BaseTask):
task_options = {
'elements': {
'description': 'A list of dictionaries containing path and xpath keys. The path key is a file path that supports wildcards and xpath is the xpath for the elements to remove. Multiple dictionaries can be passed in the list to run multiple removal queries in the same task. Metadata elements in the xpath need to be prefixed with ns:, for example: ./ns:Layout/ns:relatedLists',
'required': True,
},
'chdir': {
'description': 'Change the current directory before running the replace',
}
}
def _run_task(self):
cwd = os.getcwd()
chdir = self.options.get('chdir')
if chdir:
self.logger.info('Changing directory to {}'.format(chdir))
os.chdir(chdir)
for element in self.options['elements']:
self._process_element(element)
if chdir:
os.chdir(cwd)
def _process_element(self, element):
self.logger.info(
'Removing elements matching {xpath} from {path}'.format(
**element
)
)
for f in glob.glob(element['path']):
with open(f, 'rw') as fp:
orig = fp.read()
fp.seek(0)
root = ET.parse(open(f))
res = root.findall(element['xpath'].replace('ns:','{http://soap.sforce.com/2006/04/metadata}'))
for element in res:
element.getparent().remove(element)
processed = '{}\n{}\n'.format(
'<?xml version="1.0" encoding="UTF-8"?>',
ET.tostring(root),
)
if orig != processed:
self.logger.info('Modified {}'.format(f))
fp = open(f, 'w')
fp.write(processed)
|
|
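The RemoveElementsXPath task above reads each matched file, expands the ns: prefix to the Salesforce metadata namespace, drops the selected elements, and rewrites the file only when the serialized output changed. The condensed sketch below restates that read-modify-write cycle under the same assumptions; note that it opens the file for reading and writing in two separate steps, since the 'rw' mode string used in the record is not accepted by the built-in open().

import glob
import lxml.etree as ET

MD_NS = "{http://soap.sforce.com/2006/04/metadata}"

def remove_elements(path_glob, xpath):
    # Drop every element matched by the namespace-qualified XPath and
    # rewrite only files whose serialized content actually changed.
    for path in glob.glob(path_glob):
        with open(path, "rb") as fp:
            original = fp.read()
        tree = ET.fromstring(original).getroottree()
        for element in tree.findall(xpath.replace("ns:", MD_NS)):
            element.getparent().remove(element)
        updated = b'<?xml version="1.0" encoding="UTF-8"?>\n' + ET.tostring(tree) + b"\n"
        if updated != original:
            with open(path, "wb") as fp:
                fp.write(updated)

# Hypothetical usage matching the option format described in the task docstring.
# remove_elements("src/layouts/*.layout", "./ns:Layout/ns:relatedLists")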
a336c6d939cc9b18780d8671af41b8128701db40
|
src/adhocracy_core/adhocracy_core/scripts/test_import_local_roles.py
|
src/adhocracy_core/adhocracy_core/scripts/test_import_local_roles.py
|
from pyramid import testing
from tempfile import mkstemp
import os
import json
class TestImportLocalRoles: # pragma: no cover
def test_import_local_roles(self, registry):
from adhocracy_core.scripts.import_local_roles import _import_local_roles
(self._tempfd, filename) = mkstemp()
with open(filename, 'w') as f:
f.write(json.dumps([
{"path": "/alt-treptow",
"roles": {"initiators-treptow-koepenick": ["role:initiator"]}}
]))
root = testing.DummyResource()
root['alt-treptow'] = testing.DummyResource(__parent__=root)
_import_local_roles(root, registry, filename)
assert root['alt-treptow'].__local_roles__ == \
{'initiators-treptow-koepenick': set(['role:initiator'])}
def teardown_method(self, method):
if self._tempfd is not None:
os.close(self._tempfd)
|
Add unit test for import_local_roles script
|
Add unit test for import_local_roles script
|
Python
|
agpl-3.0
|
liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,liqd/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,xs2maverick/adhocracy3.mercator,liqd/adhocracy3.mercator,fhartwig/adhocracy3.mercator,fhartwig/adhocracy3.mercator,xs2maverick/adhocracy3.mercator
|
Add unit test for import_local_roles script
|
from pyramid import testing
from tempfile import mkstemp
import os
import json
class TestImportLocalRoles: # pragma: no cover
def test_import_local_roles(self, registry):
from adhocracy_core.scripts.import_local_roles import _import_local_roles
(self._tempfd, filename) = mkstemp()
with open(filename, 'w') as f:
f.write(json.dumps([
{"path": "/alt-treptow",
"roles": {"initiators-treptow-koepenick": ["role:initiator"]}}
]))
root = testing.DummyResource()
root['alt-treptow'] = testing.DummyResource(__parent__=root)
_import_local_roles(root, registry, filename)
assert root['alt-treptow'].__local_roles__ == \
{'initiators-treptow-koepenick': set(['role:initiator'])}
def teardown_method(self, method):
if self._tempfd is not None:
os.close(self._tempfd)
|
<commit_before><commit_msg>Add unit test for import_local_roles script<commit_after>
|
from pyramid import testing
from tempfile import mkstemp
import os
import json
class TestImportLocalRoles: # pragma: no cover
def test_import_local_roles(self, registry):
from adhocracy_core.scripts.import_local_roles import _import_local_roles
(self._tempfd, filename) = mkstemp()
with open(filename, 'w') as f:
f.write(json.dumps([
{"path": "/alt-treptow",
"roles": {"initiators-treptow-koepenick": ["role:initiator"]}}
]))
root = testing.DummyResource()
root['alt-treptow'] = testing.DummyResource(__parent__=root)
_import_local_roles(root, registry, filename)
assert root['alt-treptow'].__local_roles__ == \
{'initiators-treptow-koepenick': set(['role:initiator'])}
def teardown_method(self, method):
if self._tempfd is not None:
os.close(self._tempfd)
|
Add unit test for import_local_roles script
from pyramid import testing
from tempfile import mkstemp
import os
import json
class TestImportLocalRoles: # pragma: no cover
def test_import_local_roles(self, registry):
from adhocracy_core.scripts.import_local_roles import _import_local_roles
(self._tempfd, filename) = mkstemp()
with open(filename, 'w') as f:
f.write(json.dumps([
{"path": "/alt-treptow",
"roles": {"initiators-treptow-koepenick": ["role:initiator"]}}
]))
root = testing.DummyResource()
root['alt-treptow'] = testing.DummyResource(__parent__=root)
_import_local_roles(root, registry, filename)
assert root['alt-treptow'].__local_roles__ == \
{'initiators-treptow-koepenick': set(['role:initiator'])}
def teardown_method(self, method):
if self._tempfd is not None:
os.close(self._tempfd)
|
<commit_before><commit_msg>Add unit test for import_local_roles script<commit_after>from pyramid import testing
from tempfile import mkstemp
import os
import json
class TestImportLocalRoles: # pragma: no cover
def test_import_local_roles(self, registry):
from adhocracy_core.scripts.import_local_roles import _import_local_roles
(self._tempfd, filename) = mkstemp()
with open(filename, 'w') as f:
f.write(json.dumps([
{"path": "/alt-treptow",
"roles": {"initiators-treptow-koepenick": ["role:initiator"]}}
]))
root = testing.DummyResource()
root['alt-treptow'] = testing.DummyResource(__parent__=root)
_import_local_roles(root, registry, filename)
assert root['alt-treptow'].__local_roles__ == \
{'initiators-treptow-koepenick': set(['role:initiator'])}
def teardown_method(self, method):
if self._tempfd is not None:
os.close(self._tempfd)
|
|
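The test above feeds _import_local_roles a JSON list of path/roles entries and expects the matching resource to end up with a set-valued __local_roles__ mapping. A simplified, hypothetical stand-in for that helper is sketched below; the real script also receives the registry argument and may perform event or transaction handling that is ignored here.

import json

def apply_local_roles(root, filename):
    # Walk each entry's path from the root resource and replace its
    # __local_roles__ mapping with set-valued roles from the JSON file.
    with open(filename) as f:
        entries = json.load(f)
    for entry in entries:
        resource = root
        for segment in entry["path"].strip("/").split("/"):
            if segment:
                resource = resource[segment]
        resource.__local_roles__ = {
            principal: set(roles) for principal, roles in entry["roles"].items()
        }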
34927e575294ecadebca62a9d8decda550fd1001
|
alembic/versions/8888a0f4c1ef_add_code_of_conduct_and_data_policy_.py
|
alembic/versions/8888a0f4c1ef_add_code_of_conduct_and_data_policy_.py
|
"""Add Code of Conduct and Data Policy agreement columns
Revision ID: 8888a0f4c1ef
Revises: fa869546b8ca
Create Date: 2019-09-01 01:17:02.604266
"""
# revision identifiers, used by Alembic.
revision = '8888a0f4c1ef'
down_revision = 'fa869546b8ca'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_developer', sa.Column('agreed_coc', sa.Boolean(), server_default='False', nullable=False))
op.add_column('indie_developer', sa.Column('agreed_data_policy', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_developer', 'agreed_data_policy')
op.drop_column('indie_developer', 'agreed_coc')
|
Include the database migration this time
|
Include the database migration this time
|
Python
|
agpl-3.0
|
magfest/ubersystem,magfest/ubersystem,magfest/ubersystem,magfest/ubersystem
|
Include the database migration this time
|
"""Add Code of Conduct and Data Policy agreement columns
Revision ID: 8888a0f4c1ef
Revises: fa869546b8ca
Create Date: 2019-09-01 01:17:02.604266
"""
# revision identifiers, used by Alembic.
revision = '8888a0f4c1ef'
down_revision = 'fa869546b8ca'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_developer', sa.Column('agreed_coc', sa.Boolean(), server_default='False', nullable=False))
op.add_column('indie_developer', sa.Column('agreed_data_policy', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_developer', 'agreed_data_policy')
op.drop_column('indie_developer', 'agreed_coc')
|
<commit_before><commit_msg>Include the database migration this time<commit_after>
|
"""Add Code of Conduct and Data Policy agreement columns
Revision ID: 8888a0f4c1ef
Revises: fa869546b8ca
Create Date: 2019-09-01 01:17:02.604266
"""
# revision identifiers, used by Alembic.
revision = '8888a0f4c1ef'
down_revision = 'fa869546b8ca'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_developer', sa.Column('agreed_coc', sa.Boolean(), server_default='False', nullable=False))
op.add_column('indie_developer', sa.Column('agreed_data_policy', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_developer', 'agreed_data_policy')
op.drop_column('indie_developer', 'agreed_coc')
|
Include the database migration this time"""Add Code of Conduct and Data Policy agreement columns
Revision ID: 8888a0f4c1ef
Revises: fa869546b8ca
Create Date: 2019-09-01 01:17:02.604266
"""
# revision identifiers, used by Alembic.
revision = '8888a0f4c1ef'
down_revision = 'fa869546b8ca'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_developer', sa.Column('agreed_coc', sa.Boolean(), server_default='False', nullable=False))
op.add_column('indie_developer', sa.Column('agreed_data_policy', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_developer', 'agreed_data_policy')
op.drop_column('indie_developer', 'agreed_coc')
|
<commit_before><commit_msg>Include the database migration this time<commit_after>"""Add Code of Conduct and Data Policy agreement columns
Revision ID: 8888a0f4c1ef
Revises: fa869546b8ca
Create Date: 2019-09-01 01:17:02.604266
"""
# revision identifiers, used by Alembic.
revision = '8888a0f4c1ef'
down_revision = 'fa869546b8ca'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
try:
is_sqlite = op.get_context().dialect.name == 'sqlite'
except Exception:
is_sqlite = False
if is_sqlite:
op.get_context().connection.execute('PRAGMA foreign_keys=ON;')
utcnow_server_default = "(datetime('now', 'utc'))"
else:
utcnow_server_default = "timezone('utc', current_timestamp)"
def sqlite_column_reflect_listener(inspector, table, column_info):
"""Adds parenthesis around SQLite datetime defaults for utcnow."""
if column_info['default'] == "datetime('now', 'utc')":
column_info['default'] = utcnow_server_default
sqlite_reflect_kwargs = {
'listeners': [('column_reflect', sqlite_column_reflect_listener)]
}
# ===========================================================================
# HOWTO: Handle alter statements in SQLite
#
# def upgrade():
# if is_sqlite:
# with op.batch_alter_table('table_name', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
# batch_op.alter_column('column_name', type_=sa.Unicode(), server_default='', nullable=False)
# else:
# op.alter_column('table_name', 'column_name', type_=sa.Unicode(), server_default='', nullable=False)
#
# ===========================================================================
def upgrade():
op.add_column('indie_developer', sa.Column('agreed_coc', sa.Boolean(), server_default='False', nullable=False))
op.add_column('indie_developer', sa.Column('agreed_data_policy', sa.Boolean(), server_default='False', nullable=False))
def downgrade():
op.drop_column('indie_developer', 'agreed_data_policy')
op.drop_column('indie_developer', 'agreed_coc')
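The HOWTO block above is generic; as a purely hypothetical sketch (this revision only adds columns, which SQLite handles without batch mode), a later ALTER on one of the new columns would follow that pattern roughly like this — the new server_default value is invented and not part of this migration:
# Hypothetical follow-up sketch illustrating the HOWTO pattern above; not part
# of this revision.
def upgrade_later_example():
    if is_sqlite:
        with op.batch_alter_table('indie_developer', reflect_kwargs=sqlite_reflect_kwargs) as batch_op:
            batch_op.alter_column('agreed_coc', server_default='True', nullable=False)
    else:
        op.alter_column('indie_developer', 'agreed_coc', server_default='True', nullable=False)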
|
|
6863a9526af42f2c7615abe22f3a6eb40b759da2
|
sorting/quick_sort.py
|
sorting/quick_sort.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Quick Sort. It is an application of divide and conquer strategy.
Best, Average: O(nlogn), Worst: O(n^2).
'''
def partion(array, left, right, pivot, order):
p = left
pv = array[pivot]
array[right], array[pivot] = array[pivot], array[right]
if order == 'asc':
for i in xrange(left, right):
if array[i] <= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
else:
for i in xrange(left, right):
if array[i] >= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
array[p], array[right] = array[right], array[p]
return p
def quick_sort(array, left, right, order):
if left >= right:
return
# here we always use the middle index between left and right as the pivot,
# it is not always the best one of course.
pivot = left + (right - left) / 2
p = partion(array, left, right, pivot, order)
quick_sort(array, left, p - 1, order)
quick_sort(array, p + 1, right, order)
return array
def sort(array, order='asc'):
'''
In-place sort array use quick sort algorithm in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
n = len(array)
return quick_sort(array, 0, n - 1, order)
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Quick Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
# to avoid 'ValueError("sample larger than population")' when call
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
Implement the quick sort algorithm.
|
Implement the quick sort algorithm.
|
Python
|
mit
|
weichen2046/algorithm-study,weichen2046/algorithm-study
|
Implement the quick sort algorithm.
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Quick Sort. It is an application of divide and conquer strategy.
Best, Average: O(nlogn), Worst: O(n^2).
'''
def partion(array, left, right, pivot, order):
p = left
pv = array[pivot]
array[right], array[pivot] = array[pivot], array[right]
if order == 'asc':
for i in xrange(left, right):
if array[i] <= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
else:
for i in xrange(left, right):
if array[i] >= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
array[p], array[right] = array[right], array[p]
return p
def quick_sort(array, left, right, order):
if left >= right:
return
# here we always use the middle index between left and right as the pivot,
# it is not always the best one of course.
pivot = left + (right - left) / 2
p = partion(array, left, right, pivot, order)
quick_sort(array, left, p - 1, order)
quick_sort(array, p + 1, right, order)
return array
def sort(array, order='asc'):
'''
In-place sort array use quick sort algorithm in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
n = len(array)
return quick_sort(array, 0, n - 1, order)
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Quick Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
# to avoid 'ValueError("sample larger than population")' when call
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
<commit_before><commit_msg>Implement the quick sort algorithm.<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Quick Sort. It is an application of divide and conquer strategy.
Best, Average: O(nlogn), Worst: O(n^2).
'''
def partion(array, left, right, pivot, order):
p = left
pv = array[pivot]
array[right], array[pivot] = array[pivot], array[right]
if order == 'asc':
for i in xrange(left, right):
if array[i] <= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
else:
for i in xrange(left, right):
if array[i] >= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
array[p], array[right] = array[right], array[p]
return p
def quick_sort(array, left, right, order):
if left >= right:
return
# here we always use the middle index between left and right as the pivot,
# it is not always the best one of course.
pivot = left + (right - left) / 2
p = partion(array, left, right, pivot, order)
quick_sort(array, left, p - 1, order)
quick_sort(array, p + 1, right, order)
return array
def sort(array, order='asc'):
'''
In-place sort array use quick sort algorithm in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
n = len(array)
return quick_sort(array, 0, n - 1, order)
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Quick Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
# to avoid 'ValueError("sample larger than population")' when call
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
Implement the quick sort algorithm.#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Quick Sort. It is an application of divide and conquer strategy.
Best, Average: O(nlogn), Worst: O(n^2).
'''
def partion(array, left, right, pivot, order):
p = left
pv = array[pivot]
array[right], array[pivot] = array[pivot], array[right]
if order == 'asc':
for i in xrange(left, right):
if array[i] <= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
else:
for i in xrange(left, right):
if array[i] >= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
array[p], array[right] = array[right], array[p]
return p
def quick_sort(array, left, right, order):
if left >= right:
return
# here we always use the middle index between left and right as the pivot,
# it is not always the best one of course.
pivot = left + (right - left) / 2
p = partion(array, left, right, pivot, order)
quick_sort(array, left, p - 1, order)
quick_sort(array, p + 1, right, order)
return array
def sort(array, order='asc'):
'''
In-place sort array use quick sort algorithm in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
n = len(array)
return quick_sort(array, 0, n - 1, order)
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Quick Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
# to avoid 'ValueError("sample larger than population")' when call
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
|
<commit_before><commit_msg>Implement the quick sort algorithm.<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Quick Sort. It is an application of divide and conquer strategy.
Best, Average: O(nlogn), Worst: O(n^2).
'''
def partion(array, left, right, pivot, order):
p = left
pv = array[pivot]
array[right], array[pivot] = array[pivot], array[right]
if order == 'asc':
for i in xrange(left, right):
if array[i] <= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
else:
for i in xrange(left, right):
if array[i] >= pv:
if i != p:
array[i], array[p] = array[p], array[i]
p += 1
array[p], array[right] = array[right], array[p]
return p
def quick_sort(array, left, right, order):
if left >= right:
return
# here we always use the middle index between left and right as the pivot,
# it is not always the best one of course.
pivot = left + (right - left) / 2
p = partion(array, left, right, pivot, order)
quick_sort(array, left, p - 1, order)
quick_sort(array, p + 1, right, order)
return array
def sort(array, order='asc'):
'''
In-place sort array use quick sort algorithm in ascending or descending
order.
Return the sorted array.
'''
if not array:
raise Exception('No element to sort.')
n = len(array)
return quick_sort(array, 0, n - 1, order)
if __name__ == '__main__':
import random
from argparse import ArgumentParser
parser = ArgumentParser(description='Sort array use Quick Sort algorithm.')
parser.add_argument('random', type=int, help='max random number count')
parser.add_argument('--order', type=str, default='asc', choices=['asc', 'desc'],
help='sort in ascending or descending.')
args = parser.parse_args()
# to avoid 'ValueError("sample larger than population")' when call
# random.sample().
r_end = 1000
if args.random >= r_end:
r_end = args.random + 10
randoms = random.sample(xrange(1, r_end), args.random)
print 'before sort:\t', randoms
sort(randoms, args.order)
print 'after sort:\t', randoms
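A minimal usage sketch for the sort() helper above (Python 2 syntax, matching the snippet's print statements and xrange): it sorts in place and also returns the list it was given.
# Usage sketch, assuming the module above has been imported or pasted in.
data = [5, 3, 8, 1, 9, 2]
print sort(data)                 # [1, 2, 3, 5, 8, 9]
print sort(data, order='desc')   # [9, 8, 5, 3, 2, 1]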
|
|
72b944740a474b3f476ed36fd5940cdac2895754
|
src/VipFormat/emitAllXml.py
|
src/VipFormat/emitAllXml.py
|
import precinct
import electoralDistrict
def emitProlog():
print '<?xml version="1.0"?>'
print '<VipObject xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" schemaVersion="5.0" xsi:noNamespaceSchemaLocation="http://votinginfoproject.github.com/vip-specification/vip_spec.xsd">'
def emitEpilog():
print '</VipObject>'
def emitAll():
emitProlog()
precinct.emitAllPrecincts()
electoralDistrict.emitAllElectoralDistricts()
emitEpilog()
if __name__=='__main__':
emitAll()
|
Add wrapper code to emit all of the XML things
|
Add wrapper code to emit all of the XML things
|
Python
|
apache-2.0
|
mapmydemocracy/civichackathon,mapmydemocracy/civichackathon
|
Add wrapper code to emit all of the XML things
|
import precinct
import electoralDistrict
def emitProlog():
print '<?xml version="1.0"?>'
print '<VipObject xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" schemaVersion="5.0" xsi:noNamespaceSchemaLocation="http://votinginfoproject.github.com/vip-specification/vip_spec.xsd">'
def emitEpilog():
print '</VipObject>'
def emitAll():
emitProlog()
precinct.emitAllPrecincts()
electoralDistrict.emitAllElectoralDistricts()
emitEpilog()
if __name__=='__main__':
emitAll()
|
<commit_before><commit_msg>Add wrapper code to emit all of the XML things<commit_after>
|
import precinct
import electoralDistrict
def emitProlog():
print '<?xml version="1.0"?>'
print '<VipObject xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" schemaVersion="5.0" xsi:noNamespaceSchemaLocation="http://votinginfoproject.github.com/vip-specification/vip_spec.xsd">'
def emitEpilog():
print '</VipObject>'
def emitAll():
emitProlog()
precinct.emitAllPrecincts()
electoralDistrict.emitAllElectoralDistricts()
emitEpilog()
if __name__=='__main__':
emitAll()
|
Add wrapper code to emit all of the XML things
import precinct
import electoralDistrict
def emitProlog():
print '<?xml version="1.0"?>'
print '<VipObject xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" schemaVersion="5.0" xsi:noNamespaceSchemaLocation="http://votinginfoproject.github.com/vip-specification/vip_spec.xsd">'
def emitEpilog():
print '</VipObject>'
def emitAll():
emitProlog()
precinct.emitAllPrecincts()
electoralDistrict.emitAllElectoralDistricts()
emitEpilog()
if __name__=='__main__':
emitAll()
|
<commit_before><commit_msg>Add wrapper code to emit all of the XML things<commit_after>
import precinct
import electoralDistrict
def emitProlog():
print '<?xml version="1.0"?>'
print '<VipObject xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" schemaVersion="5.0" xsi:noNamespaceSchemaLocation="http://votinginfoproject.github.com/vip-specification/vip_spec.xsd">'
def emitEpilog():
print '</VipObject>'
def emitAll():
emitProlog()
precinct.emitAllPrecincts()
electoralDistrict.emitAllElectoralDistricts()
emitEpilog()
if __name__=='__main__':
emitAll()
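A rough usage sketch, assuming the precinct and electoralDistrict modules are importable: since emitAll() writes to stdout with print statements, the whole document can be captured by temporarily redirecting sys.stdout (the output filename here is invented).
# Hypothetical capture-to-file sketch for emitAll(); Python 2 to match the
# print statements above.
import sys
out = open('vip_export.xml', 'w')
old_stdout, sys.stdout = sys.stdout, out
try:
    emitAll()
finally:
    sys.stdout = old_stdout
    out.close()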
|
|
229e5136b56ffa6488abd81647d5811a5371ce8a
|
src/citation_vs_blocking.py
|
src/citation_vs_blocking.py
|
#!/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 14:56:23 2016
@author: Xilin Sun <s.sn.giraffe AT gmail.com>
Get the number of prior art citations for each Motorola patent;
Get the number of blocking actions for each Motorola patent;
Find the relationship between these numbers;
"""
|
Add a script to find the relationship between numbers of prior art citations and numbers of blocking actions, and types of blocking actions
|
Add a script to find the relationship between numbers of prior art citations and numbers of blocking actions, and types of blocking actions
|
Python
|
bsd-2-clause
|
PatentBlocker/Motorola_Patent_Citations,PatentBlocker/Motorola_Patent_Citations
|
Add a script to find the relationship between numbers of prior art citations and numbers of blocking actions, and types of blocking actions
|
#!/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 14:56:23 2016
@author: Xilin Sun <s.sn.giraffe AT gmail.com>
Get the number of prior art citations for each Motorola patent;
Get the number of blocking actions for each Motorola patent;
Find the relationship between these numbers;
"""
|
<commit_before><commit_msg>Add a script to find the relationship between numbers of prior art citations and numbers of blocking actions, and types of blocking actions<commit_after>
|
#!/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 14:56:23 2016
@author: Xilin Sun <s.sn.giraffe AT gmail.com>
Get the number of prior art citations for each Motorola patent;
Get the number of blocking actions for each Motorola patent;
Find the relationship between these numbers;
"""
|
Add a script to find the relationship between numbers of prior art citations and numbers of blocking actions, and types of blocking actions#!/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 14:56:23 2016
@author: Xilin Sun <s.sn.giraffe AT gmail.com>
Get the number of prior art citations for each Motorola patent;
Get the number of blocking actions for each Motorola patent;
Find the relationship between these numbers;
"""
|
<commit_before><commit_msg>Add a script to find the relationship between numbers of prior art citations and numbers of blocking actions, and types of blocking actions<commit_after>#!/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 14:56:23 2016
@author: Xilin Sun <s.sn.giraffe AT gmail.com>
Get the number of prior art citations for each Motorola patent;
Get the number of blocking actions for each Motorola patent;
Find the relationship between these numbers;
"""
|
|
83f0aeb4c8bdea3da65c50b9545bacb9c1aec74b
|
models/secure_pass.py
|
models/secure_pass.py
|
from hashlib import sha512 # crypto-secure hashing algorithm
from pbkdf2 import PBKDF2 # key-stretching algorithm
from os import urandom # crypto-secure random number gen
ITERATIONS = 5000
SALT_LEN = 32
KEY_LEN = 64
class Password:
def __init__(self, password):
self.salt = urandom(SALT_LEN) # 256-bit salt
self.key = PBKDF2(
passphrase=password,
salt=self.salt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN)
def get_hash(self):
return self.key
def get_salt(self):
return self.salt
@staticmethod
def check_pass(password, key, thesalt):
return PBKDF2(
passphrase=password,
salt=thesalt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN) \
== \
key
|
Add file to securely hash passwords
|
Add file to securely hash passwords
|
Python
|
mit
|
davenportw15/SnailMail,davenportw15/SnailMail,davenportw15/SnailMail
|
Add file to securely hash passwords
|
from hashlib import sha512 # crypto-secure hashing algorithm
from pbkdf2 import PBKDF2 # key-stretching algorithm
from os import urandom # crypto-secure random number gen
ITERATIONS = 5000
SALT_LEN = 32
KEY_LEN = 64
class Password:
def __init__(self, password):
self.salt = urandom(SALT_LEN) # 256-bit salt
self.key = PBKDF2(
passphrase=password,
salt=self.salt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN)
def get_hash(self):
return self.key
def get_salt(self):
return self.salt
@staticmethod
def check_pass(password, key, thesalt):
return PBKDF2(
passphrase=password,
salt=thesalt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN) \
== \
key
|
<commit_before><commit_msg>Add file to securely hash passwords<commit_after>
|
from hashlib import sha512 # crypto-secure hashing algorithm
from pbkdf2 import PBKDF2 # key-stretching algorithm
from os import urandom # crypto-secure random number gen
ITERATIONS = 5000
SALT_LEN = 32
KEY_LEN = 64
class Password:
def __init__(self, password):
self.salt = urandom(SALT_LEN) # 256-bit salt
self.key = PBKDF2(
passphrase=password,
salt=self.salt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN)
def get_hash(self):
return self.key
def get_salt(self):
return self.salt
@staticmethod
def check_pass(password, key, thesalt):
return PBKDF2(
passphrase=password,
salt=thesalt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN) \
== \
key
|
Add file to securely hash passwords
from hashlib import sha512 # crypto-secure hashing algorithm
from pbkdf2 import PBKDF2 # key-stretching algorithm
from os import urandom # crypto-secure random number gen
ITERATIONS = 5000
SALT_LEN = 32
KEY_LEN = 64
class Password:
def __init__(self, password):
self.salt = urandom(SALT_LEN) # 256-bit salt
self.key = PBKDF2(
passphrase=password,
salt=self.salt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN)
def get_hash(self):
return self.key
def get_salt(self):
return self.salt
@staticmethod
def check_pass(password, key, thesalt):
return PBKDF2(
passphrase=password,
salt=thesalt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN) \
== \
key
|
<commit_before><commit_msg>Add file to securely hash passwords<commit_after>
from hashlib import sha512 # crypto-secure hashing algorithm
from pbkdf2 import PBKDF2 # key-stretching algorithm
from os import urandom # crypto-secure random number gen
ITERATIONS = 5000
SALT_LEN = 32
KEY_LEN = 64
class Password:
def __init__(self, password):
self.salt = urandom(SALT_LEN) # 256-bit salt
self.key = PBKDF2(
passphrase=password,
salt=self.salt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN)
def get_hash(self):
return self.key
def get_salt(self):
return self.salt
@staticmethod
def check_pass(password, key, thesalt):
return PBKDF2(
passphrase=password,
salt=thesalt,
iterations=ITERATIONS,
digestmodule=sha512
).read(KEY_LEN) \
== \
key
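A minimal usage sketch for the Password class above: hash the password once at signup, persist the key and salt, and verify later attempts with the static helper. In a real login path the final comparison would usually go through hmac.compare_digest, since the plain == used above is not guaranteed to be constant-time.
# Usage sketch for the Password class defined above.
p = Password('correct horse battery staple')
stored_key, stored_salt = p.get_hash(), p.get_salt()

# Later, at login time:
assert Password.check_pass('correct horse battery staple', stored_key, stored_salt)
assert not Password.check_pass('wrong guess', stored_key, stored_salt)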
|
|
f614502f0a67b754f5757935ffb0d86920998dcb
|
glamkit_collections/contrib/work_creator/migrations/0012_auto_20170502_2209.py
|
glamkit_collections/contrib/work_creator/migrations/0012_auto_20170502_2209.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0011_role_title_plural'),
]
operations = [
migrations.AlterModelOptions(
name='creatorbase',
options={'ordering': ('name_sort', 'slug', 'publishing_is_draft'), 'verbose_name': 'creator'},
),
migrations.AlterModelOptions(
name='workbase',
options={'ordering': ('slug', 'publishing_is_draft'), 'verbose_name': 'work'},
),
]
|
Add DB migration for more explicit ordering of work & creator models
|
Add DB migration for more explicit ordering of work & creator models
|
Python
|
mit
|
ic-labs/glamkit-collections,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/glamkit-collections,ic-labs/django-icekit
|
Add DB migration for more explicit ordering of work & creator models
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0011_role_title_plural'),
]
operations = [
migrations.AlterModelOptions(
name='creatorbase',
options={'ordering': ('name_sort', 'slug', 'publishing_is_draft'), 'verbose_name': 'creator'},
),
migrations.AlterModelOptions(
name='workbase',
options={'ordering': ('slug', 'publishing_is_draft'), 'verbose_name': 'work'},
),
]
|
<commit_before><commit_msg>Add DB migration for more explicit ordering of work & creator models<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0011_role_title_plural'),
]
operations = [
migrations.AlterModelOptions(
name='creatorbase',
options={'ordering': ('name_sort', 'slug', 'publishing_is_draft'), 'verbose_name': 'creator'},
),
migrations.AlterModelOptions(
name='workbase',
options={'ordering': ('slug', 'publishing_is_draft'), 'verbose_name': 'work'},
),
]
|
Add DB migration for more explicit ordering of work & creator models# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0011_role_title_plural'),
]
operations = [
migrations.AlterModelOptions(
name='creatorbase',
options={'ordering': ('name_sort', 'slug', 'publishing_is_draft'), 'verbose_name': 'creator'},
),
migrations.AlterModelOptions(
name='workbase',
options={'ordering': ('slug', 'publishing_is_draft'), 'verbose_name': 'work'},
),
]
|
<commit_before><commit_msg>Add DB migration for more explicit ordering of work & creator models<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gk_collections_work_creator', '0011_role_title_plural'),
]
operations = [
migrations.AlterModelOptions(
name='creatorbase',
options={'ordering': ('name_sort', 'slug', 'publishing_is_draft'), 'verbose_name': 'creator'},
),
migrations.AlterModelOptions(
name='workbase',
options={'ordering': ('slug', 'publishing_is_draft'), 'verbose_name': 'work'},
),
]
|
|
554f3437e06c3bd295a38b3b87c93bf6c0af6f52
|
test/jpypetest/test_hash.py
|
test/jpypetest/test_hash.py
|
# *****************************************************************************
# Copyright 2019 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import jpype
import common
import sys
class HashTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testHashString(self):
self.assertIsNotNone(hash(jpype.java.lang.String("upside down")))
self.assertIsNotNone(hash(jpype.JString("upside down")))
def testHashArray(self):
self.assertIsNotNone(hash(jpype.JArray(jpype.JInt)([1,2,3])))
def testHashObject(self):
self.assertIsNotNone(hash(jpype.java.lang.Object()))
def testHashBoolean(self):
self.assertIsNotNone(hash(jpype.java.lang.Boolean(True)))
def testHashByte(self):
self.assertIsNotNone(hash(jpype.java.lang.Byte(5)))
def testHashChar(self):
self.assertIsNotNone(hash(jpype.java.lang.Character("a")))
def testHashShort(self):
self.assertIsNotNone(hash(jpype.java.lang.Short(1)))
def testHashLong(self):
self.assertIsNotNone(hash(jpype.java.lang.Long(55)))
def testHashInteger(self):
self.assertIsNotNone(hash(jpype.java.lang.Integer(123)))
def testHashFloat(self):
self.assertIsNotNone(hash(jpype.java.lang.Float(3.141592)))
def testHashDouble(self):
self.assertIsNotNone(hash(jpype.java.lang.Double(6.62607004e-34)))
|
Test for problems in hash.
|
Test for problems in hash.
|
Python
|
apache-2.0
|
originell/jpype,originell/jpype,originell/jpype,originell/jpype,originell/jpype
|
Test for problems in hash.
|
# *****************************************************************************
# Copyright 2019 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import jpype
import common
import sys
class HashTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testHashString(self):
self.assertIsNotNone(hash(jpype.java.lang.String("upside down")))
self.assertIsNotNone(hash(jpype.JString("upside down")))
def testHashArray(self):
self.assertIsNotNone(hash(jpype.JArray(jpype.JInt)([1,2,3])))
def testHashObject(self):
self.assertIsNotNone(hash(jpype.java.lang.Object()))
def testHashBoolean(self):
self.assertIsNotNone(hash(jpype.java.lang.Boolean(True)))
def testHashByte(self):
self.assertIsNotNone(hash(jpype.java.lang.Byte(5)))
def testHashChar(self):
self.assertIsNotNone(hash(jpype.java.lang.Character("a")))
def testHashShort(self):
self.assertIsNotNone(hash(jpype.java.lang.Short(1)))
def testHashLong(self):
self.assertIsNotNone(hash(jpype.java.lang.Long(55)))
def testHashInteger(self):
self.assertIsNotNone(hash(jpype.java.lang.Integer(123)))
def testHashFloat(self):
self.assertIsNotNone(hash(jpype.java.lang.Float(3.141592)))
def testHashDouble(self):
self.assertIsNotNone(hash(jpype.java.lang.Double(6.62607004e-34)))
|
<commit_before><commit_msg>Test for problems in hash.<commit_after>
|
# *****************************************************************************
# Copyright 2019 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import jpype
import common
import sys
class HashTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testHashString(self):
self.assertIsNotNone(hash(jpype.java.lang.String("upside down")))
self.assertIsNotNone(hash(jpype.JString("upside down")))
def testHashArray(self):
self.assertIsNotNone(hash(jpype.JArray(jpype.JInt)([1,2,3])))
def testHashObject(self):
self.assertIsNotNone(hash(jpype.java.lang.Object()))
def testHashBoolean(self):
self.assertIsNotNone(hash(jpype.java.lang.Boolean(True)))
def testHashByte(self):
self.assertIsNotNone(hash(jpype.java.lang.Byte(5)))
def testHashChar(self):
self.assertIsNotNone(hash(jpype.java.lang.Character("a")))
def testHashShort(self):
self.assertIsNotNone(hash(jpype.java.lang.Short(1)))
def testHashLong(self):
self.assertIsNotNone(hash(jpype.java.lang.Long(55)))
def testHashInteger(self):
self.assertIsNotNone(hash(jpype.java.lang.Integer(123)))
def testHashFloat(self):
self.assertIsNotNone(hash(jpype.java.lang.Float(3.141592)))
def testHashDouble(self):
self.assertIsNotNone(hash(jpype.java.lang.Double(6.62607004e-34)))
|
Test for problems in hash.# *****************************************************************************
# Copyright 2019 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import jpype
import common
import sys
class HashTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testHashString(self):
self.assertIsNotNone(hash(jpype.java.lang.String("upside down")))
self.assertIsNotNone(hash(jpype.JString("upside down")))
def testHashArray(self):
self.assertIsNotNone(hash(jpype.JArray(jpype.JInt)([1,2,3])))
def testHashObject(self):
self.assertIsNotNone(hash(jpype.java.lang.Object()))
def testHashBoolean(self):
self.assertIsNotNone(hash(jpype.java.lang.Boolean(True)))
def testHashByte(self):
self.assertIsNotNone(hash(jpype.java.lang.Byte(5)))
def testHashChar(self):
self.assertIsNotNone(hash(jpype.java.lang.Character("a")))
def testHashShort(self):
self.assertIsNotNone(hash(jpype.java.lang.Short(1)))
def testHashLong(self):
self.assertIsNotNone(hash(jpype.java.lang.Long(55)))
def testHashInteger(self):
self.assertIsNotNone(hash(jpype.java.lang.Integer(123)))
def testHashFloat(self):
self.assertIsNotNone(hash(jpype.java.lang.Float(3.141592)))
def testHashDouble(self):
self.assertIsNotNone(hash(jpype.java.lang.Double(6.62607004e-34)))
|
<commit_before><commit_msg>Test for problems in hash.<commit_after># *****************************************************************************
# Copyright 2019 Karl Einar Nelson
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# *****************************************************************************
import jpype
import common
import sys
class HashTestCase(common.JPypeTestCase):
def setUp(self):
common.JPypeTestCase.setUp(self)
def testHashString(self):
self.assertIsNotNone(hash(jpype.java.lang.String("upside down")))
self.assertIsNotNone(hash(jpype.JString("upside down")))
def testHashArray(self):
self.assertIsNotNone(hash(jpype.JArray(jpype.JInt)([1,2,3])))
def testHashObject(self):
self.assertIsNotNone(hash(jpype.java.lang.Object()))
def testHashBoolean(self):
self.assertIsNotNone(hash(jpype.java.lang.Boolean(True)))
def testHashByte(self):
self.assertIsNotNone(hash(jpype.java.lang.Byte(5)))
def testHashChar(self):
self.assertIsNotNone(hash(jpype.java.lang.Character("a")))
def testHashShort(self):
self.assertIsNotNone(hash(jpype.java.lang.Short(1)))
def testHashLong(self):
self.assertIsNotNone(hash(jpype.java.lang.Long(55)))
def testHashInteger(self):
self.assertIsNotNone(hash(jpype.java.lang.Integer(123)))
def testHashFloat(self):
self.assertIsNotNone(hash(jpype.java.lang.Float(3.141592)))
def testHashDouble(self):
self.assertIsNotNone(hash(jpype.java.lang.Double(6.62607004e-34)))
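Outside the test harness, the property these tests assert — boxed Java objects are hashable, so they can serve as dict keys — can be reproduced with a short standalone sketch; the bare jpype.startJVM() call is an assumption, and real setups usually pass a classpath.
# Standalone sketch of what the tests above check; JVM arguments omitted.
import jpype
jpype.startJVM()
lookup = {jpype.java.lang.Integer(123): 'an Integer key',
          jpype.JString('upside down'): 'a String key'}
print(hash(jpype.java.lang.Double(6.62607004e-34)))
jpype.shutdownJVM()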
|
|
667ad3e48fb1107b34bff54231bf653b621b01cb
|
common/migrations/0002_auto_20150717_2202.py
|
common/migrations/0002_auto_20150717_2202.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='news',
name='content_markup_type',
field=models.CharField(default=b'restructuredtext', max_length=30, editable=False, choices=[(b'', b'--'), (b'html', b'html'), (b'plain', b'plain'), (b'restructuredtext', b'restructuredtext')]),
preserve_default=True,
),
]
|
Add a migration for common
|
Add a migration for common
|
Python
|
agpl-3.0
|
Turupawn/website,lutris/website,Turupawn/website,Turupawn/website,lutris/website,lutris/website,Turupawn/website,lutris/website
|
Add a migration for common
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='news',
name='content_markup_type',
field=models.CharField(default=b'restructuredtext', max_length=30, editable=False, choices=[(b'', b'--'), (b'html', b'html'), (b'plain', b'plain'), (b'restructuredtext', b'restructuredtext')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration for common<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='news',
name='content_markup_type',
field=models.CharField(default=b'restructuredtext', max_length=30, editable=False, choices=[(b'', b'--'), (b'html', b'html'), (b'plain', b'plain'), (b'restructuredtext', b'restructuredtext')]),
preserve_default=True,
),
]
|
Add a migration for common# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='news',
name='content_markup_type',
field=models.CharField(default=b'restructuredtext', max_length=30, editable=False, choices=[(b'', b'--'), (b'html', b'html'), (b'plain', b'plain'), (b'restructuredtext', b'restructuredtext')]),
preserve_default=True,
),
]
|
<commit_before><commit_msg>Add a migration for common<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='news',
name='content_markup_type',
field=models.CharField(default=b'restructuredtext', max_length=30, editable=False, choices=[(b'', b'--'), (b'html', b'html'), (b'plain', b'plain'), (b'restructuredtext', b'restructuredtext')]),
preserve_default=True,
),
]
|
|
a854abd2d9580e1c31e659cb4d0bd182f96c5b81
|
CodeFights/bishopAndPawn.py
|
CodeFights/bishopAndPawn.py
|
#!/usr/local/bin/python
# Code Fights Bishop and Pawn Problem
def bishopAndPawn(bishop, pawn):
epsilon = 0.001
dist = ((ord(bishop[0]) - ord(pawn[0]))**2 +
(int(bishop[1]) - int(pawn[1]))**2)**(0.5)
dist = dist / (2**0.5) # distance is a multiple of sqrt(2)
return (round(dist) - dist < epsilon and bishop[0] != pawn[0] and
bishop[1] != pawn[1])
def main():
tests = [
["a1", "c3", True],
["h1", "h3", False],
["a5", "c3", True],
["g1", "f3", False],
["e7", "d6", True],
["e7", "a3", True],
["e3", "a7", True],
["a1", "h8", True],
["a1", "h7", False],
["h1", "a8", True]
]
for t in tests:
res = bishopAndPawn(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: bishopAndPawn({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: bishopAndPawn({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights bishop and pawn problem
|
Solve Code Fights bishop and pawn problem
|
Python
|
mit
|
HKuz/Test_Code
|
Solve Code Fights bishop and pawn problem
|
#!/usr/local/bin/python
# Code Fights Bishop and Pawn Problem
def bishopAndPawn(bishop, pawn):
epsilon = 0.001
dist = ((ord(bishop[0]) - ord(pawn[0]))**2 +
(int(bishop[1]) - int(pawn[1]))**2)**(0.5)
dist = dist / (2**0.5) # distance is a multiple of sqrt(2)
return (round(dist) - dist < epsilon and bishop[0] != pawn[0] and
bishop[1] != pawn[1])
def main():
tests = [
["a1", "c3", True],
["h1", "h3", False],
["a5", "c3", True],
["g1", "f3", False],
["e7", "d6", True],
["e7", "a3", True],
["e3", "a7", True],
["a1", "h8", True],
["a1", "h7", False],
["h1", "a8", True]
]
for t in tests:
res = bishopAndPawn(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: bishopAndPawn({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: bishopAndPawn({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights bishop and pawn problem<commit_after>
|
#!/usr/local/bin/python
# Code Fights Bishop and Pawn Problem
def bishopAndPawn(bishop, pawn):
epsilon = 0.001
dist = ((ord(bishop[0]) - ord(pawn[0]))**2 +
(int(bishop[1]) - int(pawn[1]))**2)**(0.5)
dist = dist / (2**0.5) # distance is a multiple of sqrt(2)
return (round(dist) - dist < epsilon and bishop[0] != pawn[0] and
bishop[1] != pawn[1])
def main():
tests = [
["a1", "c3", True],
["h1", "h3", False],
["a5", "c3", True],
["g1", "f3", False],
["e7", "d6", True],
["e7", "a3", True],
["e3", "a7", True],
["a1", "h8", True],
["a1", "h7", False],
["h1", "a8", True]
]
for t in tests:
res = bishopAndPawn(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: bishopAndPawn({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: bishopAndPawn({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
Solve Code Fights bishop and pawn problem#!/usr/local/bin/python
# Code Fights Bishop and Pawn Problem
def bishopAndPawn(bishop, pawn):
epsilon = 0.001
dist = ((ord(bishop[0]) - ord(pawn[0]))**2 +
(int(bishop[1]) - int(pawn[1]))**2)**(0.5)
dist = dist / (2**0.5) # distance is a multiple of sqrt(2)
return (round(dist) - dist < epsilon and bishop[0] != pawn[0] and
bishop[1] != pawn[1])
def main():
tests = [
["a1", "c3", True],
["h1", "h3", False],
["a5", "c3", True],
["g1", "f3", False],
["e7", "d6", True],
["e7", "a3", True],
["e3", "a7", True],
["a1", "h8", True],
["a1", "h7", False],
["h1", "a8", True]
]
for t in tests:
res = bishopAndPawn(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: bishopAndPawn({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: bishopAndPawn({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Solve Code Fights bishop and pawn problem<commit_after>#!/usr/local/bin/python
# Code Fights Bishop and Pawn Problem
def bishopAndPawn(bishop, pawn):
epsilon = 0.001
dist = ((ord(bishop[0]) - ord(pawn[0]))**2 +
(int(bishop[1]) - int(pawn[1]))**2)**(0.5)
dist = dist / (2**0.5) # distance is a multiple of sqrt(2)
return (round(dist) - dist < epsilon and bishop[0] != pawn[0] and
bishop[1] != pawn[1])
def main():
tests = [
["a1", "c3", True],
["h1", "h3", False],
["a5", "c3", True],
["g1", "f3", False],
["e7", "d6", True],
["e7", "a3", True],
["e3", "a7", True],
["a1", "h8", True],
["a1", "h7", False],
["h1", "a8", True]
]
for t in tests:
res = bishopAndPawn(t[0], t[1])
ans = t[2]
if ans == res:
print("PASSED: bishopAndPawn({}, {}) returned {}"
.format(t[0], t[1], res))
else:
print(("FAILED: bishopAndPawn({}, {}) returned {},"
"answer: {}").format(t[0], t[1], res, ans))
if __name__ == '__main__':
main()
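Because the rounding test above is signed (no abs()), offsets whose scaled distance rounds downward slip through — for example a1 to b4 gives sqrt(10)/sqrt(2) ≈ 2.236, which passes the epsilon check even though it is not a diagonal. The usual board-geometry test is simply |Δfile| == |Δrank|; a compact alternative sketch, not the snippet's own implementation:
# Alternative diagonal test: a bishop reaches the pawn iff the absolute file
# and rank differences are equal and non-zero.
def bishop_attacks(bishop, pawn):
    dx = abs(ord(bishop[0]) - ord(pawn[0]))
    dy = abs(int(bishop[1]) - int(pawn[1]))
    return dx == dy and dx != 0

assert bishop_attacks("a1", "c3")
assert not bishop_attacks("g1", "f3")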
|
|
314de418ff9e920238c5dcb274a6b009fab728d0
|
icp.py
|
icp.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 18:07:42 2017
@author: vostok
"""
import numpy as np
def icp_metric(image_stars, catalog_stars, vectors=False):
closest_distances = np.empty_like(image_stars['X'])
closest_indices = np.empty_like(image_stars['X'])
catalog_X, catalog_Y = catalog_stars
for i, star in enumerate(image_stars):
dx = star['X'] - catalog_X
dy = star['Y'] - catalog_Y
r2 = dx**2 + dy**2
closest_distances[i] = r2.min()
closest_indices[i] = r2.argmin()
if not vectors:
return closest_distances.sum()
return closest_distances.sum(), \
closest_distances, closest_indices.astype('int')
|
Add function for ICP metrics (and plotting)
|
Add function for ICP metrics (and plotting)
|
Python
|
mit
|
lkangas/python-tycho2
|
Add function for ICP metrics (and plotting)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 18:07:42 2017
@author: vostok
"""
import numpy as np
def icp_metric(image_stars, catalog_stars, vectors=False):
closest_distances = np.empty_like(image_stars['X'])
closest_indices = np.empty_like(image_stars['X'])
catalog_X, catalog_Y = catalog_stars
for i, star in enumerate(image_stars):
dx = star['X'] - catalog_X
dy = star['Y'] - catalog_Y
r2 = dx**2 + dy**2
closest_distances[i] = r2.min()
closest_indices[i] = r2.argmin()
if not vectors:
return closest_distances.sum()
return closest_distances.sum(), \
closest_distances, closest_indices.astype('int')
|
<commit_before><commit_msg>Add function for ICP metrics (and plotting)<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 18:07:42 2017
@author: vostok
"""
import numpy as np
def icp_metric(image_stars, catalog_stars, vectors=False):
closest_distances = np.empty_like(image_stars['X'])
closest_indices = np.empty_like(image_stars['X'])
catalog_X, catalog_Y = catalog_stars
for i, star in enumerate(image_stars):
dx = star['X'] - catalog_X
dy = star['Y'] - catalog_Y
r2 = dx**2 + dy**2
closest_distances[i] = r2.min()
closest_indices[i] = r2.argmin()
if not vectors:
return closest_distances.sum()
return closest_distances.sum(), \
closest_distances, closest_indices.astype('int')
|
Add function for ICP metrics (and plotting)# -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 18:07:42 2017
@author: vostok
"""
import numpy as np
def icp_metric(image_stars, catalog_stars, vectors=False):
closest_distances = np.empty_like(image_stars['X'])
closest_indices = np.empty_like(image_stars['X'])
catalog_X, catalog_Y = catalog_stars
for i, star in enumerate(image_stars):
dx = star['X'] - catalog_X
dy = star['Y'] - catalog_Y
r2 = dx**2 + dy**2
closest_distances[i] = r2.min()
closest_indices[i] = r2.argmin()
if not vectors:
return closest_distances.sum()
return closest_distances.sum(), \
closest_distances, closest_indices.astype('int')
|
<commit_before><commit_msg>Add function for ICP metrics (and plotting)<commit_after># -*- coding: utf-8 -*-
"""
Created on Mon Dec 25 18:07:42 2017
@author: vostok
"""
import numpy as np
def icp_metric(image_stars, catalog_stars, vectors=False):
closest_distances = np.empty_like(image_stars['X'])
closest_indices = np.empty_like(image_stars['X'])
catalog_X, catalog_Y = catalog_stars
for i, star in enumerate(image_stars):
dx = star['X'] - catalog_X
dy = star['Y'] - catalog_Y
r2 = dx**2 + dy**2
closest_distances[i] = r2.min()
closest_indices[i] = r2.argmin()
if not vectors:
return closest_distances.sum()
return closest_distances.sum(), \
closest_distances, closest_indices.astype('int')
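icp_metric expects image_stars to be a NumPy structured array with 'X' and 'Y' fields and catalog_stars to be a plain (X, Y) pair of coordinate arrays; a small self-contained usage sketch with made-up coordinates:
# Usage sketch: three detected stars matched against a five-star catalogue.
import numpy as np

image_stars = np.array([(1.0, 1.1), (4.2, 0.9), (7.8, 3.0)],
                       dtype=[('X', 'f8'), ('Y', 'f8')])
catalog_stars = (np.array([1.0, 4.0, 8.0, 2.0, 6.0]),
                 np.array([1.0, 1.0, 3.0, 5.0, 2.0]))

total, dists, idx = icp_metric(image_stars, catalog_stars, vectors=True)
print(total)   # sum of squared distances to each star's nearest catalogue entry
print(idx)     # index of the matched catalogue star for each image star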
|
|
36c0ff5f5139b5c0277f23d535aa28d597c991a5
|
depot/tests/test_rentals.py
|
depot/tests/test_rentals.py
|
from django.contrib.auth.models import User
from depot.models import Depot, Organization
from rental.models import Rental
from verleihtool.test import ClientTestCase
from datetime import datetime
class RentalsTestCase(ClientTestCase):
def create_rental(self, depot, firstname, lastname, state):
return Rental.objects.create(
depot=depot,
firstname=firstname,
lastname=lastname,
start_date=datetime(2017, 3, 25, 0, 0),
return_date=datetime(2017, 3, 27, 0, 0),
state=state
)
def setUp(self):
super().setUp()
organization = Organization.objects.create(
name='My organization'
)
self.depot1 = Depot.objects.create(
name='My 1st depot',
organization=organization
)
self.depot2 = Depot.objects.create(
name='My 2nd depot',
organization=organization
)
def test_depot_rentals_as_guest(self):
response = self.as_guest.get('/depots/%d/rentals/' % self.depot1.id)
self.assertEqual(response.status_code, 403)
def test_depot_rentals_as_depot_manager(self):
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
def test_depot_rentals_show_rental_list(self):
self.create_rental(self.depot1, 'Dick', 'Sendors', Rental.STATE_PENDING),
self.create_rental(self.depot1, 'Greg', 'Johnson', Rental.STATE_APPROVED),
self.create_rental(self.depot2, 'Doris', 'Brier', Rental.STATE_REVOKED)
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
self.assertContains(response, 'Dick Sendors')
self.assertContains(response, 'Greg Johnson')
self.assertNotContains(response, 'Doris Brier')
self.assertContains(response, 'March 25, 2017')
self.assertContains(response, 'March 27, 2017')
self.assertContains(response, 'pending')
self.assertContains(response, 'approved')
self.assertNotContains(response, 'revoked')
self.assertNotContains(response, 'declined')
self.assertNotContains(response, 'returned')
|
Test for the rentals list
|
Test for the rentals list
|
Python
|
agpl-3.0
|
verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool,verleihtool/verleihtool
|
Test for the rentals list
|
from django.contrib.auth.models import User
from depot.models import Depot, Organization
from rental.models import Rental
from verleihtool.test import ClientTestCase
from datetime import datetime
class RentalsTestCase(ClientTestCase):
def create_rental(self, depot, firstname, lastname, state):
return Rental.objects.create(
depot=depot,
firstname=firstname,
lastname=lastname,
start_date=datetime(2017, 3, 25, 0, 0),
return_date=datetime(2017, 3, 27, 0, 0),
state=state
)
def setUp(self):
super().setUp()
organization = Organization.objects.create(
name='My organization'
)
self.depot1 = Depot.objects.create(
name='My 1st depot',
organization=organization
)
self.depot2 = Depot.objects.create(
name='My 2nd depot',
organization=organization
)
def test_depot_rentals_as_guest(self):
response = self.as_guest.get('/depots/%d/rentals/' % self.depot1.id)
self.assertEqual(response.status_code, 403)
def test_depot_rentals_as_depot_manager(self):
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
def test_depot_rentals_show_rental_list(self):
self.create_rental(self.depot1, 'Dick', 'Sendors', Rental.STATE_PENDING),
self.create_rental(self.depot1, 'Greg', 'Johnson', Rental.STATE_APPROVED),
self.create_rental(self.depot2, 'Doris', 'Brier', Rental.STATE_REVOKED)
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
self.assertContains(response, 'Dick Sendors')
self.assertContains(response, 'Greg Johnson')
self.assertNotContains(response, 'Doris Brier')
self.assertContains(response, 'March 25, 2017')
self.assertContains(response, 'March 27, 2017')
self.assertContains(response, 'pending')
self.assertContains(response, 'approved')
self.assertNotContains(response, 'revoked')
self.assertNotContains(response, 'declined')
self.assertNotContains(response, 'returned')
|
<commit_before><commit_msg>Test for the rentals list<commit_after>
|
from django.contrib.auth.models import User
from depot.models import Depot, Organization
from rental.models import Rental
from verleihtool.test import ClientTestCase
from datetime import datetime
class RentalsTestCase(ClientTestCase):
def create_rental(self, depot, firstname, lastname, state):
return Rental.objects.create(
depot=depot,
firstname=firstname,
lastname=lastname,
start_date=datetime(2017, 3, 25, 0, 0),
return_date=datetime(2017, 3, 27, 0, 0),
state=state
)
def setUp(self):
super().setUp()
organization = Organization.objects.create(
name='My organization'
)
self.depot1 = Depot.objects.create(
name='My 1st depot',
organization=organization
)
self.depot2 = Depot.objects.create(
name='My 2nd depot',
organization=organization
)
def test_depot_rentals_as_guest(self):
response = self.as_guest.get('/depots/%d/rentals/' % self.depot1.id)
self.assertEqual(response.status_code, 403)
def test_depot_rentals_as_depot_manager(self):
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
def test_depot_rentals_show_rental_list(self):
self.create_rental(self.depot1, 'Dick', 'Sendors', Rental.STATE_PENDING),
self.create_rental(self.depot1, 'Greg', 'Johnson', Rental.STATE_APPROVED),
self.create_rental(self.depot2, 'Doris', 'Brier', Rental.STATE_REVOKED)
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
self.assertContains(response, 'Dick Sendors')
self.assertContains(response, 'Greg Johnson')
self.assertNotContains(response, 'Doris Brier')
self.assertContains(response, 'March 25, 2017')
self.assertContains(response, 'March 27, 2017')
self.assertContains(response, 'pending')
self.assertContains(response, 'approved')
self.assertNotContains(response, 'revoked')
self.assertNotContains(response, 'declined')
self.assertNotContains(response, 'returned')
|
Test for the rentals listfrom django.contrib.auth.models import User
from depot.models import Depot, Organization
from rental.models import Rental
from verleihtool.test import ClientTestCase
from datetime import datetime
class RentalsTestCase(ClientTestCase):
def create_rental(self, depot, firstname, lastname, state):
return Rental.objects.create(
depot=depot,
firstname=firstname,
lastname=lastname,
start_date=datetime(2017, 3, 25, 0, 0),
return_date=datetime(2017, 3, 27, 0, 0),
state=state
)
def setUp(self):
super().setUp()
organization = Organization.objects.create(
name='My organization'
)
self.depot1 = Depot.objects.create(
name='My 1st depot',
organization=organization
)
self.depot2 = Depot.objects.create(
name='My 2nd depot',
organization=organization
)
def test_depot_rentals_as_guest(self):
response = self.as_guest.get('/depots/%d/rentals/' % self.depot1.id)
self.assertEqual(response.status_code, 403)
def test_depot_rentals_as_depot_manager(self):
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
def test_depot_rentals_show_rental_list(self):
self.create_rental(self.depot1, 'Dick', 'Sendors', Rental.STATE_PENDING),
self.create_rental(self.depot1, 'Greg', 'Johnson', Rental.STATE_APPROVED),
self.create_rental(self.depot2, 'Doris', 'Brier', Rental.STATE_REVOKED)
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
self.assertContains(response, 'Dick Sendors')
self.assertContains(response, 'Greg Johnson')
self.assertNotContains(response, 'Doris Brier')
self.assertContains(response, 'March 25, 2017')
self.assertContains(response, 'March 27, 2017')
self.assertContains(response, 'pending')
self.assertContains(response, 'approved')
self.assertNotContains(response, 'revoked')
self.assertNotContains(response, 'declined')
self.assertNotContains(response, 'returned')
|
<commit_before><commit_msg>Test for the rentals list<commit_after>from django.contrib.auth.models import User
from depot.models import Depot, Organization
from rental.models import Rental
from verleihtool.test import ClientTestCase
from datetime import datetime
class RentalsTestCase(ClientTestCase):
def create_rental(self, depot, firstname, lastname, state):
return Rental.objects.create(
depot=depot,
firstname=firstname,
lastname=lastname,
start_date=datetime(2017, 3, 25, 0, 0),
return_date=datetime(2017, 3, 27, 0, 0),
state=state
)
def setUp(self):
super().setUp()
organization = Organization.objects.create(
name='My organization'
)
self.depot1 = Depot.objects.create(
name='My 1st depot',
organization=organization
)
self.depot2 = Depot.objects.create(
name='My 2nd depot',
organization=organization
)
def test_depot_rentals_as_guest(self):
response = self.as_guest.get('/depots/%d/rentals/' % self.depot1.id)
self.assertEqual(response.status_code, 403)
def test_depot_rentals_as_depot_manager(self):
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
def test_depot_rentals_show_rental_list(self):
self.create_rental(self.depot1, 'Dick', 'Sendors', Rental.STATE_PENDING),
self.create_rental(self.depot1, 'Greg', 'Johnson', Rental.STATE_APPROVED),
self.create_rental(self.depot2, 'Doris', 'Brier', Rental.STATE_REVOKED)
self.depot1.manager_users.add(self.user)
response = self.as_user.get('/depots/%d/rentals/' % self.depot1.id)
self.assertSuccess(response, 'depot/rentals.html')
self.assertContains(response, 'Dick Sendors')
self.assertContains(response, 'Greg Johnson')
self.assertNotContains(response, 'Doris Brier')
self.assertContains(response, 'March 25, 2017')
self.assertContains(response, 'March 27, 2017')
self.assertContains(response, 'pending')
self.assertContains(response, 'approved')
self.assertNotContains(response, 'revoked')
self.assertNotContains(response, 'declined')
self.assertNotContains(response, 'returned')
|
|
9a4cd54a254089e2ffdffc97b40791bb96041660
|
zilencer/management/commands/add_remote_server.py
|
zilencer/management/commands/add_remote_server.py
|
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zilencer.models import RemoteZulipServer
class Command(ZulipBaseCommand):
help = """Add a remote Zulip server for push notifications."""
def add_arguments(self, parser: ArgumentParser) -> None:
group = parser.add_argument_group("command-specific arguments")
group.add_argument('uuid', help="the user's `zulip_org_id`")
group.add_argument('key', help="the user's `zulip_org_key`")
group.add_argument('--hostname', '-n', required=True,
help="the hostname, for human identification")
group.add_argument('--email', '-e', required=True,
help="a contact email address")
def handle(self, uuid: str, key: str, hostname: str, email: str,
**options: Any) -> None:
RemoteZulipServer.objects.create(uuid=uuid,
api_key=key,
hostname=hostname,
contact_email=email)
|
Add a simple management command to create a RemoteZulipServer.
|
zilencer: Add a simple management command to create a RemoteZulipServer.
This saves us from having to go into a `manage.py shell` to do this,
and adds a bit more structure like the usage message.
|
Python
|
apache-2.0
|
rishig/zulip,showell/zulip,zulip/zulip,synicalsyntax/zulip,dhcrzf/zulip,rishig/zulip,zulip/zulip,shubhamdhama/zulip,shubhamdhama/zulip,hackerkid/zulip,dhcrzf/zulip,punchagan/zulip,synicalsyntax/zulip,timabbott/zulip,rishig/zulip,kou/zulip,tommyip/zulip,andersk/zulip,timabbott/zulip,rht/zulip,timabbott/zulip,brainwane/zulip,showell/zulip,synicalsyntax/zulip,shubhamdhama/zulip,synicalsyntax/zulip,kou/zulip,dhcrzf/zulip,andersk/zulip,eeshangarg/zulip,kou/zulip,dhcrzf/zulip,brainwane/zulip,eeshangarg/zulip,tommyip/zulip,zulip/zulip,kou/zulip,jackrzhang/zulip,shubhamdhama/zulip,hackerkid/zulip,punchagan/zulip,timabbott/zulip,eeshangarg/zulip,timabbott/zulip,rht/zulip,andersk/zulip,tommyip/zulip,brainwane/zulip,rishig/zulip,brainwane/zulip,rishig/zulip,showell/zulip,jackrzhang/zulip,showell/zulip,showell/zulip,brainwane/zulip,jackrzhang/zulip,synicalsyntax/zulip,dhcrzf/zulip,rishig/zulip,shubhamdhama/zulip,tommyip/zulip,tommyip/zulip,zulip/zulip,eeshangarg/zulip,andersk/zulip,zulip/zulip,timabbott/zulip,punchagan/zulip,rht/zulip,hackerkid/zulip,eeshangarg/zulip,hackerkid/zulip,kou/zulip,shubhamdhama/zulip,hackerkid/zulip,hackerkid/zulip,rht/zulip,kou/zulip,dhcrzf/zulip,jackrzhang/zulip,brainwane/zulip,tommyip/zulip,eeshangarg/zulip,showell/zulip,eeshangarg/zulip,punchagan/zulip,zulip/zulip,jackrzhang/zulip,jackrzhang/zulip,dhcrzf/zulip,tommyip/zulip,brainwane/zulip,punchagan/zulip,rishig/zulip,punchagan/zulip,andersk/zulip,shubhamdhama/zulip,timabbott/zulip,hackerkid/zulip,andersk/zulip,punchagan/zulip,andersk/zulip,zulip/zulip,rht/zulip,synicalsyntax/zulip,rht/zulip,jackrzhang/zulip,kou/zulip,rht/zulip,showell/zulip,synicalsyntax/zulip
|
zilencer: Add a simple management command to create a RemoteZulipServer.
This saves us from having to go into a `manage.py shell` to do this,
and adds a bit more structure like the usage message.
|
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zilencer.models import RemoteZulipServer
class Command(ZulipBaseCommand):
help = """Add a remote Zulip server for push notifications."""
def add_arguments(self, parser: ArgumentParser) -> None:
group = parser.add_argument_group("command-specific arguments")
group.add_argument('uuid', help="the user's `zulip_org_id`")
group.add_argument('key', help="the user's `zulip_org_key`")
group.add_argument('--hostname', '-n', required=True,
help="the hostname, for human identification")
group.add_argument('--email', '-e', required=True,
help="a contact email address")
def handle(self, uuid: str, key: str, hostname: str, email: str,
**options: Any) -> None:
RemoteZulipServer.objects.create(uuid=uuid,
api_key=key,
hostname=hostname,
contact_email=email)
|
<commit_before><commit_msg>zilencer: Add a simple management command to create a RemoteZulipServer.
This saves us from having to go into a `manage.py shell` to do this,
and adds a bit more structure like the usage message.<commit_after>
|
from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zilencer.models import RemoteZulipServer
class Command(ZulipBaseCommand):
help = """Add a remote Zulip server for push notifications."""
def add_arguments(self, parser: ArgumentParser) -> None:
group = parser.add_argument_group("command-specific arguments")
group.add_argument('uuid', help="the user's `zulip_org_id`")
group.add_argument('key', help="the user's `zulip_org_key`")
group.add_argument('--hostname', '-n', required=True,
help="the hostname, for human identification")
group.add_argument('--email', '-e', required=True,
help="a contact email address")
def handle(self, uuid: str, key: str, hostname: str, email: str,
**options: Any) -> None:
RemoteZulipServer.objects.create(uuid=uuid,
api_key=key,
hostname=hostname,
contact_email=email)
|
zilencer: Add a simple management command to create a RemoteZulipServer.
This saves us from having to go into a `manage.py shell` to do this,
and adds a bit more structure like the usage message.from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zilencer.models import RemoteZulipServer
class Command(ZulipBaseCommand):
help = """Add a remote Zulip server for push notifications."""
def add_arguments(self, parser: ArgumentParser) -> None:
group = parser.add_argument_group("command-specific arguments")
group.add_argument('uuid', help="the user's `zulip_org_id`")
group.add_argument('key', help="the user's `zulip_org_key`")
group.add_argument('--hostname', '-n', required=True,
help="the hostname, for human identification")
group.add_argument('--email', '-e', required=True,
help="a contact email address")
def handle(self, uuid: str, key: str, hostname: str, email: str,
**options: Any) -> None:
RemoteZulipServer.objects.create(uuid=uuid,
api_key=key,
hostname=hostname,
contact_email=email)
|
<commit_before><commit_msg>zilencer: Add a simple management command to create a RemoteZulipServer.
This saves us from having to go into a `manage.py shell` to do this,
and adds a bit more structure like the usage message.<commit_after>from argparse import ArgumentParser
from typing import Any
from zerver.lib.management import ZulipBaseCommand
from zilencer.models import RemoteZulipServer
class Command(ZulipBaseCommand):
help = """Add a remote Zulip server for push notifications."""
def add_arguments(self, parser: ArgumentParser) -> None:
group = parser.add_argument_group("command-specific arguments")
group.add_argument('uuid', help="the user's `zulip_org_id`")
group.add_argument('key', help="the user's `zulip_org_key`")
group.add_argument('--hostname', '-n', required=True,
help="the hostname, for human identification")
group.add_argument('--email', '-e', required=True,
help="a contact email address")
def handle(self, uuid: str, key: str, hostname: str, email: str,
**options: Any) -> None:
RemoteZulipServer.objects.create(uuid=uuid,
api_key=key,
hostname=hostname,
contact_email=email)
|
|
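A minimal usage sketch for the management command in the record above, assuming a configured Zulip/Django environment; the UUID, key, hostname and email values are placeholders, not real credentials.

# Roughly: ./manage.py add_remote_server <zulip_org_id> <zulip_org_key> \
#              --hostname push.example.com --email admin@example.com
# which boils down to the same ORM call the command itself makes:
from zilencer.models import RemoteZulipServer

RemoteZulipServer.objects.create(
    uuid="6cde5f7a-1f7e-4978-9716-49f69ebfc9fe",   # zulip_org_id (placeholder)
    api_key="abcd1234",                            # zulip_org_key (placeholder)
    hostname="push.example.com",
    contact_email="admin@example.com",
)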
d85ac47afeda6753157b77d1ffd7d27f122d42d7
|
tests/api/conftest.py
|
tests/api/conftest.py
|
from unittest import mock
import pytest
import fmn.api.main
from fmn.api.auth import get_identity
@pytest.fixture
def api_identity(fasjson_user_data):
class TestIdentity:
name = fasjson_user_data["username"]
def get_test_identity():
return TestIdentity
with mock.patch.dict(fmn.api.main.app.dependency_overrides):
fmn.api.main.app.dependency_overrides[get_identity] = get_test_identity
yield TestIdentity
|
Add test fixture mocking an API identity
|
Add test fixture mocking an API identity
Related: #606
Signed-off-by: Nils Philippsen <ad3fa8d847df2b57853a376ad688e4be8041ecd4@redhat.com>
|
Python
|
lgpl-2.1
|
fedora-infra/fmn,fedora-infra/fmn,fedora-infra/fmn,fedora-infra/fmn,fedora-infra/fmn
|
Add test fixture mocking an API identity
Related: #606
Signed-off-by: Nils Philippsen <ad3fa8d847df2b57853a376ad688e4be8041ecd4@redhat.com>
|
from unittest import mock
import pytest
import fmn.api.main
from fmn.api.auth import get_identity
@pytest.fixture
def api_identity(fasjson_user_data):
class TestIdentity:
name = fasjson_user_data["username"]
def get_test_identity():
return TestIdentity
with mock.patch.dict(fmn.api.main.app.dependency_overrides):
fmn.api.main.app.dependency_overrides[get_identity] = get_test_identity
yield TestIdentity
|
<commit_before><commit_msg>Add test fixture mocking an API identity
Related: #606
Signed-off-by: Nils Philippsen <ad3fa8d847df2b57853a376ad688e4be8041ecd4@redhat.com><commit_after>
|
from unittest import mock
import pytest
import fmn.api.main
from fmn.api.auth import get_identity
@pytest.fixture
def api_identity(fasjson_user_data):
class TestIdentity:
name = fasjson_user_data["username"]
def get_test_identity():
return TestIdentity
with mock.patch.dict(fmn.api.main.app.dependency_overrides):
fmn.api.main.app.dependency_overrides[get_identity] = get_test_identity
yield TestIdentity
|
Add test fixture mocking an API identity
Related: #606
Signed-off-by: Nils Philippsen <ad3fa8d847df2b57853a376ad688e4be8041ecd4@redhat.com>from unittest import mock
import pytest
import fmn.api.main
from fmn.api.auth import get_identity
@pytest.fixture
def api_identity(fasjson_user_data):
class TestIdentity:
name = fasjson_user_data["username"]
def get_test_identity():
return TestIdentity
with mock.patch.dict(fmn.api.main.app.dependency_overrides):
fmn.api.main.app.dependency_overrides[get_identity] = get_test_identity
yield TestIdentity
|
<commit_before><commit_msg>Add test fixture mocking an API identity
Related: #606
Signed-off-by: Nils Philippsen <ad3fa8d847df2b57853a376ad688e4be8041ecd4@redhat.com><commit_after>from unittest import mock
import pytest
import fmn.api.main
from fmn.api.auth import get_identity
@pytest.fixture
def api_identity(fasjson_user_data):
class TestIdentity:
name = fasjson_user_data["username"]
def get_test_identity():
return TestIdentity
with mock.patch.dict(fmn.api.main.app.dependency_overrides):
fmn.api.main.app.dependency_overrides[get_identity] = get_test_identity
yield TestIdentity
|
|
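A sketch of how the fixture in the record above might be exercised in a test, assuming only what the fixture itself guarantees: the override is installed on fmn.api.main.app and returns an identity named after fasjson_user_data["username"].

from fmn.api.auth import get_identity

import fmn.api.main


def test_identity_is_overridden(api_identity, fasjson_user_data):
    # The fixture installed a dependency override returning the fake identity.
    override = fmn.api.main.app.dependency_overrides[get_identity]
    assert override() is api_identity  # the fixture yields the same class
    assert override().name == fasjson_user_data["username"]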
2a5ca3c708582b213950d7134a2e29333b8dfd11
|
tests/test_backend_tools.py
|
tests/test_backend_tools.py
|
import unittest
from pymanopt.tools import flatten_arguments
class TestArgumentFlattening(unittest.TestCase):
def _test_flatten_arguments(
self, arguments, correctly_flattened_arguments):
flattened_arguments = flatten_arguments(arguments)
self.assertEqual(flattened_arguments, correctly_flattened_arguments)
flattened_arguments_with_signature_hint = flatten_arguments(
arguments, signature=arguments)
self.assertEqual(flattened_arguments_with_signature_hint,
correctly_flattened_arguments)
def test_single_argument(self):
arguments = ("x",)
self._test_flatten_arguments(arguments, arguments)
def test_multiple_arguments(self):
arguments = ("x", "y", "z")
self._test_flatten_arguments(arguments, arguments)
def test_nested_arguments(self):
arguments = (("x", "y"), "z")
self._test_flatten_arguments(arguments, ("x", "y", "z"))
|
Add test stub module for backend tools
|
Add test stub module for backend tools
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>
|
Python
|
bsd-3-clause
|
nkoep/pymanopt,pymanopt/pymanopt,pymanopt/pymanopt,nkoep/pymanopt,nkoep/pymanopt
|
Add test stub module for backend tools
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>
|
import unittest
from pymanopt.tools import flatten_arguments
class TestArgumentFlattening(unittest.TestCase):
def _test_flatten_arguments(
self, arguments, correctly_flattened_arguments):
flattened_arguments = flatten_arguments(arguments)
self.assertEqual(flattened_arguments, correctly_flattened_arguments)
flattened_arguments_with_signature_hint = flatten_arguments(
arguments, signature=arguments)
self.assertEqual(flattened_arguments_with_signature_hint,
correctly_flattened_arguments)
def test_single_argument(self):
arguments = ("x",)
self._test_flatten_arguments(arguments, arguments)
def test_multiple_arguments(self):
arguments = ("x", "y", "z")
self._test_flatten_arguments(arguments, arguments)
def test_nested_arguments(self):
arguments = (("x", "y"), "z")
self._test_flatten_arguments(arguments, ("x", "y", "z"))
|
<commit_before><commit_msg>Add test stub module for backend tools
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com><commit_after>
|
import unittest
from pymanopt.tools import flatten_arguments
class TestArgumentFlattening(unittest.TestCase):
def _test_flatten_arguments(
self, arguments, correctly_flattened_arguments):
flattened_arguments = flatten_arguments(arguments)
self.assertEqual(flattened_arguments, correctly_flattened_arguments)
flattened_arguments_with_signature_hint = flatten_arguments(
arguments, signature=arguments)
self.assertEqual(flattened_arguments_with_signature_hint,
correctly_flattened_arguments)
def test_single_argument(self):
arguments = ("x",)
self._test_flatten_arguments(arguments, arguments)
def test_multiple_arguments(self):
arguments = ("x", "y", "z")
self._test_flatten_arguments(arguments, arguments)
def test_nested_arguments(self):
arguments = (("x", "y"), "z")
self._test_flatten_arguments(arguments, ("x", "y", "z"))
|
Add test stub module for backend tools
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com>import unittest
from pymanopt.tools import flatten_arguments
class TestArgumentFlattening(unittest.TestCase):
def _test_flatten_arguments(
self, arguments, correctly_flattened_arguments):
flattened_arguments = flatten_arguments(arguments)
self.assertEqual(flattened_arguments, correctly_flattened_arguments)
flattened_arguments_with_signature_hint = flatten_arguments(
arguments, signature=arguments)
self.assertEqual(flattened_arguments_with_signature_hint,
correctly_flattened_arguments)
def test_single_argument(self):
arguments = ("x",)
self._test_flatten_arguments(arguments, arguments)
def test_multiple_arguments(self):
arguments = ("x", "y", "z")
self._test_flatten_arguments(arguments, arguments)
def test_nested_arguments(self):
arguments = (("x", "y"), "z")
self._test_flatten_arguments(arguments, ("x", "y", "z"))
|
<commit_before><commit_msg>Add test stub module for backend tools
Signed-off-by: Niklas Koep <342d5290239d9c5264c8f98185afedb99596601a@gmail.com><commit_after>import unittest
from pymanopt.tools import flatten_arguments
class TestArgumentFlattening(unittest.TestCase):
def _test_flatten_arguments(
self, arguments, correctly_flattened_arguments):
flattened_arguments = flatten_arguments(arguments)
self.assertEqual(flattened_arguments, correctly_flattened_arguments)
flattened_arguments_with_signature_hint = flatten_arguments(
arguments, signature=arguments)
self.assertEqual(flattened_arguments_with_signature_hint,
correctly_flattened_arguments)
def test_single_argument(self):
arguments = ("x",)
self._test_flatten_arguments(arguments, arguments)
def test_multiple_arguments(self):
arguments = ("x", "y", "z")
self._test_flatten_arguments(arguments, arguments)
def test_nested_arguments(self):
arguments = (("x", "y"), "z")
self._test_flatten_arguments(arguments, ("x", "y", "z"))
|
|
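One possible implementation that would satisfy the tests in the record above, shown purely to illustrate the expected behaviour; pymanopt's actual flatten_arguments may differ, for example in how it uses the signature hint.

def flatten_arguments(arguments, signature=None):
    """Recursively flatten a possibly nested tuple of arguments."""
    flattened = []
    for argument in arguments:
        if isinstance(argument, (tuple, list)):
            flattened.extend(flatten_arguments(argument))
        else:
            flattened.append(argument)
    return tuple(flattened)

With this definition, flatten_arguments((("x", "y"), "z")) returns ("x", "y", "z"), matching test_nested_arguments.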
2e5cc8f3a8ccfc6299e98d1b46ed46b093e261a5
|
tests/unit/test_handlers.py
|
tests/unit/test_handlers.py
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
Add test capturing bad implementation of contains handler.
|
Add test capturing bad implementation of contains handler.
|
Python
|
bsd-3-clause
|
jamwt/diesel-pmxbot,jamwt/diesel-pmxbot
|
Add test capturing bad implementation of contains handler.
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
<commit_before><commit_msg>Add test capturing bad implementation of contains handler.<commit_after>
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
Add test capturing bad implementation of contains handler.from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
<commit_before><commit_msg>Add test capturing bad implementation of contains handler.<commit_after>from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
|
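The test in the record above pins down the behaviour that a ContainsHandler with no explicit rate must always match. A hypothetical handler with that behaviour could look like the following; it is illustrative only, not pmxbot's actual class.

import random


class ContainsHandler:
    """Illustrative stand-in: matches whenever `name` occurs in the message,
    throttled by an optional rate that defaults to always matching."""

    def __init__(self, name, func, rate=1.0):
        self.name = name
        self.func = func
        self.rate = rate

    def match(self, message, channel):
        # random.random() < 1.0 is always true, so an unspecified rate
        # means the handler always matches.
        return self.name in message and random.random() < self.rate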
e60397f163607fb606c3c7e872da5e9d0e37781c
|
ynr/apps/ynr_refactoring/migrations/0003_move_person_identifiers.py
|
ynr/apps/ynr_refactoring/migrations/0003_move_person_identifiers.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-19 18:25
from __future__ import unicode_literals
from django.db import migrations
def move_person_identifiers(apps, schema_editor):
ContentType = apps.get_model("contenttypes", "ContentType")
PersonIdentifier = apps.get_model("people", "PersonIdentifier")
Person = apps.get_model("people", "Person")
Identifier = apps.get_model("popolo", "Identifier")
person_content_type_id = ContentType.objects.get_for_model(Person).pk
qs = Identifier.objects.filter(
content_type_id=person_content_type_id, scheme="uk.org.publicwhip"
)
for identifier in qs:
public_whip_id = identifier.identifier.split("/")[-1]
PersonIdentifier.objects.update_or_create(
person_id=identifier.object_id,
value="https://www.theyworkforyou.com/mp/{}/".format(
public_whip_id
),
value_type="theyworkforyou",
internal_identifier=identifier.identifier,
)
class Migration(migrations.Migration):
dependencies = [("ynr_refactoring", "0002_move_old_election_slugs")]
operations = [
migrations.RunPython(move_person_identifiers, migrations.RunPython.noop)
]
|
Move TheyWorkForYou IDs to PersonIdentifiers
|
Move TheyWorkForYou IDs to PersonIdentifiers
|
Python
|
agpl-3.0
|
DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative,DemocracyClub/yournextrepresentative
|
Move TheyWorkForYou IDs to PersonIdentifiers
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-19 18:25
from __future__ import unicode_literals
from django.db import migrations
def move_person_identifiers(apps, schema_editor):
ContentType = apps.get_model("contenttypes", "ContentType")
PersonIdentifier = apps.get_model("people", "PersonIdentifier")
Person = apps.get_model("people", "Person")
Identifier = apps.get_model("popolo", "Identifier")
person_content_type_id = ContentType.objects.get_for_model(Person).pk
qs = Identifier.objects.filter(
content_type_id=person_content_type_id, scheme="uk.org.publicwhip"
)
for identifier in qs:
public_whip_id = identifier.identifier.split("/")[-1]
PersonIdentifier.objects.update_or_create(
person_id=identifier.object_id,
value="https://www.theyworkforyou.com/mp/{}/".format(
public_whip_id
),
value_type="theyworkforyou",
internal_identifier=identifier.identifier,
)
class Migration(migrations.Migration):
dependencies = [("ynr_refactoring", "0002_move_old_election_slugs")]
operations = [
migrations.RunPython(move_person_identifiers, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Move TheyWorkForYou IDs to PersonIdentifiers<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-19 18:25
from __future__ import unicode_literals
from django.db import migrations
def move_person_identifiers(apps, schema_editor):
ContentType = apps.get_model("contenttypes", "ContentType")
PersonIdentifier = apps.get_model("people", "PersonIdentifier")
Person = apps.get_model("people", "Person")
Identifier = apps.get_model("popolo", "Identifier")
person_content_type_id = ContentType.objects.get_for_model(Person).pk
qs = Identifier.objects.filter(
content_type_id=person_content_type_id, scheme="uk.org.publicwhip"
)
for identifier in qs:
public_whip_id = identifier.identifier.split("/")[-1]
PersonIdentifier.objects.update_or_create(
person_id=identifier.object_id,
value="https://www.theyworkforyou.com/mp/{}/".format(
public_whip_id
),
value_type="theyworkforyou",
internal_identifier=identifier.identifier,
)
class Migration(migrations.Migration):
dependencies = [("ynr_refactoring", "0002_move_old_election_slugs")]
operations = [
migrations.RunPython(move_person_identifiers, migrations.RunPython.noop)
]
|
Move TheyWorkForYou IDs to PersonIdentifiers# -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-19 18:25
from __future__ import unicode_literals
from django.db import migrations
def move_person_identifiers(apps, schema_editor):
ContentType = apps.get_model("contenttypes", "ContentType")
PersonIdentifier = apps.get_model("people", "PersonIdentifier")
Person = apps.get_model("people", "Person")
Identifier = apps.get_model("popolo", "Identifier")
person_content_type_id = ContentType.objects.get_for_model(Person).pk
qs = Identifier.objects.filter(
content_type_id=person_content_type_id, scheme="uk.org.publicwhip"
)
for identifier in qs:
public_whip_id = identifier.identifier.split("/")[-1]
PersonIdentifier.objects.update_or_create(
person_id=identifier.object_id,
value="https://www.theyworkforyou.com/mp/{}/".format(
public_whip_id
),
value_type="theyworkforyou",
internal_identifier=identifier.identifier,
)
class Migration(migrations.Migration):
dependencies = [("ynr_refactoring", "0002_move_old_election_slugs")]
operations = [
migrations.RunPython(move_person_identifiers, migrations.RunPython.noop)
]
|
<commit_before><commit_msg>Move TheyWorkForYou IDs to PersonIdentifiers<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.18 on 2019-01-19 18:25
from __future__ import unicode_literals
from django.db import migrations
def move_person_identifiers(apps, schema_editor):
ContentType = apps.get_model("contenttypes", "ContentType")
PersonIdentifier = apps.get_model("people", "PersonIdentifier")
Person = apps.get_model("people", "Person")
Identifier = apps.get_model("popolo", "Identifier")
person_content_type_id = ContentType.objects.get_for_model(Person).pk
qs = Identifier.objects.filter(
content_type_id=person_content_type_id, scheme="uk.org.publicwhip"
)
for identifier in qs:
public_whip_id = identifier.identifier.split("/")[-1]
PersonIdentifier.objects.update_or_create(
person_id=identifier.object_id,
value="https://www.theyworkforyou.com/mp/{}/".format(
public_whip_id
),
value_type="theyworkforyou",
internal_identifier=identifier.identifier,
)
class Migration(migrations.Migration):
dependencies = [("ynr_refactoring", "0002_move_old_election_slugs")]
operations = [
migrations.RunPython(move_person_identifiers, migrations.RunPython.noop)
]
|
|
abfb39c841293d719809d595af6aa7e9d6a12e87
|
py/add-strings.py
|
py/add-strings.py
|
class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
l1, l2 = len(num1), len(num2)
if l1 > l2:
l1, l2 = l2, l1
num1, num2 = num2, num1
carry = 0
out = []
for i in xrange(1, l1 + 1):
s = int(num1[-i]) + int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
for i in xrange(l1 + 1, l2 + 1):
s = int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
if carry:
out.append(carry)
return ''.join(map(str, out[::-1]))
|
Add py solution for 415. Add Strings
|
Add py solution for 415. Add Strings
415. Add Strings: https://leetcode.com/problems/add-strings/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 415. Add Strings
415. Add Strings: https://leetcode.com/problems/add-strings/
|
class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
l1, l2 = len(num1), len(num2)
if l1 > l2:
l1, l2 = l2, l1
num1, num2 = num2, num1
carry = 0
out = []
for i in xrange(1, l1 + 1):
s = int(num1[-i]) + int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
for i in xrange(l1 + 1, l2 + 1):
s = int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
if carry:
out.append(carry)
return ''.join(map(str, out[::-1]))
|
<commit_before><commit_msg>Add py solution for 415. Add Strings
415. Add Strings: https://leetcode.com/problems/add-strings/<commit_after>
|
class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
l1, l2 = len(num1), len(num2)
if l1 > l2:
l1, l2 = l2, l1
num1, num2 = num2, num1
carry = 0
out = []
for i in xrange(1, l1 + 1):
s = int(num1[-i]) + int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
for i in xrange(l1 + 1, l2 + 1):
s = int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
if carry:
out.append(carry)
return ''.join(map(str, out[::-1]))
|
Add py solution for 415. Add Strings
415. Add Strings: https://leetcode.com/problems/add-strings/class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
l1, l2 = len(num1), len(num2)
if l1 > l2:
l1, l2 = l2, l1
num1, num2 = num2, num1
carry = 0
out = []
for i in xrange(1, l1 + 1):
s = int(num1[-i]) + int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
for i in xrange(l1 + 1, l2 + 1):
s = int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
if carry:
out.append(carry)
return ''.join(map(str, out[::-1]))
|
<commit_before><commit_msg>Add py solution for 415. Add Strings
415. Add Strings: https://leetcode.com/problems/add-strings/<commit_after>class Solution(object):
def addStrings(self, num1, num2):
"""
:type num1: str
:type num2: str
:rtype: str
"""
l1, l2 = len(num1), len(num2)
if l1 > l2:
l1, l2 = l2, l1
num1, num2 = num2, num1
carry = 0
out = []
for i in xrange(1, l1 + 1):
s = int(num1[-i]) + int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
for i in xrange(l1 + 1, l2 + 1):
s = int(num2[-i]) + carry
out.append(s % 10)
carry = s / 10
if carry:
out.append(carry)
return ''.join(map(str, out[::-1]))
|
|
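The solution recorded above targets Python 2 (xrange and integer "/" division). A direct Python 3 translation of the same digit-by-digit algorithm, for anyone running the snippet today:

class Solution(object):
    def addStrings(self, num1, num2):
        l1, l2 = len(num1), len(num2)
        if l1 > l2:
            l1, l2 = l2, l1
            num1, num2 = num2, num1
        carry = 0
        out = []
        for i in range(1, l1 + 1):
            s = int(num1[-i]) + int(num2[-i]) + carry
            out.append(s % 10)
            carry = s // 10
        for i in range(l1 + 1, l2 + 1):
            s = int(num2[-i]) + carry
            out.append(s % 10)
            carry = s // 10
        if carry:
            out.append(carry)
        return ''.join(map(str, out[::-1]))

# Example: Solution().addStrings("123", "989") -> "1112"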
11c060ea43082dfaad503b2af9149e69878ca4e6
|
zinnia_ckeditor/__init__.py
|
zinnia_ckeditor/__init__.py
|
"""CKEditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-ckeditor'
|
Create the package with metadatas
|
Create the package with metadatas
|
Python
|
bsd-3-clause
|
django-blog-zinnia/zinnia-wysiwyg-ckeditor
|
Create the package with metadatas
|
"""CKEditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-ckeditor'
|
<commit_before><commit_msg>Create the package with metadatas<commit_after>
|
"""CKEditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-ckeditor'
|
Create the package with metadatas"""CKEditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-ckeditor'
|
<commit_before><commit_msg>Create the package with metadatas<commit_after>"""CKEditor for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/django-blog-zinnia/zinnia-wysiwyg-ckeditor'
|
|
41625db23c40c0d1b9479321693bbca6b56fbd25
|
foliant/swagger2markdown.py
|
foliant/swagger2markdown.py
|
"""Swagger to Markdown converter."""
import shlex, subprocess
def convert(swagger_location, output_file, template_file):
"""Convert Swagger JSON file to Markdown."""
if template_file:
command = "swagger2markdown -i %s -o %s -t %s" % (
swagger_location,
output_file,
template_file
)
else:
command = "swagger2markdown -i %s -o %s" % (
swagger_location,
output_file
)
print("Baking output... ", end='')
try:
proc = subprocess.check_output(
shlex.split(command),
stderr=subprocess.PIPE
)
print("Done!")
except subprocess.CalledProcessError as e:
quit(e.stderr.decode())
|
Add Swagger to Markdown converter.
|
Add Swagger to Markdown converter.
|
Python
|
mit
|
foliant-docs/foliant
|
Add Swagger to Markdown converter.
|
"""Swagger to Markdown converter."""
import shlex, subprocess
def convert(swagger_location, output_file, template_file):
"""Convert Swagger JSON file to Markdown."""
if template_file:
command = "swagger2markdown -i %s -o %s -t %s" % (
swagger_location,
output_file,
template_file
)
else:
command = "swagger2markdown -i %s -o %s" % (
swagger_location,
output_file
)
print("Baking output... ", end='')
try:
proc = subprocess.check_output(
shlex.split(command),
stderr=subprocess.PIPE
)
print("Done!")
except subprocess.CalledProcessError as e:
quit(e.stderr.decode())
|
<commit_before><commit_msg>Add Swagger to Markdown converter.<commit_after>
|
"""Swagger to Markdown converter."""
import shlex, subprocess
def convert(swagger_location, output_file, template_file):
"""Convert Swagger JSON file to Markdown."""
if template_file:
command = "swagger2markdown -i %s -o %s -t %s" % (
swagger_location,
output_file,
template_file
)
else:
command = "swagger2markdown -i %s -o %s" % (
swagger_location,
output_file
)
print("Baking output... ", end='')
try:
proc = subprocess.check_output(
shlex.split(command),
stderr=subprocess.PIPE
)
print("Done!")
except subprocess.CalledProcessError as e:
quit(e.stderr.decode())
|
Add Swagger to Markdown converter."""Swagger to Markdown converter."""
import shlex, subprocess
def convert(swagger_location, output_file, template_file):
"""Convert Swagger JSON file to Markdown."""
if template_file:
command = "swagger2markdown -i %s -o %s -t %s" % (
swagger_location,
output_file,
template_file
)
else:
command = "swagger2markdown -i %s -o %s" % (
swagger_location,
output_file
)
print("Baking output... ", end='')
try:
proc = subprocess.check_output(
shlex.split(command),
stderr=subprocess.PIPE
)
print("Done!")
except subprocess.CalledProcessError as e:
quit(e.stderr.decode())
|
<commit_before><commit_msg>Add Swagger to Markdown converter.<commit_after>"""Swagger to Markdown converter."""
import shlex, subprocess
def convert(swagger_location, output_file, template_file):
"""Convert Swagger JSON file to Markdown."""
if template_file:
command = "swagger2markdown -i %s -o %s -t %s" % (
swagger_location,
output_file,
template_file
)
else:
command = "swagger2markdown -i %s -o %s" % (
swagger_location,
output_file
)
print("Baking output... ", end='')
try:
proc = subprocess.check_output(
shlex.split(command),
stderr=subprocess.PIPE
)
print("Done!")
except subprocess.CalledProcessError as e:
quit(e.stderr.decode())
|
|
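A minimal usage sketch for the converter in the record above; the file names are placeholders, and the swagger2markdown CLI must be installed and on PATH.

from foliant import swagger2markdown

swagger2markdown.convert(
    "swagger.json",   # path or URL of the Swagger spec (placeholder)
    "api.md",         # Markdown file to produce (placeholder)
    None,             # no custom template
)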
463360261d573f3f3ca934264bd27a072b96d3e6
|
src/mmw/apps/core/decorators.py
|
src/mmw/apps/core/decorators.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import sys
import rollbar
from django.utils.timezone import now
from apps.core.models import RequestLog
def log_request(view):
"""
Log the request and its response as a RequestLog model
"""
def decorator(request, *args, **kwargs):
requested_at = now()
view_result = view(request, *args, **kwargs)
user = request.user if request.user.is_authenticated() else None
response_time = now() - requested_at
response_ms = int(response_time.total_seconds() * 1000)
log = RequestLog.objects.create(
user=user,
job_uuid=view_result.data.get('job', None),
requested_at=requested_at,
response_ms=response_ms,
status_code=view_result.status_code,
path=request.path,
query_params=request.query_params.dict(),
method=request.method,
host=request.get_host(),
remote_addr=_get_remote_addr(request))
try:
log.save()
except Exception:
pass
return view_result
decorator.__name__ = view.__name__
decorator.__dict__ = view.__dict__
decorator.__doc__ = view.__doc__
return decorator
def _get_remote_addr(request):
# get IP
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
return [x.strip() for x in ipaddr.split(",")][0]
else:
return request.META.get("REMOTE_ADDR", "")
|
Create log_request decorator to log API requests
|
Create log_request decorator to log API requests
* Creates a new decorator to save info about API requets (user, path, etc)
* Heavily inspired by
https://github.com/aschn/drf-tracking/blob/master/rest_framework_tracking/mixins.py
|
Python
|
apache-2.0
|
WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed
|
Create log_request decorator to log API requests
* Creates a new decorator to save info about API requets (user, path, etc)
* Heavily inspired by
https://github.com/aschn/drf-tracking/blob/master/rest_framework_tracking/mixins.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import sys
import rollbar
from django.utils.timezone import now
from apps.core.models import RequestLog
def log_request(view):
"""
Log the request and its response as a RequestLog model
"""
def decorator(request, *args, **kwargs):
requested_at = now()
view_result = view(request, *args, **kwargs)
user = request.user if request.user.is_authenticated() else None
response_time = now() - requested_at
response_ms = int(response_time.total_seconds() * 1000)
log = RequestLog.objects.create(
user=user,
job_uuid=view_result.data.get('job', None),
requested_at=requested_at,
response_ms=response_ms,
status_code=view_result.status_code,
path=request.path,
query_params=request.query_params.dict(),
method=request.method,
host=request.get_host(),
remote_addr=_get_remote_addr(request))
try:
log.save()
except Exception:
pass
return view_result
decorator.__name__ = view.__name__
decorator.__dict__ = view.__dict__
decorator.__doc__ = view.__doc__
return decorator
def _get_remote_addr(request):
# get IP
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
return [x.strip() for x in ipaddr.split(",")][0]
else:
return request.META.get("REMOTE_ADDR", "")
|
<commit_before><commit_msg>Create log_request decorator to log API requests
* Creates a new decorator to save info about API requets (user, path, etc)
* Heavily inspired by
https://github.com/aschn/drf-tracking/blob/master/rest_framework_tracking/mixins.py<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import sys
import rollbar
from django.utils.timezone import now
from apps.core.models import RequestLog
def log_request(view):
"""
Log the request and its response as a RequestLog model
"""
def decorator(request, *args, **kwargs):
requested_at = now()
view_result = view(request, *args, **kwargs)
user = request.user if request.user.is_authenticated() else None
response_time = now() - requested_at
response_ms = int(response_time.total_seconds() * 1000)
log = RequestLog.objects.create(
user=user,
job_uuid=view_result.data.get('job', None),
requested_at=requested_at,
response_ms=response_ms,
status_code=view_result.status_code,
path=request.path,
query_params=request.query_params.dict(),
method=request.method,
host=request.get_host(),
remote_addr=_get_remote_addr(request))
try:
log.save()
except Exception:
pass
return view_result
decorator.__name__ = view.__name__
decorator.__dict__ = view.__dict__
decorator.__doc__ = view.__doc__
return decorator
def _get_remote_addr(request):
# get IP
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
return [x.strip() for x in ipaddr.split(",")][0]
else:
return request.META.get("REMOTE_ADDR", "")
|
Create log_request decorator to log API requests
* Creates a new decorator to save info about API requets (user, path, etc)
* Heavily inspired by
https://github.com/aschn/drf-tracking/blob/master/rest_framework_tracking/mixins.py# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import sys
import rollbar
from django.utils.timezone import now
from apps.core.models import RequestLog
def log_request(view):
"""
Log the request and its response as a RequestLog model
"""
def decorator(request, *args, **kwargs):
requested_at = now()
view_result = view(request, *args, **kwargs)
user = request.user if request.user.is_authenticated() else None
response_time = now() - requested_at
response_ms = int(response_time.total_seconds() * 1000)
log = RequestLog.objects.create(
user=user,
job_uuid=view_result.data.get('job', None),
requested_at=requested_at,
response_ms=response_ms,
status_code=view_result.status_code,
path=request.path,
query_params=request.query_params.dict(),
method=request.method,
host=request.get_host(),
remote_addr=_get_remote_addr(request))
try:
log.save()
except Exception:
pass
return view_result
decorator.__name__ = view.__name__
decorator.__dict__ = view.__dict__
decorator.__doc__ = view.__doc__
return decorator
def _get_remote_addr(request):
# get IP
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
return [x.strip() for x in ipaddr.split(",")][0]
else:
return request.META.get("REMOTE_ADDR", "")
|
<commit_before><commit_msg>Create log_request decorator to log API requests
* Creates a new decorator to save info about API requets (user, path, etc)
* Heavily inspired by
https://github.com/aschn/drf-tracking/blob/master/rest_framework_tracking/mixins.py<commit_after># -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
import sys
import rollbar
from django.utils.timezone import now
from apps.core.models import RequestLog
def log_request(view):
"""
Log the request and its response as a RequestLog model
"""
def decorator(request, *args, **kwargs):
requested_at = now()
view_result = view(request, *args, **kwargs)
user = request.user if request.user.is_authenticated() else None
response_time = now() - requested_at
response_ms = int(response_time.total_seconds() * 1000)
log = RequestLog.objects.create(
user=user,
job_uuid=view_result.data.get('job', None),
requested_at=requested_at,
response_ms=response_ms,
status_code=view_result.status_code,
path=request.path,
query_params=request.query_params.dict(),
method=request.method,
host=request.get_host(),
remote_addr=_get_remote_addr(request))
try:
log.save()
except Exception:
pass
return view_result
decorator.__name__ = view.__name__
decorator.__dict__ = view.__dict__
decorator.__doc__ = view.__doc__
return decorator
def _get_remote_addr(request):
# get IP
ipaddr = request.META.get("HTTP_X_FORWARDED_FOR", None)
if ipaddr:
# X_FORWARDED_FOR returns client1, proxy1, proxy2,...
return [x.strip() for x in ipaddr.split(",")][0]
else:
return request.META.get("REMOTE_ADDR", "")
|
|
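A hypothetical usage sketch for the decorator in the record above; the view name and payload are illustrative. The only real requirements are visible in the decorator itself: it wraps a DRF-style view and reads a "job" key off the response data if present.

from rest_framework.decorators import api_view
from rest_framework.response import Response

from apps.core.decorators import log_request


@api_view(['GET'])
@log_request
def start_job(request, format=None):                   # hypothetical view
    job_uuid = '6b9a42c2-1b3a-4a52-9c2b-1dc8f8f8a111'   # placeholder value
    return Response({'job': job_uuid})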
4e3c76fa2c2af7b19bcac72c25044b9534198e18
|
appModules/potplayermini.py
|
appModules/potplayermini.py
|
# -*- coding: utf-8 -*-
# appModule for Daum potplayer (automatic reading subtitle and play information)
# This file part of Daum potplayer AppModule
# Copyright <2016> aheu <advck1123 at GMail Dot com>, license details <license.txt>
from potplayer import AppModule
|
Add pot player mini support
|
Add pot player mini support
|
Python
|
mit
|
dnz3d4c/potplayerNVDAAddon
|
Add pot player mini support
|
# -*- coding: utf-8 -*-
# appModule for Daum potplayer (automatic reading subtitle and play information)
# This file part of Daum potplayer AppModule
# Copyright <2016> aheu <advck1123 at GMail Dot com>, license details <license.txt>
from potplayer import AppModule
|
<commit_before><commit_msg>Add pot player mini support<commit_after>
|
# -*- coding: utf-8 -*-
# appModule for Daum potplayer (automatic reading subtitle and play information)
# This file part of Daum potplayer AppModule
# Copyright <2016> aheu <advck1123 at GMail Dot com>, license details <license.txt>
from potplayer import AppModule
|
Add pot player mini support# -*- coding: utf-8 -*-
# appModule for Daum potplayer (automatic reading subtitle and play information)
# This file part of Daum potplayer AppModule
# Copyright <2016> aheu <advck1123 at GMail Dot com>, license details <license.txt>
from potplayer import AppModule
|
<commit_before><commit_msg>Add pot player mini support<commit_after># -*- coding: utf-8 -*-
# appModule for Daum potplayer (automatic reading subtitle and play information)
# This file part of Daum potplayer AppModule
# Copyright <2016> aheu <advck1123 at GMail Dot com>, license details <license.txt>
from potplayer import AppModule
|
|
839716bb2ea94d8c8a3f6cf44ceecf0afcfd6c55
|
migrations/versions/2213b8196921_.py
|
migrations/versions/2213b8196921_.py
|
"""Use native JSON for entry.content on postgres
Revision ID: 2213b8196921
Revises: 2b7f5e38dd73
Create Date: 2013-12-15 13:48:25.988000
"""
# revision identifiers, used by Alembic.
revision = '2213b8196921'
down_revision = '2b7f5e38dd73'
from marvin.types import JSONType
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=False)
### end Alembic commands ###
|
Add migration for entry.content -> JSON
|
Add migration for entry.content -> JSON
|
Python
|
mit
|
streamr/marvin,streamr/marvin,streamr/marvin
|
Add migration for entry.content -> JSON
|
"""Use native JSON for entry.content on postgres
Revision ID: 2213b8196921
Revises: 2b7f5e38dd73
Create Date: 2013-12-15 13:48:25.988000
"""
# revision identifiers, used by Alembic.
revision = '2213b8196921'
down_revision = '2b7f5e38dd73'
from marvin.types import JSONType
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=False)
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for entry.content -> JSON<commit_after>
|
"""Use native JSON for entry.content on postgres
Revision ID: 2213b8196921
Revises: 2b7f5e38dd73
Create Date: 2013-12-15 13:48:25.988000
"""
# revision identifiers, used by Alembic.
revision = '2213b8196921'
down_revision = '2b7f5e38dd73'
from marvin.types import JSONType
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=False)
### end Alembic commands ###
|
Add migration for entry.content -> JSON"""Use native JSON for entry.content on postgres
Revision ID: 2213b8196921
Revises: 2b7f5e38dd73
Create Date: 2013-12-15 13:48:25.988000
"""
# revision identifiers, used by Alembic.
revision = '2213b8196921'
down_revision = '2b7f5e38dd73'
from marvin.types import JSONType
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=False)
### end Alembic commands ###
|
<commit_before><commit_msg>Add migration for entry.content -> JSON<commit_after>"""Use native JSON for entry.content on postgres
Revision ID: 2213b8196921
Revises: 2b7f5e38dd73
Create Date: 2013-12-15 13:48:25.988000
"""
# revision identifiers, used by Alembic.
revision = '2213b8196921'
down_revision = '2b7f5e38dd73'
from marvin.types import JSONType
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=True)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.alter_column('entry', 'content',
existing_type=JSONType(),
nullable=False)
### end Alembic commands ###
|
|
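For reference, migrations like the one above are normally applied with the Alembic CLI ("alembic upgrade head"); a rough programmatic equivalent, assuming an alembic.ini at the project root, is:

from alembic import command
from alembic.config import Config

config = Config("alembic.ini")        # assumed location of the Alembic config
command.upgrade(config, "head")       # runs upgrade() in 2213b8196921 et al.
# command.downgrade(config, "-1")     # would step back one revision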
52d8a8f7e616523360528367b6354ebca6c44f8e
|
migrations/versions/2c240cb3edd1_.py
|
migrations/versions/2c240cb3edd1_.py
|
"""Add movie metadata (imdb rating, number of votes, metascore) and relevancy
Revision ID: 2c240cb3edd1
Revises: 588336e02ca
Create Date: 2014-02-09 13:46:18.630000
"""
# revision identifiers, used by Alembic.
revision = '2c240cb3edd1'
down_revision = '588336e02ca'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('movie', sa.Column('imdb_rating', sa.Float(), nullable=False, default=0))
op.add_column('movie', sa.Column('metascore', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('number_of_imdb_votes', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('relevancy', sa.Float(), nullable=False, default=0))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('movie', 'relevancy')
op.drop_column('movie', 'number_of_imdb_votes')
op.drop_column('movie', 'metascore')
op.drop_column('movie', 'imdb_rating')
### end Alembic commands ###
|
Add migrations for movie ranking metadata
|
Add migrations for movie ranking metadata
|
Python
|
mit
|
streamr/marvin,streamr/marvin,streamr/marvin
|
Add migrations for movie ranking metadata
|
"""Add movie metadata (imdb rating, number of votes, metascore) and relevancy
Revision ID: 2c240cb3edd1
Revises: 588336e02ca
Create Date: 2014-02-09 13:46:18.630000
"""
# revision identifiers, used by Alembic.
revision = '2c240cb3edd1'
down_revision = '588336e02ca'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('movie', sa.Column('imdb_rating', sa.Float(), nullable=False, default=0))
op.add_column('movie', sa.Column('metascore', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('number_of_imdb_votes', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('relevancy', sa.Float(), nullable=False, default=0))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('movie', 'relevancy')
op.drop_column('movie', 'number_of_imdb_votes')
op.drop_column('movie', 'metascore')
op.drop_column('movie', 'imdb_rating')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migrations for movie ranking metadata<commit_after>
|
"""Add movie metadata (imdb rating, number of votes, metascore) and relevancy
Revision ID: 2c240cb3edd1
Revises: 588336e02ca
Create Date: 2014-02-09 13:46:18.630000
"""
# revision identifiers, used by Alembic.
revision = '2c240cb3edd1'
down_revision = '588336e02ca'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('movie', sa.Column('imdb_rating', sa.Float(), nullable=False, default=0))
op.add_column('movie', sa.Column('metascore', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('number_of_imdb_votes', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('relevancy', sa.Float(), nullable=False, default=0))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('movie', 'relevancy')
op.drop_column('movie', 'number_of_imdb_votes')
op.drop_column('movie', 'metascore')
op.drop_column('movie', 'imdb_rating')
### end Alembic commands ###
|
Add migrations for movie ranking metadata"""Add movie metadata (imdb rating, number of votes, metascore) and relevancy
Revision ID: 2c240cb3edd1
Revises: 588336e02ca
Create Date: 2014-02-09 13:46:18.630000
"""
# revision identifiers, used by Alembic.
revision = '2c240cb3edd1'
down_revision = '588336e02ca'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('movie', sa.Column('imdb_rating', sa.Float(), nullable=False, default=0))
op.add_column('movie', sa.Column('metascore', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('number_of_imdb_votes', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('relevancy', sa.Float(), nullable=False, default=0))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('movie', 'relevancy')
op.drop_column('movie', 'number_of_imdb_votes')
op.drop_column('movie', 'metascore')
op.drop_column('movie', 'imdb_rating')
### end Alembic commands ###
|
<commit_before><commit_msg>Add migrations for movie ranking metadata<commit_after>"""Add movie metadata (imdb rating, number of votes, metascore) and relevancy
Revision ID: 2c240cb3edd1
Revises: 588336e02ca
Create Date: 2014-02-09 13:46:18.630000
"""
# revision identifiers, used by Alembic.
revision = '2c240cb3edd1'
down_revision = '588336e02ca'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('movie', sa.Column('imdb_rating', sa.Float(), nullable=False, default=0))
op.add_column('movie', sa.Column('metascore', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('number_of_imdb_votes', sa.Integer(), nullable=False, default=0))
op.add_column('movie', sa.Column('relevancy', sa.Float(), nullable=False, default=0))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('movie', 'relevancy')
op.drop_column('movie', 'number_of_imdb_votes')
op.drop_column('movie', 'metascore')
op.drop_column('movie', 'imdb_rating')
### end Alembic commands ###
|
|
e5c57e7e0110fb35707ef4900f289698fb778dfd
|
tests/unit/test_settings.py
|
tests/unit/test_settings.py
|
"""
Tests application settings are connected to Django
settings and have sensible default values.
"""
import importlib
from django.test import TestCase, override_settings
from drf_signed_auth import settings
class SettingsTest(TestCase):
def setUp(self):
self.sut = settings
self.addCleanup(lambda: importlib.reload(settings))
def test_default_ttl(self):
self.assertEqual(30, settings.SIGNED_URL_TTL)
def test_ttl_set_from_django_settings(self):
expected = 9999
with override_settings(SIGNED_URL_TTL=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_TTL)
def test_default_signature_param(self):
self.assertEqual('sig', settings.SIGNED_URL_QUERY_PARAM)
def test_signature_param_from_django_settings(self):
expected = 'serenity'
with override_settings(SIGNED_URL_QUERY_PARAM=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_QUERY_PARAM)
|
Add tests for settings & defaults
|
Add tests for settings & defaults
|
Python
|
bsd-2-clause
|
marcgibbons/drf_signed_auth,marcgibbons/drf_signed_auth,marcgibbons/drf_signed_auth
|
Add tests for settings & defaults
|
"""
Tests application settings are connected to Django
settings and have sensible default values.
"""
import importlib
from django.test import TestCase, override_settings
from drf_signed_auth import settings
class SettingsTest(TestCase):
def setUp(self):
self.sut = settings
self.addCleanup(lambda: importlib.reload(settings))
def test_default_ttl(self):
self.assertEqual(30, settings.SIGNED_URL_TTL)
def test_ttl_set_from_django_settings(self):
expected = 9999
with override_settings(SIGNED_URL_TTL=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_TTL)
def test_default_signature_param(self):
self.assertEqual('sig', settings.SIGNED_URL_QUERY_PARAM)
def test_signature_param_from_django_settings(self):
expected = 'serenity'
with override_settings(SIGNED_URL_QUERY_PARAM=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_QUERY_PARAM)
|
<commit_before><commit_msg>Add tests for settings & defaults<commit_after>
|
"""
Tests application settings are connected to Django
settings and have sensible default values.
"""
import importlib
from django.test import TestCase, override_settings
from drf_signed_auth import settings
class SettingsTest(TestCase):
def setUp(self):
self.sut = settings
self.addCleanup(lambda: importlib.reload(settings))
def test_default_ttl(self):
self.assertEqual(30, settings.SIGNED_URL_TTL)
def test_ttl_set_from_django_settings(self):
expected = 9999
with override_settings(SIGNED_URL_TTL=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_TTL)
def test_default_signature_param(self):
self.assertEqual('sig', settings.SIGNED_URL_QUERY_PARAM)
def test_signature_param_from_django_settings(self):
expected = 'serenity'
with override_settings(SIGNED_URL_QUERY_PARAM=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_QUERY_PARAM)
|
Add tests for settings & defaults"""
Tests application settings are connected to Django
settings and have sensible default values.
"""
import importlib
from django.test import TestCase, override_settings
from drf_signed_auth import settings
class SettingsTest(TestCase):
def setUp(self):
self.sut = settings
self.addCleanup(lambda: importlib.reload(settings))
def test_default_ttl(self):
self.assertEqual(30, settings.SIGNED_URL_TTL)
def test_ttl_set_from_django_settings(self):
expected = 9999
with override_settings(SIGNED_URL_TTL=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_TTL)
def test_default_signature_param(self):
self.assertEqual('sig', settings.SIGNED_URL_QUERY_PARAM)
def test_signature_param_from_django_settings(self):
expected = 'serenity'
with override_settings(SIGNED_URL_QUERY_PARAM=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_QUERY_PARAM)
|
<commit_before><commit_msg>Add tests for settings & defaults<commit_after>"""
Tests application settings are connected to Django
settings and have sensible default values.
"""
import importlib
from django.test import TestCase, override_settings
from drf_signed_auth import settings
class SettingsTest(TestCase):
def setUp(self):
self.sut = settings
self.addCleanup(lambda: importlib.reload(settings))
def test_default_ttl(self):
self.assertEqual(30, settings.SIGNED_URL_TTL)
def test_ttl_set_from_django_settings(self):
expected = 9999
with override_settings(SIGNED_URL_TTL=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_TTL)
def test_default_signature_param(self):
self.assertEqual('sig', settings.SIGNED_URL_QUERY_PARAM)
def test_signature_param_from_django_settings(self):
expected = 'serenity'
with override_settings(SIGNED_URL_QUERY_PARAM=expected):
importlib.reload(settings)
self.assertEqual(expected, settings.SIGNED_URL_QUERY_PARAM)
|
|
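For context on the record above: the tests reload the settings module after override_settings(), which implies module-level values read from Django settings at import time. A minimal sketch of such a module, inferred from the test names rather than taken from the actual drf_signed_auth source, might look like this:

# Hypothetical sketch of a drf_signed_auth/settings.py consistent with the tests above.
# Values are resolved once at import time, which is why the tests call
# importlib.reload(settings) after overriding Django settings.
from django.conf import settings as django_settings

# Default signature TTL (seconds) and query-string parameter name; both may be
# overridden in the Django project settings.
SIGNED_URL_TTL = getattr(django_settings, 'SIGNED_URL_TTL', 30)
SIGNED_URL_QUERY_PARAM = getattr(django_settings, 'SIGNED_URL_QUERY_PARAM', 'sig')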
7e56bb9c4ac96e46bb786277414724de742af8f0
|
photutils/utils/tests/test_misc.py
|
photutils/utils/tests/test_misc.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _misc module.
"""
import pytest
from .._misc import _get_meta
@pytest.mark.parametrize('utc', (False, True))
def test_get_meta(utc):
meta = _get_meta(utc)
keys = ('date', 'version')
for key in keys:
assert key in meta
versions = meta['version']
assert isinstance(versions, dict)
keys = ('Python', 'photutils', 'astropy', 'numpy', 'scipy', 'skimage',
'sklearn', 'matplotlib', 'gwcs', 'bottleneck')
for key in keys:
assert key in versions
|
Add unit tests for utils._misc
|
Add unit tests for utils._misc
|
Python
|
bsd-3-clause
|
astropy/photutils,larrybradley/photutils
|
Add unit tests for utils._misc
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _misc module.
"""
import pytest
from .._misc import _get_meta
@pytest.mark.parametrize('utc', (False, True))
def test_get_meta(utc):
meta = _get_meta(utc)
keys = ('date', 'version')
for key in keys:
assert key in meta
versions = meta['version']
assert isinstance(versions, dict)
keys = ('Python', 'photutils', 'astropy', 'numpy', 'scipy', 'skimage',
'sklearn', 'matplotlib', 'gwcs', 'bottleneck')
for key in keys:
assert key in versions
|
<commit_before><commit_msg>Add unit tests for utils._misc<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _misc module.
"""
import pytest
from .._misc import _get_meta
@pytest.mark.parametrize('utc', (False, True))
def test_get_meta(utc):
meta = _get_meta(utc)
keys = ('date', 'version')
for key in keys:
assert key in meta
versions = meta['version']
assert isinstance(versions, dict)
keys = ('Python', 'photutils', 'astropy', 'numpy', 'scipy', 'skimage',
'sklearn', 'matplotlib', 'gwcs', 'bottleneck')
for key in keys:
assert key in versions
|
Add unit tests for utils._misc# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _misc module.
"""
import pytest
from .._misc import _get_meta
@pytest.mark.parametrize('utc', (False, True))
def test_get_meta(utc):
meta = _get_meta(utc)
keys = ('date', 'version')
for key in keys:
assert key in meta
versions = meta['version']
assert isinstance(versions, dict)
keys = ('Python', 'photutils', 'astropy', 'numpy', 'scipy', 'skimage',
'sklearn', 'matplotlib', 'gwcs', 'bottleneck')
for key in keys:
assert key in versions
|
<commit_before><commit_msg>Add unit tests for utils._misc<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the _misc module.
"""
import pytest
from .._misc import _get_meta
@pytest.mark.parametrize('utc', (False, True))
def test_get_meta(utc):
meta = _get_meta(utc)
keys = ('date', 'version')
for key in keys:
assert key in meta
versions = meta['version']
assert isinstance(versions, dict)
keys = ('Python', 'photutils', 'astropy', 'numpy', 'scipy', 'skimage',
'sklearn', 'matplotlib', 'gwcs', 'bottleneck')
for key in keys:
assert key in versions
|
|
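The record above only adds the tests; the _get_meta helper itself is not shown. A rough sketch of a function that would satisfy these assertions — a hypothetical stand-in, not the real photutils implementation — is:

# Hypothetical _get_meta(utc) returning the keys the test above checks for.
from datetime import datetime, timezone

def _get_meta(utc=False):
    # Timestamp of when the metadata was produced, optionally in UTC.
    now = datetime.now(timezone.utc) if utc else datetime.now()
    # Versions of Python and the scientific stack; placeholders here -- a real
    # implementation would import each package and read its __version__.
    versions = {key: 'unknown' for key in (
        'Python', 'photutils', 'astropy', 'numpy', 'scipy', 'skimage',
        'sklearn', 'matplotlib', 'gwcs', 'bottleneck')}
    return {'date': now.isoformat(), 'version': versions}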
a9116a9c47602f24c9e4fa6ca30b99507ae29703
|
dumpws.py
|
dumpws.py
|
#!/usr/bin/env python3
import websockets
import asyncio
import time
# import logging
# logger = logging.getLogger('websockets')
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
VARS = ["v.atmosphericDensity", "v.dynamicPressure",
"v.altitude", "v.heightFromTerrain", "v.terrainHeight",
"n.pitch", "n.heading", "n.roll",
"f.throttle",
"v.sasValue", "v.lightValue", "v.brakeValue", "v.gearValue",
"v.surfaceSpeed", "v.verticalSpeed",
"v.surfaceVelocityx", "v.surfaceVelocityy", "v.surfaceVelocityz",
"rpm.available",
"rpm.ATMOSPHEREDEPTH","rpm.EASPEED","rpm.EFFECTIVETHROTTLE",
"rpm.ENGINEOVERHEATALARM", "rpm.GROUNDPROXIMITYALARM", "rpm.SLOPEALARM",
"rpm.RADARALTOCEAN", "rpm.ANGLEOFATTACK", "rpm.SIDESLIP",
"rpm.PLUGIN_JSIFAR:GetFlapSetting", "rpm.TERMINALVELOCITY", "rpm.SURFSPEED"]
@asyncio.coroutine
def testws():
websocket = yield from websockets.connect('ws://192.168.1.73:8085/datalink')
yield from websocket.send('{"+": [' + ",".join('"' + x + '"' for x in VARS) + ']}')
start = time.time()
while True:
message = yield from websocket.recv()
if message is None:
break
print(str(time.time()-start) + " " + message)
asyncio.get_event_loop().run_until_complete(testws())
|
Add a python websocket dumper
|
Add a python websocket dumper
|
Python
|
mit
|
ndevenish/KerbalHUD,ndevenish/KerbalHUD
|
Add a python websocket dumper
|
#!/usr/bin/env python3
import websockets
import asyncio
import time
# import logging
# logger = logging.getLogger('websockets')
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
VARS = ["v.atmosphericDensity", "v.dynamicPressure",
"v.altitude", "v.heightFromTerrain", "v.terrainHeight",
"n.pitch", "n.heading", "n.roll",
"f.throttle",
"v.sasValue", "v.lightValue", "v.brakeValue", "v.gearValue",
"v.surfaceSpeed", "v.verticalSpeed",
"v.surfaceVelocityx", "v.surfaceVelocityy", "v.surfaceVelocityz",
"rpm.available",
"rpm.ATMOSPHEREDEPTH","rpm.EASPEED","rpm.EFFECTIVETHROTTLE",
"rpm.ENGINEOVERHEATALARM", "rpm.GROUNDPROXIMITYALARM", "rpm.SLOPEALARM",
"rpm.RADARALTOCEAN", "rpm.ANGLEOFATTACK", "rpm.SIDESLIP",
"rpm.PLUGIN_JSIFAR:GetFlapSetting", "rpm.TERMINALVELOCITY", "rpm.SURFSPEED"]
@asyncio.coroutine
def testws():
websocket = yield from websockets.connect('ws://192.168.1.73:8085/datalink')
yield from websocket.send('{"+": [' + ",".join('"' + x + '"' for x in VARS) + ']}')
start = time.time()
while True:
message = yield from websocket.recv()
if message is None:
break
print(str(time.time()-start) + " " + message)
asyncio.get_event_loop().run_until_complete(testws())
|
<commit_before><commit_msg>Add a python websocket dumper<commit_after>
|
#!/usr/bin/env python3
import websockets
import asyncio
import time
# import logging
# logger = logging.getLogger('websockets')
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
VARS = ["v.atmosphericDensity", "v.dynamicPressure",
"v.altitude", "v.heightFromTerrain", "v.terrainHeight",
"n.pitch", "n.heading", "n.roll",
"f.throttle",
"v.sasValue", "v.lightValue", "v.brakeValue", "v.gearValue",
"v.surfaceSpeed", "v.verticalSpeed",
"v.surfaceVelocityx", "v.surfaceVelocityy", "v.surfaceVelocityz",
"rpm.available",
"rpm.ATMOSPHEREDEPTH","rpm.EASPEED","rpm.EFFECTIVETHROTTLE",
"rpm.ENGINEOVERHEATALARM", "rpm.GROUNDPROXIMITYALARM", "rpm.SLOPEALARM",
"rpm.RADARALTOCEAN", "rpm.ANGLEOFATTACK", "rpm.SIDESLIP",
"rpm.PLUGIN_JSIFAR:GetFlapSetting", "rpm.TERMINALVELOCITY", "rpm.SURFSPEED"]
@asyncio.coroutine
def testws():
websocket = yield from websockets.connect('ws://192.168.1.73:8085/datalink')
yield from websocket.send('{"+": [' + ",".join('"' + x + '"' for x in VARS) + ']}')
start = time.time()
while True:
message = yield from websocket.recv()
if message is None:
break
print(str(time.time()-start) + " " + message)
asyncio.get_event_loop().run_until_complete(testws())
|
Add a python websocket dumper#!/usr/bin/env python3
import websockets
import asyncio
import time
# import logging
# logger = logging.getLogger('websockets')
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
VARS = ["v.atmosphericDensity", "v.dynamicPressure",
"v.altitude", "v.heightFromTerrain", "v.terrainHeight",
"n.pitch", "n.heading", "n.roll",
"f.throttle",
"v.sasValue", "v.lightValue", "v.brakeValue", "v.gearValue",
"v.surfaceSpeed", "v.verticalSpeed",
"v.surfaceVelocityx", "v.surfaceVelocityy", "v.surfaceVelocityz",
"rpm.available",
"rpm.ATMOSPHEREDEPTH","rpm.EASPEED","rpm.EFFECTIVETHROTTLE",
"rpm.ENGINEOVERHEATALARM", "rpm.GROUNDPROXIMITYALARM", "rpm.SLOPEALARM",
"rpm.RADARALTOCEAN", "rpm.ANGLEOFATTACK", "rpm.SIDESLIP",
"rpm.PLUGIN_JSIFAR:GetFlapSetting", "rpm.TERMINALVELOCITY", "rpm.SURFSPEED"]
@asyncio.coroutine
def testws():
websocket = yield from websockets.connect('ws://192.168.1.73:8085/datalink')
yield from websocket.send('{"+": [' + ",".join('"' + x + '"' for x in VARS) + ']}')
start = time.time()
while True:
message = yield from websocket.recv()
if message is None:
break
print(str(time.time()-start) + " " + message)
asyncio.get_event_loop().run_until_complete(testws())
|
<commit_before><commit_msg>Add a python websocket dumper<commit_after>#!/usr/bin/env python3
import websockets
import asyncio
import time
# import logging
# logger = logging.getLogger('websockets')
# logger.setLevel(logging.DEBUG)
# logger.addHandler(logging.StreamHandler())
VARS = ["v.atmosphericDensity", "v.dynamicPressure",
"v.altitude", "v.heightFromTerrain", "v.terrainHeight",
"n.pitch", "n.heading", "n.roll",
"f.throttle",
"v.sasValue", "v.lightValue", "v.brakeValue", "v.gearValue",
"v.surfaceSpeed", "v.verticalSpeed",
"v.surfaceVelocityx", "v.surfaceVelocityy", "v.surfaceVelocityz",
"rpm.available",
"rpm.ATMOSPHEREDEPTH","rpm.EASPEED","rpm.EFFECTIVETHROTTLE",
"rpm.ENGINEOVERHEATALARM", "rpm.GROUNDPROXIMITYALARM", "rpm.SLOPEALARM",
"rpm.RADARALTOCEAN", "rpm.ANGLEOFATTACK", "rpm.SIDESLIP",
"rpm.PLUGIN_JSIFAR:GetFlapSetting", "rpm.TERMINALVELOCITY", "rpm.SURFSPEED"]
@asyncio.coroutine
def testws():
websocket = yield from websockets.connect('ws://192.168.1.73:8085/datalink')
yield from websocket.send('{"+": [' + ",".join('"' + x + '"' for x in VARS) + ']}')
start = time.time()
while True:
message = yield from websocket.recv()
if message is None:
break
print(str(time.time()-start) + " " + message)
asyncio.get_event_loop().run_until_complete(testws())
|
|
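The script in the record above uses the generator-based coroutine style (@asyncio.coroutine with yield from), which has since been deprecated. A rough async/await equivalent of the same loop on Python 3.5+ — keeping the script's hard-coded address, which is specific to that setup — would be:

# Rough modern equivalent of testws() above; the address and variable list are
# taken from the script and are assumptions about that environment.
import asyncio
import json
import time
import websockets

VARS = ["v.altitude", "n.pitch"]  # abbreviated here; the full list is in the script above

async def testws():
    # websockets.connect() works as an async context manager on current versions.
    async with websockets.connect('ws://192.168.1.73:8085/datalink') as ws:
        await ws.send(json.dumps({'+': VARS}))
        start = time.time()
        async for message in ws:
            print(str(time.time() - start) + ' ' + message)

asyncio.run(testws())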
0107cdcfdef6d8562c71b576c39ed52fc7afcedf
|
migrations/versions/0219_default_email_branding.py
|
migrations/versions/0219_default_email_branding.py
|
"""
Revision ID: 0219_default_email_branding
Revises: 0218_another_letter_org
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0219_default_email_branding'
down_revision = '0218_another_letter_org'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type is null
""".format(BRANDING_ORG))
def downgrade():
pass
|
Set branding_type to org if it’s none
|
Set branding_type to org if it’s none
Same as a0deef06e23e4c81e55d83afb63d4bbab1bdaaa5 but with `is null` not
`= null` 🤦🏻
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Set branding_type to org if it’s none
Same as a0deef06e23e4c81e55d83afb63d4bbab1bdaaa5 but with `is null` not
`= null` 🤦🏻
|
"""
Revision ID: 0219_default_email_branding
Revises: 0218_another_letter_org
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0219_default_email_branding'
down_revision = '0218_another_letter_org'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type is null
""".format(BRANDING_ORG))
def downgrade():
pass
|
<commit_before><commit_msg>Set branding_type to org if it’s none
Same as a0deef06e23e4c81e55d83afb63d4bbab1bdaaa5 but with `is null` not
`= null` 🤦🏻<commit_after>
|
"""
Revision ID: 0219_default_email_branding
Revises: 0218_another_letter_org
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0219_default_email_branding'
down_revision = '0218_another_letter_org'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type is null
""".format(BRANDING_ORG))
def downgrade():
pass
|
Set branding_type to org if it’s none
Same as a0deef06e23e4c81e55d83afb63d4bbab1bdaaa5 but with `is null` not
`= null` 🤦🏻"""
Revision ID: 0219_default_email_branding
Revises: 0218_another_letter_org
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0219_default_email_branding'
down_revision = '0218_another_letter_org'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type is null
""".format(BRANDING_ORG))
def downgrade():
pass
|
<commit_before><commit_msg>Set branding_type to org if it’s none
Same as a0deef06e23e4c81e55d83afb63d4bbab1bdaaa5 but with `is null` not
`= null` 🤦🏻<commit_after>"""
Revision ID: 0219_default_email_branding
Revises: 0218_another_letter_org
Create Date: 2018-08-24 13:36:49.346156
"""
from alembic import op
from app.models import BRANDING_ORG
revision = '0219_default_email_branding'
down_revision = '0218_another_letter_org'
def upgrade():
op.execute("""
update
email_branding
set
brand_type = '{}'
where
brand_type is null
""".format(BRANDING_ORG))
def downgrade():
pass
|
|
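The distinction called out in the commit message above matters because in SQL a comparison with NULL never evaluates to true, so an UPDATE guarded by `= null` touches no rows; `is null` is the correct predicate. A small self-contained illustration (table and values are made up, using SQLite, but the behaviour is the same in PostgreSQL):

# Why the earlier '= null' version of this migration was a no-op.
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('create table email_branding (id integer, brand_type text)')
conn.execute('insert into email_branding values (1, null)')

# '= null' matches nothing: NULL compared with anything is unknown, not true.
conn.execute("update email_branding set brand_type = 'org' where brand_type = null")
print(conn.execute('select brand_type from email_branding').fetchone())  # (None,)

# 'is null' matches the row as intended.
conn.execute("update email_branding set brand_type = 'org' where brand_type is null")
print(conn.execute('select brand_type from email_branding').fetchone())  # ('org',)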
d064a1e86e2de66593ec2bf665ccd015ed87e6d2
|
disk_usage.py
|
disk_usage.py
|
#!/usr/bin/env python
"""
Return disk usage statistics about the given path as a (total, used, free)
namedtuple. Values are expressed in bytes.
"""
# Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
# License: MIT
import os
import collections
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
if hasattr(os, 'statvfs'): # POSIX
def disk_usage(path):
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt': # Windows
import ctypes
import sys
def disk_usage(path):
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(path, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
used = total.value - free.value
return _ntuple_diskusage(total.value, used, free.value)
else:
raise NotImplementedError("platform not supported")
disk_usage.__doc__ = __doc__
if __name__ == '__main__':
print disk_usage(os.getcwd())
|
Add module to retrieve disk space
|
Add module to retrieve disk space
|
Python
|
mit
|
borevitzlab/Gigavision,borevitzlab/Gigavision,borevitzlab/Gigavision,borevitzlab/Gigvaision-ControlSoftware,borevitzlab/Gigvaision-ControlSoftware
|
Add module to retrieve disk space
|
#!/usr/bin/env python
"""
Return disk usage statistics about the given path as a (total, used, free)
namedtuple. Values are expressed in bytes.
"""
# Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
# License: MIT
import os
import collections
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
if hasattr(os, 'statvfs'): # POSIX
def disk_usage(path):
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt': # Windows
import ctypes
import sys
def disk_usage(path):
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(path, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
used = total.value - free.value
return _ntuple_diskusage(total.value, used, free.value)
else:
raise NotImplementedError("platform not supported")
disk_usage.__doc__ = __doc__
if __name__ == '__main__':
print disk_usage(os.getcwd())
|
<commit_before><commit_msg>Add module to retrieve disk space<commit_after>
|
#!/usr/bin/env python
"""
Return disk usage statistics about the given path as a (total, used, free)
namedtuple. Values are expressed in bytes.
"""
# Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
# License: MIT
import os
import collections
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
if hasattr(os, 'statvfs'): # POSIX
def disk_usage(path):
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt': # Windows
import ctypes
import sys
def disk_usage(path):
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(path, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
used = total.value - free.value
return _ntuple_diskusage(total.value, used, free.value)
else:
raise NotImplementedError("platform not supported")
disk_usage.__doc__ = __doc__
if __name__ == '__main__':
print disk_usage(os.getcwd())
|
Add module to retrieve disk space#!/usr/bin/env python
"""
Return disk usage statistics about the given path as a (total, used, free)
namedtuple. Values are expressed in bytes.
"""
# Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
# License: MIT
import os
import collections
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
if hasattr(os, 'statvfs'): # POSIX
def disk_usage(path):
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt': # Windows
import ctypes
import sys
def disk_usage(path):
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(path, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
used = total.value - free.value
return _ntuple_diskusage(total.value, used, free.value)
else:
raise NotImplementedError("platform not supported")
disk_usage.__doc__ = __doc__
if __name__ == '__main__':
print disk_usage(os.getcwd())
|
<commit_before><commit_msg>Add module to retrieve disk space<commit_after>#!/usr/bin/env python
"""
Return disk usage statistics about the given path as a (total, used, free)
namedtuple. Values are expressed in bytes.
"""
# Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>
# License: MIT
import os
import collections
_ntuple_diskusage = collections.namedtuple('usage', 'total used free')
if hasattr(os, 'statvfs'): # POSIX
def disk_usage(path):
st = os.statvfs(path)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
used = (st.f_blocks - st.f_bfree) * st.f_frsize
return _ntuple_diskusage(total, used, free)
elif os.name == 'nt': # Windows
import ctypes
import sys
def disk_usage(path):
_, total, free = ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong()
if sys.version_info >= (3,) or isinstance(path, unicode):
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fun = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fun(path, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
raise ctypes.WinError()
used = total.value - free.value
return _ntuple_diskusage(total.value, used, free.value)
else:
raise NotImplementedError("platform not supported")
disk_usage.__doc__ = __doc__
if __name__ == '__main__':
print disk_usage(os.getcwd())
|
|
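Worth noting about the record above: the module targets Python 2 (note the print statement), and on Python 3.3+ the standard library already provides the same (total, used, free) shape via shutil.disk_usage. A one-line equivalent for modern interpreters:

# Python 3.3+ standard-library equivalent of the disk_usage module above.
import os
import shutil

print(shutil.disk_usage(os.getcwd()))  # usage(total=..., used=..., free=...)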
b996e0f7ddc622afbbd06d1640f7804f0a97745c
|
.github/release_log.py
|
.github/release_log.py
|
#!/usr/bin/env python3
import argparse
import json
import requests
import re
BASE_URL = 'https://api.github.com/repos/magicstack/uvloop/compare'
def main():
parser = argparse.ArgumentParser(
description='Generate release log.')
parser.add_argument('--to', dest='to_hash', default='master', type=str)
parser.add_argument('--from', dest='from_hash', type=str)
args = parser.parse_args()
r = requests.get(f'{BASE_URL}/{args.from_hash}...{args.to_hash}')
data = json.loads(r.text)
for commit in data['commits']:
message = commit['commit']['message']
first_line = message.partition('\n\n')[0]
if commit.get('author'):
username = '@{}'.format(commit['author']['login'])
else:
username = commit['commit']['author']['name']
sha = commit["sha"][:8]
m = re.search(r'\#(?P<num>\d+)\b', message)
if m:
issue_num = m.group('num')
else:
issue_num = None
print(f'* {first_line}')
print(f' (by {username} in {sha}', end='')
if issue_num:
print(f' for #{issue_num})')
else:
print(')')
print()
if __name__ == '__main__':
main()
|
Add a script to generate release log
|
Add a script to generate release log
|
Python
|
apache-2.0
|
1st1/uvloop,MagicStack/uvloop,MagicStack/uvloop
|
Add a script to generate release log
|
#!/usr/bin/env python3
import argparse
import json
import requests
import re
BASE_URL = 'https://api.github.com/repos/magicstack/uvloop/compare'
def main():
parser = argparse.ArgumentParser(
description='Generate release log.')
parser.add_argument('--to', dest='to_hash', default='master', type=str)
parser.add_argument('--from', dest='from_hash', type=str)
args = parser.parse_args()
r = requests.get(f'{BASE_URL}/{args.from_hash}...{args.to_hash}')
data = json.loads(r.text)
for commit in data['commits']:
message = commit['commit']['message']
first_line = message.partition('\n\n')[0]
if commit.get('author'):
username = '@{}'.format(commit['author']['login'])
else:
username = commit['commit']['author']['name']
sha = commit["sha"][:8]
m = re.search(r'\#(?P<num>\d+)\b', message)
if m:
issue_num = m.group('num')
else:
issue_num = None
print(f'* {first_line}')
print(f' (by {username} in {sha}', end='')
if issue_num:
print(f' for #{issue_num})')
else:
print(')')
print()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to generate release log<commit_after>
|
#!/usr/bin/env python3
import argparse
import json
import requests
import re
BASE_URL = 'https://api.github.com/repos/magicstack/uvloop/compare'
def main():
parser = argparse.ArgumentParser(
description='Generate release log.')
parser.add_argument('--to', dest='to_hash', default='master', type=str)
parser.add_argument('--from', dest='from_hash', type=str)
args = parser.parse_args()
r = requests.get(f'{BASE_URL}/{args.from_hash}...{args.to_hash}')
data = json.loads(r.text)
for commit in data['commits']:
message = commit['commit']['message']
first_line = message.partition('\n\n')[0]
if commit.get('author'):
username = '@{}'.format(commit['author']['login'])
else:
username = commit['commit']['author']['name']
sha = commit["sha"][:8]
m = re.search(r'\#(?P<num>\d+)\b', message)
if m:
issue_num = m.group('num')
else:
issue_num = None
print(f'* {first_line}')
print(f' (by {username} in {sha}', end='')
if issue_num:
print(f' for #{issue_num})')
else:
print(')')
print()
if __name__ == '__main__':
main()
|
Add a script to generate release log#!/usr/bin/env python3
import argparse
import json
import requests
import re
BASE_URL = 'https://api.github.com/repos/magicstack/uvloop/compare'
def main():
parser = argparse.ArgumentParser(
description='Generate release log.')
parser.add_argument('--to', dest='to_hash', default='master', type=str)
parser.add_argument('--from', dest='from_hash', type=str)
args = parser.parse_args()
r = requests.get(f'{BASE_URL}/{args.from_hash}...{args.to_hash}')
data = json.loads(r.text)
for commit in data['commits']:
message = commit['commit']['message']
first_line = message.partition('\n\n')[0]
if commit.get('author'):
username = '@{}'.format(commit['author']['login'])
else:
username = commit['commit']['author']['name']
sha = commit["sha"][:8]
m = re.search(r'\#(?P<num>\d+)\b', message)
if m:
issue_num = m.group('num')
else:
issue_num = None
print(f'* {first_line}')
print(f' (by {username} in {sha}', end='')
if issue_num:
print(f' for #{issue_num})')
else:
print(')')
print()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add a script to generate release log<commit_after>#!/usr/bin/env python3
import argparse
import json
import requests
import re
BASE_URL = 'https://api.github.com/repos/magicstack/uvloop/compare'
def main():
parser = argparse.ArgumentParser(
description='Generate release log.')
parser.add_argument('--to', dest='to_hash', default='master', type=str)
parser.add_argument('--from', dest='from_hash', type=str)
args = parser.parse_args()
r = requests.get(f'{BASE_URL}/{args.from_hash}...{args.to_hash}')
data = json.loads(r.text)
for commit in data['commits']:
message = commit['commit']['message']
first_line = message.partition('\n\n')[0]
if commit.get('author'):
username = '@{}'.format(commit['author']['login'])
else:
username = commit['commit']['author']['name']
sha = commit["sha"][:8]
m = re.search(r'\#(?P<num>\d+)\b', message)
if m:
issue_num = m.group('num')
else:
issue_num = None
print(f'* {first_line}')
print(f' (by {username} in {sha}', end='')
if issue_num:
print(f' for #{issue_num})')
else:
print(')')
print()
if __name__ == '__main__':
main()
|
|
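One detail of the script above that is easy to miss is how it pulls an issue number out of each commit message. Shown in isolation with a made-up commit message:

# The issue-number extraction used in release_log.py above; the sample message
# is illustrative only.
import re

message = 'Fix handle leak in Loop.close (#123)\n\nLonger description here.'
m = re.search(r'\#(?P<num>\d+)\b', message)
print(m.group('num') if m else None)  # 123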
916d4e2860e231be0b2f602212931c32dcf72743
|
migrations/versions/0287_drop_branding_domains.py
|
migrations/versions/0287_drop_branding_domains.py
|
"""
Revision ID: 0287_drop_branding_domains
Revises: 0286_add_unique_email_name
Create Date: 2019-04-05 16:25:11.535816
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0287_drop_branding_domains'
down_revision = '0286_add_unique_email_name'
def upgrade():
op.drop_constraint('uq_email_branding_domain', 'email_branding', type_='unique')
op.drop_column('email_branding', 'domain')
op.drop_constraint('letter_branding_domain_key', 'letter_branding', type_='unique')
op.drop_column('letter_branding', 'domain')
def downgrade():
op.add_column('letter_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('letter_branding_domain_key', 'letter_branding', ['domain'])
op.add_column('email_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('uq_email_branding_domain', 'email_branding', ['domain'])
|
Remove domain columns from branding table
|
Remove domain columns from branding table
This relationship is via the `Organisation` now; we don’t use this
column to fudge a relationship based on the user’s email address and the
matching something in these columns.
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Remove domain columns from branding table
This relationship is via the `Organisation` now; we don’t use this
column to fudge a relationship based on the user’s email address and the
matching something in these columns.
|
"""
Revision ID: 0287_drop_branding_domains
Revises: 0286_add_unique_email_name
Create Date: 2019-04-05 16:25:11.535816
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0287_drop_branding_domains'
down_revision = '0286_add_unique_email_name'
def upgrade():
op.drop_constraint('uq_email_branding_domain', 'email_branding', type_='unique')
op.drop_column('email_branding', 'domain')
op.drop_constraint('letter_branding_domain_key', 'letter_branding', type_='unique')
op.drop_column('letter_branding', 'domain')
def downgrade():
op.add_column('letter_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('letter_branding_domain_key', 'letter_branding', ['domain'])
op.add_column('email_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('uq_email_branding_domain', 'email_branding', ['domain'])
|
<commit_before><commit_msg>Remove domain columns from branding table
This relationship is via the `Organisation` now; we don’t use this
column to fudge a relationship based on the user’s email address and the
matching something in these columns.<commit_after>
|
"""
Revision ID: 0287_drop_branding_domains
Revises: 0286_add_unique_email_name
Create Date: 2019-04-05 16:25:11.535816
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0287_drop_branding_domains'
down_revision = '0286_add_unique_email_name'
def upgrade():
op.drop_constraint('uq_email_branding_domain', 'email_branding', type_='unique')
op.drop_column('email_branding', 'domain')
op.drop_constraint('letter_branding_domain_key', 'letter_branding', type_='unique')
op.drop_column('letter_branding', 'domain')
def downgrade():
op.add_column('letter_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('letter_branding_domain_key', 'letter_branding', ['domain'])
op.add_column('email_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('uq_email_branding_domain', 'email_branding', ['domain'])
|
Remove domain columns from branding table
This relationship is via the `Organisation` now; we don’t use this
column to fudge a relationship based on the user’s email address and the
matching something in these columns."""
Revision ID: 0287_drop_branding_domains
Revises: 0286_add_unique_email_name
Create Date: 2019-04-05 16:25:11.535816
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0287_drop_branding_domains'
down_revision = '0286_add_unique_email_name'
def upgrade():
op.drop_constraint('uq_email_branding_domain', 'email_branding', type_='unique')
op.drop_column('email_branding', 'domain')
op.drop_constraint('letter_branding_domain_key', 'letter_branding', type_='unique')
op.drop_column('letter_branding', 'domain')
def downgrade():
op.add_column('letter_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('letter_branding_domain_key', 'letter_branding', ['domain'])
op.add_column('email_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('uq_email_branding_domain', 'email_branding', ['domain'])
|
<commit_before><commit_msg>Remove domain columns from branding table
This relationship is via the `Organisation` now; we don’t use this
column to fudge a relationship based on the user’s email address and the
matching something in these columns.<commit_after>"""
Revision ID: 0287_drop_branding_domains
Revises: 0286_add_unique_email_name
Create Date: 2019-04-05 16:25:11.535816
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
revision = '0287_drop_branding_domains'
down_revision = '0286_add_unique_email_name'
def upgrade():
op.drop_constraint('uq_email_branding_domain', 'email_branding', type_='unique')
op.drop_column('email_branding', 'domain')
op.drop_constraint('letter_branding_domain_key', 'letter_branding', type_='unique')
op.drop_column('letter_branding', 'domain')
def downgrade():
op.add_column('letter_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('letter_branding_domain_key', 'letter_branding', ['domain'])
op.add_column('email_branding', sa.Column('domain', sa.TEXT(), autoincrement=False, nullable=True))
op.create_unique_constraint('uq_email_branding_domain', 'email_branding', ['domain'])
|
|
fd5824e4e4600b0a3bd58386edfc1029418bd813
|
auth0/v2/test/authentication/test_users.py
|
auth0/v2/test/authentication/test_users.py
|
import unittest
import mock
from ...authentication.users import Users
class TestUsers(unittest.TestCase):
@mock.patch('auth0.v2.authentication.users.Users.get')
def test_userinfo(self, mock_get):
u = Users('my.domain.com')
u.userinfo(access_token='atk')
mock_get.assert_called_with(
url='https://my.domain.com/userinfo',
headers={'Authorization': 'Bearer atk'}
)
@mock.patch('auth0.v2.authentication.users.Users.post')
def test_tokeninfo(self, mock_post):
u = Users('my.domain.com')
u.tokeninfo(jwt='jwtoken')
mock_post.assert_called_with(
url='https://my.domain.com/tokeninfo',
data={'id_token': 'jwtoken'},
headers={'Content-Type: application/json'}
)
|
Add test cases for Users
|
Add test cases for Users
|
Python
|
mit
|
auth0/auth0-python,auth0/auth0-python
|
Add test cases for Users
|
import unittest
import mock
from ...authentication.users import Users
class TestUsers(unittest.TestCase):
@mock.patch('auth0.v2.authentication.users.Users.get')
def test_userinfo(self, mock_get):
u = Users('my.domain.com')
u.userinfo(access_token='atk')
mock_get.assert_called_with(
url='https://my.domain.com/userinfo',
headers={'Authorization': 'Bearer atk'}
)
@mock.patch('auth0.v2.authentication.users.Users.post')
def test_tokeninfo(self, mock_post):
u = Users('my.domain.com')
u.tokeninfo(jwt='jwtoken')
mock_post.assert_called_with(
url='https://my.domain.com/tokeninfo',
data={'id_token': 'jwtoken'},
headers={'Content-Type: application/json'}
)
|
<commit_before><commit_msg>Add test cases for Users<commit_after>
|
import unittest
import mock
from ...authentication.users import Users
class TestUsers(unittest.TestCase):
@mock.patch('auth0.v2.authentication.users.Users.get')
def test_userinfo(self, mock_get):
u = Users('my.domain.com')
u.userinfo(access_token='atk')
mock_get.assert_called_with(
url='https://my.domain.com/userinfo',
headers={'Authorization': 'Bearer atk'}
)
@mock.patch('auth0.v2.authentication.users.Users.post')
def test_tokeninfo(self, mock_post):
u = Users('my.domain.com')
u.tokeninfo(jwt='jwtoken')
mock_post.assert_called_with(
url='https://my.domain.com/tokeninfo',
data={'id_token': 'jwtoken'},
headers={'Content-Type: application/json'}
)
|
Add test cases for Usersimport unittest
import mock
from ...authentication.users import Users
class TestUsers(unittest.TestCase):
@mock.patch('auth0.v2.authentication.users.Users.get')
def test_userinfo(self, mock_get):
u = Users('my.domain.com')
u.userinfo(access_token='atk')
mock_get.assert_called_with(
url='https://my.domain.com/userinfo',
headers={'Authorization': 'Bearer atk'}
)
@mock.patch('auth0.v2.authentication.users.Users.post')
def test_tokeninfo(self, mock_post):
u = Users('my.domain.com')
u.tokeninfo(jwt='jwtoken')
mock_post.assert_called_with(
url='https://my.domain.com/tokeninfo',
data={'id_token': 'jwtoken'},
headers={'Content-Type: application/json'}
)
|
<commit_before><commit_msg>Add test cases for Users<commit_after>import unittest
import mock
from ...authentication.users import Users
class TestUsers(unittest.TestCase):
@mock.patch('auth0.v2.authentication.users.Users.get')
def test_userinfo(self, mock_get):
u = Users('my.domain.com')
u.userinfo(access_token='atk')
mock_get.assert_called_with(
url='https://my.domain.com/userinfo',
headers={'Authorization': 'Bearer atk'}
)
@mock.patch('auth0.v2.authentication.users.Users.post')
def test_tokeninfo(self, mock_post):
u = Users('my.domain.com')
u.tokeninfo(jwt='jwtoken')
mock_post.assert_called_with(
url='https://my.domain.com/tokeninfo',
data={'id_token': 'jwtoken'},
headers={'Content-Type: application/json'}
)
|
|
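A small detail worth flagging in the record above: {'Content-Type: application/json'} is a Python set literal containing one string (the colon is inside the string), not a dict of headers; the assertion presumably passes only because the implementation builds the same value. A quick check:

# The headers value asserted in test_tokeninfo above is a set, not a dict.
headers = {'Content-Type: application/json'}
print(type(headers))                                      # <class 'set'>
print(headers == {'Content-Type': 'application/json'})    # False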
1358eb62cd54d921da0c76dba8536067ff51a31e
|
server/patch_hosts.py
|
server/patch_hosts.py
|
#!/usr/bin/env python
import argparse
from qlmdm import set_gpg, patch_hosts
set_gpg('server')
def parse_args():
parser = argparse.ArgumentParser(description='Queue a patch for one or '
'more hosts')
parser.add_argument('--host', action='append', help='Host(s) on which to '
'execute command (default is all)')
parser.add_argument('--mode', type=lambda m: int(m, 8), help='Mode for '
'patched file (specify in octal, default 0755)')
parser.add_argument('target_path', help='Relative path of file on '
'destination systems')
parser.add_argument('source_file', help='Local file containing patch '
'content')
args = parser.parse_args()
return args
def main():
args = parse_args()
kwargs = {}
if args.mode:
kwargs['patch_mode'] = args.mode
kwargs['patch_content'] = open(args.source_file).read()
kwargs['hosts'] = args.host if args.host else None
patch_hosts(args.target_path, **kwargs)
if __name__ == '__main__':
main()
|
Add script for sending arbitrary patch files to clients
|
Add script for sending arbitrary patch files to clients
|
Python
|
apache-2.0
|
quantopian/PenguinDome,quantopian/PenguinDome
|
Add script for sending arbitrary patch files to clients
|
#!/usr/bin/env python
import argparse
from qlmdm import set_gpg, patch_hosts
set_gpg('server')
def parse_args():
parser = argparse.ArgumentParser(description='Queue a patch for one or '
'more hosts')
parser.add_argument('--host', action='append', help='Host(s) on which to '
'execute command (default is all)')
parser.add_argument('--mode', type=lambda m: int(m, 8), help='Mode for '
'patched file (specify in octal, default 0755)')
parser.add_argument('target_path', help='Relative path of file on '
'destination systems')
parser.add_argument('source_file', help='Local file containing patch '
'content')
args = parser.parse_args()
return args
def main():
args = parse_args()
kwargs = {}
if args.mode:
kwargs['patch_mode'] = args.mode
kwargs['patch_content'] = open(args.source_file).read()
kwargs['hosts'] = args.host if args.host else None
patch_hosts(args.target_path, **kwargs)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for sending arbitrary patch files to clients<commit_after>
|
#!/usr/bin/env python
import argparse
from qlmdm import set_gpg, patch_hosts
set_gpg('server')
def parse_args():
parser = argparse.ArgumentParser(description='Queue a patch for one or '
'more hosts')
parser.add_argument('--host', action='append', help='Host(s) on which to '
'execute command (default is all)')
parser.add_argument('--mode', type=lambda m: int(m, 8), help='Mode for '
'patched file (specify in octal, default 0755)')
parser.add_argument('target_path', help='Relative path of file on '
'destination systems')
parser.add_argument('source_file', help='Local file containing patch '
'content')
args = parser.parse_args()
return args
def main():
args = parse_args()
kwargs = {}
if args.mode:
kwargs['patch_mode'] = args.mode
kwargs['patch_content'] = open(args.source_file).read()
kwargs['hosts'] = args.host if args.host else None
patch_hosts(args.target_path, **kwargs)
if __name__ == '__main__':
main()
|
Add script for sending arbitrary patch files to clients#!/usr/bin/env python
import argparse
from qlmdm import set_gpg, patch_hosts
set_gpg('server')
def parse_args():
parser = argparse.ArgumentParser(description='Queue a patch for one or '
'more hosts')
parser.add_argument('--host', action='append', help='Host(s) on which to '
'execute command (default is all)')
parser.add_argument('--mode', type=lambda m: int(m, 8), help='Mode for '
'patched file (specify in octal, default 0755)')
parser.add_argument('target_path', help='Relative path of file on '
'destination systems')
parser.add_argument('source_file', help='Local file containing patch '
'content')
args = parser.parse_args()
return args
def main():
args = parse_args()
kwargs = {}
if args.mode:
kwargs['patch_mode'] = args.mode
kwargs['patch_content'] = open(args.source_file).read()
kwargs['hosts'] = args.host if args.host else None
patch_hosts(args.target_path, **kwargs)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for sending arbitrary patch files to clients<commit_after>#!/usr/bin/env python
import argparse
from qlmdm import set_gpg, patch_hosts
set_gpg('server')
def parse_args():
parser = argparse.ArgumentParser(description='Queue a patch for one or '
'more hosts')
parser.add_argument('--host', action='append', help='Host(s) on which to '
'execute command (default is all)')
parser.add_argument('--mode', type=lambda m: int(m, 8), help='Mode for '
'patched file (specify in octal, default 0755)')
parser.add_argument('target_path', help='Relative path of file on '
'destination systems')
parser.add_argument('source_file', help='Local file containing patch '
'content')
args = parser.parse_args()
return args
def main():
args = parse_args()
kwargs = {}
if args.mode:
kwargs['patch_mode'] = args.mode
kwargs['patch_content'] = open(args.source_file).read()
kwargs['hosts'] = args.host if args.host else None
patch_hosts(args.target_path, **kwargs)
if __name__ == '__main__':
main()
|
|
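One non-obvious piece of the argument parser above is the --mode option, which is converted from an octal string via type=lambda m: int(m, 8). A quick check of what that produces for a typical value (the value is illustrative only):

# '--mode 755' becomes the integer 493, i.e. 0o755.
print(int('755', 8))  # 493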
5b0cbbd19aef3a28dcee8b7143c1c0644da9c3ff
|
find_bytes.py
|
find_bytes.py
|
shellcode = "\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68\x8f\xf8\x02\x4f\x68\x02\x00\x11\x5c\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xb0\x0b\xcd\x80"
shellcode = shellcode.encode("hex")
shellcode = [shellcode[i:i+2] for i in xrange(0, len(shellcode), 2)]
address = [hex(FindBinary(MinEA(), SEARCH_DOWN|SEARCH_CASE, byte)) for byte in shellcode]
print ','.join(address)
|
Add a script for finding bytes
|
Add a script for finding bytes
|
Python
|
mit
|
jakkdu/idapythons
|
Add a script for finding bytes
|
shellcode = "\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68\x8f\xf8\x02\x4f\x68\x02\x00\x11\x5c\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xb0\x0b\xcd\x80"
shellcode = shellcode.encode("hex")
shellcode = [shellcode[i:i+2] for i in xrange(0, len(shellcode), 2)]
address = [hex(FindBinary(MinEA(), SEARCH_DOWN|SEARCH_CASE, byte)) for byte in shellcode]
print ','.join(address)
|
<commit_before><commit_msg>Add a script for finding bytes<commit_after>
|
shellcode = "\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68\x8f\xf8\x02\x4f\x68\x02\x00\x11\x5c\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xb0\x0b\xcd\x80"
shellcode = shellcode.encode("hex")
shellcode = [shellcode[i:i+2] for i in xrange(0, len(shellcode), 2)]
address = [hex(FindBinary(MinEA(), SEARCH_DOWN|SEARCH_CASE, byte)) for byte in shellcode]
print ','.join(address)
|
Add a script for finding bytesshellcode = "\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68\x8f\xf8\x02\x4f\x68\x02\x00\x11\x5c\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xb0\x0b\xcd\x80"
shellcode = shellcode.encode("hex")
shellcode = [shellcode[i:i+2] for i in xrange(0, len(shellcode), 2)]
address = [hex(FindBinary(MinEA(), SEARCH_DOWN|SEARCH_CASE, byte)) for byte in shellcode]
print ','.join(address)
|
<commit_before><commit_msg>Add a script for finding bytes<commit_after>shellcode = "\x31\xdb\xf7\xe3\x53\x43\x53\x6a\x02\x89\xe1\xb0\x66\xcd\x80\x93\x59\xb0\x3f\xcd\x80\x49\x79\xf9\x68\x8f\xf8\x02\x4f\x68\x02\x00\x11\x5c\x89\xe1\xb0\x66\x50\x51\x53\xb3\x03\x89\xe1\xcd\x80\x52\x68\x2f\x2f\x73\x68\x68\x2f\x62\x69\x6e\x89\xe3\x52\x53\x89\xe1\xb0\x0b\xcd\x80"
shellcode = shellcode.encode("hex")
shellcode = [shellcode[i:i+2] for i in xrange(0, len(shellcode), 2)]
address = [hex(FindBinary(MinEA(), SEARCH_DOWN|SEARCH_CASE, byte)) for byte in shellcode]
print ','.join(address)
|
|
17c42034137ca01557163f9d5386450303b872d7
|
scripts/tests/test_populate_new_and_noteworthy.py
|
scripts/tests/test_populate_new_and_noteworthy.py
|
import datetime
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.populate_new_and_noteworthy_projects import main
class TestPopulateNewAndNoteworthy(OsfTestCase):
def setUp(self):
super(TestPopulateNewAndNoteworthy, self).setUp()
self.project = ProjectFactory()
self.project2 = ProjectFactory()
self.project3 = ProjectFactory()
self.project4 = ProjectFactory()
self.project5 = ProjectFactory()
def test_migrate_new_and_noteworthy(self):
main(dry_run=False)
|
Add file for testing script that populates new and noteworthy nodes. Tests still need to be written.
|
Add file for testing script that populates new and noteworthy nodes. Tests still need to be written.
|
Python
|
apache-2.0
|
caseyrollins/osf.io,TomBaxter/osf.io,TomHeatwole/osf.io,SSJohns/osf.io,Nesiehr/osf.io,crcresearch/osf.io,emetsger/osf.io,crcresearch/osf.io,chennan47/osf.io,caneruguz/osf.io,emetsger/osf.io,leb2dg/osf.io,asanfilippo7/osf.io,TomHeatwole/osf.io,mluo613/osf.io,cwisecarver/osf.io,zamattiac/osf.io,amyshi188/osf.io,DanielSBrown/osf.io,DanielSBrown/osf.io,kch8qx/osf.io,aaxelb/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,RomanZWang/osf.io,mfraezz/osf.io,laurenrevere/osf.io,SSJohns/osf.io,cslzchen/osf.io,abought/osf.io,monikagrabowska/osf.io,abought/osf.io,pattisdr/osf.io,rdhyee/osf.io,alexschiller/osf.io,mattclark/osf.io,acshi/osf.io,adlius/osf.io,mluo613/osf.io,aaxelb/osf.io,wearpants/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,rdhyee/osf.io,Johnetordoff/osf.io,chrisseto/osf.io,amyshi188/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,zachjanicki/osf.io,mluke93/osf.io,erinspace/osf.io,felliott/osf.io,kwierman/osf.io,amyshi188/osf.io,mluo613/osf.io,emetsger/osf.io,abought/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,sloria/osf.io,laurenrevere/osf.io,monikagrabowska/osf.io,hmoco/osf.io,alexschiller/osf.io,kch8qx/osf.io,monikagrabowska/osf.io,wearpants/osf.io,icereval/osf.io,mluke93/osf.io,HalcyonChimera/osf.io,icereval/osf.io,Nesiehr/osf.io,felliott/osf.io,RomanZWang/osf.io,chennan47/osf.io,SSJohns/osf.io,alexschiller/osf.io,leb2dg/osf.io,zachjanicki/osf.io,CenterForOpenScience/osf.io,jnayak1/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,mluke93/osf.io,felliott/osf.io,kch8qx/osf.io,kch8qx/osf.io,doublebits/osf.io,TomHeatwole/osf.io,cwisecarver/osf.io,chrisseto/osf.io,Nesiehr/osf.io,samchrisinger/osf.io,doublebits/osf.io,caneruguz/osf.io,rdhyee/osf.io,saradbowman/osf.io,chrisseto/osf.io,pattisdr/osf.io,erinspace/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,DanielSBrown/osf.io,mfraezz/osf.io,doublebits/osf.io,jnayak1/osf.io,leb2dg/osf.io,kch8qx/osf.io,DanielSBrown/osf.io,crcresearch/osf.io,brianjgeiger/osf.io,hmoco/osf.io,asanfilippo7/osf.io,asanfilippo7/osf.io,alexschiller/osf.io,mluo613/osf.io,mfraezz/osf.io,adlius/osf.io,sloria/osf.io,chrisseto/osf.io,mluo613/osf.io,aaxelb/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,abought/osf.io,kwierman/osf.io,kwierman/osf.io,laurenrevere/osf.io,caneruguz/osf.io,zamattiac/osf.io,SSJohns/osf.io,wearpants/osf.io,doublebits/osf.io,binoculars/osf.io,adlius/osf.io,zamattiac/osf.io,Johnetordoff/osf.io,mattclark/osf.io,brianjgeiger/osf.io,emetsger/osf.io,zachjanicki/osf.io,CenterForOpenScience/osf.io,asanfilippo7/osf.io,samchrisinger/osf.io,icereval/osf.io,zamattiac/osf.io,pattisdr/osf.io,mluke93/osf.io,zachjanicki/osf.io,samchrisinger/osf.io,acshi/osf.io,cslzchen/osf.io,monikagrabowska/osf.io,binoculars/osf.io,samchrisinger/osf.io,TomBaxter/osf.io,adlius/osf.io,jnayak1/osf.io,doublebits/osf.io,mfraezz/osf.io,hmoco/osf.io,kwierman/osf.io,baylee-d/osf.io,amyshi188/osf.io,RomanZWang/osf.io,aaxelb/osf.io,RomanZWang/osf.io,binoculars/osf.io,jnayak1/osf.io,baylee-d/osf.io,saradbowman/osf.io,wearpants/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,alexschiller/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,RomanZWang/osf.io,baylee-d/osf.io,felliott/osf.io,brianjgeiger/osf.io,hmoco/osf.io,sloria/osf.io,acshi/osf.io,acshi/osf.io,TomHeatwole/osf.io,erinspace/osf.io,cwisecarver/osf.io,acshi/osf.io
|
Add file for testing script that populates new and noteworthy nodes. Tests still need to be written.
|
import datetime
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.populate_new_and_noteworthy_projects import main
class TestPopulateNewAndNoteworthy(OsfTestCase):
def setUp(self):
super(TestPopulateNewAndNoteworthy, self).setUp()
self.project = ProjectFactory()
self.project2 = ProjectFactory()
self.project3 = ProjectFactory()
self.project4 = ProjectFactory()
self.project5 = ProjectFactory()
def test_migrate_new_and_noteworthy(self):
main(dry_run=False)
|
<commit_before><commit_msg>Add file for testing script that populates new and noteworthy nodes. Tests still need to be written.<commit_after>
|
import datetime
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.populate_new_and_noteworthy_projects import main
class TestPopulateNewAndNoteworthy(OsfTestCase):
def setUp(self):
super(TestPopulateNewAndNoteworthy, self).setUp()
self.project = ProjectFactory()
self.project2 = ProjectFactory()
self.project3 = ProjectFactory()
self.project4 = ProjectFactory()
self.project5 = ProjectFactory()
def test_migrate_new_and_noteworthy(self):
main(dry_run=False)
|
Add file for testing script that populates new and noteworthy nodes. Tests still need to be written.import datetime
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.populate_new_and_noteworthy_projects import main
class TestPopulateNewAndNoteworthy(OsfTestCase):
def setUp(self):
super(TestPopulateNewAndNoteworthy, self).setUp()
self.project = ProjectFactory()
self.project2 = ProjectFactory()
self.project3 = ProjectFactory()
self.project4 = ProjectFactory()
self.project5 = ProjectFactory()
def test_migrate_new_and_noteworthy(self):
main(dry_run=False)
|
<commit_before><commit_msg>Add file for testing script that populates new and noteworthy nodes. Tests still need to be written.<commit_after>import datetime
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import ProjectFactory
from scripts.populate_new_and_noteworthy_projects import main
class TestPopulateNewAndNoteworthy(OsfTestCase):
def setUp(self):
super(TestPopulateNewAndNoteworthy, self).setUp()
self.project = ProjectFactory()
self.project2 = ProjectFactory()
self.project3 = ProjectFactory()
self.project4 = ProjectFactory()
self.project5 = ProjectFactory()
def test_migrate_new_and_noteworthy(self):
main(dry_run=False)
|
|
09fe0bfb275ded53b2569ce1160a931238fd7116
|
Python/CompareFreeBSDDocs.py
|
Python/CompareFreeBSDDocs.py
|
# -*- Coding: utf-8 -*-
#
# Copyright (C) 2014 Loic BLOT <http://www.unix-experience.fr>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Europages nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os, os.path
filesEN = [f for f in os.listdir('./doc/articles') if os.path.isdir("doc/articles/%s" % f)]
filesFR = [f for f in os.listdir('./docfr/articles') if os.path.isdir("docfr/articles/%s" % f)]
missingDirectory = []
presentDirectory = []
for dn in filesEN:
if dn not in filesFR:
print "Missing directory article docfr/articles/%s" % dn
missingDirectory.append(dn)
else:
presentDirectory.append(dn)
print dn
#for f in files:
|
Create a basic script to compare FreeBSD docs
|
Create a basic script to compare FreeBSD docs
needs more and more code :)
|
Python
|
bsd-2-clause
|
nerzhul/MiscScripts,nerzhul/MiscScripts
|
Create a basic script to compare FreeBSD docs
needs more and more code :)
|
# -*- Coding: utf-8 -*-
#
# Copyright (C) 2014 Loic BLOT <http://www.unix-experience.fr>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Europages nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os, os.path
filesEN = [f for f in os.listdir('./doc/articles') if os.path.isdir("doc/articles/%s" % f)]
filesFR = [f for f in os.listdir('./docfr/articles') if os.path.isdir("docfr/articles/%s" % f)]
missingDirectory = []
presentDirectory = []
for dn in filesEN:
if dn not in filesFR:
print "Missing directory article docfr/articles/%s" % dn
missingDirectory.append(dn)
else:
presentDirectory.append(dn)
print dn
#for f in files:
|
<commit_before><commit_msg>Create a basic script to compare FreeBSD docs
needs more and more code :)<commit_after>
|
# -*- Coding: utf-8 -*-
#
# Copyright (C) 2014 Loic BLOT <http://www.unix-experience.fr>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Europages nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os, os.path
filesEN = [f for f in os.listdir('./doc/articles') if os.path.isdir("doc/articles/%s" % f)]
filesFR = [f for f in os.listdir('./docfr/articles') if os.path.isdir("docfr/articles/%s" % f)]
missingDirectory = []
presentDirectory = []
for dn in filesEN:
if dn not in filesFR:
print "Missing directory article docfr/articles/%s" % dn
missingDirectory.append(dn)
else:
presentDirectory.append(dn)
print dn
#for f in files:
|
Create a basic script to compare FreeBSD docs
needs more and more code :)# -*- Coding: utf-8 -*-
#
# Copyright (C) 2014 Loic BLOT <http://www.unix-experience.fr>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Europages nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os, os.path
filesEN = [f for f in os.listdir('./doc/articles') if os.path.isdir("doc/articles/%s" % f)]
filesFR = [f for f in os.listdir('./docfr/articles') if os.path.isdir("docfr/articles/%s" % f)]
missingDirectory = []
presentDirectory = []
for dn in filesEN:
if dn not in filesFR:
print "Missing directory article docfr/articles/%s" % dn
missingDirectory.append(dn)
else:
presentDirectory.append(dn)
print dn
#for f in files:
|
<commit_before><commit_msg>Create a basic script to compare FreeBSD docs
needs more and more code :)<commit_after># -*- Coding: utf-8 -*-
#
# Copyright (C) 2014 Loic BLOT <http://www.unix-experience.fr>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the Europages nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os, os.path
filesEN = [f for f in os.listdir('./doc/articles') if os.path.isdir("doc/articles/%s" % f)]
filesFR = [f for f in os.listdir('./docfr/articles') if os.path.isdir("docfr/articles/%s" % f)]
missingDirectory = []
presentDirectory = []
for dn in filesEN:
if dn not in filesFR:
print "Missing directory article docfr/articles/%s" % dn
missingDirectory.append(dn)
else:
presentDirectory.append(dn)
print dn
#for f in files:
|
|
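The script in this record collects missing and present translation directories with an explicit loop. Purely as an illustration, and not part of the commit, the same comparison can be written with set operations; the doc/articles and docfr/articles paths are the ones the script uses, while the function name and the __main__ guard are invented for the sketch.

import os

def compare_article_dirs(src_root, dst_root):
    # Sub-directories of the English tree (doc/articles)
    src = {d for d in os.listdir(src_root) if os.path.isdir(os.path.join(src_root, d))}
    # Sub-directories of the French tree (docfr/articles)
    dst = {d for d in os.listdir(dst_root) if os.path.isdir(os.path.join(dst_root, d))}
    return sorted(src - dst), sorted(src & dst)

if __name__ == "__main__":
    missing, present = compare_article_dirs("doc/articles", "docfr/articles")
    for d in missing:
        print("Missing directory article docfr/articles/%s" % d)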
f80bf4cf1723db814e62753ee4bd5a7c302e09ee
|
src/project/lda_corpus.py
|
src/project/lda_corpus.py
|
import sys
import logging
from os.path import isdir, isfile
from gensim import models
from corpus import Corpus
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class LDACorpus(Corpus):
def __init__(self, dict_loc, vec_loc, no_topics=100, update=1, chunksize=10000, passes=1):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
self.no_topics = no_topics
self.update = update
self.chunksize = chunksize
self.passes = passes
self.dict_loc = dict_loc
self.vec_loc = vec_loc
self.transformation = models.ldamodel.LdaModel(corpus=self.docs, id2word=self.dictionary, num_topics=self.no_topics, update_every=self.update, chunksize=self.chunksize, passes=self.passes)
def print_topics(self):
self.transformation.print_topics(20)
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
Add basic framework for LDA
|
Add basic framework for LDA
|
Python
|
mit
|
PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project
|
Add basic framework for LDA
|
import sys
import logging
from os.path import isdir, isfile
from gensim import models
from corpus import Corpus
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class LDACorpus(Corpus):
def __init__(self, dict_loc, vec_loc, no_topics=100, update=1, chunksize=10000, passes=1):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
self.no_topics = no_topics
self.update = update
self.chunksize = chunksize
self.passes = passes
self.dict_loc = dict_loc
self.vec_loc = vec_loc
self.transformation = models.ldamodel.LdaModel(corpus=self.docs, id2word=self.dictionary, num_topics=self.no_topics, update_every=self.update, chunksize=self.chunksize, passes=self.passes)
def print_topics(self):
self.transformation.print_topics(20)
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add basic framework for LDA<commit_after>
|
import sys
import logging
from os.path import isdir, isfile
from gensim import models
from corpus import Corpus
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class LDACorpus(Corpus):
def __init__(self, dict_loc, vec_loc, no_topics=100, update=1, chunksize=10000, passes=1):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
self.no_topics = no_topics
self.update = update
self.chunksize = chunksize
self.passes = passes
self.dict_loc = dict_loc
self.vec_loc = vec_loc
self.transformation = models.ldamodel.LdaModel(corpus=self.docs, id2word=self.dictionary, num_topics=self.no_topics, update_every=self.update, chunksize=self.chunksize, passes=self.passes)
def print_topics(self):
self.transformation.print_topics(20)
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
Add basic framework for LDAimport sys
import logging
from os.path import isdir, isfile
from gensim import models
from corpus import Corpus
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class LDACorpus(Corpus):
def __init__(self, dict_loc, vec_loc, no_topics=100, update=1, chunksize=10000, passes=1):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
self.no_topics = no_topics
self.update = update
self.chunksize = chunksize
self.passes = passes
self.dict_loc = dict_loc
self.vec_loc = vec_loc
self.transformation = models.ldamodel.LdaModel(corpus=self.docs, id2word=self.dictionary, num_topics=self.no_topics, update_every=self.update, chunksize=self.chunksize, passes=self.passes)
def print_topics(self):
self.transformation.print_topics(20)
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
<commit_before><commit_msg>Add basic framework for LDA<commit_after>import sys
import logging
from os.path import isdir, isfile
from gensim import models
from corpus import Corpus
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
class LDACorpus(Corpus):
def __init__(self, dict_loc, vec_loc, no_topics=100, update=1, chunksize=10000, passes=1):
Corpus.__init__(self)
Corpus.load(self, dict_loc, vec_loc)
self.no_topics = no_topics
self.update = update
self.chunksize = chunksize
self.passes = passes
self.dict_loc = dict_loc
self.vec_loc = vec_loc
self.transformation = models.ldamodel.LdaModel(corpus=self.docs, id2word=self.dictionary, num_topics=self.no_topics, update_every=self.update, chunksize=self.chunksize, passes=self.passes)
def print_topics(self):
self.transformation.print_topics(20)
def main():
if len(sys.argv) > 2 and isdir(sys.argv[1]) and isfile(sys.argv[2]) and isfile(sys.argv[3]):
corpus = Corpus(sys.argv[1])
corpus.save(sys.argv[2], sys.argv[3])
corpus = LDACorpus(sys.argv[2], sys.argv[3], no_topics=25)
corpus.print_topics()
else:
print "Corpus requires directory as an argument."
if __name__ == "__main__": main()
|
|
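As context for the LDACorpus wrapper in this record, and not part of the commit, here is a self-contained sketch of the gensim calls it delegates to; the toy documents are invented. As an aside, main() in the record reads sys.argv[3] while only requiring len(sys.argv) > 2, so the guard does not fully protect that index.

from gensim import corpora, models

texts = [["human", "interface", "computer"],
         ["survey", "user", "computer", "system"],
         ["graph", "trees", "minors"]]

dictionary = corpora.Dictionary(texts)            # id2word mapping
bow = [dictionary.doc2bow(t) for t in texts]      # bag-of-words vectors

lda = models.LdaModel(corpus=bow, id2word=dictionary, num_topics=2,
                      update_every=1, chunksize=10000, passes=1)
for topic in lda.print_topics(2):
    print(topic)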
1984fbe9a59eb5de3c8e9af477ac277a978cf21d
|
evaluation/packages/displacementKernels.py
|
evaluation/packages/displacementKernels.py
|
"""@package DisplacementKernels
This module defines an interface to read and use InputGen projects
See C++ InputGen project for more details on Projects
"""
from decimal import Decimal
class DisplacementKernel(object):
"""Python representation of the C++ class AbstractDisplacementKernel
"""
def __init__(self, name, typeId, enabled):
self.name = name
self.typeId = typeId
self.enabled = enabled
def getName(self):
return self.name
def getType(self):
return self.typeId
def isEnabled(self):
return self.enabled
def __str__(self):
return "%s displacement kernel (enabled=%s)" % (self.name, self.enabled)
class UniformRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class UniformRandomDisplacementKernel
"""
def __init__(self, rangeMin, rangeMax, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = rangeMin
self.rangeMax = rangeMax
def __init__(self, paramList, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = paramList[0]
self.rangeMax = paramList[1]
class NormalRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class NormalRandomDisplacementKernel
"""
def __init__(self, mean, stdev, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = mean
self.stdev = stdev
def __init__(self, paramList, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = paramList[0]
self.stdev = paramList[1]
class BiasDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class BiasDisplacementKernel
"""
def __init__(self, bias, enabled):
super(BiasDisplacementKernel, self).__init__("Bias", 2, enabled)
self.bias = bias
"""Factory to generate displacement kernels from its typeId
\param paramArray Array generated when parsing the xml file
"""
def generateDisplacementKernel(paramArray):
def UniformRandomDisplacementParam(paramArray):
return float(paramArray['distributionMin']), float(paramArray['distributionMax'])
def NormalRandomDisplacementParam(paramArray):
return float(paramArray['distributionStdDev']), float(paramArray['distributionMean'])
def BiasDisplacementParam(paramArray):
return float(paramArray['bias'])
def isKernelEnabled(paramArray):
return paramArray['enabled'] != '0'
factory = { '0': UniformRandomDisplacementKernel,
'1': NormalRandomDisplacementKernel,
'2': BiasDisplacementKernel }
paramfactory = { '0': UniformRandomDisplacementParam,
'1': NormalRandomDisplacementParam ,
'2': BiasDisplacementParam }
typeId = paramArray['typeId']
return factory[typeId](paramfactory[typeId](paramArray), isKernelEnabled(paramArray))
|
Define kernel classes and a factory to generate them from xml
|
Define kernel classes and a factory to generate them from xml
|
Python
|
apache-2.0
|
amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt
|
Define kernel classes and a factory to generate them from xml
|
"""@package DisplacementKernels
This module defines an interface to read and use InputGen projects
See C++ InputGen project for more details on Projects
"""
from decimal import Decimal
class DisplacementKernel(object):
"""Python representation of the C++ class AbstractDisplacementKernel
"""
def __init__(self, name, typeId, enabled):
self.name = name
self.typeId = typeId
self.enabled = enabled
def getName(self):
return self.name
def getType(self):
return self.typeId
def isEnabled(self):
return self.enabled
def __str__(self):
return "%s displacement kernel (enabled=%s)" % (self.name, self.enabled)
class UniformRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class UniformRandomDisplacementKernel
"""
def __init__(self, rangeMin, rangeMax, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = rangeMin
self.rangeMax = rangeMax
def __init__(self, paramList, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = paramList[0]
self.rangeMax = paramList[1]
class NormalRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class NormalRandomDisplacementKernel
"""
def __init__(self, mean, stdev, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = mean
self.stdev = stdev
def __init__(self, paramList, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = paramList[0]
self.stdev = paramList[1]
class BiasDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class BiasDisplacementKernel
"""
def __init__(self, bias, enabled):
super(BiasDisplacementKernel, self).__init__("Bias", 2, enabled)
self.bias = bias
"""Factory to generate displacement kernels from its typeId
\param paramArray Array generated when parsing the xml file
"""
def generateDisplacementKernel(paramArray):
def UniformRandomDisplacementParam(paramArray):
return float(paramArray['distributionMin']), float(paramArray['distributionMax'])
def NormalRandomDisplacementParam(paramArray):
return float(paramArray['distributionStdDev']), float(paramArray['distributionMean'])
def BiasDisplacementParam(paramArray):
return float(paramArray['bias'])
def isKernelEnabled(paramArray):
return paramArray['enabled'] != '0'
factory = { '0': UniformRandomDisplacementKernel,
'1': NormalRandomDisplacementKernel,
'2': BiasDisplacementKernel }
paramfactory = { '0': UniformRandomDisplacementParam,
'1': NormalRandomDisplacementParam ,
'2': BiasDisplacementParam }
typeId = paramArray['typeId']
return factory[typeId](paramfactory[typeId](paramArray), isKernelEnabled(paramArray))
|
<commit_before><commit_msg>Define kernel classes and a factory to generate them from xml<commit_after>
|
"""@package DisplacementKernels
This module defines an interface to read and use InputGen projects
See C++ InputGen project for more details on Projects
"""
from decimal import Decimal
class DisplacementKernel(object):
"""Python representation of the C++ class AbstractDisplacementKernel
"""
def __init__(self, name, typeId, enabled):
self.name = name
self.typeId = typeId
self.enabled = enabled
def getName(self):
return self.name
def getType(self):
return self.typeId
def isEnabled(self):
return self.enabled
def __str__(self):
return "%s displacement kernel (enabled=%s)" % (self.name, self.enabled)
class UniformRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class UniformRandomDisplacementKernel
"""
def __init__(self, rangeMin, rangeMax, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = rangeMin
self.rangeMax = rangeMax
def __init__(self, paramList, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = paramList[0]
self.rangeMax = paramList[1]
class NormalRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class NormalRandomDisplacementKernel
"""
def __init__(self, mean, stdev, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = mean
self.stdev = stdev
def __init__(self, paramList, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = paramList[0]
self.stdev = paramList[1]
class BiasDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class BiasDisplacementKernel
"""
def __init__(self, bias, enabled):
super(BiasDisplacementKernel, self).__init__("Bias", 2, enabled)
self.bias = bias
"""Factory to generate displacement kernels from its typeId
\param paramArray Array generated when parsing the xml file
"""
def generateDisplacementKernel(paramArray):
def UniformRandomDisplacementParam(paramArray):
return float(paramArray['distributionMin']), float(paramArray['distributionMax'])
def NormalRandomDisplacementParam(paramArray):
return float(paramArray['distributionStdDev']), float(paramArray['distributionMean'])
def BiasDisplacementParam(paramArray):
return float(paramArray['bias'])
def isKernelEnabled(paramArray):
return paramArray['enabled'] != '0'
factory = { '0': UniformRandomDisplacementKernel,
'1': NormalRandomDisplacementKernel,
'2': BiasDisplacementKernel }
paramfactory = { '0': UniformRandomDisplacementParam,
'1': NormalRandomDisplacementParam ,
'2': BiasDisplacementParam }
typeId = paramArray['typeId']
return factory[typeId](paramfactory[typeId](paramArray), isKernelEnabled(paramArray))
|
Define kernel classes and a factory to generate them from xml"""@package DisplacementKernels
This module defines an interface to read and use InputGen projects
See C++ InputGen project for more details on Projects
"""
from decimal import Decimal
class DisplacementKernel(object):
"""Python representation of the C++ class AbstractDisplacementKernel
"""
def __init__(self, name, typeId, enabled):
self.name = name
self.typeId = typeId
self.enabled = enabled
def getName(self):
return self.name
def getType(self):
return self.typeId
def isEnabled(self):
return self.enabled
def __str__(self):
return "%s displacement kernel (enabled=%s)" % (self.name, self.enabled)
class UniformRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class UniformRandomDisplacementKernel
"""
def __init__(self, rangeMin, rangeMax, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = rangeMin
self.rangeMax = rangeMax
def __init__(self, paramList, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = paramList[0]
self.rangeMax = paramList[1]
class NormalRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class NormalRandomDisplacementKernel
"""
def __init__(self, mean, stdev, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = mean
self.stdev = stdev
def __init__(self, paramList, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = paramList[0]
self.stdev = paramList[1]
class BiasDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class BiasDisplacementKernel
"""
def __init__(self, bias, enabled):
super(BiasDisplacementKernel, self).__init__("Bias", 2, enabled)
self.bias = bias
"""Factory to generate displacement kernels from its typeId
\param paramArray Array generated when parsing the xml file
"""
def generateDisplacementKernel(paramArray):
def UniformRandomDisplacementParam(paramArray):
return float(paramArray['distributionMin']), float(paramArray['distributionMax'])
def NormalRandomDisplacementParam(paramArray):
return float(paramArray['distributionStdDev']), float(paramArray['distributionMean'])
def BiasDisplacementParam(paramArray):
return float(paramArray['bias'])
def isKernelEnabled(paramArray):
return paramArray['enabled'] != '0'
factory = { '0': UniformRandomDisplacementKernel,
'1': NormalRandomDisplacementKernel,
'2': BiasDisplacementKernel }
paramfactory = { '0': UniformRandomDisplacementParam,
'1': NormalRandomDisplacementParam ,
'2': BiasDisplacementParam }
typeId = paramArray['typeId']
return factory[typeId](paramfactory[typeId](paramArray), isKernelEnabled(paramArray))
|
<commit_before><commit_msg>Define kernel classes and a factory to generate them from xml<commit_after>"""@package DisplacementKernels
This module defines an interface to read and use InputGen projects
See C++ InputGen project for more details on Projects
"""
from decimal import Decimal
class DisplacementKernel(object):
"""Python representation of the C++ class AbstractDisplacementKernel
"""
def __init__(self, name, typeId, enabled):
self.name = name
self.typeId = typeId
self.enabled = enabled
def getName(self):
return self.name
def getType(self):
return self.typeId
def isEnabled(self):
return self.enabled
def __str__(self):
return "%s displacement kernel (enabled=%s)" % (self.name, self.enabled)
class UniformRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class UniformRandomDisplacementKernel
"""
def __init__(self, rangeMin, rangeMax, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = rangeMin
self.rangeMax = rangeMax
def __init__(self, paramList, enabled):
super(UniformRandomDisplacementKernel, self).__init__("Random (Uniform)", 0, enabled)
self.rangeMin = paramList[0]
self.rangeMax = paramList[1]
class NormalRandomDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class NormalRandomDisplacementKernel
"""
def __init__(self, mean, stdev, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = mean
self.stdev = stdev
def __init__(self, paramList, enabled):
super(NormalRandomDisplacementKernel, self).__init__("Random (Normal)", 1, enabled)
self.mean = paramList[0]
self.stdev = paramList[1]
class BiasDisplacementKernel(DisplacementKernel):
"""Python representation of the C++ class BiasDisplacementKernel
"""
def __init__(self, bias, enabled):
super(BiasDisplacementKernel, self).__init__("Bias", 2, enabled)
self.bias = bias
"""Factory to generate displacement kernels from its typeId
\param paramArray Array generated when parsing the xml file
"""
def generateDisplacementKernel(paramArray):
def UniformRandomDisplacementParam(paramArray):
return float(paramArray['distributionMin']), float(paramArray['distributionMax'])
def NormalRandomDisplacementParam(paramArray):
return float(paramArray['distributionStdDev']), float(paramArray['distributionMean'])
def BiasDisplacementParam(paramArray):
return float(paramArray['bias'])
def isKernelEnabled(paramArray):
return paramArray['enabled'] != '0'
factory = { '0': UniformRandomDisplacementKernel,
'1': NormalRandomDisplacementKernel,
'2': BiasDisplacementKernel }
paramfactory = { '0': UniformRandomDisplacementParam,
'1': NormalRandomDisplacementParam ,
'2': BiasDisplacementParam }
typeId = paramArray['typeId']
return factory[typeId](paramfactory[typeId](paramArray), isKernelEnabled(paramArray))
|
|
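One detail worth noting in the kernel classes above: UniformRandomDisplacementKernel and NormalRandomDisplacementKernel each define __init__ twice, and Python keeps only the last definition, so only the paramList form is effective; in the normal case the tuple returned by NormalRandomDisplacementParam is (stddev, mean), so self.mean ends up holding the standard deviation. A hedged, illustrative alternative, not from the commit, that keeps both construction paths via a classmethod:

class UniformRandomKernel(object):
    """Illustrative stand-in for UniformRandomDisplacementKernel."""

    def __init__(self, range_min, range_max, enabled=True):
        self.range_min = range_min
        self.range_max = range_max
        self.enabled = enabled

    @classmethod
    def from_param_list(cls, param_list, enabled=True):
        # param_list = (rangeMin, rangeMax) as produced by the XML parser
        return cls(param_list[0], param_list[1], enabled)

k1 = UniformRandomKernel(-0.5, 0.5)
k2 = UniformRandomKernel.from_param_list((-0.5, 0.5), enabled=False)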
f71251f5f5b25da9e76dcb93a5b0ffbda3efa0c6
|
src/lib/constants/element/page_header/lhn_menu/create_new_program.py
|
src/lib/constants/element/page_header/lhn_menu/create_new_program.py
|
SELECTOR_TITLE = '[data-test-id="new_program_field_title_a63ed79d"]'
SELECTOR_DESCRIPTION = '[data-test-id="new_program_field_description_77c4a06d"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_NOTES = '[data-test-id="new_program_field_notes_75b8bc05"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_CODE = '[data-test-id="new_program_field_code_334276e2"]'
SELECTOR_STATE = '[data-test-id="new_program_dropdown_state_036a1fa6"]'
SELECTOR_PRIVATE_CHECKBOX = '[data-test-id="new_page_checkbox_ed1fdde7"]'
SELECTOR_PRIMARY_CONTACT = '[data-test-id=' \
'"new_program_field_primary_contact_86160053"]'
SELECTOR_SECONDARY_CONTACT = '[data-test-id=' \
'"new_program_field_secondary_contact_86160053"]'
SELECTOR_BUTTON_SAVE_AND_CLOSE = '[data-test-id=' \
'"new_program_button_save_86160053"]'
SELECTOR_PROGRAM_URL = '[data-test-id=' \
'"new_program_field_program_url_86160053"]'
SELECTOR_REFERENCE_URL = '[data-test-id=' \
'"new_program_field_reference_url_86160053"]'
SELECTOR_EFFECTIVE_DATE = '[data-test-id=' \
'"new_program_field_effective_date_f2783a28"]'
SELECTOR_STOP_DATE = '[data-test-id=' \
'"new_program_field_stop_date_f2783a28"]'
|
Add selectors for elements in new program page
|
Add selectors for elements in new program page
|
Python
|
apache-2.0
|
andrei-karalionak/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,kr41/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,kr41/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,AleksNeStu/ggrc-core,jmakov/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,josthkko/ggrc-core,jmakov/ggrc-core,josthkko/ggrc-core,AleksNeStu/ggrc-core,prasannav7/ggrc-core,NejcZupec/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,prasannav7/ggrc-core,kr41/ggrc-core,jmakov/ggrc-core
|
Add selectors for elements in new program page
|
SELECTOR_TITLE = '[data-test-id="new_program_field_title_a63ed79d"]'
SELECTOR_DESCRIPTION = '[data-test-id="new_program_field_description_77c4a06d"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_NOTES = '[data-test-id="new_program_field_notes_75b8bc05"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_CODE = '[data-test-id="new_program_field_code_334276e2"]'
SELECTOR_STATE = '[data-test-id="new_program_dropdown_state_036a1fa6"]'
SELECTOR_PRIVATE_CHECKBOX = '[data-test-id="new_page_checkbox_ed1fdde7"]'
SELECTOR_PRIMARY_CONTACT = '[data-test-id=' \
'"new_program_field_primary_contact_86160053"]'
SELECTOR_SECONDARY_CONTACT = '[data-test-id=' \
'"new_program_field_secondary_contact_86160053"]'
SELECTOR_BUTTON_SAVE_AND_CLOSE = '[data-test-id=' \
'"new_program_button_save_86160053"]'
SELECTOR_PROGRAM_URL = '[data-test-id=' \
'"new_program_field_program_url_86160053"]'
SELECTOR_REFERENCE_URL = '[data-test-id=' \
'"new_program_field_reference_url_86160053"]'
SELECTOR_EFFECTIVE_DATE = '[data-test-id=' \
'"new_program_field_effective_date_f2783a28"]'
SELECTOR_STOP_DATE = '[data-test-id=' \
'"new_program_field_stop_date_f2783a28"]'
|
<commit_before><commit_msg>Add selectors for elements in new program page<commit_after>
|
SELECTOR_TITLE = '[data-test-id="new_program_field_title_a63ed79d"]'
SELECTOR_DESCRIPTION = '[data-test-id="new_program_field_description_77c4a06d"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_NOTES = '[data-test-id="new_program_field_notes_75b8bc05"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_CODE = '[data-test-id="new_program_field_code_334276e2"]'
SELECTOR_STATE = '[data-test-id="new_program_dropdown_state_036a1fa6"]'
SELECTOR_PRIVATE_CHECKBOX = '[data-test-id="new_page_checkbox_ed1fdde7"]'
SELECTOR_PRIMARY_CONTACT = '[data-test-id=' \
'"new_program_field_primary_contact_86160053"]'
SELECTOR_SECONDARY_CONTACT = '[data-test-id=' \
'"new_program_field_secondary_contact_86160053"]'
SELECTOR_BUTTON_SAVE_AND_CLOSE = '[data-test-id=' \
'"new_program_button_save_86160053"]'
SELECTOR_PROGRAM_URL = '[data-test-id=' \
'"new_program_field_program_url_86160053"]'
SELECTOR_REFERENCE_URL = '[data-test-id=' \
'"new_program_field_reference_url_86160053"]'
SELECTOR_EFFECTIVE_DATE = '[data-test-id=' \
'"new_program_field_effective_date_f2783a28"]'
SELECTOR_STOP_DATE = '[data-test-id=' \
'"new_program_field_stop_date_f2783a28"]'
|
Add selectors for elements in new program pageSELECTOR_TITLE = '[data-test-id="new_program_field_title_a63ed79d"]'
SELECTOR_DESCRIPTION = '[data-test-id="new_program_field_description_77c4a06d"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_NOTES = '[data-test-id="new_program_field_notes_75b8bc05"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_CODE = '[data-test-id="new_program_field_code_334276e2"]'
SELECTOR_STATE = '[data-test-id="new_program_dropdown_state_036a1fa6"]'
SELECTOR_PRIVATE_CHECKBOX = '[data-test-id="new_page_checkbox_ed1fdde7"]'
SELECTOR_PRIMARY_CONTACT = '[data-test-id=' \
'"new_program_field_primary_contact_86160053"]'
SELECTOR_SECONDARY_CONTACT = '[data-test-id=' \
'"new_program_field_secondary_contact_86160053"]'
SELECTOR_BUTTON_SAVE_AND_CLOSE = '[data-test-id=' \
'"new_program_button_save_86160053"]'
SELECTOR_PROGRAM_URL = '[data-test-id=' \
'"new_program_field_program_url_86160053"]'
SELECTOR_REFERENCE_URL = '[data-test-id=' \
'"new_program_field_reference_url_86160053"]'
SELECTOR_EFFECTIVE_DATE = '[data-test-id=' \
'"new_program_field_effective_date_f2783a28"]'
SELECTOR_STOP_DATE = '[data-test-id=' \
'"new_program_field_stop_date_f2783a28"]'
|
<commit_before><commit_msg>Add selectors for elements in new program page<commit_after>SELECTOR_TITLE = '[data-test-id="new_program_field_title_a63ed79d"]'
SELECTOR_DESCRIPTION = '[data-test-id="new_program_field_description_77c4a06d"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_NOTES = '[data-test-id="new_program_field_notes_75b8bc05"] ' \
'iframe.wysihtml5-sandbox'
SELECTOR_CODE = '[data-test-id="new_program_field_code_334276e2"]'
SELECTOR_STATE = '[data-test-id="new_program_dropdown_state_036a1fa6"]'
SELECTOR_PRIVATE_CHECKBOX = '[data-test-id="new_page_checkbox_ed1fdde7"]'
SELECTOR_PRIMARY_CONTACT = '[data-test-id=' \
'"new_program_field_primary_contact_86160053"]'
SELECTOR_SECONDARY_CONTACT = '[data-test-id=' \
'"new_program_field_secondary_contact_86160053"]'
SELECTOR_BUTTON_SAVE_AND_CLOSE = '[data-test-id=' \
'"new_program_button_save_86160053"]'
SELECTOR_PROGRAM_URL = '[data-test-id=' \
'"new_program_field_program_url_86160053"]'
SELECTOR_REFERENCE_URL = '[data-test-id=' \
'"new_program_field_reference_url_86160053"]'
SELECTOR_EFFECTIVE_DATE = '[data-test-id=' \
'"new_program_field_effective_date_f2783a28"]'
SELECTOR_STOP_DATE = '[data-test-id=' \
'"new_program_field_stop_date_f2783a28"]'
|
|
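For orientation, and not part of the commit, this is roughly how such data-test-id constants are consumed in a selenium test; the page URL and the selenium 4 style locator call are assumptions made for the sketch.

from selenium import webdriver
from selenium.webdriver.common.by import By

SELECTOR_TITLE = '[data-test-id="new_program_field_title_a63ed79d"]'

driver = webdriver.Firefox()
driver.get("https://example.test/programs/new")   # placeholder URL
driver.find_element(By.CSS_SELECTOR, SELECTOR_TITLE).send_keys("My program")
driver.quit()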
faa68400ef3e3001bd87e4e9d0189b78908c3e9f
|
myselenium.py
|
myselenium.py
|
# -*- coding:utf-8 -*-
from selenium import webdriver
import requests
import sqlite3
browser = webdriver.Firefox()
browser.get('http://www.mouser.cn')
html_source = browser.page_source
print html_source
coon = sqlite3.connect('/root/.mozilla/firefox/gmfs2ivm.default/cookies.sqlite')
cursor = coon.cursor()
cursor.execute('select name, value from moz_cookies where baseDomain="mouser.cn"')
cookies = cursor.fetchall()
coon.close()
cookie=[item[0]+"="+item[1]for item in cookies]
cookiestr=';'.join(item for item in cookie)
print cookiestr
myheaders = {
'Host': 'www.mouser.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1',
'If-None-Match': "76b9f323a7b0ec42447e8435c1bc98bd",
'Cache-Control': 'max-age=0',
'Cookie':cookiestr
}
s = requests.session()
#r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
data = r.content
f = open('data.html', 'w')
f.write(data)
f.close()
browser.close()
|
Use selenium to open url and then extract local cookies into requests.get
|
Use selenium to open url and then extract local cookies into requests.get
|
Python
|
apache-2.0
|
huangchuchuan/Spider
|
Use selenium to open url and then extract local cookies into requests.get
|
# -*- coding:utf-8 -*-
from selenium import webdriver
import requests
import sqlite3
browser = webdriver.Firefox()
browser.get('http://www.mouser.cn')
html_source = browser.page_source
print html_source
coon = sqlite3.connect('/root/.mozilla/firefox/gmfs2ivm.default/cookies.sqlite')
cursor = coon.cursor()
cursor.execute('select name, value from moz_cookies where baseDomain="mouser.cn"')
cookies = cursor.fetchall()
coon.close()
cookie=[item[0]+"="+item[1]for item in cookies]
cookiestr=';'.join(item for item in cookie)
print cookiestr
myheaders = {
'Host': 'www.mouser.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1',
'If-None-Match': "76b9f323a7b0ec42447e8435c1bc98bd",
'Cache-Control': 'max-age=0',
'Cookie':cookiestr
}
s = requests.session()
#r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
data = r.content
f = open('data.html', 'w')
f.write(data)
f.close()
browser.close()
|
<commit_before><commit_msg>Use selenium to open url and then extract local cookies into requests.get<commit_after>
|
# -*- coding:utf-8 -*-
from selenium import webdriver
import requests
import sqlite3
browser = webdriver.Firefox()
browser.get('http://www.mouser.cn')
html_source = browser.page_source
print html_source
coon = sqlite3.connect('/root/.mozilla/firefox/gmfs2ivm.default/cookies.sqlite')
cursor = coon.cursor()
cursor.execute('select name, value from moz_cookies where baseDomain="mouser.cn"')
cookies = cursor.fetchall()
coon.close()
cookie=[item[0]+"="+item[1]for item in cookies]
cookiestr=';'.join(item for item in cookie)
print cookiestr
myheaders = {
'Host': 'www.mouser.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1',
'If-None-Match': "76b9f323a7b0ec42447e8435c1bc98bd",
'Cache-Control': 'max-age=0',
'Cookie':cookiestr
}
s = requests.session()
#r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
data = r.content
f = open('data.html', 'w')
f.write(data)
f.close()
browser.close()
|
Use selenium to open url and then extract local cookies into requests.get# -*- coding:utf-8 -*-
from selenium import webdriver
import requests
import sqlite3
browser = webdriver.Firefox()
browser.get('http://www.mouser.cn')
html_source = browser.page_source
print html_source
coon = sqlite3.connect('/root/.mozilla/firefox/gmfs2ivm.default/cookies.sqlite')
cursor = coon.cursor()
cursor.execute('select name, value from moz_cookies where baseDomain="mouser.cn"')
cookies = cursor.fetchall()
coon.close()
cookie=[item[0]+"="+item[1]for item in cookies]
cookiestr=';'.join(item for item in cookie)
print cookiestr
myheaders = {
'Host': 'www.mouser.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1',
'If-None-Match': "76b9f323a7b0ec42447e8435c1bc98bd",
'Cache-Control': 'max-age=0',
'Cookie':cookiestr
}
s = requests.session()
#r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
data = r.content
f = open('data.html', 'w')
f.write(data)
f.close()
browser.close()
|
<commit_before><commit_msg>Use selenium to open url and then extract local cookies into requests.get<commit_after># -*- coding:utf-8 -*-
from selenium import webdriver
import requests
import sqlite3
browser = webdriver.Firefox()
browser.get('http://www.mouser.cn')
html_source = browser.page_source
print html_source
coon = sqlite3.connect('/root/.mozilla/firefox/gmfs2ivm.default/cookies.sqlite')
cursor = coon.cursor()
cursor.execute('select name, value from moz_cookies where baseDomain="mouser.cn"')
cookies = cursor.fetchall()
coon.close()
cookie=[item[0]+"="+item[1]for item in cookies]
cookiestr=';'.join(item for item in cookie)
print cookiestr
myheaders = {
'Host': 'www.mouser.cn',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
'Accept-Encoding': 'gzip, deflate',
'Upgrade-Insecure-Requests': '1',
'If-None-Match': "76b9f323a7b0ec42447e8435c1bc98bd",
'Cache-Control': 'max-age=0',
'Cookie':cookiestr
}
s = requests.session()
#r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
r = s.get('http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/', headers=myheaders)
data = r.content
f = open('data.html', 'w')
f.write(data)
f.close()
browser.close()
|
|
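A hedged alternative to reading cookies.sqlite out of the Firefox profile, not part of the commit: selenium exposes its own cookie jar via get_cookies(), which avoids the hard-coded profile path and the manual header string.

from selenium import webdriver
import requests

browser = webdriver.Firefox()
browser.get("http://www.mouser.cn")

session = requests.Session()
for c in browser.get_cookies():            # list of dicts with 'name'/'value' keys
    session.cookies.set(c["name"], c["value"])

resp = session.get("http://www.mouser.cn/Semiconductors/RF-Semiconductors/_/N-96p9c/")
with open("data.html", "wb") as f:
    f.write(resp.content)
browser.quit()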
86e6577fc9737b76b287c676fa7b7ceb64c25838
|
karabo_data/tests/test_agipd_geometry.py
|
karabo_data/tests/test_agipd_geometry.py
|
import numpy as np
from karabo_data.geometry2 import AGIPD_1MGeometry
def test_snap_assemble_data():
geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
(-525, 625),
(-550, -10),
(520, -160),
(542.5, 475),
])
snap_geom = geom.snap()
stacked_data = np.zeros((16, 512, 128))
img, centre = snap_geom.position_all_modules(stacked_data)
assert img.shape == (1296, 1132)
assert tuple(centre) == (651, 570)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
Add rough test for AGIPD geometry
|
Add rough test for AGIPD geometry
|
Python
|
bsd-3-clause
|
European-XFEL/h5tools-py
|
Add rough test for AGIPD geometry
|
import numpy as np
from karabo_data.geometry2 import AGIPD_1MGeometry
def test_snap_assemble_data():
geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
(-525, 625),
(-550, -10),
(520, -160),
(542.5, 475),
])
snap_geom = geom.snap()
stacked_data = np.zeros((16, 512, 128))
img, centre = snap_geom.position_all_modules(stacked_data)
assert img.shape == (1296, 1132)
assert tuple(centre) == (651, 570)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
<commit_before><commit_msg>Add rough test for AGIPD geometry<commit_after>
|
import numpy as np
from karabo_data.geometry2 import AGIPD_1MGeometry
def test_snap_assemble_data():
geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
(-525, 625),
(-550, -10),
(520, -160),
(542.5, 475),
])
snap_geom = geom.snap()
stacked_data = np.zeros((16, 512, 128))
img, centre = snap_geom.position_all_modules(stacked_data)
assert img.shape == (1296, 1132)
assert tuple(centre) == (651, 570)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
Add rough test for AGIPD geometryimport numpy as np
from karabo_data.geometry2 import AGIPD_1MGeometry
def test_snap_assemble_data():
geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
(-525, 625),
(-550, -10),
(520, -160),
(542.5, 475),
])
snap_geom = geom.snap()
stacked_data = np.zeros((16, 512, 128))
img, centre = snap_geom.position_all_modules(stacked_data)
assert img.shape == (1296, 1132)
assert tuple(centre) == (651, 570)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
<commit_before><commit_msg>Add rough test for AGIPD geometry<commit_after>import numpy as np
from karabo_data.geometry2 import AGIPD_1MGeometry
def test_snap_assemble_data():
geom = AGIPD_1MGeometry.from_quad_positions(quad_pos=[
(-525, 625),
(-550, -10),
(520, -160),
(542.5, 475),
])
snap_geom = geom.snap()
stacked_data = np.zeros((16, 512, 128))
img, centre = snap_geom.position_all_modules(stacked_data)
assert img.shape == (1296, 1132)
assert tuple(centre) == (651, 570)
assert np.isnan(img[0, 0])
assert img[50, 50] == 0
|
|
306b18870044798aa4256bba8cd213a6ef2b9365
|
myflaskapp/migrations/versions/042f45c107c4_create_items_table.py
|
myflaskapp/migrations/versions/042f45c107c4_create_items_table.py
|
"""Create items table
Revision ID: 042f45c107c4
Revises: 8b1cf2bfda5e
Create Date: 2017-04-24 02:31:00.545797
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '042f45c107c4'
down_revision = '8b1cf2bfda5e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('items')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('text', sa.VARCHAR(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
|
Create Items table db migration
|
Create Items table db migration
|
Python
|
mit
|
terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python,terryjbates/test-driven-development-with-python
|
Create Items table db migration
|
"""Create items table
Revision ID: 042f45c107c4
Revises: 8b1cf2bfda5e
Create Date: 2017-04-24 02:31:00.545797
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '042f45c107c4'
down_revision = '8b1cf2bfda5e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('items')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('text', sa.VARCHAR(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
|
<commit_before><commit_msg>Create Items table db migration<commit_after>
|
"""Create items table
Revision ID: 042f45c107c4
Revises: 8b1cf2bfda5e
Create Date: 2017-04-24 02:31:00.545797
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '042f45c107c4'
down_revision = '8b1cf2bfda5e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('items')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('text', sa.VARCHAR(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
|
Create Items table db migration"""Create items table
Revision ID: 042f45c107c4
Revises: 8b1cf2bfda5e
Create Date: 2017-04-24 02:31:00.545797
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '042f45c107c4'
down_revision = '8b1cf2bfda5e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('items')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('text', sa.VARCHAR(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
|
<commit_before><commit_msg>Create Items table db migration<commit_after>"""Create items table
Revision ID: 042f45c107c4
Revises: 8b1cf2bfda5e
Create Date: 2017-04-24 02:31:00.545797
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '042f45c107c4'
down_revision = '8b1cf2bfda5e'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('items')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('items',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('text', sa.VARCHAR(length=80), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
|
|
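Despite the revision title, the autogenerated upgrade() above drops the items table and downgrade() recreates it, presumably because autogenerate diffed a model set that no longer declares the table. For comparison, a hedged sketch of a revision whose upgrade actually creates the table, with column types mirroring the downgrade above:

from alembic import op
import sqlalchemy as sa

def upgrade():
    op.create_table(
        "items",
        sa.Column("id", sa.Integer(), primary_key=True),
        sa.Column("text", sa.String(length=80), nullable=True),
    )

def downgrade():
    op.drop_table("items")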
bbb448e6909fd975ba4f1a19d40febad4464df8c
|
tests/test_file_handle.py
|
tests/test_file_handle.py
|
"""Check that given file handles are not closed."""
import unittest
from os.path import join, dirname
from msoffcrypto import OfficeFile
#: directory with input
DATA_DIR = join(dirname(__file__), 'inputs')
class FileHandleTest(unittest.TestCase):
"""See module doc."""
def test_file_handle_open(self):
"""Check that file handles are open after is_encrypted()."""
for suffix in 'doc', 'ppt', 'xls':
path = join(DATA_DIR, 'plain.' + suffix)
with open(path, 'rb') as file_handle:
ofile = OfficeFile(file_handle)
# do something with ofile
self.assertEqual(ofile.is_encrypted(), False)
# check that file handle is still open
self.assertFalse(file_handle.closed)
# destroy OfficeFile, calls destructor
del ofile
# check that file handle is still open
self.assertFalse(file_handle.closed)
# just for completeness:
# check that file handle is now closed
self.assertTrue(file_handle.closed)
# if someone calls this as script, run unittests
if __name__ == '__main__':
unittest.main()
|
Create unittest to avoid repeating excessive closing
|
Create unittest to avoid repeating excessive closing
"Thou shalst not close file handles that SHE, your caller, has given you!"
Ensure we comply with this dogma.
|
Python
|
mit
|
nolze/ms-offcrypto-tool,nolze/msoffcrypto-tool,nolze/msoffcrypto-tool,nolze/ms-offcrypto-tool
|
Create unittest to avoid repeating excessive closing
"Thou shalst not close file handles that SHE, your caller, has given you!"
Ensure we comply with this dogma.
|
"""Check that given file handles are not closed."""
import unittest
from os.path import join, dirname
from msoffcrypto import OfficeFile
#: directory with input
DATA_DIR = join(dirname(__file__), 'inputs')
class FileHandleTest(unittest.TestCase):
"""See module doc."""
def test_file_handle_open(self):
"""Check that file handles are open after is_encrypted()."""
for suffix in 'doc', 'ppt', 'xls':
path = join(DATA_DIR, 'plain.' + suffix)
with open(path, 'rb') as file_handle:
ofile = OfficeFile(file_handle)
# do something with ofile
self.assertEqual(ofile.is_encrypted(), False)
# check that file handle is still open
self.assertFalse(file_handle.closed)
# destroy OfficeFile, calls destructor
del ofile
# check that file handle is still open
self.assertFalse(file_handle.closed)
# just for completeness:
# check that file handle is now closed
self.assertTrue(file_handle.closed)
# if someone calls this as script, run unittests
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create unittest to avoid repeating excessive closing
"Thou shalst not close file handles that SHE, your caller, has given you!"
Ensure we comply with this dogma.<commit_after>
|
"""Check that given file handles are not closed."""
import unittest
from os.path import join, dirname
from msoffcrypto import OfficeFile
#: directory with input
DATA_DIR = join(dirname(__file__), 'inputs')
class FileHandleTest(unittest.TestCase):
"""See module doc."""
def test_file_handle_open(self):
"""Check that file handles are open after is_encrypted()."""
for suffix in 'doc', 'ppt', 'xls':
path = join(DATA_DIR, 'plain.' + suffix)
with open(path, 'rb') as file_handle:
ofile = OfficeFile(file_handle)
# do something with ofile
self.assertEqual(ofile.is_encrypted(), False)
# check that file handle is still open
self.assertFalse(file_handle.closed)
# destroy OfficeFile, calls destructor
del ofile
# check that file handle is still open
self.assertFalse(file_handle.closed)
# just for completeness:
# check that file handle is now closed
self.assertTrue(file_handle.closed)
# if someone calls this as script, run unittests
if __name__ == '__main__':
unittest.main()
|
Create unittest to avoid repeating excessive closing
"Thou shalst not close file handles that SHE, your caller, has given you!"
Ensure we comply with this dogma."""Check that given file handles are not closed."""
import unittest
from os.path import join, dirname
from msoffcrypto import OfficeFile
#: directory with input
DATA_DIR = join(dirname(__file__), 'inputs')
class FileHandleTest(unittest.TestCase):
"""See module doc."""
def test_file_handle_open(self):
"""Check that file handles are open after is_encrypted()."""
for suffix in 'doc', 'ppt', 'xls':
path = join(DATA_DIR, 'plain.' + suffix)
with open(path, 'rb') as file_handle:
ofile = OfficeFile(file_handle)
# do something with ofile
self.assertEqual(ofile.is_encrypted(), False)
# check that file handle is still open
self.assertFalse(file_handle.closed)
# destroy OfficeFile, calls destructor
del ofile
# check that file handle is still open
self.assertFalse(file_handle.closed)
# just for completeness:
# check that file handle is now closed
self.assertTrue(file_handle.closed)
# if someone calls this as script, run unittests
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Create unittest to avoid repeating excessive closing
"Thou shalst not close file handles that SHE, your caller, has given you!"
Ensure we comply with this dogma.<commit_after>"""Check that given file handles are not closed."""
import unittest
from os.path import join, dirname
from msoffcrypto import OfficeFile
#: directory with input
DATA_DIR = join(dirname(__file__), 'inputs')
class FileHandleTest(unittest.TestCase):
"""See module doc."""
def test_file_handle_open(self):
"""Check that file handles are open after is_encrypted()."""
for suffix in 'doc', 'ppt', 'xls':
path = join(DATA_DIR, 'plain.' + suffix)
with open(path, 'rb') as file_handle:
ofile = OfficeFile(file_handle)
# do something with ofile
self.assertEqual(ofile.is_encrypted(), False)
# check that file handle is still open
self.assertFalse(file_handle.closed)
# destroy OfficeFile, calls destructor
del ofile
# check that file handle is still open
self.assertFalse(file_handle.closed)
# just for completeness:
# check that file handle is now closed
self.assertTrue(file_handle.closed)
# if someone calls this as script, run unittests
if __name__ == '__main__':
unittest.main()
|
|
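The test in this record pins down a simple ownership rule: OfficeFile must not close a handle its caller passed in. As a generic, hedged illustration of that contract, not taken from the commit, a reader class that only closes handles it opened itself:

class Reader(object):
    """Closes a file handle only if it created that handle."""

    def __init__(self, source):
        if hasattr(source, "read"):
            self._fh = source
            self._owns_fh = False          # caller keeps ownership
        else:
            self._fh = open(source, "rb")
            self._owns_fh = True           # we opened it, so we close it

    def close(self):
        if self._owns_fh and not self._fh.closed:
            self._fh.close()

    def __del__(self):
        self.close()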
21b8788e2df4fa143dcb7594510c3bff60367224
|
designate/storage/impl_sqlalchemy/migrate_repo/versions/018_add_back_unique_name_deleted_sqlite.py
|
designate/storage/impl_sqlalchemy/migrate_repo/versions/018_add_back_unique_name_deleted_sqlite.py
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from sqlalchemy import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
LOG = logging.getLogger(__name__)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
# Add missing multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.create()
# Add a missing unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
# Add a new multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.drop()
# Add a missing unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.drop()
|
Replace missing indexes for SQLite
|
Replace missing indexes for SQLite
Migration 018 to replace missing indexes on domains and servers
Bug #1200027
Change-Id: I2240af58c7730d019916924f8f314134899c7cf6
|
Python
|
apache-2.0
|
ionrock/designate,grahamhayes/designate,openstack/designate,richm/designate,kiall/designate-py3,ramsateesh/designate,tonyli71/designate,cneill/designate,kiall/designate-py3,grahamhayes/designate,ramsateesh/designate,tonyli71/designate,muraliselva10/designate,kiall/designate-py3,kiall/designate-py3,muraliselva10/designate,cneill/designate,cneill/designate-testing,ionrock/designate,openstack/designate,cneill/designate,NeCTAR-RC/designate,cneill/designate-testing,richm/designate,kiall/designate-py3,melodous/designate,tonyli71/designate,openstack/designate,cneill/designate-testing,ionrock/designate,melodous/designate,cneill/designate,cneill/designate,ramsateesh/designate,NeCTAR-RC/designate,muraliselva10/designate,grahamhayes/designate,melodous/designate,melodous/designate
|
Replace missing indexes for SQLite
Migration 018 to replace missing indexes on domains and servers
Bug #1200027
Change-Id: I2240af58c7730d019916924f8f314134899c7cf6
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from sqlalchemy import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
LOG = logging.getLogger(__name__)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
# Add missing multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.create()
# Add a missing unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
        # Drop the multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.drop()
        # Drop the single-column unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.drop()
|
<commit_before><commit_msg>Replace missing indexes for SQLite
Migration 018 to replace missing indexes on domains and servers
Bug #1200027
Change-Id: I2240af58c7730d019916924f8f314134899c7cf6<commit_after>
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from sqlalchemy import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
LOG = logging.getLogger(__name__)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
# Add missing multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.create()
# Add a missing unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
        # Drop the multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.drop()
        # Drop the single-column unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.drop()
|
Replace missing indexes for SQLite
Migration 018 to replace missing indexes on domains and servers
Bug #1200027
Change-Id: I2240af58c7730d019916924f8f314134899c7cf6# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from sqlalchemy import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
LOG = logging.getLogger(__name__)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
# Add missing multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.create()
# Add a missing unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
        # Drop the multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.drop()
        # Drop the single-column unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.drop()
|
<commit_before><commit_msg>Replace missing indexes for SQLite
Migration 018 to replace missing indexes on domains and servers
Bug #1200027
Change-Id: I2240af58c7730d019916924f8f314134899c7cf6<commit_after># Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Author: Patrick Galbraith <patg@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from sqlalchemy import MetaData, Table
from migrate.changeset.constraint import UniqueConstraint
LOG = logging.getLogger(__name__)
meta = MetaData()
def upgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
# Add missing multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.create()
# Add a missing unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.create()
def downgrade(migrate_engine):
meta.bind = migrate_engine
dialect = migrate_engine.url.get_dialect().name
if dialect.startswith('sqlite'):
domains_table = Table('domains', meta, autoload=True)
servers_table = Table('servers', meta, autoload=True)
        # Drop the multi-column unique index
constraint = UniqueConstraint('name', 'deleted',
name='unique_domain_name',
table=domains_table)
constraint.drop()
        # Drop the single-column unique index
constraint = UniqueConstraint('name',
name='unique_server_name',
table=servers_table)
constraint.drop()
|
|
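The migration above creates named unique constraints with sqlalchemy-migrate because the SQLite dialect had not created them from the model definitions. A quick way to confirm the indexes actually exist after running the migration is to ask SQLite directly; the following is a minimal sketch, and the database file name is hypothetical rather than part of the commit:

import sqlite3

conn = sqlite3.connect('designate.sqlite')   # hypothetical path to the migrated database
cur = conn.cursor()
for table in ('domains', 'servers'):
    # PRAGMA index_list returns one row per index on the table; the UNIQUE
    # constraints created by the migration show up here under their names.
    rows = cur.execute("PRAGMA index_list('%s')" % table).fetchall()
    print(table, rows)
conn.close()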
93bb70c34f7c6eb4a72f8a6f53982a4228fe710f
|
update.py
|
update.py
|
#! /usr/bin/python
import os, subprocess
os.chdir(os.path.dirname(os.path.abspath(__file__)))
subprocess.call([ 'python',
os.path.join('..', 'venus', 'planet.py'),
'planet.ini' ])
|
Add a script for cron
|
Add a script for cron
|
Python
|
mit
|
kzys/planet-tempura
|
Add a script for cron
|
#! /usr/bin/python
import os, subprocess
os.chdir(os.path.dirname(os.path.abspath(__file__)))
subprocess.call([ 'python',
os.path.join('..', 'venus', 'planet.py'),
'planet.ini' ])
|
<commit_before><commit_msg>Add a script for cron<commit_after>
|
#! /usr/bin/python
import os, subprocess
os.chdir(os.path.dirname(os.path.abspath(__file__)))
subprocess.call([ 'python',
os.path.join('..', 'venus', 'planet.py'),
'planet.ini' ])
|
Add a script for cron#! /usr/bin/python
import os, subprocess
os.chdir(os.path.dirname(os.path.abspath(__file__)))
subprocess.call([ 'python',
os.path.join('..', 'venus', 'planet.py'),
'planet.ini' ])
|
<commit_before><commit_msg>Add a script for cron<commit_after>#! /usr/bin/python
import os, subprocess
os.chdir(os.path.dirname(os.path.abspath(__file__)))
subprocess.call([ 'python',
os.path.join('..', 'venus', 'planet.py'),
'planet.ini' ])
|
|
1bb0855bc70598bafb341b48c93e5e26ec8b5818
|
xc7/fasm2bels/tests/test_verilog_modeling.py
|
xc7/fasm2bels/tests/test_verilog_modeling.py
|
import unittest
from fasm2bels.verilog_modeling import Wire, Constant, Bus, NoConnect
class TestVerilogModeling(unittest.TestCase):
def test_connections(self):
self.assertEqual("a", Wire("a").to_string())
self.assertEqual("1'b0", Constant(0).to_string())
self.assertEqual("1'b1", Constant(1).to_string())
self.assertEqual(
"{1'b0, 1'b1}",
Bus([Constant(1), Constant(0)]).to_string()
)
self.assertEqual(
"{a, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string()
)
self.assertEqual("", NoConnect().to_string())
def test_rename(self):
self.assertEqual("b", Wire("a").to_string({'a': 'b'}))
self.assertEqual(
"{b, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string({'a': 'b'})
)
def test_iter_connections(self):
self.assertEqual(list(Wire('a').iter_wires()), [(None, "a")])
self.assertEqual(
list(Bus([Constant(1), Wire('a')]).iter_wires()), [(1, "a")]
)
self.assertEqual(
list(Bus([Wire('b'), Wire('a')]).iter_wires()),
[(0, "b"), (1, "a")]
)
self.assertEqual(list(Constant(0).iter_wires()), [])
self.assertEqual(list(NoConnect().iter_wires()), [])
|
Add test for new connection modelling objects.
|
Add test for new connection modelling objects.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>
|
Python
|
isc
|
SymbiFlow/symbiflow-xc-fasm2bels
|
Add test for new connection modelling objects.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>
|
import unittest
from fasm2bels.verilog_modeling import Wire, Constant, Bus, NoConnect
class TestVerilogModeling(unittest.TestCase):
def test_connections(self):
self.assertEqual("a", Wire("a").to_string())
self.assertEqual("1'b0", Constant(0).to_string())
self.assertEqual("1'b1", Constant(1).to_string())
self.assertEqual(
"{1'b0, 1'b1}",
Bus([Constant(1), Constant(0)]).to_string()
)
self.assertEqual(
"{a, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string()
)
self.assertEqual("", NoConnect().to_string())
def test_rename(self):
self.assertEqual("b", Wire("a").to_string({'a': 'b'}))
self.assertEqual(
"{b, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string({'a': 'b'})
)
def test_iter_connections(self):
self.assertEqual(list(Wire('a').iter_wires()), [(None, "a")])
self.assertEqual(
list(Bus([Constant(1), Wire('a')]).iter_wires()), [(1, "a")]
)
self.assertEqual(
list(Bus([Wire('b'), Wire('a')]).iter_wires()),
[(0, "b"), (1, "a")]
)
self.assertEqual(list(Constant(0).iter_wires()), [])
self.assertEqual(list(NoConnect().iter_wires()), [])
|
<commit_before><commit_msg>Add test for new connection modelling objects.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com><commit_after>
|
import unittest
from fasm2bels.verilog_modeling import Wire, Constant, Bus, NoConnect
class TestVerilogModeling(unittest.TestCase):
def test_connections(self):
self.assertEqual("a", Wire("a").to_string())
self.assertEqual("1'b0", Constant(0).to_string())
self.assertEqual("1'b1", Constant(1).to_string())
self.assertEqual(
"{1'b0, 1'b1}",
Bus([Constant(1), Constant(0)]).to_string()
)
self.assertEqual(
"{a, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string()
)
self.assertEqual("", NoConnect().to_string())
def test_rename(self):
self.assertEqual("b", Wire("a").to_string({'a': 'b'}))
self.assertEqual(
"{b, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string({'a': 'b'})
)
def test_iter_connections(self):
self.assertEqual(list(Wire('a').iter_wires()), [(None, "a")])
self.assertEqual(
list(Bus([Constant(1), Wire('a')]).iter_wires()), [(1, "a")]
)
self.assertEqual(
list(Bus([Wire('b'), Wire('a')]).iter_wires()),
[(0, "b"), (1, "a")]
)
self.assertEqual(list(Constant(0).iter_wires()), [])
self.assertEqual(list(NoConnect().iter_wires()), [])
|
Add test for new connection modelling objects.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com>import unittest
from fasm2bels.verilog_modeling import Wire, Constant, Bus, NoConnect
class TestVerilogModeling(unittest.TestCase):
def test_connections(self):
self.assertEqual("a", Wire("a").to_string())
self.assertEqual("1'b0", Constant(0).to_string())
self.assertEqual("1'b1", Constant(1).to_string())
self.assertEqual(
"{1'b0, 1'b1}",
Bus([Constant(1), Constant(0)]).to_string()
)
self.assertEqual(
"{a, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string()
)
self.assertEqual("", NoConnect().to_string())
def test_rename(self):
self.assertEqual("b", Wire("a").to_string({'a': 'b'}))
self.assertEqual(
"{b, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string({'a': 'b'})
)
def test_iter_connections(self):
self.assertEqual(list(Wire('a').iter_wires()), [(None, "a")])
self.assertEqual(
list(Bus([Constant(1), Wire('a')]).iter_wires()), [(1, "a")]
)
self.assertEqual(
list(Bus([Wire('b'), Wire('a')]).iter_wires()),
[(0, "b"), (1, "a")]
)
self.assertEqual(list(Constant(0).iter_wires()), [])
self.assertEqual(list(NoConnect().iter_wires()), [])
|
<commit_before><commit_msg>Add test for new connection modelling objects.
Signed-off-by: Keith Rothman <1bc19627a439baf17510dc2d0b2d250c96d445a5@users.noreply.github.com><commit_after>import unittest
from fasm2bels.verilog_modeling import Wire, Constant, Bus, NoConnect
class TestVerilogModeling(unittest.TestCase):
def test_connections(self):
self.assertEqual("a", Wire("a").to_string())
self.assertEqual("1'b0", Constant(0).to_string())
self.assertEqual("1'b1", Constant(1).to_string())
self.assertEqual(
"{1'b0, 1'b1}",
Bus([Constant(1), Constant(0)]).to_string()
)
self.assertEqual(
"{a, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string()
)
self.assertEqual("", NoConnect().to_string())
def test_rename(self):
self.assertEqual("b", Wire("a").to_string({'a': 'b'}))
self.assertEqual(
"{b, 1'b1}",
Bus([Constant(1), Wire('a')]).to_string({'a': 'b'})
)
def test_iter_connections(self):
self.assertEqual(list(Wire('a').iter_wires()), [(None, "a")])
self.assertEqual(
list(Bus([Constant(1), Wire('a')]).iter_wires()), [(1, "a")]
)
self.assertEqual(
list(Bus([Wire('b'), Wire('a')]).iter_wires()),
[(0, "b"), (1, "a")]
)
self.assertEqual(list(Constant(0).iter_wires()), [])
self.assertEqual(list(NoConnect().iter_wires()), [])
|
|
42a1aaba8daa253b99f444a512f8231db47dfbb2
|
helpers.py
|
helpers.py
|
import array
import numpy as np
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
|
import array
import numpy as np
import pandas as pd
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
def convert_to_labeled_df(df):
"""
Converts the test/validation data from the Ubuntu Dialog corpus into a train-like Data Frame with labels.
    This Data Frame can be used to easily get accuracy values for cross-validation
"""
result = []
for idx, row in df.iterrows():
context = row.Context
result.append([context, row.iloc[1], 1])
for distractor in row.iloc[2:]:
result.append([context, distractor, 0])
return pd.DataFrame(result, columns=["Context", "Utterance", "Label"])
|
Add dataset conversion helper function
|
Add dataset conversion helper function
|
Python
|
mit
|
AotY/chatbot-retrieval,LepiorzDaniel/test2
|
import array
import numpy as np
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
Add dataset conversion helper function
|
import array
import numpy as np
import pandas as pd
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
def convert_to_labeled_df(df):
"""
Converts the test/validation data from the Ubuntu Dialog corpus into a train-like Data Frame with labels.
    This Data Frame can be used to easily get accuracy values for cross-validation
"""
result = []
for idx, row in df.iterrows():
context = row.Context
result.append([context, row.iloc[1], 1])
for distractor in row.iloc[2:]:
result.append([context, distractor, 0])
return pd.DataFrame(result, columns=["Context", "Utterance", "Label"])
|
<commit_before>import array
import numpy as np
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
<commit_msg>Add dataset conversion helper function<commit_after>
|
import array
import numpy as np
import pandas as pd
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
def convert_to_labeled_df(df):
"""
Converts the test/validation data from the Ubuntu Dialog corpus into a train-like Data Frame with labels.
    This Data Frame can be used to easily get accuracy values for cross-validation
"""
result = []
for idx, row in df.iterrows():
context = row.Context
result.append([context, row.iloc[1], 1])
for distractor in row.iloc[2:]:
result.append([context, distractor, 0])
return pd.DataFrame(result, columns=["Context", "Utterance", "Label"])
|
import array
import numpy as np
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
Add dataset conversion helper functionimport array
import numpy as np
import pandas as pd
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
def convert_to_labeled_df(df):
"""
Converts the test/validation data from the Ubuntu Dialog corpus into a train-like Data Frame with labels.
    This Data Frame can be used to easily get accuracy values for cross-validation
"""
result = []
for idx, row in df.iterrows():
context = row.Context
result.append([context, row.iloc[1], 1])
for distractor in row.iloc[2:]:
result.append([context, distractor, 0])
return pd.DataFrame(result, columns=["Context", "Utterance", "Label"])
|
<commit_before>import array
import numpy as np
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
<commit_msg>Add dataset conversion helper function<commit_after>import array
import numpy as np
import pandas as pd
def load_glove_vectors(filename, vocab=None):
"""
Load glove vectors from a .txt file.
Optionally limit the vocabulary to save memory. `vocab` should be a set.
"""
dct = {}
vectors = array.array('d')
current_idx = 0
with open(filename, "r", encoding="utf-8") as f:
for _, line in enumerate(f):
tokens = line.split(" ")
word = tokens[0]
entries = tokens[1:]
if not vocab or word in vocab:
dct[word] = current_idx
vectors.extend(float(x) for x in entries)
current_idx += 1
word_dim = len(entries)
num_vectors = len(dct)
return [np.array(vectors).reshape(num_vectors, word_dim), dct]
def evaluate_recall(y, y_labels, n=1):
num_examples = float(len(y))
num_correct = 0
for predictions, label in zip(y, y_labels):
if label in predictions[:n]:
num_correct += 1
return num_correct/num_examples
def convert_to_labeled_df(df):
"""
Converts the test/validation data from the Ubuntu Dialog corpus into a train-like Data Frame with labels.
    This Data Frame can be used to easily get accuracy values for cross-validation
"""
result = []
for idx, row in df.iterrows():
context = row.Context
result.append([context, row.iloc[1], 1])
for distractor in row.iloc[2:]:
result.append([context, distractor, 0])
return pd.DataFrame(result, columns=["Context", "Utterance", "Label"])
|
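As a quick illustration of what convert_to_labeled_df produces, the sketch below builds a one-row frame in the Ubuntu Dialog corpus test/validation layout (one Context column, the ground-truth utterance, then distractor columns). The column names and the import path are assumptions made for the example, not taken from the commit:

import pandas as pd
from helpers import convert_to_labeled_df   # assumes helpers.py above is importable

df = pd.DataFrame(
    [["how do i mount a usb drive ?", "use the disks utility",
      "try rebooting", "no idea , sorry"]],
    columns=["Context", "Ground Truth Utterance", "Distractor_0", "Distractor_1"],
)
labeled = convert_to_labeled_df(df)
print(labeled)
# One row per candidate utterance: Label is 1 for the ground truth (the second
# column of the input) and 0 for every distractor.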
4b9798b028afbfcfc4ae7417f1adc488a9567977
|
benchmarks/expand2b_sympy.py
|
benchmarks/expand2b_sympy.py
|
from timeit import default_timer as clock
from sympy import ring, ZZ
R, x, y, z, w = ring("x y z w", ZZ)
e = (x+y+z+w)**15
t1 = clock()
f = e*(e+w)
t2 = clock()
#print f
print "Total time:", t2-t1, "s"
print "number of terms:", len(f)
|
Add SymPy benchmark for the expand2b problem
|
Add SymPy benchmark for the expand2b problem
|
Python
|
mit
|
bjodah/symengine.py,bjodah/symengine.py,symengine/symengine.py,bjodah/symengine.py,symengine/symengine.py,symengine/symengine.py
|
Add SymPy benchmark for the expand2b problem
|
from timeit import default_timer as clock
from sympy import ring, ZZ
R, x, y, z, w = ring("x y z w", ZZ)
e = (x+y+z+w)**15
t1 = clock()
f = e*(e+w)
t2 = clock()
#print f
print "Total time:", t2-t1, "s"
print "number of terms:", len(f)
|
<commit_before><commit_msg>Add SymPy benchmark for the expand2b problem<commit_after>
|
from timeit import default_timer as clock
from sympy import ring, ZZ
R, x, y, z, w = ring("x y z w", ZZ)
e = (x+y+z+w)**15
t1 = clock()
f = e*(e+w)
t2 = clock()
#print f
print "Total time:", t2-t1, "s"
print "number of terms:", len(f)
|
Add SymPy benchmark for the expand2b problemfrom timeit import default_timer as clock
from sympy import ring, ZZ
R, x, y, z, w = ring("x y z w", ZZ)
e = (x+y+z+w)**15
t1 = clock()
f = e*(e+w)
t2 = clock()
#print f
print "Total time:", t2-t1, "s"
print "number of terms:", len(f)
|
<commit_before><commit_msg>Add SymPy benchmark for the expand2b problem<commit_after>from timeit import default_timer as clock
from sympy import ring, ZZ
R, x, y, z, w = ring("x y z w", ZZ)
e = (x+y+z+w)**15
t1 = clock()
f = e*(e+w)
t2 = clock()
#print f
print "Total time:", t2-t1, "s"
print "number of terms:", len(f)
|
|
e4e345e348093639a488855058b1b5ebe44e3507
|
lintcode/Hard/180_Binary_Representation.py
|
lintcode/Hard/180_Binary_Representation.py
|
class Solution:
#@param n: Given a decimal number that is passed in as a string
#@return: A string
def binaryRepresentation(self, n):
# write you code here
int_part = int(float(n))
dec_part = float('0' + n[n.find('.'):]) if n.find('.') != -1 else 0
dec_str = ''
valid = True
while (len(dec_str) < 32 and dec_part != 0):
dec_part = dec_part * 2
if (dec_part >= 1):
dec_str += '1'
dec_part -= 1
else:
dec_str += '0'
if (len(dec_str) >= 32 and dec_part != 0):
valid = False
if (valid):
if (dec_str):
return bin(int_part).replace('0b', '') + '.' + dec_str
return bin(int_part).replace('0b', '')
else:
return 'ERROR'
|
Add solution to lintcode question 180
|
Add solution to lintcode question 180
|
Python
|
mit
|
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
|
Add solution to lintcode question 180
|
class Solution:
#@param n: Given a decimal number that is passed in as a string
#@return: A string
def binaryRepresentation(self, n):
# write you code here
int_part = int(float(n))
dec_part = float('0' + n[n.find('.'):]) if n.find('.') != -1 else 0
dec_str = ''
valid = True
while (len(dec_str) < 32 and dec_part != 0):
dec_part = dec_part * 2
if (dec_part >= 1):
dec_str += '1'
dec_part -= 1
else:
dec_str += '0'
if (len(dec_str) >= 32 and dec_part != 0):
valid = False
if (valid):
if (dec_str):
return bin(int_part).replace('0b', '') + '.' + dec_str
return bin(int_part).replace('0b', '')
else:
return 'ERROR'
|
<commit_before><commit_msg>Add solution to lintcode question 180<commit_after>
|
class Solution:
#@param n: Given a decimal number that is passed in as a string
#@return: A string
def binaryRepresentation(self, n):
# write you code here
int_part = int(float(n))
dec_part = float('0' + n[n.find('.'):]) if n.find('.') != -1 else 0
dec_str = ''
valid = True
while (len(dec_str) < 32 and dec_part != 0):
dec_part = dec_part * 2
if (dec_part >= 1):
dec_str += '1'
dec_part -= 1
else:
dec_str += '0'
if (len(dec_str) >= 32 and dec_part != 0):
valid = False
if (valid):
if (dec_str):
return bin(int_part).replace('0b', '') + '.' + dec_str
return bin(int_part).replace('0b', '')
else:
return 'ERROR'
|
Add solution to lintcode question 180class Solution:
#@param n: Given a decimal number that is passed in as a string
#@return: A string
def binaryRepresentation(self, n):
# write you code here
int_part = int(float(n))
dec_part = float('0' + n[n.find('.'):]) if n.find('.') != -1 else 0
dec_str = ''
valid = True
while (len(dec_str) < 32 and dec_part != 0):
dec_part = dec_part * 2
if (dec_part >= 1):
dec_str += '1'
dec_part -= 1
else:
dec_str += '0'
if (len(dec_str) >= 32 and dec_part != 0):
valid = False
if (valid):
if (dec_str):
return bin(int_part).replace('0b', '') + '.' + dec_str
return bin(int_part).replace('0b', '')
else:
return 'ERROR'
|
<commit_before><commit_msg>Add solution to lintcode question 180<commit_after>class Solution:
#@param n: Given a decimal number that is passed in as a string
#@return: A string
def binaryRepresentation(self, n):
# write you code here
int_part = int(float(n))
dec_part = float('0' + n[n.find('.'):]) if n.find('.') != -1 else 0
dec_str = ''
valid = True
while (len(dec_str) < 32 and dec_part != 0):
dec_part = dec_part * 2
if (dec_part >= 1):
dec_str += '1'
dec_part -= 1
else:
dec_str += '0'
if (len(dec_str) >= 32 and dec_part != 0):
valid = False
if (valid):
if (dec_str):
return bin(int_part).replace('0b', '') + '.' + dec_str
return bin(int_part).replace('0b', '')
else:
return 'ERROR'
|
|
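The fractional part in the solution above is converted by repeated doubling: each doubling emits the bit to the left of the binary point, and the loop reports ERROR after 32 bits if the fraction never terminates. A small standalone sketch of the same idea (not the class's own code) for 3.625:

frac, bits = 0.625, ''
while frac:
    frac *= 2
    bits += '1' if frac >= 1 else '0'
    frac -= int(frac)
print(bits)    # '101' -> 3.625 is '11.101' in binary, since 3 is '11'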
a006c5f13e25d36f72e0878b4245e0edb126da68
|
ckanext/requestdata/controllers/search.py
|
ckanext/requestdata/controllers/search.py
|
try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
is_hdx = config.get('hdx_portal')
if is_hdx:
from ckanext.hdx_search.controllers.search_controller import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
if is_hdx:
return self.search()
else:
pass
|
try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
from paste.deploy.converters import asbool
is_hdx = asbool(config.get('hdx_portal', False))
if is_hdx:
from ckanext.hdx_search.controllers.search_controller\
import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
return self.search()
|
Convert hdx_portal to a boolean value
|
Convert hdx_portal to a boolean value
|
Python
|
agpl-3.0
|
ViderumGlobal/ckanext-requestdata,ViderumGlobal/ckanext-requestdata,ViderumGlobal/ckanext-requestdata,ViderumGlobal/ckanext-requestdata
|
try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
is_hdx = config.get('hdx_portal')
if is_hdx:
from ckanext.hdx_search.controllers.search_controller import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
if is_hdx:
return self.search()
else:
pass
Convert hdx_portal to a boolean value
|
try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
from paste.deploy.converters import asbool
is_hdx = asbool(config.get('hdx_portal', False))
if is_hdx:
from ckanext.hdx_search.controllers.search_controller\
import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
return self.search()
|
<commit_before>try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
is_hdx = config.get('hdx_portal')
if is_hdx:
from ckanext.hdx_search.controllers.search_controller import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
if is_hdx:
return self.search()
else:
pass
<commit_msg>Convert hdx_portal to a boolean value<commit_after>
|
try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
from paste.deploy.converters import asbool
is_hdx = asbool(config.get('hdx_portal', False))
if is_hdx:
from ckanext.hdx_search.controllers.search_controller\
import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
return self.search()
|
try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
is_hdx = config.get('hdx_portal')
if is_hdx:
from ckanext.hdx_search.controllers.search_controller import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
if is_hdx:
return self.search()
else:
pass
Convert hdx_portal to a boolean valuetry:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
from paste.deploy.converters import asbool
is_hdx = asbool(config.get('hdx_portal', False))
if is_hdx:
from ckanext.hdx_search.controllers.search_controller\
import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
return self.search()
|
<commit_before>try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
is_hdx = config.get('hdx_portal')
if is_hdx:
from ckanext.hdx_search.controllers.search_controller import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
if is_hdx:
return self.search()
else:
pass
<commit_msg>Convert hdx_portal to a boolean value<commit_after>try:
# CKAN 2.7 and later
from ckan.common import config
except ImportError:
# CKAN 2.6 and earlier
from pylons import config
from paste.deploy.converters import asbool
is_hdx = asbool(config.get('hdx_portal', False))
if is_hdx:
from ckanext.hdx_search.controllers.search_controller\
import HDXSearchController as PackageController
else:
from ckan.controllers.package import PackageController
class SearchController(PackageController):
def search_datasets(self):
return self.search()
|
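The point of switching to asbool is that config values arrive as strings, and any non-empty string — including 'false' — is truthy, so the old is_hdx check evaluated true whenever the option was set at all. A minimal illustration, using the same paste import the commit already depends on:

from paste.deploy.converters import asbool

print(bool('false'))                  # True  -- non-empty string is truthy
print(asbool('false'), asbool('0'))   # False False
print(asbool('True'), asbool(None))   # True False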
1d9e9e3f6b5796db7f72f108e3a19327f0313cd9
|
tead/event.py
|
tead/event.py
|
import queue
class Event:
def __init__(self, eventType='', userParam=dict()):
self.type = eventType
self.userParam = userParam
class EventSystem:
def __init__(self):
self._eventQueue = queue.Queue()
self._eventHandlers = dict()
def registerEventHander(self, eventType, callback):
''' Register a handler to be called on the given event type.
eventType specifies the type of event the handler should process.
callback specifies the function that should be called on the event.
Its function header should look like "def myCallback(event):"
Returns the ID of the handler.
'''
if not eventType in self._eventHandlers:
self._eventHandlers[eventType] = []
handlerID = len(self._eventHandlers[eventType])
self._eventHandlers[eventType].append(callback)
return handlerID
def unregisterEventHandler(self, eventType, handlerID):
''' Unregister a handler, so it won't be called on the specified event.
eventType specifies the type of event the handler should process.
handlerID specifies the ID of the handler, which should be unregistered.
The ID was returned by the corresponding register-function.
Returns True on success, else False.
'''
if not eventType in self._eventHandlers:
return False
if handlerID >= len(self._eventHandlers[eventType]):
return False
self._eventHandlers[eventType].pop(handlerID)
return True
def createEvent(self, event):
self._eventQueue.put_nowait(event)
def processEvents(self):
while not self._eventQueue.empty():
event = self._eventQueue.get_nowait()
# check if eventhandler wants to process event
if not event.type in self._eventHandlers:
continue
for cb in self._eventHandlers[event.type]:
cb(event)
|
Implement first prototype of EventSystem
|
Implement first prototype of EventSystem
|
Python
|
mit
|
JeFaProductions/TextAdventure2
|
Implement first prototype of EventSystem
|
import queue
class Event:
def __init__(self, eventType='', userParam=dict()):
self.type = eventType
self.userParam = userParam
class EventSystem:
def __init__(self):
self._eventQueue = queue.Queue()
self._eventHandlers = dict()
def registerEventHander(self, eventType, callback):
''' Register a handler to be called on the given event type.
eventType specifies the type of event the handler should process.
callback specifies the function that should be called on the event.
Its function header should look like "def myCallback(event):"
Returns the ID of the handler.
'''
if not eventType in self._eventHandlers:
self._eventHandlers[eventType] = []
handlerID = len(self._eventHandlers[eventType])
self._eventHandlers[eventType].append(callback)
return handlerID
def unregisterEventHandler(self, eventType, handlerID):
''' Unregister a handler, so it won't be called on the specified event.
eventType specifies the type of event the handler should process.
handlerID specifies the ID of the handler, which should be unregistered.
The ID was returned by the corresponding register-function.
Returns True on success, else False.
'''
if not eventType in self._eventHandlers:
return False
if handlerID >= len(self._eventHandlers[eventType]):
return False
self._eventHandlers[eventType].pop(handlerID)
return True
def createEvent(self, event):
self._eventQueue.put_nowait(event)
def processEvents(self):
while not self._eventQueue.empty():
event = self._eventQueue.get_nowait()
# check if eventhandler wants to process event
if not event.type in self._eventHandlers:
continue
for cb in self._eventHandlers[event.type]:
cb(event)
|
<commit_before><commit_msg>Implement first prototype of EventSystem<commit_after>
|
import queue
class Event:
def __init__(self, eventType='', userParam=dict()):
self.type = eventType
self.userParam = userParam
class EventSystem:
def __init__(self):
self._eventQueue = queue.Queue()
self._eventHandlers = dict()
def registerEventHander(self, eventType, callback):
''' Register a handler to be called on the given event type.
eventType specifies the type of event the handler should process.
callback specifies the function that should be called on the event.
Its function header should look like "def myCallback(event):"
Returns the ID of the handler.
'''
if not eventType in self._eventHandlers:
self._eventHandlers[eventType] = []
handlerID = len(self._eventHandlers[eventType])
self._eventHandlers[eventType].append(callback)
return handlerID
def unregisterEventHandler(self, eventType, handlerID):
''' Unregister a handler, so it won't be called on the specified event.
eventType specifies the type of event the handler should process.
handlerID specifies the ID of the handler, which should be unregistered.
The ID was returned by the corresponding register-function.
Returns True on success, else False.
'''
if not eventType in self._eventHandlers:
return False
if handlerID >= len(self._eventHandlers[eventType]):
return False
self._eventHandlers[eventType].pop(handlerID)
return True
def createEvent(self, event):
self._eventQueue.put_nowait(event)
def processEvents(self):
while not self._eventQueue.empty():
event = self._eventQueue.get_nowait()
# check if eventhandler wants to process event
if not event.type in self._eventHandlers:
continue
for cb in self._eventHandlers[event.type]:
cb(event)
|
Implement first prototype of EventSystemimport queue
class Event:
def __init__(self, eventType='', userParam=dict()):
self.type = eventType
self.userParam = userParam
class EventSystem:
def __init__(self):
self._eventQueue = queue.Queue()
self._eventHandlers = dict()
def registerEventHander(self, eventType, callback):
''' Register a handler to be called on the given event type.
eventType specifies the type of event the handler should process.
callback specifies the function that should be called on the event.
Its function header should look like "def myCallback(event):"
Returns the ID of the handler.
'''
if not eventType in self._eventHandlers:
self._eventHandlers[eventType] = []
handlerID = len(self._eventHandlers[eventType])
self._eventHandlers[eventType].append(callback)
return handlerID
def unregisterEventHandler(self, eventType, handlerID):
''' Unregister a handler, so it won't be called on the specified event.
eventType specifies the type of event the handler should process.
handlerID specifies the ID of the handler, which should be unregistered.
The ID was returned by the corresponding register-function.
Returns True on success, else False.
'''
if not eventType in self._eventHandlers:
return False
if handlerID >= len(self._eventHandlers[eventType]):
return False
self._eventHandlers[eventType].pop(handlerID)
return True
def createEvent(self, event):
self._eventQueue.put_nowait(event)
def processEvents(self):
while not self._eventQueue.empty():
event = self._eventQueue.get_nowait()
# check if eventhandler wants to process event
if not event.type in self._eventHandlers:
continue
for cb in self._eventHandlers[event.type]:
cb(event)
|
<commit_before><commit_msg>Implement first prototype of EventSystem<commit_after>import queue
class Event:
def __init__(self, eventType='', userParam=dict()):
self.type = eventType
self.userParam = userParam
class EventSystem:
def __init__(self):
self._eventQueue = queue.Queue()
self._eventHandlers = dict()
def registerEventHander(self, eventType, callback):
''' Register a handler to be called on the given event type.
eventType specifies the type of event the handler should process.
callback specifies the function that should be called on the event.
Its function header should look like "def myCallback(event):"
Returns the ID of the handler.
'''
if not eventType in self._eventHandlers:
self._eventHandlers[eventType] = []
handlerID = len(self._eventHandlers[eventType])
self._eventHandlers[eventType].append(callback)
return handlerID
def unregisterEventHandler(self, eventType, handlerID):
''' Unregister a handler, so it won't be called on the specified event.
eventType specifies the type of event the handler should process.
handlerID specifies the ID of the handler, which should be unregistered.
The ID was returned by the corresponding register-function.
Returns True on success, else False.
'''
if not eventType in self._eventHandlers:
return False
if handlerID >= len(self._eventHandlers[eventType]):
return False
self._eventHandlers[eventType].pop(handlerID)
return True
def createEvent(self, event):
self._eventQueue.put_nowait(event)
def processEvents(self):
while not self._eventQueue.empty():
event = self._eventQueue.get_nowait()
# check if eventhandler wants to process event
if not event.type in self._eventHandlers:
continue
for cb in self._eventHandlers[event.type]:
cb(event)
|
|
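A short usage sketch for the prototype above — register a handler, queue an event, then drain the queue. The import path follows the file location (tead/event.py); the event name and payload are made up for the example, and the call uses registerEventHander exactly as spelled in the class:

from tead.event import Event, EventSystem

def on_door_opened(event):
    print('door opened by', event.userParam.get('who'))

es = EventSystem()
handler_id = es.registerEventHander('door_opened', on_door_opened)
es.createEvent(Event('door_opened', {'who': 'player'}))
es.processEvents()                                    # -> door opened by player
es.unregisterEventHandler('door_opened', handler_id)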
429a2fa0d24c4c56506d584b42a977b13874b9ec
|
test/test_live.py
|
test/test_live.py
|
# from unittest import TestCase
# from elasticsearch import Elasticsearch
# from query import Match, Range
#
#
# def get_docs(ret):
# return ret['hits']['hits']
#
#
# class TestLive(TestCase):
# es = None
#
# @staticmethod
# def query(query):
# return TestLive.es.search('test_index', 'test_doc', query)
#
# def setUp(self):
# TestLive.es = Elasticsearch(
# 'localhost',
# http_auth=('elastic', 'changeme'),
# port=9200
# )
#
# doc = {
# 'bool': True,
# 'number': 5,
# 'text': 'Lorum ipsum'
# }
# TestLive.es.index('test_index', 'test_doc', doc, id=1)
#
# def test_connection(self):
# ret = TestLive.es.ping()
# self.assertTrue(ret)
#
# def test_all(self):
# q = {}
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(type(ret), dict)
# self.assertIn('hits', ret)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_match(self):
# q = Match('text', 'lorum')
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_range(self):
# q = Range('number', {'gte': 10})
# ret = TestLive.query(q)
# self.assertEquals(len(get_docs(ret)), 0)
|
Add test with live database, disabled by default such that all tests complete when using master
|
Add test with live database, disabled by default such that all tests complete when using master
|
Python
|
mit
|
pietermarsman/elastic-queries
|
Add test with live database, disabled by default such that all tests complete when using master
|
# from unittest import TestCase
# from elasticsearch import Elasticsearch
# from query import Match, Range
#
#
# def get_docs(ret):
# return ret['hits']['hits']
#
#
# class TestLive(TestCase):
# es = None
#
# @staticmethod
# def query(query):
# return TestLive.es.search('test_index', 'test_doc', query)
#
# def setUp(self):
# TestLive.es = Elasticsearch(
# 'localhost',
# http_auth=('elastic', 'changeme'),
# port=9200
# )
#
# doc = {
# 'bool': True,
# 'number': 5,
# 'text': 'Lorum ipsum'
# }
# TestLive.es.index('test_index', 'test_doc', doc, id=1)
#
# def test_connection(self):
# ret = TestLive.es.ping()
# self.assertTrue(ret)
#
# def test_all(self):
# q = {}
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(type(ret), dict)
# self.assertIn('hits', ret)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_match(self):
# q = Match('text', 'lorum')
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_range(self):
# q = Range('number', {'gte': 10})
# ret = TestLive.query(q)
# self.assertEquals(len(get_docs(ret)), 0)
|
<commit_before><commit_msg>Add test with live database, disabled by default such that all tests complete when using master<commit_after>
|
# from unittest import TestCase
# from elasticsearch import Elasticsearch
# from query import Match, Range
#
#
# def get_docs(ret):
# return ret['hits']['hits']
#
#
# class TestLive(TestCase):
# es = None
#
# @staticmethod
# def query(query):
# return TestLive.es.search('test_index', 'test_doc', query)
#
# def setUp(self):
# TestLive.es = Elasticsearch(
# 'localhost',
# http_auth=('elastic', 'changeme'),
# port=9200
# )
#
# doc = {
# 'bool': True,
# 'number': 5,
# 'text': 'Lorum ipsum'
# }
# TestLive.es.index('test_index', 'test_doc', doc, id=1)
#
# def test_connection(self):
# ret = TestLive.es.ping()
# self.assertTrue(ret)
#
# def test_all(self):
# q = {}
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(type(ret), dict)
# self.assertIn('hits', ret)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_match(self):
# q = Match('text', 'lorum')
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_range(self):
# q = Range('number', {'gte': 10})
# ret = TestLive.query(q)
# self.assertEquals(len(get_docs(ret)), 0)
|
Add test with live database, disabled by default such that all tests complete when using master# from unittest import TestCase
# from elasticsearch import Elasticsearch
# from query import Match, Range
#
#
# def get_docs(ret):
# return ret['hits']['hits']
#
#
# class TestLive(TestCase):
# es = None
#
# @staticmethod
# def query(query):
# return TestLive.es.search('test_index', 'test_doc', query)
#
# def setUp(self):
# TestLive.es = Elasticsearch(
# 'localhost',
# http_auth=('elastic', 'changeme'),
# port=9200
# )
#
# doc = {
# 'bool': True,
# 'number': 5,
# 'text': 'Lorum ipsum'
# }
# TestLive.es.index('test_index', 'test_doc', doc, id=1)
#
# def test_connection(self):
# ret = TestLive.es.ping()
# self.assertTrue(ret)
#
# def test_all(self):
# q = {}
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(type(ret), dict)
# self.assertIn('hits', ret)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_match(self):
# q = Match('text', 'lorum')
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_range(self):
# q = Range('number', {'gte': 10})
# ret = TestLive.query(q)
# self.assertEquals(len(get_docs(ret)), 0)
|
<commit_before><commit_msg>Add test with live database, disabled by default such that all tests complete when using master<commit_after># from unittest import TestCase
# from elasticsearch import Elasticsearch
# from query import Match, Range
#
#
# def get_docs(ret):
# return ret['hits']['hits']
#
#
# class TestLive(TestCase):
# es = None
#
# @staticmethod
# def query(query):
# return TestLive.es.search('test_index', 'test_doc', query)
#
# def setUp(self):
# TestLive.es = Elasticsearch(
# 'localhost',
# http_auth=('elastic', 'changeme'),
# port=9200
# )
#
# doc = {
# 'bool': True,
# 'number': 5,
# 'text': 'Lorum ipsum'
# }
# TestLive.es.index('test_index', 'test_doc', doc, id=1)
#
# def test_connection(self):
# ret = TestLive.es.ping()
# self.assertTrue(ret)
#
# def test_all(self):
# q = {}
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(type(ret), dict)
# self.assertIn('hits', ret)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_match(self):
# q = Match('text', 'lorum')
# ret = TestLive.es.search('test_index', 'test_doc', q)
# self.assertEquals(len(get_docs(ret)), 1)
#
# def test_range(self):
# q = Range('number', {'gte': 10})
# ret = TestLive.query(q)
# self.assertEquals(len(get_docs(ret)), 0)
|
|
54942867218662dc1856faf6fda989ccadfda08d
|
test/test_otps.py
|
test/test_otps.py
|
from stompy.model.otps import read_otps
import six
six.moves.reload_module(read_otps)
modfile="data/DATA/Model_OR1km"
def test_read_otps():
times=np.arange( np.datetime64('2010-01-01 00:00'),
np.datetime64('2010-01-10 00:00'),
np.timedelta64(15,'m') )
pred_h,pred_u,pred_v=read_otps.tide_pred(modfile,lon=[235.25], lat=[44.5],
time=times)
if 0:
# Compare to NOAA:
# The comparison is not great - probably because this database has very few constituents, just
# M2, S2, N2, K2
from stompy.io.local import noaa_coops
cache_dir='cache'
        os.path.exists(cache_dir) or os.mkdir(cache_dir)
sb_tides=noaa_coops.coops_dataset_product(9435380,'water_level',
start_date=times[0],
end_date=times[-1],
days_per_request='M',
cache_dir=cache_dir)
from stompy import utils
plt.clf()
plt.plot(utils.to_datetime(times),pred_h,label='h')
water_level=sb_tides.water_level.isel(station=0)
water_level = water_level - water_level.mean()
plt.plot( utils.to_datetime(sb_tides.time),water_level, label='NOAA')
plt.gcf().autofmt_xdate()
plt.legend()
|
Add basic test for OTPS code
|
Add basic test for OTPS code
|
Python
|
mit
|
rustychris/stompy,rustychris/stompy
|
Add basic test for OTPS code
|
from stompy.model.otps import read_otps
import six
six.moves.reload_module(read_otps)
modfile="data/DATA/Model_OR1km"
def test_read_otps():
times=np.arange( np.datetime64('2010-01-01 00:00'),
np.datetime64('2010-01-10 00:00'),
np.timedelta64(15,'m') )
pred_h,pred_u,pred_v=read_otps.tide_pred(modfile,lon=[235.25], lat=[44.5],
time=times)
if 0:
# Compare to NOAA:
# The comparison is not great - probably because this database has very few constituents, just
# M2, S2, N2, K2
from stompy.io.local import noaa_coops
cache_dir='cache'
        os.path.exists(cache_dir) or os.mkdir(cache_dir)
sb_tides=noaa_coops.coops_dataset_product(9435380,'water_level',
start_date=times[0],
end_date=times[-1],
days_per_request='M',
cache_dir=cache_dir)
from stompy import utils
plt.clf()
plt.plot(utils.to_datetime(times),pred_h,label='h')
water_level=sb_tides.water_level.isel(station=0)
water_level = water_level - water_level.mean()
plt.plot( utils.to_datetime(sb_tides.time),water_level, label='NOAA')
plt.gcf().autofmt_xdate()
plt.legend()
|
<commit_before><commit_msg>Add basic test for OTPS code<commit_after>
|
from stompy.model.otps import read_otps
import six
six.moves.reload_module(read_otps)
modfile="data/DATA/Model_OR1km"
def test_read_otps():
times=np.arange( np.datetime64('2010-01-01 00:00'),
np.datetime64('2010-01-10 00:00'),
np.timedelta64(15,'m') )
pred_h,pred_u,pred_v=read_otps.tide_pred(modfile,lon=[235.25], lat=[44.5],
time=times)
if 0:
# Compare to NOAA:
# The comparison is not great - probably because this database has very few constituents, just
# M2, S2, N2, K2
from stompy.io.local import noaa_coops
cache_dir='cache'
        os.path.exists(cache_dir) or os.mkdir(cache_dir)
sb_tides=noaa_coops.coops_dataset_product(9435380,'water_level',
start_date=times[0],
end_date=times[-1],
days_per_request='M',
cache_dir=cache_dir)
from stompy import utils
plt.clf()
plt.plot(utils.to_datetime(times),pred_h,label='h')
water_level=sb_tides.water_level.isel(station=0)
water_level = water_level - water_level.mean()
plt.plot( utils.to_datetime(sb_tides.time),water_level, label='NOAA')
plt.gcf().autofmt_xdate()
plt.legend()
|
Add basic test for OTPS codefrom stompy.model.otps import read_otps
import six
six.moves.reload_module(read_otps)
modfile="data/DATA/Model_OR1km"
def test_read_otps():
times=np.arange( np.datetime64('2010-01-01 00:00'),
np.datetime64('2010-01-10 00:00'),
np.timedelta64(15,'m') )
pred_h,pred_u,pred_v=read_otps.tide_pred(modfile,lon=[235.25], lat=[44.5],
time=times)
if 0:
# Compare to NOAA:
# The comparison is not great - probably because this database has very few constituents, just
# M2, S2, N2, K2
from stompy.io.local import noaa_coops
cache_dir='cache'
        os.path.exists(cache_dir) or os.mkdir(cache_dir)
sb_tides=noaa_coops.coops_dataset_product(9435380,'water_level',
start_date=times[0],
end_date=times[-1],
days_per_request='M',
cache_dir=cache_dir)
from stompy import utils
plt.clf()
plt.plot(utils.to_datetime(times),pred_h,label='h')
water_level=sb_tides.water_level.isel(station=0)
water_level = water_level - water_level.mean()
plt.plot( utils.to_datetime(sb_tides.time),water_level, label='NOAA')
plt.gcf().autofmt_xdate()
plt.legend()
|
<commit_before><commit_msg>Add basic test for OTPS code<commit_after>from stompy.model.otps import read_otps
import six
six.moves.reload_module(read_otps)
modfile="data/DATA/Model_OR1km"
def test_read_otps():
times=np.arange( np.datetime64('2010-01-01 00:00'),
np.datetime64('2010-01-10 00:00'),
np.timedelta64(15,'m') )
pred_h,pred_u,pred_v=read_otps.tide_pred(modfile,lon=[235.25], lat=[44.5],
time=times)
if 0:
# Compare to NOAA:
# The comparison is not great - probably because this database has very few constituents, just
# M2, S2, N2, K2
from stompy.io.local import noaa_coops
cache_dir='cache'
        os.path.exists(cache_dir) or os.mkdir(cache_dir)
sb_tides=noaa_coops.coops_dataset_product(9435380,'water_level',
start_date=times[0],
end_date=times[-1],
days_per_request='M',
cache_dir=cache_dir)
from stompy import utils
plt.clf()
plt.plot(utils.to_datetime(times),pred_h,label='h')
water_level=sb_tides.water_level.isel(station=0)
water_level = water_level - water_level.mean()
plt.plot( utils.to_datetime(sb_tides.time),water_level, label='NOAA')
plt.gcf().autofmt_xdate()
plt.legend()
|
|
04337a036429e98edab7c2e5f17086a3ccfe263b
|
jsonsempai.py
|
jsonsempai.py
|
import sys
class SempaiLoader(object):
def __init__(self, *args):
print args
def find_module(self, fullname, path=None):
print 'finding', fullname, path
if fullname == 'simple':
return self
return None
sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple')
|
Add very simple module finder
|
Add very simple module finder
|
Python
|
mit
|
kragniz/json-sempai
|
Add very simple module finder
|
import sys
class SempaiLoader(object):
def __init__(self, *args):
print args
def find_module(self, fullname, path=None):
print 'finding', fullname, path
if fullname == 'simple':
return self
return None
sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple')
|
<commit_before><commit_msg>Add very simple module finder<commit_after>
|
import sys
class SempaiLoader(object):
def __init__(self, *args):
print args
def find_module(self, fullname, path=None):
print 'finding', fullname, path
if fullname == 'simple':
return self
return None
sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple')
|
Add very simple module finderimport sys
class SempaiLoader(object):
def __init__(self, *args):
print args
def find_module(self, fullname, path=None):
print 'finding', fullname, path
if fullname == 'simple':
return self
return None
sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple')
|
<commit_before><commit_msg>Add very simple module finder<commit_after>import sys
class SempaiLoader(object):
def __init__(self, *args):
print args
def find_module(self, fullname, path=None):
print 'finding', fullname, path
if fullname == 'simple':
return self
return None
sys.path_hooks.append(SempaiLoader)
sys.path.insert(0, 'simple')
|
|
fe7be655c261af07477fdf49959eff9609832127
|
tests/test_cli.py
|
tests/test_cli.py
|
from valohai_cli.cli import PluginCLI
def test_command_enumeration():
cli = PluginCLI()
assert 'init' in cli.list_commands(None)
assert cli.get_command(None, 'init')
|
Add test for root CLI
|
Add test for root CLI
|
Python
|
mit
|
valohai/valohai-cli
|
Add test for root CLI
|
from valohai_cli.cli import PluginCLI
def test_command_enumeration():
cli = PluginCLI()
assert 'init' in cli.list_commands(None)
assert cli.get_command(None, 'init')
|
<commit_before><commit_msg>Add test for root CLI<commit_after>
|
from valohai_cli.cli import PluginCLI
def test_command_enumeration():
cli = PluginCLI()
assert 'init' in cli.list_commands(None)
assert cli.get_command(None, 'init')
|
Add test for root CLIfrom valohai_cli.cli import PluginCLI
def test_command_enumeration():
cli = PluginCLI()
assert 'init' in cli.list_commands(None)
assert cli.get_command(None, 'init')
|
<commit_before><commit_msg>Add test for root CLI<commit_after>from valohai_cli.cli import PluginCLI
def test_command_enumeration():
cli = PluginCLI()
assert 'init' in cli.list_commands(None)
assert cli.get_command(None, 'init')
|
|
3b34008cbf659c34059ce783470edb69001d3584
|
problem145.py
|
problem145.py
|
#!/usr/bin/env python
"""
A solution for problem 145 from Project Euler.
https://projecteuler.net/problem=145
Some positive integers n have the property that the sum [ n + reverse(n) ] consists entirely of
odd (decimal) digits. For instance, 36 + 63 = 99 and 409 + 904 = 1313. We will call such numbers
reversible; so 36, 63, 409, and 904 are reversible. Leading zeroes are not allowed in either n
or reverse(n).
There are 120 reversible numbers below one-thousand.
How many reversible numbers are there below one-billion (109)?
"""
import time
from reversible import is_reversible
def problem145(max=1000):
count = 0
for number in xrange(1, max):
if is_reversible(number):
count += 1
return count
if __name__ == "__main__":
start = time.time()
count = problem145()
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
start = time.time()
count = problem145(max=10 ** 9)
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
|
Add a brute force solution.
|
Add a brute force solution.
It works on the small 1000 number test case but fails horribly on the
10 ** 9 case. Still worth saving before moving on.
|
Python
|
mit
|
smillet15/project-euler
|
Add a brute force solution.
It works on the small 1000 number test case but fails horribly on the
10 ** 9 case. Still worth saving before moving on.
|
#!/usr/bin/env python
"""
A solution for problem 145 from Project Euler.
https://projecteuler.net/problem=145
Some positive integers n have the property that the sum [ n + reverse(n) ] consists entirely of
odd (decimal) digits. For instance, 36 + 63 = 99 and 409 + 904 = 1313. We will call such numbers
reversible; so 36, 63, 409, and 904 are reversible. Leading zeroes are not allowed in either n
or reverse(n).
There are 120 reversible numbers below one-thousand.
How many reversible numbers are there below one-billion (109)?
"""
import time
from reversible import is_reversible
def problem145(max=1000):
count = 0
for number in xrange(1, max):
if is_reversible(number):
count += 1
return count
if __name__ == "__main__":
start = time.time()
count = problem145()
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
start = time.time()
count = problem145(max=10 ** 9)
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
|
<commit_before><commit_msg>Add a brute force solution.
It works on the small 1000 number test case but fails horribly on the
10 ** 9 case. Still worth saving before moving on.<commit_after>
|
#!/usr/bin/env python
"""
A solution for problem 145 from Project Euler.
https://projecteuler.net/problem=145
Some positive integers n have the property that the sum [ n + reverse(n) ] consists entirely of
odd (decimal) digits. For instance, 36 + 63 = 99 and 409 + 904 = 1313. We will call such numbers
reversible; so 36, 63, 409, and 904 are reversible. Leading zeroes are not allowed in either n
or reverse(n).
There are 120 reversible numbers below one-thousand.
How many reversible numbers are there below one-billion (109)?
"""
import time
from reversible import is_reversible
def problem145(max=1000):
count = 0
for number in xrange(1, max):
if is_reversible(number):
count += 1
return count
if __name__ == "__main__":
start = time.time()
count = problem145()
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
start = time.time()
count = problem145(max=10 ** 9)
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
|
Add a brute force solution.
It works on the small 1000 number test case but fails horribly on the
10 ** 9 case. Still worth saving before moving on.#!/usr/bin/env python
"""
A solution for problem 145 from Project Euler.
https://projecteuler.net/problem=145
Some positive integers n have the property that the sum [ n + reverse(n) ] consists entirely of
odd (decimal) digits. For instance, 36 + 63 = 99 and 409 + 904 = 1313. We will call such numbers
reversible; so 36, 63, 409, and 904 are reversible. Leading zeroes are not allowed in either n
or reverse(n).
There are 120 reversible numbers below one-thousand.
How many reversible numbers are there below one-billion (109)?
"""
import time
from reversible import is_reversible
def problem145(max=1000):
count = 0
for number in xrange(1, max):
if is_reversible(number):
count += 1
return count
if __name__ == "__main__":
start = time.time()
count = problem145()
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
start = time.time()
count = problem145(max=10 ** 9)
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
|
<commit_before><commit_msg>Add a brute force solution.
It works on the small 1000 number test case but fails horribly on the
10 ** 9 case. Still worth saving before moving on.<commit_after>#!/usr/bin/env python
"""
A solution for problem 145 from Project Euler.
https://projecteuler.net/problem=145
Some positive integers n have the property that the sum [ n + reverse(n) ] consists entirely of
odd (decimal) digits. For instance, 36 + 63 = 99 and 409 + 904 = 1313. We will call such numbers
reversible; so 36, 63, 409, and 904 are reversible. Leading zeroes are not allowed in either n
or reverse(n).
There are 120 reversible numbers below one-thousand.
How many reversible numbers are there below one-billion (109)?
"""
import time
from reversible import is_reversible
def problem145(max=1000):
count = 0
for number in xrange(1, max):
if is_reversible(number):
count += 1
return count
if __name__ == "__main__":
start = time.time()
count = problem145()
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
start = time.time()
count = problem145(max=10 ** 9)
end = time.time()
print "found %s reversible numbers under 1000 in %f seconds" % (count, end - start)
|
|
986888c882273a2f69c367641eaea84dd3a1791f
|
test/command_line/test_symmetry.py
|
test/command_line/test_symmetry.py
|
from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_symmetry(dials_regression, run_in_tmpdir):
"""Simple test to check that dials.symmetry completes"""
result = procrunner.run_process([
'dials.symmetry',
os.path.join(dials_regression, "xia2-28", "20_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "20_integrated.pickle"),
os.path.join(dials_regression, "xia2-28", "25_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "25_integrated.pickle")
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
Add command-line test for dials.symmetry
|
Add command-line test for dials.symmetry
|
Python
|
bsd-3-clause
|
dials/dials,dials/dials,dials/dials,dials/dials,dials/dials
|
Add command-line test for dials.symmetry
|
from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_symmetry(dials_regression, run_in_tmpdir):
"""Simple test to check that dials.symmetry completes"""
result = procrunner.run_process([
'dials.symmetry',
os.path.join(dials_regression, "xia2-28", "20_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "20_integrated.pickle"),
os.path.join(dials_regression, "xia2-28", "25_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "25_integrated.pickle")
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
<commit_before><commit_msg>Add command-line test for dials.symmetry<commit_after>
|
from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_symmetry(dials_regression, run_in_tmpdir):
"""Simple test to check that dials.symmetry completes"""
result = procrunner.run_process([
'dials.symmetry',
os.path.join(dials_regression, "xia2-28", "20_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "20_integrated.pickle"),
os.path.join(dials_regression, "xia2-28", "25_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "25_integrated.pickle")
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
Add command-line test for dials.symmetryfrom __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_symmetry(dials_regression, run_in_tmpdir):
"""Simple test to check that dials.symmetry completes"""
result = procrunner.run_process([
'dials.symmetry',
os.path.join(dials_regression, "xia2-28", "20_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "20_integrated.pickle"),
os.path.join(dials_regression, "xia2-28", "25_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "25_integrated.pickle")
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
<commit_before><commit_msg>Add command-line test for dials.symmetry<commit_after>from __future__ import absolute_import, division, print_function
import os
import pytest
import procrunner
def test_symmetry(dials_regression, run_in_tmpdir):
"""Simple test to check that dials.symmetry completes"""
result = procrunner.run_process([
'dials.symmetry',
os.path.join(dials_regression, "xia2-28", "20_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "20_integrated.pickle"),
os.path.join(dials_regression, "xia2-28", "25_integrated_experiments.json"),
os.path.join(dials_regression, "xia2-28", "25_integrated.pickle")
])
assert result['exitcode'] == 0
assert result['stderr'] == ''
assert os.path.exists("reindexed_reflections.pickle")
assert os.path.exists("reindexed_experiments.json")
|
|
ac531c43efdba1645585d88ee87eaf313c07748b
|
archive/archive_api/src/progress_manager.py
|
archive/archive_api/src/progress_manager.py
|
# -*- encoding: utf-8
import requests
class ProgressError(Exception):
"""Raised if we get an unexpected response from the progress service."""
pass
class ProgressManager:
"""
Handles requests to/from the progress service.
"""
def __init__(self, endpoint, sess=None):
self.endpoint = endpoint
self.sess = sess or requests.Session()
def create_request(self, upload_url, callback_url):
"""
Make a request to the progress service to start a new request.
Returns the ID of the new ingest.
"""
# The service expects to receive a JSON dictionary of the following
# form:
#
# {
# "uploadUrl": "...",
# "callbackUrl": "..."
# }
#
# Here "callbackUrl" is optional. If successful, the service returns
# a 202 Created and the new ID in the path parameter of the
# Location header.
#
data = {'uploadUrl': upload_url}
if callback_url is not None:
data['callbackUrl'] = callback_url
resp = self.sess.post(f'{self.endpoint}/progress', data=data)
print(resp.headers)
return 'foo'
|
Add an initial ProgressManager implementation
|
Add an initial ProgressManager implementation
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Add an initial ProgressManager implementation
|
# -*- encoding: utf-8
import requests
class ProgressError(Exception):
"""Raised if we get an unexpected response from the progress service."""
pass
class ProgressManager:
"""
Handles requests to/from the progress service.
"""
def __init__(self, endpoint, sess=None):
self.endpoint = endpoint
self.sess = sess or requests.Session()
def create_request(self, upload_url, callback_url):
"""
Make a request to the progress service to start a new request.
Returns the ID of the new ingest.
"""
# The service expects to receive a JSON dictionary of the following
# form:
#
# {
# "uploadUrl": "...",
# "callbackUrl": "..."
# }
#
# Here "callbackUrl" is optional. If successful, the service returns
# a 202 Created and the new ID in the path parameter of the
# Location header.
#
data = {'uploadUrl': upload_url}
if callback_url is not None:
data['callbackUrl'] = callback_url
resp = self.sess.post(f'{self.endpoint}/progress', data=data)
print(resp.headers)
return 'foo'
|
<commit_before><commit_msg>Add an initial ProgressManager implementation<commit_after>
|
# -*- encoding: utf-8
import requests
class ProgressError(Exception):
"""Raised if we get an unexpected response from the progress service."""
pass
class ProgressManager:
"""
Handles requests to/from the progress service.
"""
def __init__(self, endpoint, sess=None):
self.endpoint = endpoint
self.sess = sess or requests.Session()
def create_request(self, upload_url, callback_url):
"""
Make a request to the progress service to start a new request.
Returns the ID of the new ingest.
"""
# The service expects to receive a JSON dictionary of the following
# form:
#
# {
# "uploadUrl": "...",
# "callbackUrl": "..."
# }
#
# Here "callbackUrl" is optional. If successful, the service returns
# a 202 Created and the new ID in the path parameter of the
# Location header.
#
data = {'uploadUrl': upload_url}
if callback_url is not None:
data['callbackUrl'] = callback_url
resp = self.sess.post(f'{self.endpoint}/progress', data=data)
print(resp.headers)
return 'foo'
|
Add an initial ProgressManager implementation# -*- encoding: utf-8
import requests
class ProgressError(Exception):
"""Raised if we get an unexpected response from the progress service."""
pass
class ProgressManager:
"""
Handles requests to/from the progress service.
"""
def __init__(self, endpoint, sess=None):
self.endpoint = endpoint
self.sess = sess or requests.Session()
def create_request(self, upload_url, callback_url):
"""
Make a request to the progress service to start a new request.
Returns the ID of the new ingest.
"""
# The service expects to receive a JSON dictionary of the following
# form:
#
# {
# "uploadUrl": "...",
# "callbackUrl": "..."
# }
#
# Here "callbackUrl" is optional. If successful, the service returns
# a 202 Created and the new ID in the path parameter of the
# Location header.
#
data = {'uploadUrl': upload_url}
if callback_url is not None:
data['callbackUrl'] = callback_url
resp = self.sess.post(f'{self.endpoint}/progress', data=data)
print(resp.headers)
return 'foo'
|
<commit_before><commit_msg>Add an initial ProgressManager implementation<commit_after># -*- encoding: utf-8
import requests
class ProgressError(Exception):
"""Raised if we get an unexpected response from the progress service."""
pass
class ProgressManager:
"""
Handles requests to/from the progress service.
"""
def __init__(self, endpoint, sess=None):
self.endpoint = endpoint
self.sess = sess or requests.Session()
def create_request(self, upload_url, callback_url):
"""
Make a request to the progress service to start a new request.
Returns the ID of the new ingest.
"""
# The service expects to receive a JSON dictionary of the following
# form:
#
# {
# "uploadUrl": "...",
# "callbackUrl": "..."
# }
#
# Here "callbackUrl" is optional. If successful, the service returns
# a 202 Created and the new ID in the path parameter of the
# Location header.
#
data = {'uploadUrl': upload_url}
if callback_url is not None:
data['callbackUrl'] = callback_url
resp = self.sess.post(f'{self.endpoint}/progress', data=data)
print(resp.headers)
return 'foo'
|
|
fad6285f79a8681994c25261941be49d7e43617c
|
etk/extractors/language_identification_extractor.py
|
etk/extractors/language_identification_extractor.py
|
from typing import List
from etk.extraction import Extraction
from etk.extractor import Extractor, InputType
class LangaugeIdentificationExtractor(Extractor):
"""
Identify the language used in text, returning the identifier language using ISO 639-1 codes
Uses two libraries:
- https://github.com/davidjurgens/equilid
- https://github.com/saffsd/langid.py
TODO: define Enum to select which method to use.
TODO: define dictionary to translate ISO 639-3 to ISO 639-1 codes
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes, perhaps there is an online source that has this
"""
def __init__(self):
Extractor.__init__(self,
input_type=InputType.TEXT,
category="Text extractor",
name="Language Identification")
def extract(self, text: str, method: str) -> List[Extraction]:
"""
Args:
text: any text, can contain HTML
method: specifies which of the two algorithms to use
Returns: an extraction containing the language code used in the text.
Returns the empty list of the extractor fails to identify the language in the text.
"""
pass
|
Define API for language identification extractor.
|
Define API for language identification extractor.
|
Python
|
mit
|
usc-isi-i2/etk,usc-isi-i2/etk,usc-isi-i2/etk
|
Define API for language identification extractor.
|
from typing import List
from etk.extraction import Extraction
from etk.extractor import Extractor, InputType
class LangaugeIdentificationExtractor(Extractor):
"""
Identify the language used in text, returning the identifier language using ISO 639-1 codes
Uses two libraries:
- https://github.com/davidjurgens/equilid
- https://github.com/saffsd/langid.py
TODO: define Enum to select which method to use.
TODO: define dictionary to translate ISO 639-3 to ISO 639-1 codes
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes, perhaps there is an online source that has this
"""
def __init__(self):
Extractor.__init__(self,
input_type=InputType.TEXT,
category="Text extractor",
name="Language Identification")
def extract(self, text: str, method: str) -> List[Extraction]:
"""
Args:
text: any text, can contain HTML
method: specifies which of the two algorithms to use
Returns: an extraction containing the language code used in the text.
Returns the empty list of the extractor fails to identify the language in the text.
"""
pass
|
<commit_before><commit_msg>Define API for language identification extractor.<commit_after>
|
from typing import List
from etk.extraction import Extraction
from etk.extractor import Extractor, InputType
class LangaugeIdentificationExtractor(Extractor):
"""
Identify the language used in text, returning the identifier language using ISO 639-1 codes
Uses two libraries:
- https://github.com/davidjurgens/equilid
- https://github.com/saffsd/langid.py
TODO: define Enum to select which method to use.
TODO: define dictionary to translate ISO 639-3 to ISO 639-1 codes
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes, perhaps there is an online source that has this
"""
def __init__(self):
Extractor.__init__(self,
input_type=InputType.TEXT,
category="Text extractor",
name="Language Identification")
def extract(self, text: str, method: str) -> List[Extraction]:
"""
Args:
text: any text, can contain HTML
method: specifies which of the two algorithms to use
Returns: an extraction containing the language code used in the text.
Returns the empty list of the extractor fails to identify the language in the text.
"""
pass
|
Define API for language identification extractor.from typing import List
from etk.extraction import Extraction
from etk.extractor import Extractor, InputType
class LangaugeIdentificationExtractor(Extractor):
"""
Identify the language used in text, returning the identifier language using ISO 639-1 codes
Uses two libraries:
- https://github.com/davidjurgens/equilid
- https://github.com/saffsd/langid.py
TODO: define Enum to select which method to use.
TODO: define dictionary to translate ISO 639-3 to ISO 639-1 codes
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes, perhaps there is an online source that has this
"""
def __init__(self):
Extractor.__init__(self,
input_type=InputType.TEXT,
category="Text extractor",
name="Language Identification")
def extract(self, text: str, method: str) -> List[Extraction]:
"""
Args:
text: any text, can contain HTML
method: specifies which of the two algorithms to use
Returns: an extraction containing the language code used in the text.
Returns the empty list of the extractor fails to identify the language in the text.
"""
pass
|
<commit_before><commit_msg>Define API for language identification extractor.<commit_after>from typing import List
from etk.extraction import Extraction
from etk.extractor import Extractor, InputType
class LangaugeIdentificationExtractor(Extractor):
"""
Identify the language used in text, returning the identifier language using ISO 639-1 codes
Uses two libraries:
- https://github.com/davidjurgens/equilid
- https://github.com/saffsd/langid.py
TODO: define Enum to select which method to use.
TODO: define dictionary to translate ISO 639-3 to ISO 639-1 codes
https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes, perhaps there is an online source that has this
"""
def __init__(self):
Extractor.__init__(self,
input_type=InputType.TEXT,
category="Text extractor",
name="Language Identification")
def extract(self, text: str, method: str) -> List[Extraction]:
"""
Args:
text: any text, can contain HTML
method: specifies which of the two algorithms to use
Returns: an extraction containing the language code used in the text.
Returns the empty list of the extractor fails to identify the language in the text.
"""
pass
|
|
cd5a846e82ec023a1d69ff832924493e8dfc3068
|
tests/api_tests/base/test_utils.py
|
tests/api_tests/base/test_utils.py
|
from nose.tools import * # noqa
from api.base import utils as api_utils
from tests.base import ApiTestCase
class DummyAttrAttr(object):
def __init__(self, key):
self.key = key
class DummyAttr(object):
def __init__(self, key):
self.key = key
self.attr_attr = DummyAttrAttr(key.upper())
class Dummy(object):
def __init__(self, key):
self.attr = DummyAttr(key)
self.hash = {
'bang': DummyAttr(key)
}
class APIUtilsTestCase(ApiTestCase):
def setUp(self):
self.dummy = Dummy('foo')
self.data = {
'foo': {
'bar': 'baz'
}
}
def test_deep_get_object(self):
attr = api_utils.deep_get(self.dummy, 'attr')
assert_true(isinstance(attr, DummyAttr))
assert_equal(attr.key, 'foo')
def test_deep_get_object_multiple_depth(self):
attr_attr = api_utils.deep_get(self.dummy, 'attr.attr_attr')
assert_true(isinstance(attr_attr, DummyAttrAttr))
assert_equal(attr_attr.key, 'FOO')
def test_deep_get_dict(self):
foo = api_utils.deep_get(self.data, 'foo')
assert_true(isinstance(foo, dict))
assert_equal(foo, {
'bar': 'baz'
})
def test_deep_get_dict_multiple_depth(self):
bar = api_utils.deep_get(self.data, 'foo.bar')
assert_true(isinstance(bar, str))
assert_equal(bar, 'baz')
def test_deep_get_object_and_dict(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.attr_attr')
assert_true(isinstance(hash_bang_attr, DummyAttrAttr))
assert_equal(hash_bang_attr.key, 'FOO')
def test_deep_get_key_not_found(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.baz')
assert_equal(hash_bang_attr, None)
def test_soft_get_object(self):
attr = api_utils.soft_get(self.dummy, 'attr')
assert_equal(attr.key, 'foo')
def test_soft_get_object_not_found(self):
bat = api_utils.soft_get(self.dummy, 'bat')
assert_equal(bat, None)
def test_soft_get_dict(self):
foo = api_utils.soft_get(self.data, 'foo')
assert_equal(foo, {
'bar': 'baz'
})
def test_soft_get_dict_not_found(self):
bat = api_utils.soft_get(self.data, 'bat')
assert_equal(bat, None)
|
Add unit tests for deep_get and soft_get api utils
|
Add unit tests for deep_get and soft_get api utils
|
Python
|
apache-2.0
|
mluo613/osf.io,kwierman/osf.io,leb2dg/osf.io,samchrisinger/osf.io,Nesiehr/osf.io,Nesiehr/osf.io,rdhyee/osf.io,laurenrevere/osf.io,kch8qx/osf.io,zamattiac/osf.io,hmoco/osf.io,asanfilippo7/osf.io,zachjanicki/osf.io,hmoco/osf.io,brandonPurvis/osf.io,leb2dg/osf.io,GageGaskins/osf.io,kch8qx/osf.io,abought/osf.io,pattisdr/osf.io,abought/osf.io,brandonPurvis/osf.io,billyhunt/osf.io,kwierman/osf.io,Ghalko/osf.io,TomHeatwole/osf.io,zachjanicki/osf.io,hmoco/osf.io,amyshi188/osf.io,baylee-d/osf.io,danielneis/osf.io,GageGaskins/osf.io,billyhunt/osf.io,aaxelb/osf.io,icereval/osf.io,chrisseto/osf.io,samanehsan/osf.io,zachjanicki/osf.io,danielneis/osf.io,monikagrabowska/osf.io,mluke93/osf.io,mfraezz/osf.io,CenterForOpenScience/osf.io,wearpants/osf.io,asanfilippo7/osf.io,laurenrevere/osf.io,cslzchen/osf.io,felliott/osf.io,erinspace/osf.io,Johnetordoff/osf.io,DanielSBrown/osf.io,alexschiller/osf.io,emetsger/osf.io,icereval/osf.io,ticklemepierce/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,caseyrollins/osf.io,RomanZWang/osf.io,crcresearch/osf.io,samchrisinger/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,mluke93/osf.io,mattclark/osf.io,zamattiac/osf.io,binoculars/osf.io,RomanZWang/osf.io,laurenrevere/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,rdhyee/osf.io,crcresearch/osf.io,sloria/osf.io,billyhunt/osf.io,aaxelb/osf.io,acshi/osf.io,KAsante95/osf.io,asanfilippo7/osf.io,binoculars/osf.io,KAsante95/osf.io,GageGaskins/osf.io,mfraezz/osf.io,mluo613/osf.io,RomanZWang/osf.io,wearpants/osf.io,acshi/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,amyshi188/osf.io,erinspace/osf.io,ticklemepierce/osf.io,saradbowman/osf.io,HalcyonChimera/osf.io,baylee-d/osf.io,crcresearch/osf.io,kch8qx/osf.io,mluo613/osf.io,jnayak1/osf.io,leb2dg/osf.io,acshi/osf.io,doublebits/osf.io,samanehsan/osf.io,brianjgeiger/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,chrisseto/osf.io,HalcyonChimera/osf.io,cwisecarver/osf.io,baylee-d/osf.io,emetsger/osf.io,aaxelb/osf.io,zamattiac/osf.io,mattclark/osf.io,chennan47/osf.io,adlius/osf.io,danielneis/osf.io,jnayak1/osf.io,mluo613/osf.io,kch8qx/osf.io,alexschiller/osf.io,erinspace/osf.io,cslzchen/osf.io,mluke93/osf.io,CenterForOpenScience/osf.io,amyshi188/osf.io,brandonPurvis/osf.io,SSJohns/osf.io,SSJohns/osf.io,felliott/osf.io,jnayak1/osf.io,monikagrabowska/osf.io,abought/osf.io,sloria/osf.io,felliott/osf.io,monikagrabowska/osf.io,acshi/osf.io,rdhyee/osf.io,Ghalko/osf.io,zachjanicki/osf.io,alexschiller/osf.io,chrisseto/osf.io,TomBaxter/osf.io,KAsante95/osf.io,wearpants/osf.io,adlius/osf.io,brandonPurvis/osf.io,samanehsan/osf.io,kwierman/osf.io,zamattiac/osf.io,emetsger/osf.io,caneruguz/osf.io,monikagrabowska/osf.io,abought/osf.io,ticklemepierce/osf.io,TomBaxter/osf.io,cwisecarver/osf.io,mluke93/osf.io,mattclark/osf.io,samchrisinger/osf.io,cwisecarver/osf.io,KAsante95/osf.io,Johnetordoff/osf.io,alexschiller/osf.io,samchrisinger/osf.io,GageGaskins/osf.io,HalcyonChimera/osf.io,acshi/osf.io,Ghalko/osf.io,amyshi188/osf.io,cwisecarver/osf.io,jnayak1/osf.io,DanielSBrown/osf.io,hmoco/osf.io,emetsger/osf.io,cslzchen/osf.io,mfraezz/osf.io,felliott/osf.io,Ghalko/osf.io,monikagrabowska/osf.io,mluo613/osf.io,GageGaskins/osf.io,doublebits/osf.io,chrisseto/osf.io,RomanZWang/osf.io,doublebits/osf.io,sloria/osf.io,pattisdr/osf.io,caseyrollins/osf.io,alexschiller/osf.io,danielneis/osf.io,kch8qx/osf.io,Nesiehr/osf.io,asanfilippo7/osf.io,kwierman/osf.io,billyhunt/osf.io,SSJohns/osf.io,SSJohns/osf.io,CenterForOpenScience/osf.io,samanehsan/osf.io,TomHeatwole/osf.io,brandonPurvis/osf.io,RomanZWang/osf.io,doublebits/osf.i
o,TomHeatwole/osf.io,chennan47/osf.io,chennan47/osf.io,wearpants/osf.io,saradbowman/osf.io,adlius/osf.io,Johnetordoff/osf.io,rdhyee/osf.io,ticklemepierce/osf.io,DanielSBrown/osf.io,CenterForOpenScience/osf.io,billyhunt/osf.io,icereval/osf.io,aaxelb/osf.io,Johnetordoff/osf.io,adlius/osf.io,doublebits/osf.io,binoculars/osf.io,KAsante95/osf.io,caneruguz/osf.io,caneruguz/osf.io,TomHeatwole/osf.io,mfraezz/osf.io,DanielSBrown/osf.io
|
Add unit tests for deep_get and soft_get api utils
|
from nose.tools import * # noqa
from api.base import utils as api_utils
from tests.base import ApiTestCase
class DummyAttrAttr(object):
def __init__(self, key):
self.key = key
class DummyAttr(object):
def __init__(self, key):
self.key = key
self.attr_attr = DummyAttrAttr(key.upper())
class Dummy(object):
def __init__(self, key):
self.attr = DummyAttr(key)
self.hash = {
'bang': DummyAttr(key)
}
class APIUtilsTestCase(ApiTestCase):
def setUp(self):
self.dummy = Dummy('foo')
self.data = {
'foo': {
'bar': 'baz'
}
}
def test_deep_get_object(self):
attr = api_utils.deep_get(self.dummy, 'attr')
assert_true(isinstance(attr, DummyAttr))
assert_equal(attr.key, 'foo')
def test_deep_get_object_multiple_depth(self):
attr_attr = api_utils.deep_get(self.dummy, 'attr.attr_attr')
assert_true(isinstance(attr_attr, DummyAttrAttr))
assert_equal(attr_attr.key, 'FOO')
def test_deep_get_dict(self):
foo = api_utils.deep_get(self.data, 'foo')
assert_true(isinstance(foo, dict))
assert_equal(foo, {
'bar': 'baz'
})
def test_deep_get_dict_multiple_depth(self):
bar = api_utils.deep_get(self.data, 'foo.bar')
assert_true(isinstance(bar, str))
assert_equal(bar, 'baz')
def test_deep_get_object_and_dict(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.attr_attr')
assert_true(isinstance(hash_bang_attr, DummyAttrAttr))
assert_equal(hash_bang_attr.key, 'FOO')
def test_deep_get_key_not_found(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.baz')
assert_equal(hash_bang_attr, None)
def test_soft_get_object(self):
attr = api_utils.soft_get(self.dummy, 'attr')
assert_equal(attr.key, 'foo')
def test_soft_get_object_not_found(self):
bat = api_utils.soft_get(self.dummy, 'bat')
assert_equal(bat, None)
def test_soft_get_dict(self):
foo = api_utils.soft_get(self.data, 'foo')
assert_equal(foo, {
'bar': 'baz'
})
def test_soft_get_dict_not_found(self):
bat = api_utils.soft_get(self.data, 'bat')
assert_equal(bat, None)
|
<commit_before><commit_msg>Add unit tests for deep_get and soft_get api utils<commit_after>
|
from nose.tools import * # noqa
from api.base import utils as api_utils
from tests.base import ApiTestCase
class DummyAttrAttr(object):
def __init__(self, key):
self.key = key
class DummyAttr(object):
def __init__(self, key):
self.key = key
self.attr_attr = DummyAttrAttr(key.upper())
class Dummy(object):
def __init__(self, key):
self.attr = DummyAttr(key)
self.hash = {
'bang': DummyAttr(key)
}
class APIUtilsTestCase(ApiTestCase):
def setUp(self):
self.dummy = Dummy('foo')
self.data = {
'foo': {
'bar': 'baz'
}
}
def test_deep_get_object(self):
attr = api_utils.deep_get(self.dummy, 'attr')
assert_true(isinstance(attr, DummyAttr))
assert_equal(attr.key, 'foo')
def test_deep_get_object_multiple_depth(self):
attr_attr = api_utils.deep_get(self.dummy, 'attr.attr_attr')
assert_true(isinstance(attr_attr, DummyAttrAttr))
assert_equal(attr_attr.key, 'FOO')
def test_deep_get_dict(self):
foo = api_utils.deep_get(self.data, 'foo')
assert_true(isinstance(foo, dict))
assert_equal(foo, {
'bar': 'baz'
})
def test_deep_get_dict_multiple_depth(self):
bar = api_utils.deep_get(self.data, 'foo.bar')
assert_true(isinstance(bar, str))
assert_equal(bar, 'baz')
def test_deep_get_object_and_dict(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.attr_attr')
assert_true(isinstance(hash_bang_attr, DummyAttrAttr))
assert_equal(hash_bang_attr.key, 'FOO')
def test_deep_get_key_not_found(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.baz')
assert_equal(hash_bang_attr, None)
def test_soft_get_object(self):
attr = api_utils.soft_get(self.dummy, 'attr')
assert_equal(attr.key, 'foo')
def test_soft_get_object_not_found(self):
bat = api_utils.soft_get(self.dummy, 'bat')
assert_equal(bat, None)
def test_soft_get_dict(self):
foo = api_utils.soft_get(self.data, 'foo')
assert_equal(foo, {
'bar': 'baz'
})
def test_soft_get_dict_not_found(self):
bat = api_utils.soft_get(self.data, 'bat')
assert_equal(bat, None)
|
Add unit tests for deep_get and soft_get api utilsfrom nose.tools import * # noqa
from api.base import utils as api_utils
from tests.base import ApiTestCase
class DummyAttrAttr(object):
def __init__(self, key):
self.key = key
class DummyAttr(object):
def __init__(self, key):
self.key = key
self.attr_attr = DummyAttrAttr(key.upper())
class Dummy(object):
def __init__(self, key):
self.attr = DummyAttr(key)
self.hash = {
'bang': DummyAttr(key)
}
class APIUtilsTestCase(ApiTestCase):
def setUp(self):
self.dummy = Dummy('foo')
self.data = {
'foo': {
'bar': 'baz'
}
}
def test_deep_get_object(self):
attr = api_utils.deep_get(self.dummy, 'attr')
assert_true(isinstance(attr, DummyAttr))
assert_equal(attr.key, 'foo')
def test_deep_get_object_multiple_depth(self):
attr_attr = api_utils.deep_get(self.dummy, 'attr.attr_attr')
assert_true(isinstance(attr_attr, DummyAttrAttr))
assert_equal(attr_attr.key, 'FOO')
def test_deep_get_dict(self):
foo = api_utils.deep_get(self.data, 'foo')
assert_true(isinstance(foo, dict))
assert_equal(foo, {
'bar': 'baz'
})
def test_deep_get_dict_multiple_depth(self):
bar = api_utils.deep_get(self.data, 'foo.bar')
assert_true(isinstance(bar, str))
assert_equal(bar, 'baz')
def test_deep_get_object_and_dict(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.attr_attr')
assert_true(isinstance(hash_bang_attr, DummyAttrAttr))
assert_equal(hash_bang_attr.key, 'FOO')
def test_deep_get_key_not_found(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.baz')
assert_equal(hash_bang_attr, None)
def test_soft_get_object(self):
attr = api_utils.soft_get(self.dummy, 'attr')
assert_equal(attr.key, 'foo')
def test_soft_get_object_not_found(self):
bat = api_utils.soft_get(self.dummy, 'bat')
assert_equal(bat, None)
def test_soft_get_dict(self):
foo = api_utils.soft_get(self.data, 'foo')
assert_equal(foo, {
'bar': 'baz'
})
def test_soft_get_dict_not_found(self):
bat = api_utils.soft_get(self.data, 'bat')
assert_equal(bat, None)
|
<commit_before><commit_msg>Add unit tests for deep_get and soft_get api utils<commit_after>from nose.tools import * # noqa
from api.base import utils as api_utils
from tests.base import ApiTestCase
class DummyAttrAttr(object):
def __init__(self, key):
self.key = key
class DummyAttr(object):
def __init__(self, key):
self.key = key
self.attr_attr = DummyAttrAttr(key.upper())
class Dummy(object):
def __init__(self, key):
self.attr = DummyAttr(key)
self.hash = {
'bang': DummyAttr(key)
}
class APIUtilsTestCase(ApiTestCase):
def setUp(self):
self.dummy = Dummy('foo')
self.data = {
'foo': {
'bar': 'baz'
}
}
def test_deep_get_object(self):
attr = api_utils.deep_get(self.dummy, 'attr')
assert_true(isinstance(attr, DummyAttr))
assert_equal(attr.key, 'foo')
def test_deep_get_object_multiple_depth(self):
attr_attr = api_utils.deep_get(self.dummy, 'attr.attr_attr')
assert_true(isinstance(attr_attr, DummyAttrAttr))
assert_equal(attr_attr.key, 'FOO')
def test_deep_get_dict(self):
foo = api_utils.deep_get(self.data, 'foo')
assert_true(isinstance(foo, dict))
assert_equal(foo, {
'bar': 'baz'
})
def test_deep_get_dict_multiple_depth(self):
bar = api_utils.deep_get(self.data, 'foo.bar')
assert_true(isinstance(bar, str))
assert_equal(bar, 'baz')
def test_deep_get_object_and_dict(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.attr_attr')
assert_true(isinstance(hash_bang_attr, DummyAttrAttr))
assert_equal(hash_bang_attr.key, 'FOO')
def test_deep_get_key_not_found(self):
hash_bang_attr = api_utils.deep_get(self.dummy, 'hash.bang.baz')
assert_equal(hash_bang_attr, None)
def test_soft_get_object(self):
attr = api_utils.soft_get(self.dummy, 'attr')
assert_equal(attr.key, 'foo')
def test_soft_get_object_not_found(self):
bat = api_utils.soft_get(self.dummy, 'bat')
assert_equal(bat, None)
def test_soft_get_dict(self):
foo = api_utils.soft_get(self.data, 'foo')
assert_equal(foo, {
'bar': 'baz'
})
def test_soft_get_dict_not_found(self):
bat = api_utils.soft_get(self.data, 'bat')
assert_equal(bat, None)
|
|
c0bae380d83283541860bbd709425e653678a3cf
|
txircd/modules/ircv3/servertime.py
|
txircd/modules/ircv3/servertime.py
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ServerTime(ModuleData):
implements(IPlugin, IModuleData)
name = "ServerTime"
def actions(self):
return [ ("capabilitylist", 1, self.addCapability) ]
def load(self):
if "unloading-server-time" in self.ircd.dataCache:
del self.ircd.dataCache["unloading-server-time"]
return
if "cap-add" in self.ircd.functionCache:
self.ircd.functionCache["cap-add"]("server-time")
def unload(self):
self.ircd.dataCache["unloading-server-time"] = True
def fullUnload(self):
del self.ircd.dataCache["unloading-server-time"]
if "cap-del" in self.ircd.functionCache:
self.ircd.functionCache["cap-del"]("server-time")
def addCapability(self, user, capList):
capList.append("server-time")
serverTime = ServerTime()
|
Implement server-time; this just enables server-time to be used by other modules
|
Implement server-time; this just enables server-time to be used by other modules
|
Python
|
bsd-3-clause
|
ElementalAlchemist/txircd,Heufneutje/txircd
|
Implement server-time; this just enables server-time to be used by other modules
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ServerTime(ModuleData):
implements(IPlugin, IModuleData)
name = "ServerTime"
def actions(self):
return [ ("capabilitylist", 1, self.addCapability) ]
def load(self):
if "unloading-server-time" in self.ircd.dataCache:
del self.ircd.dataCache["unloading-server-time"]
return
if "cap-add" in self.ircd.functionCache:
self.ircd.functionCache["cap-add"]("server-time")
def unload(self):
self.ircd.dataCache["unloading-server-time"] = True
def fullUnload(self):
del self.ircd.dataCache["unloading-server-time"]
if "cap-del" in self.ircd.functionCache:
self.ircd.functionCache["cap-del"]("server-time")
def addCapability(self, user, capList):
capList.append("server-time")
serverTime = ServerTime()
|
<commit_before><commit_msg>Implement server-time; this just enables server-time to be used by other modules<commit_after>
|
from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ServerTime(ModuleData):
implements(IPlugin, IModuleData)
name = "ServerTime"
def actions(self):
return [ ("capabilitylist", 1, self.addCapability) ]
def load(self):
if "unloading-server-time" in self.ircd.dataCache:
del self.ircd.dataCache["unloading-server-time"]
return
if "cap-add" in self.ircd.functionCache:
self.ircd.functionCache["cap-add"]("server-time")
def unload(self):
self.ircd.dataCache["unloading-server-time"] = True
def fullUnload(self):
del self.ircd.dataCache["unloading-server-time"]
if "cap-del" in self.ircd.functionCache:
self.ircd.functionCache["cap-del"]("server-time")
def addCapability(self, user, capList):
capList.append("server-time")
serverTime = ServerTime()
|
Implement server-time; this just enables server-time to be used by other modulesfrom twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ServerTime(ModuleData):
implements(IPlugin, IModuleData)
name = "ServerTime"
def actions(self):
return [ ("capabilitylist", 1, self.addCapability) ]
def load(self):
if "unloading-server-time" in self.ircd.dataCache:
del self.ircd.dataCache["unloading-server-time"]
return
if "cap-add" in self.ircd.functionCache:
self.ircd.functionCache["cap-add"]("server-time")
def unload(self):
self.ircd.dataCache["unloading-server-time"] = True
def fullUnload(self):
del self.ircd.dataCache["unloading-server-time"]
if "cap-del" in self.ircd.functionCache:
self.ircd.functionCache["cap-del"]("server-time")
def addCapability(self, user, capList):
capList.append("server-time")
serverTime = ServerTime()
|
<commit_before><commit_msg>Implement server-time; this just enables server-time to be used by other modules<commit_after>from twisted.plugin import IPlugin
from txircd.module_interface import IModuleData, ModuleData
from zope.interface import implements
class ServerTime(ModuleData):
implements(IPlugin, IModuleData)
name = "ServerTime"
def actions(self):
return [ ("capabilitylist", 1, self.addCapability) ]
def load(self):
if "unloading-server-time" in self.ircd.dataCache:
del self.ircd.dataCache["unloading-server-time"]
return
if "cap-add" in self.ircd.functionCache:
self.ircd.functionCache["cap-add"]("server-time")
def unload(self):
self.ircd.dataCache["unloading-server-time"] = True
def fullUnload(self):
del self.ircd.dataCache["unloading-server-time"]
if "cap-del" in self.ircd.functionCache:
self.ircd.functionCache["cap-del"]("server-time")
def addCapability(self, user, capList):
capList.append("server-time")
serverTime = ServerTime()
|